diff --git a/.gitattributes b/.gitattributes
index 92cdc6e8fd567806b3f60c87a7e5c5303171b071..6a339e16e160192c8fa588a675a3cb925c05c904 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -340,3 +340,5 @@ parrot/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diopha
parrot/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+parrot/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+parrot/lib/libcrypto.so.3 filter=lfs diff=lfs merge=lfs -text
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_common.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..4011d425cc4afea3c7ee8937526b13f1f92b0850
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_common.py
@@ -0,0 +1,5 @@
+from collections import namedtuple
+
+
+ConfidenceInterval = namedtuple("ConfidenceInterval", ["low", "high"])
+ConfidenceInterval.__doc__ = "Class for confidence intervals."
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi b/llava_next/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..1006385a43179478a9a4a32ae5f825aa5b8b35c4
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_qmc_cy.pyi
@@ -0,0 +1,54 @@
+import numpy as np
+from scipy._lib._util import DecimalNumber, IntNumber
+
+
+def _cy_wrapper_centered_discrepancy(
+ sample: np.ndarray,
+ iterative: bool,
+ workers: IntNumber,
+) -> float: ...
+
+
+def _cy_wrapper_wrap_around_discrepancy(
+ sample: np.ndarray,
+ iterative: bool,
+ workers: IntNumber,
+) -> float: ...
+
+
+def _cy_wrapper_mixture_discrepancy(
+ sample: np.ndarray,
+ iterative: bool,
+ workers: IntNumber,
+) -> float: ...
+
+
+def _cy_wrapper_l2_star_discrepancy(
+ sample: np.ndarray,
+ iterative: bool,
+ workers: IntNumber,
+) -> float: ...
+
+
+def _cy_wrapper_update_discrepancy(
+ x_new_view: np.ndarray,
+ sample_view: np.ndarray,
+ initial_disc: DecimalNumber,
+) -> float: ...
+
+
+def _cy_van_der_corput(
+ n: IntNumber,
+ base: IntNumber,
+ start_index: IntNumber,
+ workers: IntNumber,
+) -> np.ndarray: ...
+
+
+def _cy_van_der_corput_scrambled(
+ n: IntNumber,
+ base: IntNumber,
+ start_index: IntNumber,
+ permutations: np.ndarray,
+ workers: IntNumber,
+) -> np.ndarray: ...
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py b/llava_next/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py
new file mode 100644
index 0000000000000000000000000000000000000000..c59be3100489a7b9a8b8290f9e00616096b3c633
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/scipy/stats/_wilcoxon.py
@@ -0,0 +1,246 @@
+import warnings
+import numpy as np
+
+from scipy import stats
+from ._stats_py import _get_pvalue, _rankdata, _SimpleNormal
+from . import _morestats
+from ._axis_nan_policy import _broadcast_arrays
+from ._hypotests import _get_wilcoxon_distr
+from scipy._lib._util import _lazywhere, _get_nan
+
+
+class WilcoxonDistribution:
+
+ def __init__(self, n):
+ n = np.asarray(n).astype(int, copy=False)
+ self.n = n
+ self._dists = {ni: _get_wilcoxon_distr(ni) for ni in np.unique(n)}
+
+ def _cdf1(self, k, n):
+ pmfs = self._dists[n]
+ return pmfs[:k + 1].sum()
+
+ def _cdf(self, k, n):
+ return np.vectorize(self._cdf1, otypes=[float])(k, n)
+
+ def _sf1(self, k, n):
+ pmfs = self._dists[n]
+ return pmfs[k:].sum()
+
+ def _sf(self, k, n):
+ return np.vectorize(self._sf1, otypes=[float])(k, n)
+
+ def mean(self):
+ return self.n * (self.n + 1) / 4
+
+ def _prep(self, k):
+ k = np.asarray(k).astype(int, copy=False)
+ mn = self.mean()
+ out = np.empty(k.shape, dtype=np.float64)
+ return k, mn, out
+
+ def cdf(self, k):
+ k, mn, out = self._prep(k)
+ return _lazywhere(k <= mn, (k, self.n), self._cdf,
+ f2=lambda k, n: 1 - self._sf(k+1, n))[()]
+
+ def sf(self, k):
+ k, mn, out = self._prep(k)
+ return _lazywhere(k <= mn, (k, self.n), self._sf,
+ f2=lambda k, n: 1 - self._cdf(k-1, n))[()]
+
+
+def _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis):
+
+ axis = np.asarray(axis)[()]
+ message = "`axis` must be an integer."
+ if not np.issubdtype(axis.dtype, np.integer) or axis.ndim != 0:
+ raise ValueError(message)
+
+ message = '`axis` must be compatible with the shape(s) of `x` (and `y`)'
+ try:
+ if y is None:
+ x = np.asarray(x)
+ d = x
+ else:
+ x, y = _broadcast_arrays((x, y), axis=axis)
+ d = x - y
+ d = np.moveaxis(d, axis, -1)
+ except np.AxisError as e:
+ raise ValueError(message) from e
+
+ message = "`x` and `y` must have the same length along `axis`."
+ if y is not None and x.shape[axis] != y.shape[axis]:
+ raise ValueError(message)
+
+ message = "`x` (and `y`, if provided) must be an array of real numbers."
+ if np.issubdtype(d.dtype, np.integer):
+ d = d.astype(np.float64)
+ if not np.issubdtype(d.dtype, np.floating):
+ raise ValueError(message)
+
+ zero_method = str(zero_method).lower()
+ zero_methods = {"wilcox", "pratt", "zsplit"}
+ message = f"`zero_method` must be one of {zero_methods}."
+ if zero_method not in zero_methods:
+ raise ValueError(message)
+
+ corrections = {True, False}
+ message = f"`correction` must be one of {corrections}."
+ if correction not in corrections:
+ raise ValueError(message)
+
+ alternative = str(alternative).lower()
+ alternatives = {"two-sided", "less", "greater"}
+ message = f"`alternative` must be one of {alternatives}."
+ if alternative not in alternatives:
+ raise ValueError(message)
+
+ if not isinstance(method, stats.PermutationMethod):
+ methods = {"auto", "approx", "exact"}
+ message = (f"`method` must be one of {methods} or "
+ "an instance of `stats.PermutationMethod`.")
+ if method not in methods:
+ raise ValueError(message)
+ output_z = True if method == 'approx' else False
+
+ # logic unchanged here for backward compatibility
+ n_zero = np.sum(d == 0, axis=-1)
+ has_zeros = np.any(n_zero > 0)
+ if method == "auto":
+ if d.shape[-1] <= 50 and not has_zeros:
+ method = "exact"
+ else:
+ method = "approx"
+
+ n_zero = np.sum(d == 0)
+ if n_zero > 0 and method == "exact":
+ method = "approx"
+ warnings.warn("Exact p-value calculation does not work if there are "
+ "zeros. Switching to normal approximation.",
+ stacklevel=2)
+
+ if (method == "approx" and zero_method in ["wilcox", "pratt"]
+ and n_zero == d.size and d.size > 0 and d.ndim == 1):
+ raise ValueError("zero_method 'wilcox' and 'pratt' do not "
+ "work if x - y is zero for all elements.")
+
+ if 0 < d.shape[-1] < 10 and method == "approx":
+ warnings.warn("Sample size too small for normal approximation.", stacklevel=2)
+
+ return d, zero_method, correction, alternative, method, axis, output_z
+
+
+def _wilcoxon_statistic(d, zero_method='wilcox'):
+
+ i_zeros = (d == 0)
+
+ if zero_method == 'wilcox':
+ # Wilcoxon's method for treating zeros was to remove them from
+ # the calculation. We do this by replacing 0s with NaNs, which
+ # are ignored anyway.
+ if not d.flags['WRITEABLE']:
+ d = d.copy()
+ d[i_zeros] = np.nan
+
+ i_nan = np.isnan(d)
+ n_nan = np.sum(i_nan, axis=-1)
+ count = d.shape[-1] - n_nan
+
+ r, t = _rankdata(abs(d), 'average', return_ties=True)
+
+ r_plus = np.sum((d > 0) * r, axis=-1)
+ r_minus = np.sum((d < 0) * r, axis=-1)
+
+ if zero_method == "zsplit":
+ # The "zero-split" method for treating zeros is to add half their contribution
+ # to r_plus and half to r_minus.
+ # See gh-2263 for the origin of this method.
+ r_zero_2 = np.sum(i_zeros * r, axis=-1) / 2
+ r_plus += r_zero_2
+ r_minus += r_zero_2
+
+ mn = count * (count + 1.) * 0.25
+ se = count * (count + 1.) * (2. * count + 1.)
+
+ if zero_method == "pratt":
+ # Pratt's method for treating zeros was just to modify the z-statistic.
+
+ # normal approximation needs to be adjusted, see Cureton (1967)
+ n_zero = i_zeros.sum(axis=-1)
+ mn -= n_zero * (n_zero + 1.) * 0.25
+ se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.)
+
+ # zeros are not to be included in tie-correction.
+ # any tie counts corresponding with zeros are in the 0th column
+ t[i_zeros.any(axis=-1), 0] = 0
+
+ tie_correct = (t**3 - t).sum(axis=-1)
+ se -= tie_correct/2
+ se = np.sqrt(se / 24)
+
+ z = (r_plus - mn) / se
+
+ return r_plus, r_minus, se, z, count
+
+
+def _correction_sign(z, alternative):
+ if alternative == 'greater':
+ return 1
+ elif alternative == 'less':
+ return -1
+ else:
+ return np.sign(z)
+
+
+def _wilcoxon_nd(x, y=None, zero_method='wilcox', correction=True,
+ alternative='two-sided', method='auto', axis=0):
+
+ temp = _wilcoxon_iv(x, y, zero_method, correction, alternative, method, axis)
+ d, zero_method, correction, alternative, method, axis, output_z = temp
+
+ if d.size == 0:
+ NaN = _get_nan(d)
+ res = _morestats.WilcoxonResult(statistic=NaN, pvalue=NaN)
+ if method == 'approx':
+ res.zstatistic = NaN
+ return res
+
+ r_plus, r_minus, se, z, count = _wilcoxon_statistic(d, zero_method)
+
+ if method == 'approx':
+ if correction:
+ sign = _correction_sign(z, alternative)
+ z -= sign * 0.5 / se
+ p = _get_pvalue(z, _SimpleNormal(), alternative, xp=np)
+ elif method == 'exact':
+ dist = WilcoxonDistribution(count)
+ # The null distribution in `dist` is exact only if there are no ties
+ # or zeros. If there are ties or zeros, the statistic can be non-
+ # integral, but the null distribution is only defined for integral
+ # values of the statistic. Therefore, we're conservative: round
+ # non-integral statistic up before computing CDF and down before
+ # computing SF. This preserves symmetry w.r.t. alternatives and
+ # order of the input arguments. See gh-19872.
+ if alternative == 'less':
+ p = dist.cdf(np.ceil(r_plus))
+ elif alternative == 'greater':
+ p = dist.sf(np.floor(r_plus))
+ else:
+ p = 2 * np.minimum(dist.sf(np.floor(r_plus)),
+ dist.cdf(np.ceil(r_plus)))
+ p = np.clip(p, 0, 1)
+ else: # `PermutationMethod` instance (already validated)
+ p = stats.permutation_test(
+ (d,), lambda d: _wilcoxon_statistic(d, zero_method)[0],
+ permutation_type='samples', **method._asdict(),
+ alternative=alternative, axis=-1).pvalue
+
+ # for backward compatibility...
+ statistic = np.minimum(r_plus, r_minus) if alternative=='two-sided' else r_plus
+ z = -np.abs(z) if (alternative == 'two-sided' and method == 'approx') else z
+
+ res = _morestats.WilcoxonResult(statistic=statistic, pvalue=p[()])
+ if output_z:
+ res.zstatistic = z[()]
+ return res
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/kde.py b/llava_next/lib/python3.10/site-packages/scipy/stats/kde.py
new file mode 100644
index 0000000000000000000000000000000000000000..4401da5a30f4452ab394232d3928493d0e3b77ec
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/scipy/stats/kde.py
@@ -0,0 +1,18 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.stats` namespace for importing the functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+
+__all__ = ["gaussian_kde"] # noqa: F822
+
+
+def __dir__():
+ return __all__
+
+
+def __getattr__(name):
+ return _sub_module_deprecation(sub_package="stats", module="kde",
+ private_modules=["_kde"], all=__all__,
+ attribute=name)
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12e582f83e0617edb4a7b2d11614d4ae70b7d8d8
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_binned_statistic.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9f74a985ed5f9d904f06f955dd15c3659d57d78
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_censored_data.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..87e5dc29d56e06e191805cd3814d3308480fb892
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_crosstab.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..603966e86177827375edcc56ca222cd2e689ac80
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_fast_gen_inversion.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29988901b8abe65b29d1cd3173a704157c9b3d08
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_mstats_extras.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91a2697c4bee25540035b04c3eacb447dcec2d49
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_odds_ratio.cpython-310.pyc differ
diff --git a/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0afd1a09afc67b1f2d7f5118d3122569f3108faf
Binary files /dev/null and b/llava_next/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_variation.cpython-310.pyc differ
diff --git a/parrot/lib/libcrypto.so.3 b/parrot/lib/libcrypto.so.3
new file mode 100644
index 0000000000000000000000000000000000000000..0fe7d1905f0dd65efe3e0d2be4d601464f474b32
--- /dev/null
+++ b/parrot/lib/libcrypto.so.3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f28833f99177db7caa0028e467f41fd67a566bca481a7a60da8453edfb3b5cf0
+size 5172040
diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d6f3bb41c3840c0eb9c32f2b180d106ab16c260
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__init__.py b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5919de3bbf4cdb56705f83e3cfa63e3c332925dd
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__init__.py
@@ -0,0 +1 @@
+# Just here to make importlib.resources work
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ab92396229e151d1ccdcb9a7e0224c71a6ddd23
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ea3bdbb05edc3cd1f1171858cbc502b2e98ad6d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66343c49296d1c5fb00dd00fd18f14f85324762a
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40b02fe7902e16372c28ac4403bf0635949440bd
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..edf575a856693097a4130a8dc17a7d9e57a9513c
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3369592443cfe4efd0aa9ae787f311c19f57376
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f94fec96ccd6f71debc11110ba407b46740b3308
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce5ee6e41dbe8ced38c647ca8b248bae004c39b4
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b8c8ce89859897045bd7a36364794b6efec432a
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47a52e3d4e48a5a4c8b8a6fdf95bb5c7a3e0f64d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cdfba1caf9fbabbf51553833c8bd28f93c274f2f
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d4af2a84675918c8b9c5c8882ff4ee88f64bf56
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..207059b90a022bfc6870f30567cf06d7f78f8542
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..caa1ba2bfc9b9744adae8a66b416ce0167dcb064
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..636ceb63de8c85e19bf08474e7a9d5637a2e3c33
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f0b7ad40b5ded69bcc68bd52ae63bc8b4a09772b
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4881df6b5619f58e623dc6e053b237187b2054eb
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..498b74cca020a7b77dd713499975f46888a7aa15
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29f6e4852ba2b39696497a6f63a279b0f9c94a4e
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..835eebbde6bdac531feefcd4dc8a4bd59fe6c452
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..46f2420c0c4d44890777c35408c3d732b507121a
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7556700018f8b848e1d3050827885d732c13f20f
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/__init__.py b/parrot/lib/python3.10/site-packages/joblib/externals/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e19d4c8a9c78766aeb55190ed5d8a02f4b2e8e27
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58a8d086ff616b2ef75ab0d788d990e749f96e8d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py
@@ -0,0 +1,18 @@
+from . import cloudpickle
+from .cloudpickle import * # noqa
+
+__doc__ = cloudpickle.__doc__
+
+__version__ = "3.0.0"
+
+__all__ = [ # noqa
+ "__version__",
+ "Pickler",
+ "CloudPickler",
+ "dumps",
+ "loads",
+ "dump",
+ "load",
+ "register_pickle_by_value",
+ "unregister_pickle_by_value",
+]
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7eed487981c7c11727781742fa957395deee9186
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c911e8fbfec3f437814dcb8e5e160cf744e6e9a2
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe280de0f9f692dab354bf8e2d971675f4310e4c
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb43a9676bbb11bdecf187e7f6cde51f793ff3fc
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py
@@ -0,0 +1,1487 @@
+"""Pickler class to extend the standard pickle.Pickler functionality
+
+The main objective is to make it natural to perform distributed computing on
+clusters (such as PySpark, Dask, Ray...) with interactively defined code
+(functions, classes, ...) written in notebooks or console.
+
+In particular this pickler adds the following features:
+- serialize interactively-defined or locally-defined functions, classes,
+ enums, typevars, lambdas and nested functions to compiled byte code;
+- deal with some other non-serializable objects in an ad-hoc manner where
+ applicable.
+
+This pickler is therefore meant to be used for the communication between short
+lived Python processes running the same version of Python and libraries. In
+particular, it is not meant to be used for long term storage of Python objects.
+
+It does not include an unpickler, as standard Python unpickling suffices.
+
+This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
+`_.
+
+Copyright (c) 2012-now, CloudPickle developers and contributors.
+Copyright (c) 2012, Regents of the University of California.
+Copyright (c) 2009 `PiCloud, Inc. `_.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the University of California, Berkeley nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import _collections_abc
+from collections import ChainMap, OrderedDict
+import abc
+import builtins
+import copyreg
+import dataclasses
+import dis
+from enum import Enum
+import io
+import itertools
+import logging
+import opcode
+import pickle
+from pickle import _getattribute
+import platform
+import struct
+import sys
+import threading
+import types
+import typing
+import uuid
+import warnings
+import weakref
+
+# The following import is required to be imported in the cloudpickle
+# namespace to be able to load pickle files generated with older versions of
+# cloudpickle. See: tests/test_backward_compat.py
+from types import CellType # noqa: F401
+
+
+# cloudpickle is meant for inter process communication: we expect all
+# communicating processes to run the same Python version hence we favor
+# communication speed over compatibility:
+DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
+
+# Names of modules whose resources should be treated as dynamic.
+_PICKLE_BY_VALUE_MODULES = set()
+
+# Track the provenance of reconstructed dynamic classes to make it possible to
+# reconstruct instances from the matching singleton class definition when
+# appropriate and preserve the usual "isinstance" semantics of Python objects.
+# Both mappings are weak so tracking never keeps classes alive on its own.
+_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
+_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
+_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()
+
+PYPY = platform.python_implementation() == "PyPy"
+
+builtin_code_type = None
+if PYPY:
+    # builtin-code objects only exist in pypy
+    builtin_code_type = type(float.__new__.__code__)
+
+# Per-code-object memo of referenced global names (see _extract_code_globals).
+_extract_code_globals_cache = weakref.WeakKeyDictionary()
+
+
+def _get_or_create_tracker_id(class_def):
+    """Return the tracker id associated with the dynamic class ``class_def``.
+
+    A fresh uuid4 hex string is allocated and registered in both weak
+    tracker mappings (under the tracker lock) when the class is new.
+    """
+    with _DYNAMIC_CLASS_TRACKER_LOCK:
+        class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
+        if class_tracker_id is None:
+            class_tracker_id = uuid.uuid4().hex
+            _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
+            _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
+    return class_tracker_id
+
+
+def _lookup_class_or_track(class_tracker_id, class_def):
+    """Return the class already tracked under ``class_tracker_id``, if any.
+
+    Otherwise register ``class_def`` under that id and return it. A None id
+    disables tracking and returns ``class_def`` unchanged.
+    """
+    if class_tracker_id is not None:
+        with _DYNAMIC_CLASS_TRACKER_LOCK:
+            # setdefault keeps the first definition seen for this id.
+            class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
+                class_tracker_id, class_def
+            )
+            _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
+    return class_def
+
+
+def register_pickle_by_value(module):
+    """Register a module to make its functions and classes picklable by value.
+
+    By default, functions and classes that are attributes of an importable
+    module are to be pickled by reference, that is relying on re-importing
+    the attribute from the module at load time.
+
+    If `register_pickle_by_value(module)` is called, all its functions and
+    classes are subsequently to be pickled by value, meaning that they can
+    be loaded in Python processes where the module is not importable.
+
+    This is especially useful when developing a module in a distributed
+    execution environment: restarting the client Python process with the new
+    source code is enough: there is no need to re-install the new version
+    of the module on all the worker nodes nor to restart the workers.
+
+    Note: this feature is considered experimental. See the cloudpickle
+    README.md file for more details and limitations.
+
+    Raises ValueError if ``module`` is not a module object or is missing
+    from sys.modules.
+    """
+    if not isinstance(module, types.ModuleType):
+        raise ValueError(f"Input should be a module object, got {str(module)} instead")
+    # In the future, cloudpickle may need a way to access any module registered
+    # for pickling by value in order to introspect relative imports inside
+    # functions pickled by value. (see
+    # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
+    # This access can be ensured by checking that module is present in
+    # sys.modules at registering time and assuming that it will still be in
+    # there when accessed during pickling. Another alternative would be to
+    # store a weakref to the module. Even though cloudpickle does not implement
+    # this introspection yet, in order to avoid a possible breaking change
+    # later, we still enforce the presence of module inside sys.modules.
+    if module.__name__ not in sys.modules:
+        raise ValueError(
+            f"{module} was not imported correctly, have you used an "
+            "`import` statement to access it?"
+        )
+    _PICKLE_BY_VALUE_MODULES.add(module.__name__)
+
+
+def unregister_pickle_by_value(module):
+    """Unregister that the input module should be pickled by value."""
+    if not isinstance(module, types.ModuleType):
+        raise ValueError(f"Input should be a module object, got {str(module)} instead")
+    # Raise eagerly so a register/unregister mismatch is noticed by callers.
+    if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
+        raise ValueError(f"{module} is not registered for pickle by value")
+    else:
+        _PICKLE_BY_VALUE_MODULES.remove(module.__name__)
+
+
+def list_registry_pickle_by_value():
+    """Return a copy of the set of module names registered for pickle by value."""
+    return _PICKLE_BY_VALUE_MODULES.copy()
+
+
+def _is_registered_pickle_by_value(module):
+    """Return True if ``module`` or any parent package is registered by value."""
+    module_name = module.__name__
+    if module_name in _PICKLE_BY_VALUE_MODULES:
+        return True
+    while True:
+        parent_name = module_name.rsplit(".", 1)[0]
+        # rsplit returns the name unchanged once no dot is left, i.e. we
+        # reached the top-level package: stop walking up.
+        if parent_name == module_name:
+            break
+        if parent_name in _PICKLE_BY_VALUE_MODULES:
+            return True
+        module_name = parent_name
+    return False
+
+
+def _whichmodule(obj, name):
+    """Find the module an object belongs to.
+
+    This function differs from ``pickle.whichmodule`` in two ways:
+    - it does not mangle the cases where obj's module is __main__ and obj was
+      not found in any module.
+    - Errors arising during module introspection are ignored, as those errors
+      are considered unwanted side effects.
+
+    Returns the module name, or None when obj cannot be located in any
+    imported module (obj is then treated as dynamic by callers).
+    """
+    module_name = getattr(obj, "__module__", None)
+
+    if module_name is not None:
+        return module_name
+    # Protect the iteration by using a copy of sys.modules against dynamic
+    # modules that trigger imports of other modules upon calls to getattr or
+    # other threads importing at the same time.
+    for module_name, module in sys.modules.copy().items():
+        # Some modules such as coverage can inject non-module objects inside
+        # sys.modules
+        if (
+            module_name == "__main__"
+            or module is None
+            or not isinstance(module, types.ModuleType)
+        ):
+            continue
+        try:
+            # _getattribute returns an (attribute, parent) pair; compare by
+            # identity so equal-but-distinct objects do not match.
+            if _getattribute(module, name)[0] is obj:
+                return module_name
+        except Exception:
+            pass
+    return None
+
+
+def _should_pickle_by_reference(obj, name=None):
+    """Test whether a function or a class should be pickled by reference.
+
+    Pickling by reference means that the object (typically a function or a
+    class) is an attribute of a module that is assumed to be importable in the
+    target Python environment. Loading will therefore rely on importing the
+    module and then calling `getattr` on it to access the function or class.
+
+    Pickling by reference is the only option to pickle functions and classes
+    in the standard library. In cloudpickle the alternative option is to
+    pickle by value (for instance for interactively or locally defined
+    functions and classes or for attributes of modules that have been
+    explicitly registered to be pickled by value).
+    """
+    if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
+        module_and_name = _lookup_module_and_qualname(obj, name=name)
+        if module_and_name is None:
+            return False
+        module, name = module_and_name
+        return not _is_registered_pickle_by_value(module)
+
+    elif isinstance(obj, types.ModuleType):
+        # We assume that sys.modules is primarily used as a cache mechanism for
+        # the Python import machinery. Checking if a module has been added in
+        # is sys.modules therefore a cheap and simple heuristic to tell us
+        # whether we can assume that a given module could be imported by name
+        # in another Python process.
+        if _is_registered_pickle_by_value(obj):
+            return False
+        return obj.__name__ in sys.modules
+    else:
+        raise TypeError(
+            "cannot check importability of {} instances".format(type(obj).__name__)
+        )
+
+
+def _lookup_module_and_qualname(obj, name=None):
+    """Return the (module, name) pair under which ``obj`` can be re-imported.
+
+    Returns None when obj must be treated as dynamic: its module is unknown,
+    is __main__, is not imported, or does not actually hold obj.
+    """
+    if name is None:
+        name = getattr(obj, "__qualname__", None)
+    if name is None: # pragma: no cover
+        # This used to be needed for Python 2.7 support but is probably not
+        # needed anymore. However we keep the __name__ introspection in case
+        # users of cloudpickle rely on this old behavior for unknown reasons.
+        name = getattr(obj, "__name__", None)
+
+    module_name = _whichmodule(obj, name)
+
+    if module_name is None:
+        # In this case, obj.__module__ is None AND obj was not found in any
+        # imported module. obj is thus treated as dynamic.
+        return None
+
+    if module_name == "__main__":
+        return None
+
+    # Note: if module_name is in sys.modules, the corresponding module is
+    # assumed importable at unpickling time. See #357
+    module = sys.modules.get(module_name, None)
+    if module is None:
+        # The main reason why obj's module would not be imported is that this
+        # module has been dynamically created, using for example
+        # types.ModuleType. The other possibility is that module was removed
+        # from sys.modules after obj was created/imported. But this case is not
+        # supported, as the standard pickle does not support it either.
+        return None
+
+    try:
+        obj2, parent = _getattribute(module, name)
+    except AttributeError:
+        # obj was not found inside the module it points to
+        return None
+    # Identity check: a same-named but different object must not be matched.
+    if obj2 is not obj:
+        return None
+    return module, name
+
+
+def _extract_code_globals(co):
+    """Find all globals names read or written to by codeblock co.
+
+    Results are memoized per code object in a weak dictionary, which is safe
+    because code objects are immutable.
+    """
+    out_names = _extract_code_globals_cache.get(co)
+    if out_names is None:
+        # We use a dict with None values instead of a set to get a
+        # deterministic order and avoid introducing non-deterministic pickle
+        # bytes as a results.
+        out_names = {name: None for name in _walk_global_ops(co)}
+
+        # Declaring a function inside another one using the "def ..." syntax
+        # generates a constant code object corresponding to the one of the
+        # nested function's As the nested function may itself need global
+        # variables, we need to introspect its code, extract its globals, (look
+        # for code object in it's co_consts attribute..) and add the result to
+        # code_globals
+        if co.co_consts:
+            for const in co.co_consts:
+                if isinstance(const, types.CodeType):
+                    out_names.update(_extract_code_globals(const))
+
+        _extract_code_globals_cache[co] = out_names
+
+    return out_names
+
+
+def _find_imported_submodules(code, top_level_dependencies):
+    """Find currently imported submodules used by a function.
+
+    Submodules used by a function need to be detected and referenced for the
+    function to work correctly at depickling time. Because submodules can be
+    referenced as attribute of their parent package (``package.submodule``), we
+    need a special introspection technique that does not rely on GLOBAL-related
+    opcodes to find references of them in a code object.
+
+    Example:
+    ```
+    import concurrent.futures
+    import cloudpickle
+    def func():
+        x = concurrent.futures.ThreadPoolExecutor
+    if __name__ == '__main__':
+        cloudpickle.dumps(func)
+    ```
+    The globals extracted by cloudpickle in the function's state include the
+    concurrent package, but not its submodule (here, concurrent.futures), which
+    is the module used by func. Find_imported_submodules will detect the usage
+    of concurrent.futures. Saving this module alongside with func will ensure
+    that calling func once depickled does not fail due to concurrent.futures
+    not being imported
+    """
+
+    subimports = []
+    # check if any known dependency is an imported package
+    for x in top_level_dependencies:
+        if (
+            isinstance(x, types.ModuleType)
+            and hasattr(x, "__package__")
+            and x.__package__
+        ):
+            # check if the package has any currently loaded sub-imports
+            prefix = x.__name__ + "."
+            # A concurrent thread could mutate sys.modules,
+            # make sure we iterate over a copy to avoid exceptions
+            for name in list(sys.modules):
+                # Older versions of pytest will add a "None" module to
+                # sys.modules.
+                if name is not None and name.startswith(prefix):
+                    # check whether the function can address the sub-module:
+                    # every dotted component must appear in co_names.
+                    tokens = set(name[len(prefix) :].split("."))
+                    if not tokens - set(code.co_names):
+                        subimports.append(sys.modules[name])
+    return subimports
+
+
+# relevant opcodes
+STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
+DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
+LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
+GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
+HAVE_ARGUMENT = dis.HAVE_ARGUMENT
+EXTENDED_ARG = dis.EXTENDED_ARG
+
+
+# Reverse mapping from the built-in type objects exposed by the ``types``
+# module to their attribute name there; used by _class_reduce/_builtin_type
+# to pickle those types by name.
+_BUILTIN_TYPE_NAMES = {}
+for k, v in types.__dict__.items():
+    if type(v) is type:
+        _BUILTIN_TYPE_NAMES[v] = k
+
+
+def _builtin_type(name):
+    """Return the type registered under ``name`` in the ``types`` module."""
+    if name == "ClassType": # pragma: no cover
+        # Backward compat to load pickle files generated with cloudpickle
+        # < 1.3 even if loading pickle files from older versions is not
+        # officially supported.
+        return type
+    return getattr(types, name)
+
+
+def _walk_global_ops(code):
+    """Yield referenced name for global-referencing instructions in code."""
+    for instr in dis.get_instructions(code):
+        op = instr.opcode
+        if op in GLOBAL_OPS:
+            # argval is the already-resolved global name for *_GLOBAL opcodes.
+            yield instr.argval
+
+
+def _extract_class_dict(cls):
+    """Retrieve a copy of the dict of a class without the inherited method."""
+    clsdict = dict(cls.__dict__)  # copy dict proxy to a dict
+    if len(cls.__bases__) == 1:
+        inherited_dict = cls.__bases__[0].__dict__
+    else:
+        inherited_dict = {}
+        for base in reversed(cls.__bases__):
+            inherited_dict.update(base.__dict__)
+    to_remove = []
+    for name, value in clsdict.items():
+        try:
+            base_value = inherited_dict[name]
+            # Only drop attributes inherited *by identity*: an equal but
+            # distinct object defined on cls must be kept.
+            if value is base_value:
+                to_remove.append(name)
+        except KeyError:
+            pass
+    for name in to_remove:
+        clsdict.pop(name)
+    return clsdict
+
+
+def is_tornado_coroutine(func):
+    """Return whether `func` is a Tornado coroutine function.
+
+    Running coroutines are not supported.
+    """
+    warnings.warn(
+        "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
+        "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
+        "directly instead.",
+        category=DeprecationWarning,
+    )
+    # Only inspect tornado if it is already loaded; never import it here.
+    if "tornado.gen" not in sys.modules:
+        return False
+    gen = sys.modules["tornado.gen"]
+    if not hasattr(gen, "is_coroutine_function"):
+        # Tornado version is too old
+        return False
+    return gen.is_coroutine_function(func)
+
+
+def subimport(name):
+    """Import and return the (possibly dotted) module named ``name``."""
+    # We cannot do simply: `return __import__(name)`: Indeed, if ``name`` is
+    # the name of a submodule, __import__ will return the top-level root module
+    # of this submodule. For instance, __import__('os.path') returns the `os`
+    # module.
+    __import__(name)
+    return sys.modules[name]
+
+
+def dynamic_subimport(name, vars):
+    """Reconstruct a dynamic module from its ``name`` and state dict ``vars``."""
+    mod = types.ModuleType(name)
+    mod.__dict__.update(vars)
+    # Restore a usable __builtins__ (it is stripped at pickling time).
+    mod.__dict__["__builtins__"] = builtins.__dict__
+    return mod
+
+
+def _get_cell_contents(cell):
+    """Return the contents of a closure cell, or the empty-cell sentinel."""
+    try:
+        return cell.cell_contents
+    except ValueError:
+        # Handle empty cells explicitly with a sentinel value.
+        return _empty_cell_value
+
+
+# NOTE: used as a decorator below to replace the _empty_cell_value class with
+# its singleton instance.
+def instance(cls):
+    """Create a new instance of a class.
+
+    Parameters
+    ----------
+    cls : type
+        The class to create an instance of.
+
+    Returns
+    -------
+    instance : cls
+        A new instance of ``cls``.
+    """
+    return cls()
+
+
+@instance
+class _empty_cell_value:
+    """Sentinel for empty closures."""
+
+    @classmethod
+    def __reduce__(cls):
+        # Pickle as a bare global name so unpickling yields this singleton.
+        return cls.__name__
+
+
+def _make_function(code, globals, name, argdefs, closure):
+    """Reconstruct a function from its code, globals, name, defaults, closure."""
+    # Setting __builtins__ in globals is needed for nogil CPython.
+    globals["__builtins__"] = __builtins__
+    return types.FunctionType(code, globals, name, argdefs, closure)
+
+
+def _make_empty_cell():
+    """Return a new, empty closure cell."""
+    if False:
+        # trick the compiler into creating an empty cell in our lambda
+        cell = None
+        raise AssertionError("this route should not be executed")
+
+    return (lambda: cell).__closure__[0]
+
+
+def _make_cell(value=_empty_cell_value):
+    """Return a new cell, filled with ``value`` unless it is the empty sentinel."""
+    cell = _make_empty_cell()
+    if value is not _empty_cell_value:
+        cell.cell_contents = value
+    return cell
+
+
+def _make_skeleton_class(
+    type_constructor, name, bases, type_kwargs, class_tracker_id, extra
+):
+    """Build dynamic class with an empty __dict__ to be filled once memoized
+
+    If class_tracker_id is not None, try to lookup an existing class definition
+    matching that id. If none is found, track a newly reconstructed class
+    definition under that id so that other instances stemming from the same
+    class id will also reuse this class definition.
+
+    The "extra" variable is meant to be a dict (or None) that can be used for
+    forward compatibility shall the need arise.
+    """
+    # types.new_class honors a custom metaclass (type_constructor), unlike a
+    # direct three-argument type() call.
+    skeleton_class = types.new_class(
+        name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
+    )
+    return _lookup_class_or_track(class_tracker_id, skeleton_class)
+
+
+def _make_skeleton_enum(
+    bases, name, qualname, members, module, class_tracker_id, extra
+):
+    """Build dynamic enum with an empty __dict__ to be filled once memoized
+
+    The creation of the enum class is inspired by the code of
+    EnumMeta._create_.
+
+    If class_tracker_id is not None, try to lookup an existing enum definition
+    matching that id. If none is found, track a newly reconstructed enum
+    definition under that id so that other instances stemming from the same
+    class id will also reuse this enum definition.
+
+    The "extra" variable is meant to be a dict (or None) that can be used for
+    forward compatibility shall the need arise.
+    """
+    # enums always inherit from their base Enum class at the last position in
+    # the list of base classes:
+    enum_base = bases[-1]
+    metacls = enum_base.__class__
+    classdict = metacls.__prepare__(name, bases)
+
+    for member_name, member_value in members.items():
+        classdict[member_name] = member_value
+    enum_class = metacls.__new__(metacls, name, bases, classdict)
+    # Restore identity metadata that a regular class statement would set.
+    enum_class.__module__ = module
+    enum_class.__qualname__ = qualname
+
+    return _lookup_class_or_track(class_tracker_id, enum_class)
+
+
+def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
+    """Reconstruct a typing.TypeVar, reusing a tracked instance when possible."""
+    tv = typing.TypeVar(
+        name,
+        *constraints,
+        bound=bound,
+        covariant=covariant,
+        contravariant=contravariant,
+    )
+    return _lookup_class_or_track(class_tracker_id, tv)
+
+
+def _decompose_typevar(obj):
+    """Return the argument tuple _make_typevar needs to rebuild TypeVar ``obj``."""
+    return (
+        obj.__name__,
+        obj.__bound__,
+        obj.__constraints__,
+        obj.__covariant__,
+        obj.__contravariant__,
+        _get_or_create_tracker_id(obj),
+    )
+
+
+def _typevar_reduce(obj):
+    """Reducer for typing.TypeVar instances (by reference when importable)."""
+    # TypeVar instances require the module information hence why we
+    # are not using the _should_pickle_by_reference directly
+    module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
+
+    if module_and_name is None:
+        return (_make_typevar, _decompose_typevar(obj))
+    elif _is_registered_pickle_by_value(module_and_name[0]):
+        return (_make_typevar, _decompose_typevar(obj))
+
+    # Importable and not registered by value: pickle as an attribute lookup.
+    return (getattr, module_and_name)
+
+
+def _get_bases(typ):
+    """Return ``typ``'s bases, preferring __orig_bases__ for generic types."""
+    if "__orig_bases__" in getattr(typ, "__dict__", {}):
+        # For generic types (see PEP 560)
+        # Note that simply checking `hasattr(typ, '__orig_bases__')` is not
+        # correct. Subclasses of a fully-parameterized generic class does not
+        # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
+        # will return True because it's defined in the base class.
+        bases_attr = "__orig_bases__"
+    else:
+        # For regular class objects
+        bases_attr = "__bases__"
+    return getattr(typ, bases_attr)
+
+
+def _make_dict_keys(obj, is_ordered=False):
+    """Rebuild a dict_keys (or odict_keys) view over the elements of ``obj``."""
+    if is_ordered:
+        return OrderedDict.fromkeys(obj).keys()
+    else:
+        return dict.fromkeys(obj).keys()
+
+
+def _make_dict_values(obj, is_ordered=False):
+    """Rebuild a dict_values (or odict_values) view over the elements of ``obj``."""
+    if is_ordered:
+        return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
+    else:
+        return {i: _ for i, _ in enumerate(obj)}.values()
+
+
+def _make_dict_items(obj, is_ordered=False):
+    """Rebuild a dict_items (or odict_items) view over the mapping ``obj``."""
+    if is_ordered:
+        return OrderedDict(obj).items()
+    else:
+        return obj.items()
+
+
+# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
+# -------------------------------------------------
+
+
+def _class_getnewargs(obj):
+    """Return the argument tuple for _make_skeleton_class to rebuild ``obj``."""
+    type_kwargs = {}
+    if "__module__" in obj.__dict__:
+        type_kwargs["__module__"] = obj.__module__
+
+    __dict__ = obj.__dict__.get("__dict__", None)
+    if isinstance(__dict__, property):
+        type_kwargs["__dict__"] = __dict__
+
+    return (
+        type(obj),
+        obj.__name__,
+        _get_bases(obj),
+        type_kwargs,
+        _get_or_create_tracker_id(obj),
+        None,
+    )
+
+
+def _enum_getnewargs(obj):
+    """Return the argument tuple for _make_skeleton_enum to rebuild enum ``obj``."""
+    members = {e.name: e.value for e in obj}
+    return (
+        obj.__bases__,
+        obj.__name__,
+        obj.__qualname__,
+        members,
+        obj.__module__,
+        _get_or_create_tracker_id(obj),
+        None,
+    )
+
+
+# COLLECTION OF OBJECTS RECONSTRUCTORS
+# ------------------------------------
+def _file_reconstructor(retval):
+    """Return ``retval`` unchanged; the pickled buffer stands in for the file."""
+    return retval
+
+
+# COLLECTION OF OBJECTS STATE GETTERS
+# -----------------------------------
+
+
+def _function_getstate(func):
+    """Return the (state, slotstate) pair capturing the full state of ``func``."""
+    # - Put func's dynamic attributes (stored in func.__dict__) in state. These
+    #   attributes will be restored at unpickling time using
+    #   f.__dict__.update(state)
+    # - Put func's members into slotstate. Such attributes will be restored at
+    #   unpickling time by iterating over slotstate and calling setattr(func,
+    #   slotname, slotvalue)
+    slotstate = {
+        "__name__": func.__name__,
+        "__qualname__": func.__qualname__,
+        "__annotations__": func.__annotations__,
+        "__kwdefaults__": func.__kwdefaults__,
+        "__defaults__": func.__defaults__,
+        "__module__": func.__module__,
+        "__doc__": func.__doc__,
+        "__closure__": func.__closure__,
+    }
+
+    # Only ship the globals that func's code actually references.
+    f_globals_ref = _extract_code_globals(func.__code__)
+    f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}
+
+    if func.__closure__ is not None:
+        closure_values = list(map(_get_cell_contents, func.__closure__))
+    else:
+        closure_values = ()
+
+    # Extract currently-imported submodules used by func. Storing these modules
+    # in a smoke _cloudpickle_subimports attribute of the object's state will
+    # trigger the side effect of importing these modules at unpickling time
+    # (which is necessary for func to work correctly once depickled)
+    slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
+        func.__code__, itertools.chain(f_globals.values(), closure_values)
+    )
+    slotstate["__globals__"] = f_globals
+
+    state = func.__dict__
+    return state, slotstate
+
+
+def _class_getstate(obj):
+    """Return the (clsdict, slotstate) state of dynamic class ``obj``.
+
+    Unpicklable or derived entries (abc caches, slot member descriptors,
+    __weakref__, the __dict__ property) are stripped or replaced.
+    """
+    clsdict = _extract_class_dict(obj)
+    clsdict.pop("__weakref__", None)
+
+    if issubclass(type(obj), abc.ABCMeta):
+        # If obj is an instance of an ABCMeta subclass, don't pickle the
+        # cache/negative caches populated during isinstance/issubclass
+        # checks, but pickle the list of registered subclasses of obj.
+        clsdict.pop("_abc_cache", None)
+        clsdict.pop("_abc_negative_cache", None)
+        clsdict.pop("_abc_negative_cache_version", None)
+        registry = clsdict.pop("_abc_registry", None)
+        if registry is None:
+            # The abc caches and registered subclasses of a
+            # class are bundled into the single _abc_impl attribute
+            clsdict.pop("_abc_impl", None)
+            (registry, _, _, _) = abc._get_dump(obj)
+
+            clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
+        else:
+            # In the above if clause, registry is a set of weakrefs -- in
+            # this case, registry is a WeakSet
+            clsdict["_abc_impl"] = [type_ for type_ in registry]
+
+    if "__slots__" in clsdict:
+        # pickle string length optimization: member descriptors of obj are
+        # created automatically from obj's __slots__ attribute, no need to
+        # save them in obj's state
+        if isinstance(obj.__slots__, str):
+            clsdict.pop(obj.__slots__)
+        else:
+            for k in obj.__slots__:
+                clsdict.pop(k, None)
+
+    clsdict.pop("__dict__", None)  # unpicklable property object
+
+    return (clsdict, {})
+
+
+def _enum_getstate(obj):
+    """Return the (clsdict, slotstate) state of dynamic enum class ``obj``."""
+    clsdict, slotstate = _class_getstate(obj)
+
+    members = {e.name: e.value for e in obj}
+    # Cleanup the clsdict that will be passed to _make_skeleton_enum:
+    # Those attributes are already handled by the metaclass.
+    for attrname in [
+        "_generate_next_value_",
+        "_member_names_",
+        "_member_map_",
+        "_member_type_",
+        "_value2member_map_",
+    ]:
+        clsdict.pop(attrname, None)
+    for member in members:
+        clsdict.pop(member)
+        # Special handling of Enum subclasses
+    return clsdict, slotstate
+
+
+# COLLECTIONS OF OBJECTS REDUCERS
+# -------------------------------
+# A reducer is a function taking a single argument (obj), and that returns a
+# tuple with all the necessary data to re-construct obj. Apart from a few
+# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
+# correctly pickle an object.
+# While many built-in objects (Exceptions objects, instances of the "object"
+# class, etc), are shipped with their own built-in reducer (invoked using
+# obj.__reduce__), some do not. The following methods were created to "fill
+# these holes".
+
+
+def _code_reduce(obj):
+    """code object reducer."""
+    # If you are not sure about the order of arguments, take a look at help
+    # of the specific type from types, for example:
+    # >>> from types import CodeType
+    # >>> help(CodeType)
+    #
+    # Dispatch on version-specific CodeType attributes, newest Python first.
+    if hasattr(obj, "co_exceptiontable"):
+        # Python 3.11 and later: there are some new attributes
+        # related to the enhanced exceptions.
+        args = (
+            obj.co_argcount,
+            obj.co_posonlyargcount,
+            obj.co_kwonlyargcount,
+            obj.co_nlocals,
+            obj.co_stacksize,
+            obj.co_flags,
+            obj.co_code,
+            obj.co_consts,
+            obj.co_names,
+            obj.co_varnames,
+            obj.co_filename,
+            obj.co_name,
+            obj.co_qualname,
+            obj.co_firstlineno,
+            obj.co_linetable,
+            obj.co_exceptiontable,
+            obj.co_freevars,
+            obj.co_cellvars,
+        )
+    elif hasattr(obj, "co_linetable"):
+        # Python 3.10 and later: obj.co_lnotab is deprecated and constructor
+        # expects obj.co_linetable instead.
+        args = (
+            obj.co_argcount,
+            obj.co_posonlyargcount,
+            obj.co_kwonlyargcount,
+            obj.co_nlocals,
+            obj.co_stacksize,
+            obj.co_flags,
+            obj.co_code,
+            obj.co_consts,
+            obj.co_names,
+            obj.co_varnames,
+            obj.co_filename,
+            obj.co_name,
+            obj.co_firstlineno,
+            obj.co_linetable,
+            obj.co_freevars,
+            obj.co_cellvars,
+        )
+    elif hasattr(obj, "co_nmeta"): # pragma: no cover
+        # "nogil" Python: modified attributes from 3.9
+        args = (
+            obj.co_argcount,
+            obj.co_posonlyargcount,
+            obj.co_kwonlyargcount,
+            obj.co_nlocals,
+            obj.co_framesize,
+            obj.co_ndefaultargs,
+            obj.co_nmeta,
+            obj.co_flags,
+            obj.co_code,
+            obj.co_consts,
+            obj.co_varnames,
+            obj.co_filename,
+            obj.co_name,
+            obj.co_firstlineno,
+            obj.co_lnotab,
+            obj.co_exc_handlers,
+            obj.co_jump_table,
+            obj.co_freevars,
+            obj.co_cellvars,
+            obj.co_free2reg,
+            obj.co_cell2reg,
+        )
+    else:
+        # Backward compat for 3.8 and 3.9
+        args = (
+            obj.co_argcount,
+            obj.co_posonlyargcount,
+            obj.co_kwonlyargcount,
+            obj.co_nlocals,
+            obj.co_stacksize,
+            obj.co_flags,
+            obj.co_code,
+            obj.co_consts,
+            obj.co_names,
+            obj.co_varnames,
+            obj.co_filename,
+            obj.co_name,
+            obj.co_firstlineno,
+            obj.co_lnotab,
+            obj.co_freevars,
+            obj.co_cellvars,
+        )
+    return types.CodeType, args
+
+
+def _cell_reduce(obj):
+    """Cell (containing values of a function's free variables) reducer."""
+    try:
+        # Accessing cell_contents raises ValueError on an empty cell.
+        obj.cell_contents
+    except ValueError:  # cell is empty
+        return _make_empty_cell, ()
+    else:
+        return _make_cell, (obj.cell_contents,)
+
+
+def _classmethod_reduce(obj):
+    """Reduce a method wrapper by rebuilding type(obj) around its __func__."""
+    orig_func = obj.__func__
+    return type(obj), (orig_func,)
+
+
+def _file_reduce(obj):
+    """Save a file.
+
+    Only readable, named, non-tty, open text handles are supported: the whole
+    contents are captured into a StringIO that replaces the file at load time.
+    Raises pickle.PicklingError for every unsupported kind of handle.
+    """
+    # NOTE(review): io is also imported at module level; this local import is
+    # redundant but kept byte-for-byte.
+    import io
+
+    if not hasattr(obj, "name") or not hasattr(obj, "mode"):
+        raise pickle.PicklingError(
+            "Cannot pickle files that do not map to an actual file"
+        )
+    if obj is sys.stdout:
+        return getattr, (sys, "stdout")
+    if obj is sys.stderr:
+        return getattr, (sys, "stderr")
+    if obj is sys.stdin:
+        raise pickle.PicklingError("Cannot pickle standard input")
+    if obj.closed:
+        raise pickle.PicklingError("Cannot pickle closed files")
+    if hasattr(obj, "isatty") and obj.isatty():
+        raise pickle.PicklingError("Cannot pickle files that map to tty objects")
+    if "r" not in obj.mode and "+" not in obj.mode:
+        raise pickle.PicklingError(
+            "Cannot pickle files that are not opened for reading: %s" % obj.mode
+        )
+
+    name = obj.name
+
+    retval = io.StringIO()
+
+    try:
+        # Read the whole file
+        curloc = obj.tell()
+        obj.seek(0)
+        contents = obj.read()
+        obj.seek(curloc)
+    except OSError as e:
+        raise pickle.PicklingError(
+            "Cannot pickle file %s as it cannot be read" % name
+        ) from e
+    retval.write(contents)
+    # Leave the replacement buffer at the same position as the original file.
+    retval.seek(curloc)
+
+    retval.name = name
+    return _file_reconstructor, (retval,)
+
+
+def _getset_descriptor_reduce(obj):
+    """Reduce a getset descriptor to an attribute lookup on its owner class."""
+    return getattr, (obj.__objclass__, obj.__name__)
+
+
+def _mappingproxy_reduce(obj):
+    """Reduce a mappingproxy by rebuilding it around a plain-dict copy."""
+    return types.MappingProxyType, (dict(obj),)
+
+
+def _memoryview_reduce(obj):
+    """Reduce a memoryview to an immutable bytes copy of its contents."""
+    return bytes, (obj.tobytes(),)
+
+
+def _module_reduce(obj):
+    """Reducer for modules: by reference when importable, else by value."""
+    if _should_pickle_by_reference(obj):
+        return subimport, (obj.__name__,)
+    else:
+        # Some external libraries can populate the "__builtins__" entry of a
+        # module's `__dict__` with unpicklable objects (see #316). For that
+        # reason, we do not attempt to pickle the "__builtins__" entry, and
+        # restore a default value for it at unpickling time.
+        state = obj.__dict__.copy()
+        state.pop("__builtins__", None)
+        return dynamic_subimport, (obj.__name__, state)
+
+
+def _method_reduce(obj):
+    """Reduce a bound method to its underlying function and bound instance."""
+    return (types.MethodType, (obj.__func__, obj.__self__))
+
+
+def _logger_reduce(obj):
+    """Reduce a Logger to a getLogger lookup by name (loggers are singletons)."""
+    return logging.getLogger, (obj.name,)
+
+
+def _root_logger_reduce(obj):
+    """Reduce the root logger to an argument-less getLogger call."""
+    return logging.getLogger, ()
+
+
+def _property_reduce(obj):
+    """Reduce a property to its fget/fset/fdel accessors and docstring."""
+    return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
+
+
+def _weakset_reduce(obj):
+    """Reduce a WeakSet to a WeakSet over the currently-live members."""
+    return weakref.WeakSet, (list(obj),)
+
+
+def _dynamic_class_reduce(obj):
+    """Save a class that can't be referenced as a module attribute.
+
+    This method is used to serialize classes that are defined inside
+    functions, or that otherwise can't be serialized as attribute lookups
+    from importable modules.
+    """
+    # Enum subclasses need dedicated reconstruction through their metaclass.
+    if Enum is not None and issubclass(obj, Enum):
+        return (
+            _make_skeleton_enum,
+            _enum_getnewargs(obj),
+            _enum_getstate(obj),
+            None,
+            None,
+            _class_setstate,
+        )
+    else:
+        return (
+            _make_skeleton_class,
+            _class_getnewargs(obj),
+            _class_getstate(obj),
+            None,
+            None,
+            _class_setstate,
+        )
+
+
def _class_reduce(obj):
    """Select the reducer depending on the dynamic nature of the class obj."""
    # The singleton types cannot be looked up by name; rebuild each one by
    # calling type() on its unique instance.
    for singleton in (None, Ellipsis, NotImplemented):
        if obj is type(singleton):
            return type, (singleton,)
    builtin_name = _BUILTIN_TYPE_NAMES.get(obj)
    if builtin_name is not None:
        return _builtin_type, (builtin_name,)
    if not _should_pickle_by_reference(obj):
        return _dynamic_class_reduce(obj)
    # Importable classes fall back to pickle's regular save-by-reference.
    return NotImplemented
+
+
def _dict_keys_reduce(obj):
    # Ship only the keys -- sending the backing dict as well might be
    # unintended and could potentially leak sensitive information.
    keys = [key for key in obj]
    return _make_dict_keys, (keys,)
+
+
def _dict_values_reduce(obj):
    # Ship only the values -- sending the backing dict as well might be
    # unintended and could potentially leak sensitive information.
    values = [value for value in obj]
    return _make_dict_values, (values,)
+
+
def _dict_items_reduce(obj):
    """Reduce a dict_items view by materializing it back into a dict."""
    materialized = {key: value for key, value in obj}
    return _make_dict_items, (materialized,)
+
+
def _odict_keys_reduce(obj):
    # Ship only the keys -- sending the backing dict as well might be
    # unintended and could potentially leak sensitive information. The True
    # flag presumably requests the OrderedDict flavour of the rebuilt view
    # (see _make_dict_keys).
    keys = [key for key in obj]
    return _make_dict_keys, (keys, True)
+
+
def _odict_values_reduce(obj):
    # Ship only the values -- sending the backing dict as well might be
    # unintended and could potentially leak sensitive information. The True
    # flag presumably requests the OrderedDict flavour of the rebuilt view
    # (see _make_dict_values).
    values = [value for value in obj]
    return _make_dict_values, (values, True)
+
+
def _odict_items_reduce(obj):
    """Reduce an OrderedDict items view; True selects the ordered variant."""
    materialized = {key: value for key, value in obj}
    return _make_dict_items, (materialized, True)
+
+
def _dataclass_field_base_reduce(obj):
    """Reduce a dataclasses field-type sentinel via its unique name."""
    sentinel_name = obj.name
    return _get_dataclass_field_type_sentinel, (sentinel_name,)
+
+
+# COLLECTIONS OF OBJECTS STATE SETTERS
+# ------------------------------------
+# state setters are called at unpickling time, once the object is created and
+# it has to be updated to how it was at unpickling time.
+
+
def _function_setstate(obj, state):
    """Update the state of a dynamic function.

    As __closure__ and __globals__ are readonly attributes of a function, we
    cannot rely on the native setstate routine of pickle.load_build, that calls
    setattr on items of the slotstate. Instead, we have to modify them inplace.
    """
    state, slotstate = state
    obj.__dict__.update(state)

    obj_globals = slotstate.pop("__globals__")
    obj_closure = slotstate.pop("__closure__")
    # _cloudpickle_subimports is a set of submodules that must be loaded for
    # the pickled function to work correctly at unpickling time. Now that these
    # submodules are depickled (hence imported), they can be removed from the
    # object's state (the object state only served as a reference holder to
    # these submodules)
    slotstate.pop("_cloudpickle_submodules")

    # Mutate the existing globals dict in place (rebinding __globals__ is not
    # possible as it is read-only), then restore a usable "__builtins__"
    # entry for the unpickling process.
    obj.__globals__.update(obj_globals)
    obj.__globals__["__builtins__"] = __builtins__

    if obj_closure is not None:
        for i, cell in enumerate(obj_closure):
            try:
                value = cell.cell_contents
            except ValueError:  # cell is empty
                continue
            # Write through the live function's own cells so any code sharing
            # these cells observes the restored values.
            obj.__closure__[i].cell_contents = value

    for k, v in slotstate.items():
        setattr(obj, k, v)
+
+
def _class_setstate(obj, state):
    """Restore the attributes of a reconstructed dynamic class in place."""
    state, slotstate = state
    registry = None
    for attrname, attr in state.items():
        if attrname == "_abc_impl":
            # Defer abc-registry handling until the other attributes are set.
            # NOTE(review): this iterates `attr` below, so the matching
            # getstate helper presumably stores an iterable of registered
            # subclasses rather than a raw _abc_impl object -- confirm there.
            registry = attr
        else:
            setattr(obj, attrname, attr)
    if registry is not None:
        for subclass in registry:
            obj.register(subclass)

    return obj
+
+
+# COLLECTION OF DATACLASS UTILITIES
+# ---------------------------------
+# There are some internal sentinel values whose identity must be preserved when
+# unpickling dataclass fields. Each sentinel value has a unique name that we can
+# use to retrieve its identity at unpickling time.
+
+
+_DATACLASSE_FIELD_TYPE_SENTINELS = {
+ dataclasses._FIELD.name: dataclasses._FIELD,
+ dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
+ dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
+}
+
+
+def _get_dataclass_field_type_sentinel(name):
+ return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
+
+
class Pickler(pickle.Pickler):
    """Pickler subclass with reducers for objects the stdlib pickler cannot
    serialize by reference: dynamic functions and classes, modules, loggers,
    dict views, properties, etc. On CPython the customization happens through
    ``reducer_override``; on PyPy, through the pure-Python ``dispatch`` dict.
    """

    # set of reducers defined and used by cloudpickle (private)
    _dispatch_table = {}
    _dispatch_table[classmethod] = _classmethod_reduce
    _dispatch_table[io.TextIOWrapper] = _file_reduce
    _dispatch_table[logging.Logger] = _logger_reduce
    _dispatch_table[logging.RootLogger] = _root_logger_reduce
    _dispatch_table[memoryview] = _memoryview_reduce
    _dispatch_table[property] = _property_reduce
    _dispatch_table[staticmethod] = _classmethod_reduce
    _dispatch_table[CellType] = _cell_reduce
    _dispatch_table[types.CodeType] = _code_reduce
    _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
    _dispatch_table[types.ModuleType] = _module_reduce
    _dispatch_table[types.MethodType] = _method_reduce
    _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
    _dispatch_table[weakref.WeakSet] = _weakset_reduce
    _dispatch_table[typing.TypeVar] = _typevar_reduce
    _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
    _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
    _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
    _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
    _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
    _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
    _dispatch_table[abc.abstractmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractproperty] = _property_reduce
    _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce

    # Cloudpickle's reducers take precedence over any reducer registered
    # globally through copyreg.
    dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)

    # function reducers are defined as instance methods of cloudpickle.Pickler
    # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref)
    def _dynamic_function_reduce(self, func):
        """Reduce a function that is not pickleable via attribute lookup."""
        newargs = self._function_getnewargs(func)
        state = _function_getstate(func)
        return (_make_function, newargs, state, None, None, _function_setstate)

    def _function_reduce(self, obj):
        """Reducer for function objects.

        If obj is a top-level attribute of a file-backed module, this reducer
        returns NotImplemented, making the cloudpickle.Pickler fall back to
        traditional pickle.Pickler routines to save obj. Otherwise, it reduces
        obj using a custom cloudpickle reducer designed specifically to handle
        dynamic functions.
        """
        if _should_pickle_by_reference(obj):
            return NotImplemented
        else:
            return self._dynamic_function_reduce(obj)

    def _function_getnewargs(self, func):
        """Build the `newargs` tuple used to reconstruct a dynamic function."""
        code = func.__code__

        # base_globals represents the future global namespace of func at
        # unpickling time. Looking it up and storing it in
        # cloudpickle.Pickler.globals_ref allow functions sharing the same
        # globals at pickling time to also share them once unpickled, at one
        # condition: since globals_ref is an attribute of a cloudpickle.Pickler
        # instance, and that a new cloudpickle.Pickler is created each time
        # cloudpickle.dump or cloudpickle.dumps is called, functions also need
        # to be saved within the same invocation of
        # cloudpickle.dump/cloudpickle.dumps (for example:
        # cloudpickle.dumps([f1, f2])). There is no such limitation when using
        # cloudpickle.Pickler.dump, as long as the multiple invocations are
        # bound to the same cloudpickle.Pickler instance.
        base_globals = self.globals_ref.setdefault(id(func.__globals__), {})

        if base_globals == {}:
            # Add module attributes used to resolve relative imports
            # instructions inside func.
            for k in ["__package__", "__name__", "__path__", "__file__"]:
                if k in func.__globals__:
                    base_globals[k] = func.__globals__[k]

        # Do not bind the free variables before the function is created to
        # avoid infinite recursion.
        if func.__closure__ is None:
            closure = None
        else:
            closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))

        return code, base_globals, None, None, closure

    def dump(self, obj):
        """Pickle obj, rewording deep-recursion RuntimeErrors for clarity."""
        try:
            return super().dump(obj)
        except RuntimeError as e:
            if len(e.args) > 0 and "recursion" in e.args[0]:
                msg = "Could not pickle object as excessively deep recursion required."
                raise pickle.PicklingError(msg) from e
            else:
                raise

    def __init__(self, file, protocol=None, buffer_callback=None):
        """Create a Pickler writing to *file* (protocol defaults to
        DEFAULT_PROTOCOL)."""
        if protocol is None:
            protocol = DEFAULT_PROTOCOL
        super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
        # map functions __globals__ attribute ids, to ensure that functions
        # sharing the same global namespace at pickling time also share
        # their global namespace at unpickling time.
        self.globals_ref = {}
        self.proto = int(protocol)

    if not PYPY:
        # pickle.Pickler is the C implementation of the CPython pickler and
        # therefore we rely on reduce_override method to customize the pickler
        # behavior.

        # `cloudpickle.Pickler.dispatch` is only left for backward
        # compatibility - note that when using protocol 5,
        # `cloudpickle.Pickler.dispatch` is not an extension of
        # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
        # subclasses the C-implemented `pickle.Pickler`, which does not expose
        # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler`
        # used `cloudpickle.Pickler.dispatch` as a class-level attribute
        # storing all reducers implemented by cloudpickle, but the attribute
        # name was not a great choice because it would collide with a
        # similarly named attribute in the pure-Python `pickle._Pickler`
        # implementation in the standard library.
        dispatch = dispatch_table

        # Implementation of the reducer_override callback, in order to
        # efficiently serialize dynamic functions and classes by subclassing
        # the C-implemented `pickle.Pickler`.
        # TODO: decorrelate reducer_override (which is tied to CPython's
        # implementation - would it make sense to backport it to pypy?) from
        # pickle's protocol 5, which is implementation agnostic. Currently, the
        # availability of both notions coincide on CPython's pickle, but it may
        # not be the case anymore when pypy implements protocol 5.

        def reducer_override(self, obj):
            """Type-agnostic reducing callback for function and classes.

            For performance reasons, subclasses of the C `pickle.Pickler` class
            cannot register custom reducers for functions and classes in the
            dispatch_table attribute. Reducers for such types must instead
            implemented via the special `reducer_override` method.

            Note that this method will be called for any object except a few
            builtin-types (int, lists, dicts etc.), which differs from reducers
            in the Pickler's dispatch_table, each of them being invoked for
            objects of a specific type only.

            This property comes in handy for classes: although most classes are
            instances of the ``type`` metaclass, some of them can be instances
            of other custom metaclasses (such as enum.EnumMeta for example). In
            particular, the metaclass will likely not be known in advance, and
            thus cannot be special-cased using an entry in the dispatch_table.
            reducer_override, among other things, allows us to register a
            reducer that will be called for any class, independently of its
            type.

            Notes:

            * reducer_override has the priority over dispatch_table-registered
            reducers.
            * reducer_override can be used to fix other limitations of
              cloudpickle for other types that suffered from type-specific
              reducers, such as Exceptions. See
              https://github.com/cloudpipe/cloudpickle/issues/248
            """
            t = type(obj)
            try:
                is_anyclass = issubclass(t, type)
            except TypeError:  # t is not a class (old Boost; see SF #502085)
                is_anyclass = False

            if is_anyclass:
                return _class_reduce(obj)
            elif isinstance(obj, types.FunctionType):
                return self._function_reduce(obj)
            else:
                # fallback to save_global, including the Pickler's
                # dispatch_table
                return NotImplemented

    else:
        # When reducer_override is not available, hack the pure-Python
        # Pickler's types.FunctionType and type savers. Note: the type saver
        # must override Pickler.save_global, because pickle.py contains a
        # hard-coded call to save_global when pickling meta-classes.
        dispatch = pickle.Pickler.dispatch.copy()

        def _save_reduce_pickle5(
            self,
            func,
            args,
            state=None,
            listitems=None,
            dictitems=None,
            state_setter=None,
            obj=None,
        ):
            """save_reduce variant emulating protocol-5 state_setter support."""
            save = self.save
            write = self.write
            self.save_reduce(
                func,
                args,
                state=None,
                listitems=listitems,
                dictitems=dictitems,
                obj=obj,
            )
            # backport of the Python 3.8 state_setter pickle operations
            save(state_setter)
            save(obj)  # simple BINGET opcode as obj is already memoized.
            save(state)
            write(pickle.TUPLE2)
            # Trigger a state_setter(obj, state) function call.
            write(pickle.REDUCE)
            # The purpose of state_setter is to carry-out an
            # inplace modification of obj. We do not care about what the
            # method might return, so its output is eventually removed from
            # the stack.
            write(pickle.POP)

        def save_global(self, obj, name=None, pack=struct.pack):
            """Main dispatch method.

            The name of this method is somewhat misleading: all types get
            dispatched here.
            """
            if obj is type(None):  # noqa
                return self.save_reduce(type, (None,), obj=obj)
            elif obj is type(Ellipsis):
                return self.save_reduce(type, (Ellipsis,), obj=obj)
            elif obj is type(NotImplemented):
                return self.save_reduce(type, (NotImplemented,), obj=obj)
            elif obj in _BUILTIN_TYPE_NAMES:
                return self.save_reduce(
                    _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
                )

            if name is not None:
                super().save_global(obj, name=name)
            elif not _should_pickle_by_reference(obj, name=name):
                self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
            else:
                super().save_global(obj, name=name)

        dispatch[type] = save_global

        def save_function(self, obj, name=None):
            """Registered with the dispatch to handle all function types.

            Determines what kind of function obj is (e.g. lambda, defined at
            interactive prompt, etc) and handles the pickling appropriately.
            """
            if _should_pickle_by_reference(obj, name=name):
                return super().save_global(obj, name=name)
            elif PYPY and isinstance(obj.__code__, builtin_code_type):
                return self.save_pypy_builtin_func(obj)
            else:
                return self._save_reduce_pickle5(
                    *self._dynamic_function_reduce(obj), obj=obj
                )

        def save_pypy_builtin_func(self, obj):
            """Save pypy equivalent of builtin functions.

            PyPy does not have the concept of builtin-functions. Instead,
            builtin-functions are simple function instances, but with a
            builtin-code attribute.
            Most of the time, builtin functions should be pickled by attribute.
            But PyPy has flaky support for __qualname__, so some builtin
            functions such as float.__new__ will be classified as dynamic. For
            this reason only, we created this special routine. Because
            builtin-functions are not expected to have closure or globals,
            there is no additional hack (compared the one already implemented
            in pickle) to protect ourselves from reference cycles. A simple
            (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
            also that PyPy improved their support for __qualname__ in v3.6, so
            this routing should be removed when cloudpickle supports only PyPy
            3.6 and later.
            """
            rv = (
                types.FunctionType,
                (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
                obj.__dict__,
            )
            self.save_reduce(*rv, obj=obj)

        dispatch[types.FunctionType] = save_function
+
+
+# Shorthands similar to pickle.dump/pickle.dumps
+
+
def dump(obj, file, protocol=None, buffer_callback=None):
    """Serialize obj as bytes streamed into file

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
    speed between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python (although this is not always
    guaranteed to work because cloudpickle relies on some internal
    implementation details that can change from one Python version to the
    next).
    """
    pickler = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
    pickler.dump(obj)
+
+
def dumps(obj, protocol=None, buffer_callback=None):
    """Serialize obj as a string of bytes allocated in memory

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
    speed between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python (although this is not always
    guaranteed to work because cloudpickle relies on some internal
    implementation details that can change from one Python version to the
    next).
    """
    with io.BytesIO() as stream:
        pickler = Pickler(stream, protocol=protocol, buffer_callback=buffer_callback)
        pickler.dump(obj)
        return stream.getvalue()
+
+
# Include pickles unloading functions in this namespace for convenience.
# Loading needs no cloudpickle-specific logic: the stdlib unpickler is reused
# as-is.
load, loads = pickle.load, pickle.loads

# Backward compat alias.
CloudPickler = Pickler
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..52d6732e44ebcc0053b24969943f7c3b742268bb
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
@@ -0,0 +1,13 @@
+"""Compatibility module.
+
+It can be necessary to load files generated by previous versions of cloudpickle
+that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
+namespace.
+
+See: tests/test_backward_compat.py
+"""
+from . import cloudpickle
+
+
def __getattr__(name):
    # PEP 562 module-level __getattr__: forward every attribute lookup to the
    # main cloudpickle module so that old pickles referencing symbols under
    # the `cloudpickle.cloudpickle_fast` namespace keep resolving.
    return getattr(cloudpickle, name)
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/__init__.py b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5886d2a62092bdc9f444d7a22058d065de567818
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__init__.py
@@ -0,0 +1,44 @@
+r"""The :mod:`loky` module manages a pool of workers that can be re-used across time.
+It provides a robust and dynamic implementation of the
+:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which
+hide the pool management under the hood.
+"""
+from concurrent.futures import (
+ ALL_COMPLETED,
+ FIRST_COMPLETED,
+ FIRST_EXCEPTION,
+ CancelledError,
+ Executor,
+ TimeoutError,
+ as_completed,
+ wait,
+)
+
+from ._base import Future
+from .backend.context import cpu_count
+from .backend.reduction import set_loky_pickler
+from .reusable_executor import get_reusable_executor
+from .cloudpickle_wrapper import wrap_non_picklable_objects
+from .process_executor import BrokenProcessPool, ProcessPoolExecutor
+
+
# Explicit public API of the loky package.
__all__ = [
    "get_reusable_executor",
    "cpu_count",
    "wait",
    "as_completed",
    "Future",
    "Executor",
    "ProcessPoolExecutor",
    "BrokenProcessPool",
    "CancelledError",
    "TimeoutError",
    "FIRST_COMPLETED",
    "FIRST_EXCEPTION",
    "ALL_COMPLETED",
    "wrap_non_picklable_objects",
    "set_loky_pickler",
]


# Version of the vendored loky package.
__version__ = "3.4.1"
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8784bba5835c1d8d92537a989f5605ecdb2d6e8
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9502fa5dd214972b157569c5f0734242cfc4f3df
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13f62425e4d57886887c709752ffc0cd8360a9d9
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3835a78c6420beac9da360292ea5c48e59154c70
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cda4dcb5a94c4f9face8c43c7b6a8a9be83e71e
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/_base.py b/parrot/lib/python3.10/site-packages/joblib/externals/loky/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..da0abc1e7fa18363e6342a3b67410f1429e6fa10
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/loky/_base.py
@@ -0,0 +1,28 @@
+###############################################################################
+# Modification of concurrent.futures.Future
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from concurrent/futures/_base.py (17/02/2017)
+# * Do not use yield from
+# * Use old super syntax
+#
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+from concurrent.futures import Future as _BaseFuture
+from concurrent.futures._base import LOGGER
+
+
+# To make loky._base.Future instances awaitable by concurrent.futures.wait,
+# derive our custom Future class from _BaseFuture. _invoke_callback is the only
+# modification made to this class in loky.
+# TODO investigate why using `concurrent.futures.Future` directly does not
+# always work in our test suite.
class Future(_BaseFuture):
    """concurrent.futures.Future variant whose done-callback errors are
    logged instead of using the base class's handling."""

    def _invoke_callbacks(self):
        # Run every registered done-callback. A raising callback is logged
        # (not propagated) so the remaining callbacks still execute and the
        # executor machinery is not crashed.
        for callback in self._done_callbacks:
            try:
                callback(self)
            except BaseException:
                LOGGER.exception(f"exception calling callback for {self!r}")
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5527d1fe3369b1f2663d2b3f22e06e95e679f62a
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9bd7df58a60bfc8b74f2e8ffb6105d07d2af0be
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..056cd3c2e930f2ab4c79b3afb2fd205cdb297060
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b90dff941453f39d8bc3d0ff003e171bc548ce70
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a363c1adc7a35423432269051a6cc43c4e612c43
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ce13e787f78f5ff81976d22d59462f041981644
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d670456845cb42e2cc0ce7d34665dac173b8bfba
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0f590317e75752fdd0b4962b9f3ecbbbaf50b37
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py
@@ -0,0 +1,378 @@
+###############################################################################
+# Basic context management with LokyContext
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/context.py
+# * Create a context ensuring loky uses only objects that are compatible
+# * Add LokyContext to the list of context of multiprocessing so loky can be
+# used with multiprocessing.set_start_method
+# * Implement a CFS-aware and physical-core aware cpu_count function.
+#
+import os
+import sys
+import math
+import subprocess
+import traceback
+import warnings
+import multiprocessing as mp
+from multiprocessing import get_context as mp_get_context
+from multiprocessing.context import BaseContext
+
+
+from .process import LokyProcess, LokyInitMainProcess
+
# On Windows, a process pool cannot use more than 61 workers (60 on Python
# versions older than 3.10): ¯\_(ツ)_/¯
if sys.version_info >= (3, 8):
    from concurrent.futures.process import _MAX_WINDOWS_WORKERS

    if sys.version_info < (3, 10):
        # Before 3.10 the usable limit is one below the stdlib constant.
        _MAX_WINDOWS_WORKERS = _MAX_WINDOWS_WORKERS - 1
else:
    # compat for versions before 3.8 which do not define this.
    _MAX_WINDOWS_WORKERS = 60

# Start methods accepted by get_context/set_start_method.
START_METHODS = ["loky", "loky_init_main", "spawn"]
if sys.platform != "win32":
    START_METHODS += ["fork", "forkserver"]

# Process-wide default start method; None means "use loky".
_DEFAULT_START_METHOD = None

# Cache for the number of physical cores to avoid repeating subprocess calls.
# It should not change during the lifetime of the program.
physical_cores_cache = None
+
+
def get_context(method=None):
    """Return the multiprocessing context registered under *method*.

    When *method* is None, fall back to the process-wide default set via
    ``set_start_method``, then to ``"loky"``.
    """
    # Try to overload the default context
    resolved = method or _DEFAULT_START_METHOD or "loky"

    if resolved == "fork":
        # If 'fork' is explicitly requested, warn user about potential issues.
        warnings.warn(
            "`fork` start method should not be used with "
            "`loky` as it does not respect POSIX. Try using "
            "`spawn` or `loky` instead.",
            UserWarning,
        )

    try:
        return mp_get_context(resolved)
    except ValueError:
        raise ValueError(
            f"Unknown context '{resolved}'. Value should be in "
            f"{START_METHODS}."
        )
+
+
def set_start_method(method, force=False):
    """Set the default start method returned by :func:`get_context`.

    Parameters
    ----------
    method : str or None
        One of ``START_METHODS``, or None to reset to the built-in default.
    force : bool
        Allow overriding a previously-set default.

    Raises
    ------
    RuntimeError
        If a start method was already set and *force* is False.
    ValueError
        If *method* is not a supported start method.
    """
    global _DEFAULT_START_METHOD
    if _DEFAULT_START_METHOD is not None and not force:
        raise RuntimeError("context has already been set")
    # Validate with a real exception: `assert` statements are stripped when
    # running under `python -O`, which would silently accept bad methods.
    if not (method is None or method in START_METHODS):
        raise ValueError(
            f"'{method}' is not a valid start_method. It should be in "
            f"{START_METHODS}"
        )

    _DEFAULT_START_METHOD = method
+
+
def get_start_method():
    """Return the start method currently set as loky's default (or None)."""
    return _DEFAULT_START_METHOD
+
+
def cpu_count(only_physical_cores=False):
    """Return the number of CPUs the current process can use.

    The returned number of CPUs accounts for:
     * the number of CPUs in the system, as given by
       ``multiprocessing.cpu_count``;
     * the CPU affinity settings of the current process
       (available on some Unix systems);
     * Cgroup CPU bandwidth limit (available on Linux only, typically
       set by docker and similar container orchestration systems);
     * the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
    and is given as the minimum of these constraints.

    If ``only_physical_cores`` is True, return the number of physical cores
    instead of the number of logical cores (hyperthreading / SMT). Note that
    this option is not enforced if the number of usable cores is controlled in
    any other way such as: process affinity, Cgroup restricted CPU bandwidth
    or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
    cores is not found, return the number of logical cores.

    Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for
    Python < 3.10), see:
    https://bugs.python.org/issue26903.

    It is also always larger or equal to 1.
    """
    # Note: os.cpu_count() is allowed to return None in its docstring
    os_cpu_count = os.cpu_count() or 1
    if sys.platform == "win32":
        # On Windows, attempting to use more than 61 CPUs would result in a
        # OS-level error. See https://bugs.python.org/issue26903. According to
        # https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups
        # it might be possible to go beyond with a lot of extra work but this
        # does not look easy.
        os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)

    # Apply affinity / cgroup / env-var limits, clamped to at least 1 so
    # callers can always spawn one worker.
    cpu_count_user = _cpu_count_user(os_cpu_count)
    aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)

    if not only_physical_cores:
        return aggregate_cpu_count

    if cpu_count_user < os_cpu_count:
        # Respect user setting
        return max(cpu_count_user, 1)

    cpu_count_physical, exception = _count_physical_cores()
    if cpu_count_physical != "not found":
        return cpu_count_physical

    # Fallback to default behavior
    if exception is not None:
        # warns only the first time
        warnings.warn(
            "Could not find the number of physical cores for the "
            f"following reason:\n{exception}\n"
            "Returning the number of logical cores instead. You can "
            "silence this warning by setting LOKY_MAX_CPU_COUNT to "
            "the number of cores you want to use."
        )
        traceback.print_tb(exception.__traceback__)

    return aggregate_cpu_count
+
+
def _cpu_count_cgroup(os_cpu_count):
    """Return the CPU limit implied by Linux cgroup bandwidth quotas.

    Falls back to *os_cpu_count* when no quota is active or the platform
    has no cgroup filesystem.
    """
    # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
    cgroup_v2_file = "/sys/fs/cgroup/cpu.max"
    cgroup_v1_quota_file = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
    cgroup_v1_period_file = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"

    if os.path.exists(cgroup_v2_file):
        # cgroup v2: one file holds "<quota> <period>".
        # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
        with open(cgroup_v2_file) as fh:
            quota_us, period_us = fh.read().strip().split()
    elif os.path.exists(cgroup_v1_quota_file) and os.path.exists(
        cgroup_v1_period_file
    ):
        # cgroup v1: quota and period live in separate files.
        # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
        with open(cgroup_v1_quota_file) as fh:
            quota_us = fh.read().strip()
        with open(cgroup_v1_period_file) as fh:
            period_us = fh.read().strip()
    else:
        # No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
        quota_us = "max"
        period_us = 100_000  # unused, for consistency with default values

    if quota_us == "max":
        # No active Cgroup quota on a Cgroup-capable platform
        return os_cpu_count

    quota_us = int(quota_us)
    period_us = int(period_us)
    if quota_us > 0 and period_us > 0:
        return math.ceil(quota_us / period_us)
    # Setting a negative cpu_quota_us value is a valid way to disable
    # cgroup CPU bandwith limits
    return os_cpu_count  # pragma: no cover
+
+
+def _cpu_count_affinity(os_cpu_count):
+ # Number of available CPUs given affinity settings
+ if hasattr(os, "sched_getaffinity"):
+ try:
+ return len(os.sched_getaffinity(0))
+ except NotImplementedError:
+ pass
+
+ # On PyPy and possibly other platforms, os.sched_getaffinity does not exist
+ # or raises NotImplementedError, let's try with the psutil if installed.
+ try:
+ import psutil
+
+ p = psutil.Process()
+ if hasattr(p, "cpu_affinity"):
+ return len(p.cpu_affinity())
+
+ except ImportError: # pragma: no cover
+ if (
+ sys.platform == "linux"
+ and os.environ.get("LOKY_MAX_CPU_COUNT") is None
+ ):
+ # PyPy does not implement os.sched_getaffinity on Linux which
+ # can cause severe oversubscription problems. Better warn the
+ # user in this particularly pathological case which can wreck
+ # havoc, typically on CI workers.
+ warnings.warn(
+ "Failed to inspect CPU affinity constraints on this system. "
+ "Please install psutil or explictly set LOKY_MAX_CPU_COUNT."
+ )
+
+ # This can happen for platforms that do not implement any kind of CPU
+ # infinity such as macOS-based platforms.
+ return os_cpu_count
+
+
def _cpu_count_user(os_cpu_count):
    """Number of user defined available CPUs"""
    limits = (
        _cpu_count_affinity(os_cpu_count),
        _cpu_count_cgroup(os_cpu_count),
        # User defined soft-limit passed as a loky specific environment
        # variable.
        int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count)),
    )
    # The effective limit is the most restrictive of the three.
    return min(limits)
+
+
def _count_physical_cores():
    """Return a tuple (number of physical cores, exception)

    If the number of physical cores is found, exception is set to None.
    If it has not been found, return ("not found", exception).

    The number of physical cores is cached to avoid repeating subprocess calls.
    """
    exception = None

    # First check if the value is cached
    global physical_cores_cache
    if physical_cores_cache is not None:
        return physical_cores_cache, exception

    # Not cached yet, find it
    try:
        if sys.platform == "linux":
            cpu_info = subprocess.run(
                "lscpu --parse=core".split(), capture_output=True, text=True
            )
            cpu_info = cpu_info.stdout.splitlines()
            # One line per logical CPU carrying its core id ("#" lines are
            # headers); the set keeps unique core ids = physical cores.
            cpu_info = {line for line in cpu_info if not line.startswith("#")}
            cpu_count_physical = len(cpu_info)
        elif sys.platform == "win32":
            # NOTE(review): `wmic` is deprecated on recent Windows builds —
            # confirm whether a PowerShell/CIM fallback is needed.
            cpu_info = subprocess.run(
                "wmic CPU Get NumberOfCores /Format:csv".split(),
                capture_output=True,
                text=True,
            )
            cpu_info = cpu_info.stdout.splitlines()
            cpu_info = [
                l.split(",")[1]
                for l in cpu_info
                if (l and l != "Node,NumberOfCores")
            ]
            cpu_count_physical = sum(map(int, cpu_info))
        elif sys.platform == "darwin":
            cpu_info = subprocess.run(
                "sysctl -n hw.physicalcpu".split(),
                capture_output=True,
                text=True,
            )
            cpu_info = cpu_info.stdout
            cpu_count_physical = int(cpu_info)
        else:
            raise NotImplementedError(f"unsupported platform: {sys.platform}")

        # if cpu_count_physical < 1, we did not find a valid value
        if cpu_count_physical < 1:
            raise ValueError(f"found {cpu_count_physical} physical cores < 1")

    except Exception as e:
        # Any failure (missing tool, parse error, unsupported platform) is
        # reported back to the caller instead of raising.
        exception = e
        cpu_count_physical = "not found"

    # Put the result in cache
    physical_cores_cache = cpu_count_physical

    return cpu_count_physical, exception
+
+
class LokyContext(BaseContext):
    """Context relying on the LokyProcess."""

    _name = "loky"  # name registered with multiprocessing
    Process = LokyProcess
    cpu_count = staticmethod(cpu_count)

    def Queue(self, maxsize=0, reducers=None):
        """Returns a queue object"""
        from .queues import Queue

        return Queue(maxsize, reducers=reducers, ctx=self.get_context())

    def SimpleQueue(self, reducers=None):
        """Returns a queue object"""
        from .queues import SimpleQueue

        return SimpleQueue(reducers=reducers, ctx=self.get_context())

    # The synchronization primitives below are only defined on POSIX; on
    # Windows the BaseContext implementations are inherited unchanged.
    if sys.platform != "win32":
        """For Unix platform, use our custom implementation of synchronize
        ensuring that we use the loky.backend.resource_tracker to clean-up
        the semaphores in case of a worker crash.
        """

        def Semaphore(self, value=1):
            """Returns a semaphore object"""
            from .synchronize import Semaphore

            return Semaphore(value=value)

        def BoundedSemaphore(self, value):
            """Returns a bounded semaphore object"""
            from .synchronize import BoundedSemaphore

            return BoundedSemaphore(value)

        def Lock(self):
            """Returns a lock object"""
            from .synchronize import Lock

            return Lock()

        def RLock(self):
            """Returns a recurrent lock object"""
            from .synchronize import RLock

            return RLock()

        def Condition(self, lock=None):
            """Returns a condition object"""
            from .synchronize import Condition

            return Condition(lock)

        def Event(self):
            """Returns an event object"""
            from .synchronize import Event

            return Event()
+
+
class LokyInitMainContext(LokyContext):
    """Extra context with LokyProcess, which does load the main module

    This context is used for compatibility in the case ``cloudpickle`` is not
    present on the running system. This permits to load functions defined in
    the ``main`` module, using proper safeguards. The declaration of the
    ``executor`` should be protected by ``if __name__ == "__main__":`` and the
    functions and variable used from main should be out of this block.

    This mimics the default behavior of multiprocessing under Windows and the
    behavior of the ``spawn`` start method on a posix system.
    For more details, see the end of the following section of python doc
    https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
    """

    _name = "loky_init_main"  # name registered with multiprocessing
    Process = LokyInitMainProcess  # process variant that imports __main__
+
+
# Register loky context so it works with multiprocessing.get_context
# NOTE: _concrete_contexts is a private multiprocessing table; registering
# here lets multiprocessing.get_context("loky") resolve these contexts.
ctx_loky = LokyContext()
mp.context._concrete_contexts["loky"] = ctx_loky
mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext()
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa089f7a1bf9b577455775f6d6249baf4bd430de
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py
@@ -0,0 +1,181 @@
+import os
+import sys
+import time
+import errno
+import signal
+import warnings
+import subprocess
+import traceback
+
+try:
+ import psutil
+except ImportError:
+ psutil = None
+
+
def kill_process_tree(process, use_psutil=True):
    """Terminate process and its descendants with SIGKILL"""
    # Prefer the psutil-based implementation when the library is available.
    if use_psutil and psutil is not None:
        _kill_process_tree_with_psutil(process)
        return
    _kill_process_tree_without_psutil(process)
+
+
def recursive_terminate(process, use_psutil=True):
    """Deprecated alias of :func:`kill_process_tree`."""
    # Fixed message: the fragments previously joined without a space,
    # rendering as "kill_process_treeinstead".
    warnings.warn(
        "recursive_terminate is deprecated in loky 3.2, use "
        "kill_process_tree instead",
        DeprecationWarning,
    )
    kill_process_tree(process, use_psutil=use_psutil)
+
+
def _kill_process_tree_with_psutil(process):
    """Kill *process* and all its descendants using psutil introspection."""
    try:
        descendants = psutil.Process(process.pid).children(recursive=True)
    except psutil.NoSuchProcess:
        # The process is already gone: nothing to clean up.
        return

    # Kill the descendants in reverse order to avoid killing the parents before
    # the descendant in cases where there are more processes nested.
    for descendant in descendants[::-1]:
        try:
            descendant.kill()
        except psutil.NoSuchProcess:
            pass

    try:
        psutil.Process(process.pid).kill()
    except psutil.NoSuchProcess:
        pass
    # Reap the multiprocessing.Process to avoid leaving a zombie.
    process.join()
+
+
def _kill_process_tree_without_psutil(process):
    """Terminate a process and its descendants."""
    try:
        if sys.platform == "win32":
            _windows_taskkill_process_tree(process.pid)
        else:
            _posix_recursive_kill(process.pid)
    except Exception:  # pragma: no cover
        details = traceback.format_exc()
        # Fixed message: the fragments previously joined without a space,
        # rendering as "installpsutil".
        warnings.warn(
            "Failed to kill subprocesses on this platform. Please install "
            "psutil: https://github.com/giampaolo/psutil\n"
            f"Details:\n{details}"
        )
    # In case we cannot introspect or kill the descendants, we fall back to
    # only killing the main process.
    #
    # Note: on Windows, process.kill() is an alias for process.terminate()
    # which in turns calls the Win32 API function TerminateProcess()
    process.kill()
    process.join()
+
+
def _windows_taskkill_process_tree(pid):
    """Force-kill *pid* and its whole process tree via Windows' taskkill."""
    # On windows, the taskkill function with option `/T` terminate a given
    # process pid and its children.
    try:
        subprocess.check_output(
            ["taskkill", "/F", "/T", "/PID", str(pid)], stderr=None
        )
    except subprocess.CalledProcessError as e:
        # In Windows, taskkill returns 128, 255 for no process found.
        if e.returncode in (128, 255):
            return
        # Let's raise to let the caller log the error details in a
        # warning and only kill the root process.
        raise  # pragma: no cover
+
+
def _kill(pid):
    """Send SIGKILL (or SIGTERM where SIGKILL does not exist) to *pid*."""
    # Not all systems (e.g. Windows) have a SIGKILL, but the C specification
    # mandates a SIGTERM signal. While Windows is handled specifically above,
    # let's try to be safe for other hypothetic platforms that only have
    # SIGTERM without SIGKILL.
    sig = getattr(signal, "SIGKILL", signal.SIGTERM)
    try:
        os.kill(pid, sig)
    except OSError as e:
        # ESRCH means the process is already terminated, which is fine;
        # any other error propagates so the top-level caller can warn and
        # retry killing the process.
        if e.errno != errno.ESRCH:
            raise  # pragma: no cover
+
+
def _posix_recursive_kill(pid):
    """Recursively kill the descendants of a process before killing it."""
    try:
        # `pgrep -P pid` lists the direct children of `pid`, one per line.
        children_pids = subprocess.check_output(
            ["pgrep", "-P", str(pid)], stderr=None, text=True
        )
    except subprocess.CalledProcessError as e:
        # `pgrep` returns 1 when no child process has been found
        if e.returncode == 1:
            children_pids = ""
        else:
            raise  # pragma: no cover

    # Decode the result, split the cpid and remove the trailing line;
    # each child subtree is killed before the parent itself.
    for cpid in children_pids.splitlines():
        cpid = int(cpid)
        _posix_recursive_kill(cpid)

    _kill(pid)
+
+
def get_exitcodes_terminated_worker(processes):
    """Return a formatted string with the exitcodes of terminated workers.

    If necessary, wait (up to .25s) for the system to correctly set the
    exitcode of one terminated worker.
    """

    def _collect():
        # Snapshot the values: the dict may shrink while workers are reaped.
        return [
            p.exitcode
            for p in list(processes.values())
            if p.exitcode is not None
        ]

    # There should be at least one terminated worker; if the OS has not yet
    # published its exitcode, poll a few times (5 x 50ms) before giving up.
    exitcodes = _collect()
    remaining_attempts = 5
    while not exitcodes and remaining_attempts > 0:
        remaining_attempts -= 1
        exitcodes = _collect()
        time.sleep(0.05)

    return _format_exitcodes(exitcodes)
+
+
def _format_exitcodes(exitcodes):
    """Format a list of exit code with names of the signals if possible"""
    named = (
        f"{_get_exitcode_name(code)}({code})"
        for code in exitcodes
        if code is not None
    )
    return "{" + ", ".join(named) + "}"
+
+
+def _get_exitcode_name(exitcode):
+ if sys.platform == "win32":
+ # The exitcode are unreliable on windows (see bpo-31863).
+ # For this case, return UNKNOWN
+ return "UNKNOWN"
+
+ if exitcode < 0:
+ try:
+ import signal
+
+ return signal.Signals(-exitcode).name
+ except ValueError:
+ return "UNKNOWN"
+ elif exitcode != 255:
+ # The exitcode are unreliable on forkserver were 255 is always returned
+ # (see bpo-30589). For this case, return UNKNOWN
+ return "EXIT"
+
+ return "UNKNOWN"
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py b/parrot/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..099debcb711c6695f0570861293b198047bd6093
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
@@ -0,0 +1,102 @@
+import inspect
+from functools import partial
+from joblib.externals.cloudpickle import dumps, loads
+
+
+WRAP_CACHE = {}
+
+
class CloudpickledObjectWrapper:
    """Wrap an object so it is serialized with cloudpickle instead of pickle."""

    def __init__(self, obj, keep_wrapper=False):
        self._obj = obj  # the wrapped object
        self._keep_wrapper = keep_wrapper  # keep wrapping after a pickle round-trip

    def __reduce__(self):
        _pickled_object = dumps(self._obj)
        if not self._keep_wrapper:
            # Unpickling yields the bare object: the wrapper disappears
            # on the remote side.
            return loads, (_pickled_object,)

        return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)

    def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the
        # previous object.
        if attr not in ["_obj", "_keep_wrapper"]:
            return getattr(self._obj, attr)
        # NOTE(review): this branch re-enters __getattr__ if "_obj" or
        # "_keep_wrapper" is genuinely absent (e.g. mid-unpickling), which
        # would recurse infinitely — confirm this path is unreachable.
        return getattr(self, attr)
+
+
# Make sure the wrapped object conserves the callable property
class CallableObjectWrapper(CloudpickledObjectWrapper):
    def __call__(self, *args, **kwargs):
        # Forward calls unchanged to the wrapped callable.
        return self._obj(*args, **kwargs)
+
+
def _wrap_non_picklable_objects(obj, keep_wrapper):
    """Wrap *obj* with the cloudpickle wrapper matching its callability."""
    wrapper_cls = (
        CallableObjectWrapper if callable(obj) else CloudpickledObjectWrapper
    )
    return wrapper_cls(obj, keep_wrapper=keep_wrapper)
+
+
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
    """Unpickle helper: rebuild the wrapper around the restored object."""
    return _wrap_non_picklable_objects(loads(_pickled_object), keep_wrapper)
+
+
+def _wrap_objects_when_needed(obj):
+ # Function to introspect an object and decide if it should be wrapped or
+ # not.
+ need_wrap = "__main__" in getattr(obj, "__module__", "")
+ if isinstance(obj, partial):
+ return partial(
+ _wrap_objects_when_needed(obj.func),
+ *[_wrap_objects_when_needed(a) for a in obj.args],
+ **{
+ k: _wrap_objects_when_needed(v)
+ for k, v in obj.keywords.items()
+ }
+ )
+ if callable(obj):
+ # Need wrap if the object is a function defined in a local scope of
+ # another function.
+ func_code = getattr(obj, "__code__", "")
+ need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
+
+ # Need wrap if the obj is a lambda expression
+ func_name = getattr(obj, "__name__", "")
+ need_wrap |= "" in func_name
+
+ if not need_wrap:
+ return obj
+
+ wrapped_obj = WRAP_CACHE.get(obj)
+ if wrapped_obj is None:
+ wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
+ WRAP_CACHE[obj] = wrapped_obj
+ return wrapped_obj
+
+
def wrap_non_picklable_objects(obj, keep_wrapper=True):
    """Wrapper for non-picklable object to use cloudpickle to serialize them.

    Note that this wrapper tends to slow down the serialization process as it
    is done with cloudpickle which is typically slower compared to pickle. The
    proper way to solve serialization issues is to avoid defining functions and
    objects in the main scripts and to implement __reduce__ functions for
    complex classes.
    """
    if not inspect.isclass(obj):
        # Instances (and plain callables) are wrapped directly in a regular
        # CloudpickledObjectWrapper.
        return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)

    # For a class, return a wrapper subclass that instantiates the wrapped
    # class internally, so the wrapped type keeps behaving like the original.
    class CloudpickledClassWrapper(CloudpickledObjectWrapper):
        def __init__(self, *args, **kwargs):
            self._obj = obj(*args, **kwargs)
            self._keep_wrapper = keep_wrapper

    CloudpickledClassWrapper.__name__ = obj.__name__
    return CloudpickledClassWrapper
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/initializers.py b/parrot/lib/python3.10/site-packages/joblib/externals/loky/initializers.py
new file mode 100644
index 0000000000000000000000000000000000000000..aea0e56c25d0d74e04788493058549a1399f8342
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/loky/initializers.py
@@ -0,0 +1,80 @@
+import warnings
+
+
def _viztracer_init(init_kwargs):
    """Initialize viztracer's profiler in worker processes"""
    from viztracer import VizTracer

    profiler = VizTracer(**init_kwargs)
    # Make sure the trace is flushed when the worker exits, then record.
    profiler.register_exit()
    profiler.start()
+
+
+def _make_viztracer_initializer_and_initargs():
+ try:
+ import viztracer
+
+ tracer = viztracer.get_tracer()
+ if tracer is not None and getattr(tracer, "enable", False):
+ # Profiler is active: introspect its configuration to
+ # initialize the workers with the same configuration.
+ return _viztracer_init, (tracer.init_kwargs,)
+ except ImportError:
+ # viztracer is not installed: nothing to do
+ pass
+ except Exception as e:
+ # In case viztracer's API evolve, we do not want to crash loky but
+ # we want to know about it to be able to update loky.
+ warnings.warn(f"Unable to introspect viztracer state: {e}")
+ return None, ()
+
+
+class _ChainedInitializer:
+ """Compound worker initializer
+
+ This is meant to be used in conjunction with _chain_initializers to
+ produce the necessary chained_args list to be passed to __call__.
+ """
+
+ def __init__(self, initializers):
+ self._initializers = initializers
+
+ def __call__(self, *chained_args):
+ for initializer, args in zip(self._initializers, chained_args):
+ initializer(*args)
+
+
+def _chain_initializers(initializer_and_args):
+ """Convenience helper to combine a sequence of initializers.
+
+ If some initializers are None, they are filtered out.
+ """
+ filtered_initializers = []
+ filtered_initargs = []
+ for initializer, initargs in initializer_and_args:
+ if initializer is not None:
+ filtered_initializers.append(initializer)
+ filtered_initargs.append(initargs)
+
+ if not filtered_initializers:
+ return None, ()
+ elif len(filtered_initializers) == 1:
+ return filtered_initializers[0], filtered_initargs[0]
+ else:
+ return _ChainedInitializer(filtered_initializers), filtered_initargs
+
+
+def _prepare_initializer(initializer, initargs):
+ if initializer is not None and not callable(initializer):
+ raise TypeError(
+ f"initializer must be a callable, got: {initializer!r}"
+ )
+
+ # Introspect runtime to determine if we need to propagate the viztracer
+ # profiler information to the workers:
+ return _chain_initializers(
+ [
+ (initializer, initargs),
+ _make_viztracer_initializer_and_initargs(),
+ ]
+ )
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py b/parrot/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..3040719579f74ecc7d5645e4894dbad138f0a5c1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py
@@ -0,0 +1,1314 @@
+###############################################################################
+# Re-implementation of the ProcessPoolExecutor more robust to faults
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from concurrent/futures/process_pool_executor.py (17/02/2017)
+# * Add an extra management thread to detect executor_manager_thread failures,
+# * Improve the shutdown process to avoid deadlocks,
+# * Add timeout for workers,
+# * More robust pickling process.
+#
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Implements ProcessPoolExecutor.
+
+The follow diagram and text describe the data-flow through the system:
+
+|======================= In-process =====================|== Out-of-process ==|
+
++----------+ +----------+ +--------+ +-----------+ +---------+
+| | => | Work Ids | | | | Call Q | | Process |
+| | +----------+ | | +-----------+ | Pool |
+| | | ... | | | | ... | +---------+
+| | | 6 | => | | => | 5, call() | => | |
+| | | 7 | | | | ... | | |
+| Process | | ... | | Local | +-----------+ | Process |
+| Pool | +----------+ | Worker | | #1..n |
+| Executor | | Thread | | |
+| | +----------- + | | +-----------+ | |
+| | <=> | Work Items | <=> | | <= | Result Q | <= | |
+| | +------------+ | | +-----------+ | |
+| | | 6: call() | | | | ... | | |
+| | | future | +--------+ | 4, result | | |
+| | | ... | | 3, except | | |
++----------+ +------------+ +-----------+ +---------+
+
+Executor.submit() called:
+- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
+- adds the id of the _WorkItem to the "Work Ids" queue
+
+Local worker thread:
+- reads work ids from the "Work Ids" queue and looks up the corresponding
+ WorkItem from the "Work Items" dict: if the work item has been cancelled then
+ it is simply removed from the dict, otherwise it is repackaged as a
+ _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
+ until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
+ calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
+- reads _ResultItems from "Result Q", updates the future stored in the
+ "Work Items" dict and deletes the dict entry
+
+Process #1..n:
+- reads _CallItems from "Call Q", executes the calls, and puts the resulting
+ _ResultItems in "Result Q"
+"""
+
+
+__author__ = "Thomas Moreau (thomas.moreau.2010@gmail.com)"
+
+
+import os
+import gc
+import sys
+import queue
+import struct
+import weakref
+import warnings
+import itertools
+import traceback
+import threading
+from time import time, sleep
+import multiprocessing as mp
+from functools import partial
+from pickle import PicklingError
+from concurrent.futures import Executor
+from concurrent.futures._base import LOGGER
+from concurrent.futures.process import BrokenProcessPool as _BPPException
+from multiprocessing.connection import wait
+
+from ._base import Future
+from .backend import get_context
+from .backend.context import cpu_count, _MAX_WINDOWS_WORKERS
+from .backend.queues import Queue, SimpleQueue
+from .backend.reduction import set_loky_pickler, get_loky_pickler_name
+from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
+from .initializers import _prepare_initializer
+
+
# Mechanism to prevent infinite process spawning. When a worker of a
# ProcessPoolExecutor nested in MAX_DEPTH Executor tries to create a new
# Executor, a LokyRecursionError is raised
MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10))
_CURRENT_DEPTH = 0

# Minimum time interval between two consecutive memory leak protection checks.
_MEMORY_LEAK_CHECK_DELAY = 1.0

# Number of bytes of memory usage allowed over the reference process size.
_MAX_MEMORY_LEAK_SIZE = int(3e8)


# Memory introspection is only available when psutil is installed.
try:
    from psutil import Process

    _USE_PSUTIL = True

    def _get_memory_usage(pid, force_gc=False):
        """Return the RSS (in bytes) of process *pid*, optionally after a GC."""
        if force_gc:
            # Collect first so transient garbage is not counted as a leak.
            gc.collect()

        mem_size = Process(pid).memory_info().rss
        mp.util.debug(f"psutil return memory size: {mem_size}")
        return mem_size

except ImportError:
    _USE_PSUTIL = False
+
+
+class _ThreadWakeup:
+ def __init__(self):
+ self._closed = False
+ self._reader, self._writer = mp.Pipe(duplex=False)
+
+ def close(self):
+ if not self._closed:
+ self._closed = True
+ self._writer.close()
+ self._reader.close()
+
+ def wakeup(self):
+ if not self._closed:
+ self._writer.send_bytes(b"")
+
+ def clear(self):
+ if not self._closed:
+ while self._reader.poll():
+ self._reader.recv_bytes()
+
+
+class _ExecutorFlags:
+ """necessary references to maintain executor states without preventing gc
+
+ It permits to keep the information needed by executor_manager_thread
+ and crash_detection_thread to maintain the pool without preventing the
+ garbage collection of unreferenced executors.
+ """
+
+ def __init__(self, shutdown_lock):
+
+ self.shutdown = False
+ self.broken = None
+ self.kill_workers = False
+ self.shutdown_lock = shutdown_lock
+
+ def flag_as_shutting_down(self, kill_workers=None):
+ with self.shutdown_lock:
+ self.shutdown = True
+ if kill_workers is not None:
+ self.kill_workers = kill_workers
+
+ def flag_as_broken(self, broken):
+ with self.shutdown_lock:
+ self.shutdown = True
+ self.broken = broken
+
+
# Prior to 3.9, executor_manager_thread is created as daemon thread. This means
# that it is not joined automatically when the interpreter is shutting down.
# To work around this problem, an exit handler is installed to tell the
# thread to exit when the interpreter is shutting down and then waits until
# it finishes. The thread needs to be daemonized because the atexit hooks are
# called after all non daemonized threads are joined.
#
# Starting 3.9, there exists a specific atexit hook to be called before joining
# the threads so the executor_manager_thread does not need to be daemonized
# anymore.
#
# The atexit hooks are registered when starting the first ProcessPoolExecutor
# to avoid import having an effect on the interpreter.

_global_shutdown = False  # set by _python_exit once the interpreter exits
_global_shutdown_lock = threading.Lock()
# Maps executor_manager_thread -> (shutdown_lock, thread_wakeup).
_threads_wakeups = weakref.WeakKeyDictionary()
+
+
def _python_exit():
    """atexit hook: wake up and join every executor management thread."""
    global _global_shutdown
    _global_shutdown = True

    # Materialize the list of items to avoid error due to iterating over
    # changing size dictionary.
    items = list(_threads_wakeups.items())
    if len(items) > 0:
        # Fixed: the first fragment was missing its f-prefix, so
        # "{len(items)}" was logged verbatim; also add the missing space
        # between the concatenated fragments.
        mp.util.debug(
            f"Interpreter shutting down. Waking up {len(items)} "
            f"executor_manager_thread:\n{items}"
        )

    # Wake up the executor_manager_thread's so they can detect the interpreter
    # is shutting down and exit.
    for _, (shutdown_lock, thread_wakeup) in items:
        with shutdown_lock:
            thread_wakeup.wakeup()

    # Collect the executor_manager_thread's to make sure we exit cleanly.
    for thread, _ in items:
        # This locks is to prevent situations where an executor is gc'ed in one
        # thread while the atexit finalizer is running in another thread. This
        # can happen when joblib is used in pypy for instance.
        with _global_shutdown_lock:
            thread.join()
+
+
# With the fork context, _thread_wakeups is propagated to children.
# Clear it after fork to avoid some situation that can cause some
# freeze when joining the workers.
mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear())


# Module variable to register the at_exit call
process_pool_executor_at_exit = None

# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
+
+
+class _RemoteTraceback(Exception):
+ """Embed stringification of remote traceback in local traceback"""
+
+ def __init__(self, tb=None):
+ self.tb = f'\n"""\n{tb}"""'
+
+ def __str__(self):
+ return self.tb
+
+
+# Do not inherit from BaseException to mirror
+# concurrent.futures.process._ExceptionWithTraceback
+class _ExceptionWithTraceback:
+ def __init__(self, exc):
+ tb = getattr(exc, "__traceback__", None)
+ if tb is None:
+ _, _, tb = sys.exc_info()
+ tb = traceback.format_exception(type(exc), exc, tb)
+ tb = "".join(tb)
+ self.exc = exc
+ self.tb = tb
+
+ def __reduce__(self):
+ return _rebuild_exc, (self.exc, self.tb)
+
+
def _rebuild_exc(exc, tb):
    """Rebuild *exc* on unpickling, chaining the remote traceback *tb*."""
    cause = _RemoteTraceback(tb)
    exc.__cause__ = cause
    return exc
+
+
class _WorkItem:
    """Pairs a Future with the call (fn, args, kwargs) it stands for."""

    __slots__ = ["future", "fn", "args", "kwargs"]

    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
+
+
class _ResultItem:
    """Result (or exception) of one work item, sent back by a worker."""

    def __init__(self, work_id, exception=None, result=None):
        self.work_id = work_id
        self.exception = exception
        self.result = result
+
+
class _CallItem:
    """A call shipped to a worker process through the call queue.

    The name of the active loky pickler is captured at creation time so the
    worker can restore the same serialization behavior before running.
    """

    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

        # Store the current loky_pickler so it is correctly set in the worker
        self.loky_pickler = get_loky_pickler_name()

    def __call__(self):
        # Restore the submitting process' pickler, then run the call.
        set_loky_pickler(self.loky_pickler)
        return self.fn(*self.args, **self.kwargs)

    def __repr__(self):
        return (
            f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})"
        )
+
+
class _SafeQueue(Queue):
    """Safe Queue set exception to the future object linked to a job"""

    def __init__(
        self,
        max_size=0,
        ctx=None,
        pending_work_items=None,
        running_work_items=None,
        thread_wakeup=None,
        reducers=None,
    ):
        # References shared with the executor: used to fail the matching
        # future and wake the manager thread when a job cannot be pickled.
        self.thread_wakeup = thread_wakeup
        self.pending_work_items = pending_work_items
        self.running_work_items = running_work_items
        super().__init__(max_size, reducers=reducers, ctx=ctx)

    def _on_queue_feeder_error(self, e, obj):
        # Called from the queue's feeder thread when serializing `obj` fails.
        if isinstance(obj, _CallItem):
            # format traceback only works on python3
            if isinstance(e, struct.error):
                # The pickled payload exceeded what the pipe can transmit.
                raised_error = RuntimeError(
                    "The task could not be sent to the workers as it is too "
                    "large for `send_bytes`."
                )
            else:
                raised_error = PicklingError(
                    "Could not pickle the task to send it to the workers."
                )
            tb = traceback.format_exception(
                type(e), e, getattr(e, "__traceback__", None)
            )
            raised_error.__cause__ = _RemoteTraceback("".join(tb))
            work_item = self.pending_work_items.pop(obj.work_id, None)
            self.running_work_items.remove(obj.work_id)
            # work_item can be None if another process terminated. In this
            # case, the executor_manager_thread fails all work_items with
            # BrokenProcessPool
            if work_item is not None:
                work_item.future.set_exception(raised_error)
                del work_item
            self.thread_wakeup.wakeup()
        else:
            # Not one of our call items: defer to the default handler.
            super()._on_queue_feeder_error(e, obj)
+
+
+def _get_chunks(chunksize, *iterables):
+ """Iterates over zip()ed iterables in chunks."""
+ it = zip(*iterables)
+ while True:
+ chunk = tuple(itertools.islice(it, chunksize))
+ if not chunk:
+ return
+ yield chunk
+
+
+def _process_chunk(fn, chunk):
+ """Processes a chunk of an iterable passed to map.
+
+ Runs the function passed to map() on a chunk of the
+ iterable passed to map.
+
+ This function is run in a separate process.
+
+ """
+ return [fn(*args) for args in chunk]
+
+
def _sendback_result(result_queue, work_id, result=None, exception=None):
    """Safely send back the given result or exception"""
    try:
        result_queue.put(
            _ResultItem(work_id, result=result, exception=exception)
        )
    except BaseException as e:
        # The result itself failed to serialize: report that failure
        # instead, so the parent can still set something on the future.
        exc = _ExceptionWithTraceback(e)
        result_queue.put(_ResultItem(work_id, exception=exc))
+
+
def _process_worker(
    call_queue,
    result_queue,
    initializer,
    initargs,
    processes_management_lock,
    timeout,
    worker_exit_lock,
    current_depth,
):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A ctx.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will written
            to by the worker.
        initializer: A callable initializer, or None
        initargs: A tuple of args for the initializer
        processes_management_lock: A ctx.Lock avoiding worker timeout while
            some workers are being spawned.
        timeout: maximum time to wait for a new item in the call_queue. If that
            time is expired, the worker will shutdown.
        worker_exit_lock: Lock to avoid flagging the executor as broken on
            workers timeout.
        current_depth: Nested parallelism level, to avoid infinite spawning.
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            LOGGER.critical("Exception in initializer:", exc_info=True)
            # The parent will notice that the process stopped and
            # mark the pool broken
            return

    # set the global _CURRENT_DEPTH mechanism to limit recursive call
    global _CURRENT_DEPTH
    _CURRENT_DEPTH = current_depth
    # Baseline RSS measured after the first call; used for leak detection.
    _process_reference_size = None
    _last_memory_leak_check = None
    pid = os.getpid()

    mp.util.debug(f"Worker started with timeout={timeout}")
    while True:
        try:
            call_item = call_queue.get(block=True, timeout=timeout)
            if call_item is None:
                mp.util.info("Shutting down worker on sentinel")
        except queue.Empty:
            # Idle longer than `timeout`: shut down, unless workers are
            # currently being (re)spawned, in which case keep waiting.
            mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s")
            if processes_management_lock.acquire(block=False):
                processes_management_lock.release()
                call_item = None
            else:
                mp.util.info("Could not acquire processes_management_lock")
                continue
        except BaseException:
            # Unpickling the call item failed: report it to the parent.
            previous_tb = traceback.format_exc()
            try:
                result_queue.put(_RemoteTraceback(previous_tb))
            except BaseException:
                # If we cannot format correctly the exception, at least print
                # the traceback.
                print(previous_tb)
            mp.util.debug("Exiting with code 1")
            sys.exit(1)
        if call_item is None:
            # Notify queue management thread about worker shutdown
            result_queue.put(pid)
            is_clean = worker_exit_lock.acquire(True, timeout=30)

            # Early notify any loky executor running in this worker process
            # (nested parallelism) that this process is about to shutdown to
            # avoid a deadlock waiting indefinitely for the worker to finish.
            _python_exit()

            if is_clean:
                mp.util.debug("Exited cleanly")
            else:
                mp.util.info("Main process did not release worker_exit")
            return
        try:
            r = call_item()
        except BaseException as e:
            exc = _ExceptionWithTraceback(e)
            result_queue.put(_ResultItem(call_item.work_id, exception=exc))
        else:
            _sendback_result(result_queue, call_item.work_id, result=r)
            del r

        # Free the resource as soon as possible, to avoid holding onto
        # open files or shared memory that is not needed anymore
        del call_item

        if _USE_PSUTIL:
            if _process_reference_size is None:
                # Make reference measurement after the first call
                _process_reference_size = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                continue
            if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
                mem_usage = _get_memory_usage(pid)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # Memory usage stays within bounds: everything is fine.
                    continue

                # Check again memory usage; this time take the measurement
                # after a forced garbage collection to break any reference
                # cycles.
                mem_usage = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # The GC managed to free the memory: everything is fine.
                    continue

                # The process is leaking memory: let the main process
                # know that we need to start a new worker.
                mp.util.info("Memory leak detected: shutting down worker")
                result_queue.put(pid)
                with worker_exit_lock:
                    mp.util.debug("Exit due to memory leak")
                    return
        else:
            # if psutil is not installed, trigger gc.collect events
            # regularly to limit potential memory leaks due to reference cycles
            if _last_memory_leak_check is None or (
                time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY
            ):
                gc.collect()
                _last_memory_leak_check = time()
+
+
class _ExecutorManagerThread(threading.Thread):
    """Manages the communication between this process and the worker processes.

    The manager is run in a local thread.

    Args:
        executor: A reference to the ProcessPoolExecutor that owns
        this thread. A weakref will be own by the manager as well as
        references to internal objects used to introspect the state of
        the executor.
    """

    def __init__(self, executor):
        # Store references to necessary internals of the executor.

        # A _ThreadWakeup to allow waking up the executor_manager_thread from
        # the main Thread and avoid deadlocks caused by permanently
        # locked queues.
        self.thread_wakeup = executor._executor_manager_thread_wakeup
        self.shutdown_lock = executor._shutdown_lock

        # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used
        # to determine if the ProcessPoolExecutor has been garbage collected
        # and that the manager can exit.
        # When the executor gets garbage collected, the weakref callback
        # will wake up the queue management thread so that it can terminate
        # if there is no pending work item.
        def weakref_cb(
            _,
            thread_wakeup=self.thread_wakeup,
            shutdown_lock=self.shutdown_lock,
        ):
            if mp is not None:
                # At this point, the multiprocessing module can already be
                # garbage collected. We only log debug info when still
                # possible.
                mp.util.debug(
                    "Executor collected: triggering callback for"
                    " QueueManager wakeup"
                )
            with shutdown_lock:
                thread_wakeup.wakeup()

        self.executor_reference = weakref.ref(executor, weakref_cb)

        # The flags of the executor
        self.executor_flags = executor._flags

        # A list of the ctx.Process instances used as workers.
        self.processes = executor._processes

        # A ctx.Queue that will be filled with _CallItems derived from
        # _WorkItems for processing by the process workers.
        self.call_queue = executor._call_queue

        # A ctx.SimpleQueue of _ResultItems generated by the process workers.
        self.result_queue = executor._result_queue

        # A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        self.work_ids_queue = executor._work_ids

        # A dict mapping work ids to _WorkItems e.g.
        # {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        self.pending_work_items = executor._pending_work_items

        # A list of the work_ids that are currently running
        self.running_work_items = executor._running_work_items

        # A lock to avoid concurrent shutdown of workers on timeout and spawn
        # of new processes or shut down
        self.processes_management_lock = executor._processes_management_lock

        super().__init__(name="ExecutorManagerThread")
        if sys.version_info < (3, 9):
            # Before 3.9 there is no threading._register_atexit, so the
            # thread must be daemonized (see the module-level comment).
            self.daemon = True

    def run(self):
        """Dispatch work, collect results and watch for breakage/shutdown."""
        # Main loop for the executor manager thread.

        while True:
            self.add_call_item_to_queue()

            result_item, is_broken, bpe = self.wait_result_broken_or_wakeup()

            if is_broken:
                self.terminate_broken(bpe)
                return
            if result_item is not None:
                self.process_result_item(result_item)
                # Delete reference to result_item to avoid keeping references
                # while waiting on new results.
                del result_item

            if self.is_shutting_down():
                self.flag_executor_shutting_down()

                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not self.pending_work_items:
                    self.join_executor_internals()
                    return

    def add_call_item_to_queue(self):
        """Move as many pending work items as fit into the call queue."""
        # Fills call_queue with _WorkItems from pending_work_items.
        # This function never blocks.
        while True:
            if self.call_queue.full():
                return
            try:
                work_id = self.work_ids_queue.get(block=False)
            except queue.Empty:
                return
            else:
                work_item = self.pending_work_items[work_id]

                if work_item.future.set_running_or_notify_cancel():
                    self.running_work_items += [work_id]
                    self.call_queue.put(
                        _CallItem(
                            work_id,
                            work_item.fn,
                            work_item.args,
                            work_item.kwargs,
                        ),
                        block=True,
                    )
                else:
                    # The future was cancelled before it could start running.
                    del self.pending_work_items[work_id]
                    continue

    def wait_result_broken_or_wakeup(self):
        """Block until a result, a wakeup signal or a worker death occurs.

        Returns a (result_item, is_broken, bpe) triple; `bpe` is the
        BrokenProcessPool instance to raise when `is_broken` is True.
        """
        # Wait for a result to be ready in the result_queue while checking
        # that all worker processes are still running, or for a wake up
        # signal send. The wake up signals come either from new tasks being
        # submitted, from the executor being shutdown/gc-ed, or from the
        # shutdown of the python interpreter.
        result_reader = self.result_queue._reader
        wakeup_reader = self.thread_wakeup._reader
        readers = [result_reader, wakeup_reader]
        worker_sentinels = [p.sentinel for p in list(self.processes.values())]
        ready = wait(readers + worker_sentinels)

        bpe = None
        is_broken = True
        result_item = None
        if result_reader in ready:
            try:
                result_item = result_reader.recv()
                if isinstance(result_item, _RemoteTraceback):
                    bpe = BrokenProcessPool(
                        "A task has failed to un-serialize. Please ensure that"
                        " the arguments of the function are all picklable."
                    )
                    bpe.__cause__ = result_item
                else:
                    is_broken = False
            except BaseException as e:
                bpe = BrokenProcessPool(
                    "A result has failed to un-serialize. Please ensure that "
                    "the objects returned by the function are always "
                    "picklable."
                )
                tb = traceback.format_exception(
                    type(e), e, getattr(e, "__traceback__", None)
                )
                bpe.__cause__ = _RemoteTraceback("".join(tb))

        elif wakeup_reader in ready:
            # This is simply a wake-up event that might either trigger putting
            # more tasks in the queue or trigger the clean up of resources.
            is_broken = False
        else:
            # A worker has terminated and we don't know why, set the state of
            # the executor as broken
            exit_codes = ""
            if sys.platform != "win32":
                # In Windows, introspecting terminated workers exitcodes seems
                # unstable, therefore they are not appended in the exception
                # message.
                exit_codes = (
                    "\nThe exit codes of the workers are "
                    f"{get_exitcodes_terminated_worker(self.processes)}"
                )
            mp.util.debug(
                "A worker unexpectedly terminated. Workers that "
                "might have caused the breakage: "
                + str(
                    {
                        p.name: p.exitcode
                        for p in list(self.processes.values())
                        if p is not None and p.sentinel in ready
                    }
                )
            )
            bpe = TerminatedWorkerError(
                "A worker process managed by the executor was unexpectedly "
                "terminated. This could be caused by a segmentation fault "
                "while calling the function or by an excessive memory usage "
                "causing the Operating System to kill the worker.\n"
                f"{exit_codes}"
            )

        self.thread_wakeup.clear()

        return result_item, is_broken, bpe

    def process_result_item(self, result_item):
        """Handle one item read from the result queue."""
        # Process the received a result_item. This can be either the PID of a
        # worker that exited gracefully or a _ResultItem

        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID, either on request
            # by the executor.shutdown method or by the timeout of the worker
            # itself: we should not mark the executor as broken.
            with self.processes_management_lock:
                p = self.processes.pop(result_item, None)

            # p can be None if the executor is concurrently shutting down.
            if p is not None:
                p._worker_exit_lock.release()
                mp.util.debug(
                    f"joining {p.name} when processing {p.pid} as result_item"
                )
                p.join()
                del p

            # Make sure the executor have the right number of worker, even if a
            # worker timeout while some jobs were submitted. If some work is
            # pending or there is less processes than running items, we need to
            # start a new Process and raise a warning.
            n_pending = len(self.pending_work_items)
            n_running = len(self.running_work_items)
            if n_pending - n_running > 0 or n_running > len(self.processes):
                executor = self.executor_reference()
                if (
                    executor is not None
                    and len(self.processes) < executor._max_workers
                ):
                    warnings.warn(
                        "A worker stopped while some jobs were given to the "
                        "executor. This can be caused by a too short worker "
                        "timeout or by a memory leak.",
                        UserWarning,
                    )
                    with executor._processes_management_lock:
                        executor._adjust_process_count()
                    executor = None
        else:
            # Received a _ResultItem so mark the future as completed.
            work_item = self.pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
                self.running_work_items.remove(result_item.work_id)

    def is_shutting_down(self):
        """Return True if the executor should start shutting down."""
        # Check whether we should start shutting down the executor.
        executor = self.executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this thread is not broken AND
        #       * The executor that owns this worker has been collected OR
        #       * The executor that owns this worker has been shutdown.
        # If the executor is broken, it should be detected in the next loop.
        return _global_shutdown or (
            (executor is None or self.executor_flags.shutdown)
            and not self.executor_flags.broken
        )

    def terminate_broken(self, bpe):
        """Fail all pending futures, kill workers and clean up resources."""
        # Terminate the executor because it is in a broken state. The bpe
        # argument can be used to display more information on the error that
        # lead the executor into becoming broken.

        # Mark the process pool broken so that submits fail right now.
        self.executor_flags.flag_as_broken(bpe)

        # Mark pending tasks as failed.
        for work_item in self.pending_work_items.values():
            work_item.future.set_exception(bpe)
            # Delete references to object. See issue16284
            del work_item
        self.pending_work_items.clear()

        # Terminate remaining workers forcibly: the queues or their
        # locks may be in a dirty state and block forever.
        self.kill_workers(reason="broken executor")

        # clean up resources
        self.join_executor_internals()

    def flag_executor_shutting_down(self):
        """Flag the executor as shutting down; cancel pending jobs if asked."""
        # Flag the executor as shutting down and cancel remaining tasks if
        # requested as early as possible if it is not gc-ed yet.
        self.executor_flags.flag_as_shutting_down()

        # Cancel pending work items if requested.
        if self.executor_flags.kill_workers:
            while self.pending_work_items:
                _, work_item = self.pending_work_items.popitem()
                work_item.future.set_exception(
                    ShutdownExecutorError(
                        "The Executor was shutdown with `kill_workers=True` "
                        "before this job could complete."
                    )
                )
                del work_item

            # Kill the remaining worker forcibly to no waste time joining them
            self.kill_workers(reason="executor shutting down")

    def kill_workers(self, reason=""):
        """Forcibly terminate every remaining worker (and its descendants)."""
        # Terminate the remaining workers using SIGKILL. This function also
        # terminates descendant workers of the children in case there is some
        # nested parallelism.
        while self.processes:
            _, p = self.processes.popitem()
            mp.util.debug(f"terminate process {p.name}, reason: {reason}")
            try:
                kill_process_tree(p)
            except ProcessLookupError:  # pragma: no cover
                pass

    def shutdown_workers(self):
        """Ask every worker to exit by sending one sentinel per worker."""
        # shutdown all workers in self.processes

        # Create a list to avoid RuntimeError due to concurrent modification of
        # processes. nb_children_alive is thus an upper bound. Also release the
        # processes' _worker_exit_lock to accelerate the shutdown procedure, as
        # there is no need for hand-shake here.
        with self.processes_management_lock:
            n_children_to_stop = 0
            for p in list(self.processes.values()):
                mp.util.debug(f"releasing worker exit lock on {p.name}")
                p._worker_exit_lock.release()
                n_children_to_stop += 1

        mp.util.debug(f"found {n_children_to_stop} processes to stop")

        # Send the right number of sentinels, to make sure all children are
        # properly terminated. Do it with a mechanism that avoid hanging on
        # Full queue when all workers have already been shutdown.
        n_sentinels_sent = 0
        cooldown_time = 0.001
        while (
            n_sentinels_sent < n_children_to_stop
            and self.get_n_children_alive() > 0
        ):
            for _ in range(n_children_to_stop - n_sentinels_sent):
                try:
                    self.call_queue.put_nowait(None)
                    n_sentinels_sent += 1
                except queue.Full as e:
                    if cooldown_time > 5.0:
                        mp.util.info(
                            "failed to send all sentinels and exit with error."
                            f"\ncall_queue size={self.call_queue._maxsize}; "
                            f" full is {self.call_queue.full()}; "
                        )
                        raise e
                    mp.util.info(
                        "full call_queue prevented to send all sentinels at "
                        "once, waiting..."
                    )
                    sleep(cooldown_time)
                    cooldown_time *= 1.2
                    break

        mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue")

    def join_executor_internals(self):
        """Shut down workers, close the queues and join worker processes."""
        self.shutdown_workers()

        # Release the queue's resources as soon as possible. Flag the feeder
        # thread for clean exit to avoid having the crash detection thread flag
        # the Executor as broken during the shutdown. This is safe as either:
        #  * We don't need to communicate with the workers anymore
        #  * There is nothing left in the Queue buffer except None sentinels
        mp.util.debug("closing call_queue")
        self.call_queue.close()
        self.call_queue.join_thread()

        # Closing result_queue
        mp.util.debug("closing result_queue")
        self.result_queue.close()

        mp.util.debug("closing thread_wakeup")
        with self.shutdown_lock:
            self.thread_wakeup.close()

        # If .join() is not called on the created processes then
        # some ctx.Queue methods may deadlock on macOS.
        with self.processes_management_lock:
            mp.util.debug(f"joining {len(self.processes)} processes")
            n_joined_processes = 0
            while True:
                try:
                    pid, p = self.processes.popitem()
                    mp.util.debug(f"joining process {p.name} with pid {pid}")
                    p.join()
                    n_joined_processes += 1
                except KeyError:
                    break

            mp.util.debug(
                "executor management thread clean shutdown of "
                f"{n_joined_processes} workers"
            )

    def get_n_children_alive(self):
        """Return an upper bound on the number of children still alive."""
        # This is an upper bound on the number of children alive.
        with self.processes_management_lock:
            return sum(p.is_alive() for p in list(self.processes.values()))
+
+
# Cache for _check_system_limits(): whether the check already ran and, when
# the system was found lacking, the message to raise on every later call.
_system_limits_checked = False
_system_limited = None
+
+
def _check_system_limits():
    """Raise NotImplementedError when the host offers too few semaphores.

    The outcome is cached in the module globals so the expensive part of
    the check runs at most once per process.
    """
    global _system_limits_checked, _system_limited
    if _system_limits_checked and _system_limited:
        raise NotImplementedError(_system_limited)
    _system_limits_checked = True
    try:
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    if nsems_max == -1 or nsems_max >= 256:
        # Either the limit is undetermined (assume it is only bounded by
        # available memory) or it meets the POSIX minimum of 256 semaphores.
        return
    _system_limited = (
        f"system provides too few semaphores ({nsems_max} available, "
        "256 necessary)"
    )
    raise NotImplementedError(_system_limited)
+
+
+def _chain_from_iterable_of_lists(iterable):
+ """
+ Specialized implementation of itertools.chain.from_iterable.
+ Each item in *iterable* should be a list. This function is
+ careful not to keep references to yielded objects.
+ """
+ for element in iterable:
+ element.reverse()
+ while element:
+ yield element.pop()
+
+
def _check_max_depth(context):
    """Guard against runaway nested process spawning.

    Raises:
        LokyRecursionError: when creating one more level of workers would
            exceed MAX_DEPTH, or at any nested depth when using the 'fork'
            start method (which does not support nesting beyond depth 1).
    """
    # Limit the maximal recursion level
    global _CURRENT_DEPTH
    if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0:
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            "MAX_DEPTH=1. It is not possible to increase this limit when "
            "using the 'fork' start method."
        )

    if 0 < MAX_DEPTH and _CURRENT_DEPTH + 1 > MAX_DEPTH:
        # BUG FIX: "intendend" -> "intended" in the user-facing message.
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            f"MAX_DEPTH={MAX_DEPTH}. If this is intended, you can change "
            "this limit with the LOKY_MAX_DEPTH environment variable."
        )
+
+
class LokyRecursionError(RuntimeError):
    """Raised when a process tries to spawn too many levels of nested processes."""
+
+
class BrokenProcessPool(_BPPException):
    """
    Raised when the executor is broken while a future was in the running state.
    The cause can be an error raised when unpickling the task in the worker
    process or when unpickling the result value in the parent process. It can
    also be caused by a worker process being terminated unexpectedly.
    """
+
+
class TerminatedWorkerError(BrokenProcessPool):
    """
    Raised when a process in a ProcessPoolExecutor terminated abruptly
    while a future was in the running state. Typical causes are a
    segmentation fault in the called function or the OS killing the worker
    for excessive memory usage.
    """
+ """
+
+
# Alias for backward compatibility (for code written against loky 1.1.4 and
# earlier). Do not use in new code.
BrokenExecutor = BrokenProcessPool
+
+
class ShutdownExecutorError(RuntimeError):

    """
    Raised when a ProcessPoolExecutor is shutdown while a future was in the
    running or pending state. Also set on pending futures cancelled by a
    shutdown with ``kill_workers=True``.
    """
+
+
class ProcessPoolExecutor(Executor):
    """Executor running calls in a pool of worker processes.

    Compared to concurrent.futures.ProcessPoolExecutor, this supports idle
    worker timeouts, custom pickling reducers and custom multiprocessing
    contexts.
    """

    # Kept as a class attribute for backward compatibility; not used by the
    # code in this module.
    _at_exit = None

    def __init__(
        self,
        max_workers=None,
        job_reducers=None,
        result_reducers=None,
        timeout=None,
        context=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: int, optional (default: cpu_count())
                The maximum number of processes that can be used to execute the
                given calls. If None or not given then as many worker processes
                will be created as the number of CPUs the current process
                can use.
            job_reducers, result_reducers: dict(type: reducer_func)
                Custom reducer for pickling the jobs and the results from the
                Executor. If only `job_reducers` is provided, `result_reducer`
                will use the same reducers
            timeout: int, optional (default: None)
                Idle workers exit after timeout seconds. If a new job is
                submitted after the timeout, the executor will start enough
                new Python processes to make sure the pool of workers is full.
            context: A multiprocessing context to launch the workers. This
                object should provide SimpleQueue, Queue and Process.
            initializer: An callable used to initialize worker processes.
            initargs: A tuple of arguments to pass to the initializer.
            env: A dict of environment variable to overwrite in the child
                process. The environment variables are set before any module is
                loaded. Note that this only works with the loky context.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = cpu_count()
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")
            self._max_workers = max_workers

        if (
            sys.platform == "win32"
            and self._max_workers > _MAX_WINDOWS_WORKERS
        ):
            warnings.warn(
                f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} "
                "due to limitations of the operating system."
            )
            self._max_workers = _MAX_WINDOWS_WORKERS

        if context is None:
            context = get_context()
        self._context = context
        self._env = env

        self._initializer, self._initargs = _prepare_initializer(
            initializer, initargs
        )
        _check_max_depth(self._context)

        if result_reducers is None:
            result_reducers = job_reducers

        # Timeout
        self._timeout = timeout

        # Management thread, started lazily on first submit.
        self._executor_manager_thread = None

        # Internal variables of the ProcessPoolExecutor.
        # NOTE(fix): the previous version assigned `self._processes = {}` and
        # `self._executor_manager_thread = None` twice each; the redundant
        # duplicate assignments have been removed.
        self._processes = {}  # map of pids to processes
        self._queue_count = 0
        self._pending_work_items = {}
        self._running_work_items = []
        self._work_ids = queue.Queue()
        self._processes_management_lock = self._context.Lock()
        self._shutdown_lock = threading.Lock()

        # _ThreadWakeup is a communication channel used to interrupt the wait
        # of the main loop of executor_manager_thread from another thread (e.g.
        # when calling executor.submit or executor.shutdown). We do not use the
        # _result_queue to send wakeup signals to the executor_manager_thread
        # as it could result in a deadlock if a worker process dies with the
        # _result_queue write lock still acquired.
        #
        # _shutdown_lock must be locked to access _ThreadWakeup.wakeup.
        self._executor_manager_thread_wakeup = _ThreadWakeup()

        # Flag to hold the state of the Executor. This permits to introspect
        # the Executor state even once it has been garbage collected.
        self._flags = _ExecutorFlags(self._shutdown_lock)

        # Finally setup the queues for interprocess communication
        self._setup_queues(job_reducers, result_reducers)

        mp.util.debug("ProcessPoolExecutor is setup")

    def _setup_queues(self, job_reducers, result_reducers, queue_size=None):
        """Create the call and result queues used to talk to the workers."""
        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        if queue_size is None:
            queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS
        self._call_queue = _SafeQueue(
            max_size=queue_size,
            pending_work_items=self._pending_work_items,
            running_work_items=self._running_work_items,
            thread_wakeup=self._executor_manager_thread_wakeup,
            reducers=job_reducers,
            ctx=self._context,
        )
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True

        self._result_queue = SimpleQueue(
            reducers=result_reducers, ctx=self._context
        )

    def _start_executor_manager_thread(self):
        """Start the manager thread (once) and register the atexit hook."""
        if self._executor_manager_thread is None:
            mp.util.debug("_start_executor_manager_thread called")

            # Start the processes so that their sentinels are known.
            self._executor_manager_thread = _ExecutorManagerThread(self)
            self._executor_manager_thread.start()

            # register this executor in a mechanism that ensures it will wakeup
            # when the interpreter is exiting.
            _threads_wakeups[self._executor_manager_thread] = (
                self._shutdown_lock,
                self._executor_manager_thread_wakeup,
            )

            global process_pool_executor_at_exit
            if process_pool_executor_at_exit is None:
                # Ensure that the _python_exit function will be called before
                # the multiprocessing.Queue._close finalizers which have an
                # exitpriority of 10.
                if sys.version_info < (3, 9):
                    process_pool_executor_at_exit = mp.util.Finalize(
                        None, _python_exit, exitpriority=20
                    )
                else:
                    process_pool_executor_at_exit = threading._register_atexit(
                        _python_exit
                    )

    def _adjust_process_count(self):
        """Spawn new workers until the pool holds _max_workers processes."""
        while len(self._processes) < self._max_workers:
            worker_exit_lock = self._context.BoundedSemaphore(1)
            args = (
                self._call_queue,
                self._result_queue,
                self._initializer,
                self._initargs,
                self._processes_management_lock,
                self._timeout,
                worker_exit_lock,
                _CURRENT_DEPTH + 1,
            )
            worker_exit_lock.acquire()
            try:
                # Try to spawn the process with some environment variable to
                # overwrite but it only works with the loky context for now.
                p = self._context.Process(
                    target=_process_worker, args=args, env=self._env
                )
            except TypeError:
                # Other contexts do not accept the `env` keyword.
                p = self._context.Process(target=_process_worker, args=args)
            p._worker_exit_lock = worker_exit_lock
            p.start()
            self._processes[p.pid] = p
        mp.util.debug(
            f"Adjusted process count to {self._max_workers}: "
            f"{[(p.name, pid) for pid, p in self._processes.items()]}"
        )

    def _ensure_executor_running(self):
        """ensures all workers and management thread are running"""
        with self._processes_management_lock:
            if len(self._processes) != self._max_workers:
                self._adjust_process_count()
            self._start_executor_manager_thread()

    def submit(self, fn, *args, **kwargs):
        # Schedule fn(*args, **kwargs) and return a Future for its result.
        with self._flags.shutdown_lock:
            if self._flags.broken is not None:
                raise self._flags.broken
            if self._flags.shutdown:
                raise ShutdownExecutorError(
                    "cannot schedule new futures after shutdown"
                )

            # Cannot submit a new calls once the interpreter is shutting down.
            # This check avoids spawning new processes at exit.
            if _global_shutdown:
                raise RuntimeError(
                    "cannot schedule new futures after " "interpreter shutdown"
                )

            f = Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._executor_manager_thread_wakeup.wakeup()

            self._ensure_executor_running()
            return f

    submit.__doc__ = Executor.submit.__doc__

    def map(self, fn, *iterables, **kwargs):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: If greater than one, the iterables will be chopped into
                chunks of size chunksize and submitted to the process pool.
                If set to one, the items in the list will be sent one at a
                time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        timeout = kwargs.get("timeout", None)
        chunksize = kwargs.get("chunksize", 1)
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")

        results = super().map(
            partial(_process_chunk, fn),
            _get_chunks(chunksize, *iterables),
            timeout=timeout,
        )
        return _chain_from_iterable_of_lists(results)

    def shutdown(self, wait=True, kill_workers=False):
        mp.util.debug(f"shutting down executor {self}")

        self._flags.flag_as_shutting_down(kill_workers)
        executor_manager_thread = self._executor_manager_thread
        executor_manager_thread_wakeup = self._executor_manager_thread_wakeup

        if executor_manager_thread_wakeup is not None:
            # Wake up queue management thread
            with self._shutdown_lock:
                self._executor_manager_thread_wakeup.wakeup()

        if executor_manager_thread is not None and wait:
            # This locks avoids concurrent join if the interpreter
            # is shutting down.
            with _global_shutdown_lock:
                executor_manager_thread.join()
                _threads_wakeups.pop(executor_manager_thread, None)

        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._executor_manager_thread = None
        self._executor_manager_thread_wakeup = None
        self._call_queue = None
        self._result_queue = None
        self._processes_management_lock = None

    shutdown.__doc__ = Executor.shutdown.__doc__
diff --git a/parrot/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py b/parrot/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad016fd389762a1c458200ffe7b310239da3a3f3
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py
@@ -0,0 +1,285 @@
+###############################################################################
+# Reusable ProcessPoolExecutor
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+import time
+import warnings
+import threading
+import multiprocessing as mp
+
+from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
+from .backend.context import cpu_count
+from .backend import get_context
+
+__all__ = ["get_reusable_executor"]
+
+# Singleton executor and id management. At most one reusable executor
+# exists per process; all three variables are guarded by _executor_lock.
+_executor_lock = threading.RLock()  # serializes create/reuse/resize/shutdown
+_next_executor_id = 0  # monotonically increasing id, for debugging/tests
+_executor = None  # the singleton _ReusablePoolExecutor (or None)
+_executor_kwargs = None  # kwargs used to build _executor, checked for reuse
+
+
+def _get_next_executor_id():
+    """Ensure that each successive executor instance has a unique, monotonic id.
+
+    The purpose of this monotonic id is to help debug and test automated
+    instance creation.
+
+    Returns
+    -------
+    int
+        The id assigned to the next executor instance.
+    """
+    global _next_executor_id
+    # Lock so concurrent creators never observe the same id.
+    with _executor_lock:
+        executor_id = _next_executor_id
+        _next_executor_id += 1
+        return executor_id
+
+
+def get_reusable_executor(
+    max_workers=None,
+    context=None,
+    timeout=10,
+    kill_workers=False,
+    reuse="auto",
+    job_reducers=None,
+    result_reducers=None,
+    initializer=None,
+    initargs=(),
+    env=None,
+):
+    """Return the current ReusableExecutor instance.
+
+    Start a new instance if it has not been started already or if the previous
+    instance was left in a broken state.
+
+    If the previous instance does not have the requested number of workers, the
+    executor is dynamically resized to adjust the number of workers prior to
+    returning.
+
+    Reusing a singleton instance spares the overhead of starting new worker
+    processes and importing common python packages each time.
+
+    ``max_workers`` controls the maximum number of tasks that can be running in
+    parallel in worker processes. By default this is set to the number of
+    CPUs on the host.
+
+    Setting ``timeout`` (in seconds) makes idle workers automatically shutdown
+    so as to release system resources. New workers are respawned upon
+    submission of new tasks so that ``max_workers`` are available to accept the
+    newly submitted tasks. Setting ``timeout`` to around 100 times the time
+    required to spawn new processes and import packages in them (on the order
+    of 100ms) ensures that the overhead of spawning workers is negligible.
+
+    Setting ``kill_workers=True`` makes it possible to forcibly interrupt
+    previously spawned jobs to get a new instance of the reusable executor
+    with new constructor argument values.
+
+    The ``job_reducers`` and ``result_reducers`` are used to customize the
+    pickling of tasks and results sent to the executor.
+
+    When provided, the ``initializer`` is run first in newly spawned
+    processes with argument ``initargs``.
+
+    The environment variables in the child processes are a copy of the values
+    in the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV``
+    and ``VAL`` are string literals to overwrite the environment variable
+    ``ENV`` in the child processes to value ``VAL``. The environment variables
+    are set in the children before any module is loaded. This only works with
+    the ``loky`` context.
+    """
+    # The classmethod updates the module-level singleton itself; the name
+    # bound here is a local that merely receives the resulting instance.
+    _executor, _ = _ReusablePoolExecutor.get_reusable_executor(
+        max_workers=max_workers,
+        context=context,
+        timeout=timeout,
+        kill_workers=kill_workers,
+        reuse=reuse,
+        job_reducers=job_reducers,
+        result_reducers=result_reducers,
+        initializer=initializer,
+        initargs=initargs,
+        env=env,
+    )
+    return _executor
+
+
+class _ReusablePoolExecutor(ProcessPoolExecutor):
+    """A ``ProcessPoolExecutor`` managed as a resizable process singleton.
+
+    Instances are normally obtained through the module-level
+    ``get_reusable_executor`` helper, which reuses (and resizes) a single
+    instance across calls instead of spawning a fresh pool each time.
+    """
+
+    def __init__(
+        self,
+        submit_resize_lock,
+        max_workers=None,
+        context=None,
+        timeout=None,
+        executor_id=0,
+        job_reducers=None,
+        result_reducers=None,
+        initializer=None,
+        initargs=(),
+        env=None,
+    ):
+        """Create the executor.
+
+        ``submit_resize_lock`` is the (reentrant) lock shared with the module
+        singleton machinery; it serializes ``submit`` against ``_resize``.
+        ``executor_id`` is the monotonic debug id assigned by the caller.
+        The remaining arguments are forwarded to ``ProcessPoolExecutor``.
+        """
+        super().__init__(
+            max_workers=max_workers,
+            context=context,
+            timeout=timeout,
+            job_reducers=job_reducers,
+            result_reducers=result_reducers,
+            initializer=initializer,
+            initargs=initargs,
+            env=env,
+        )
+        self.executor_id = executor_id
+        self._submit_resize_lock = submit_resize_lock
+
+    @classmethod
+    def get_reusable_executor(
+        cls,
+        max_workers=None,
+        context=None,
+        timeout=10,
+        kill_workers=False,
+        reuse="auto",
+        job_reducers=None,
+        result_reducers=None,
+        initializer=None,
+        initargs=(),
+        env=None,
+    ):
+        """Return ``(executor, is_reused)``, creating or reusing the singleton.
+
+        A previous instance is reused when it is healthy and either
+        ``reuse`` is truthy or (with ``reuse="auto"``) its construction
+        kwargs match the requested ones; otherwise it is shut down and a
+        fresh one is created. See the module-level ``get_reusable_executor``
+        for the meaning of the parameters.
+        """
+        with _executor_lock:
+            global _executor, _executor_kwargs
+            executor = _executor
+
+            if max_workers is None:
+                # Default: keep the current size when explicitly reusing,
+                # otherwise fall back to the CPU count.
+                if reuse is True and executor is not None:
+                    max_workers = executor._max_workers
+                else:
+                    max_workers = cpu_count()
+            elif max_workers <= 0:
+                raise ValueError(
+                    f"max_workers must be greater than 0, got {max_workers}."
+                )
+
+            if isinstance(context, str):
+                context = get_context(context)
+            # 'fork' is rejected: a forked singleton cannot be safely reused.
+            if context is not None and context.get_start_method() == "fork":
+                raise ValueError(
+                    "Cannot use reusable executor with the 'fork' context"
+                )
+
+            # Everything except max_workers takes part in the reuse decision.
+            kwargs = dict(
+                context=context,
+                timeout=timeout,
+                job_reducers=job_reducers,
+                result_reducers=result_reducers,
+                initializer=initializer,
+                initargs=initargs,
+                env=env,
+            )
+            if executor is None:
+                is_reused = False
+                mp.util.debug(
+                    f"Create a executor with max_workers={max_workers}."
+                )
+                executor_id = _get_next_executor_id()
+                _executor_kwargs = kwargs
+                _executor = executor = cls(
+                    _executor_lock,
+                    max_workers=max_workers,
+                    executor_id=executor_id,
+                    **kwargs,
+                )
+            else:
+                if reuse == "auto":
+                    reuse = kwargs == _executor_kwargs
+                if (
+                    executor._flags.broken
+                    or executor._flags.shutdown
+                    or not reuse
+                ):
+                    if executor._flags.broken:
+                        reason = "broken"
+                    elif executor._flags.shutdown:
+                        reason = "shutdown"
+                    else:
+                        reason = "arguments have changed"
+                    mp.util.debug(
+                        "Creating a new executor with max_workers="
+                        f"{max_workers} as the previous instance cannot be "
+                        f"reused ({reason})."
+                    )
+                    executor.shutdown(wait=True, kill_workers=kill_workers)
+                    _executor = executor = _executor_kwargs = None
+                    # Recursive call to build a new instance
+                    return cls.get_reusable_executor(
+                        max_workers=max_workers, **kwargs
+                    )
+                else:
+                    mp.util.debug(
+                        "Reusing existing executor with "
+                        f"max_workers={executor._max_workers}."
+                    )
+                    is_reused = True
+                    executor._resize(max_workers)
+
+            return executor, is_reused
+
+    def submit(self, fn, *args, **kwargs):
+        """Submit a job, serialized against concurrent ``_resize`` calls."""
+        with self._submit_resize_lock:
+            return super().submit(fn, *args, **kwargs)
+
+    def _resize(self, max_workers):
+        """Grow or shrink the pool to exactly ``max_workers`` processes.
+
+        Waits for pending jobs to finish first, then asks excess workers to
+        exit (by queueing ``None`` sentinels) or spawns missing ones.
+        """
+        with self._submit_resize_lock:
+            if max_workers is None:
+                raise ValueError("Trying to resize with max_workers=None")
+            elif max_workers == self._max_workers:
+                # Already the requested size: nothing to do.
+                return
+
+            if self._executor_manager_thread is None:
+                # If the executor_manager_thread has not been started
+                # then no processes have been spawned and we can just
+                # update _max_workers and return
+                self._max_workers = max_workers
+                return
+
+            self._wait_job_completion()
+
+            # Some process might have returned due to timeout so check how many
+            # children are still alive. Use the _process_management_lock to
+            # ensure that no process are spawned or timeout during the resize.
+            with self._processes_management_lock:
+                processes = list(self._processes.values())
+                nb_children_alive = sum(p.is_alive() for p in processes)
+                self._max_workers = max_workers
+                # One None sentinel per excess worker asks it to exit.
+                for _ in range(max_workers, nb_children_alive):
+                    self._call_queue.put(None)
+            # Busy-wait (1ms steps) until the excess workers have exited.
+            while (
+                len(self._processes) > max_workers and not self._flags.broken
+            ):
+                time.sleep(1e-3)
+
+            # Spawn missing workers, then wait for them all to come up.
+            self._adjust_process_count()
+            processes = list(self._processes.values())
+            while not all(p.is_alive() for p in processes):
+                time.sleep(1e-3)
+
+    def _wait_job_completion(self):
+        """Wait for the cache to be empty before resizing the pool."""
+        # Issue a warning to the user about the bad effect of this usage.
+        if self._pending_work_items:
+            warnings.warn(
+                "Trying to resize an executor with running jobs: "
+                "waiting for jobs completion before resizing.",
+                UserWarning,
+            )
+            mp.util.debug(
+                f"Executor {self.executor_id} waiting for jobs completion "
+                "before resizing"
+            )
+        # Wait for the completion of the jobs
+        while self._pending_work_items:
+            time.sleep(1e-3)
+
+    def _setup_queues(self, job_reducers, result_reducers):
+        """Create the communication queues with an enlarged capacity."""
+        # As this executor can be resized, use a large queue size to avoid
+        # underestimating capacity and introducing overhead
+        queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS
+        super()._setup_queues(
+            job_reducers, result_reducers, queue_size=queue_size
+        )
diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/__init__.py b/parrot/lib/python3.10/site-packages/joblib/test/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b535ba2e2fe3418adbb2adc15252a8b003938553
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc b/parrot/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f32792f8b2a7c2a2c37e827836a96ff66e1297a9
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py b/parrot/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba903d6cc2cd75879eed60ff31ecdf7ffe230d45
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py
@@ -0,0 +1,95 @@
+"""
+This script is used to generate test data for joblib/test/test_numpy_pickle.py
+"""
+
+import sys
+import re
+
+# pytest needs to be able to import this module even when numpy is
+# not installed
+try:
+ import numpy as np
+except ImportError:
+ np = None
+
+import joblib
+
+
+def get_joblib_version(joblib_version=joblib.__version__):
+    """Normalize joblib version by removing suffix.
+
+    >>> get_joblib_version('0.8.4')
+    '0.8.4'
+    >>> get_joblib_version('0.8.4b1')
+    '0.8.4'
+    >>> get_joblib_version('0.9.dev0')
+    '0.9'
+    """
+    # NOTE: the default is evaluated once at import time, which is fine here
+    # since joblib.__version__ is a constant for the process lifetime.
+    # Keep the leading digits of each dot-separated component; drop the rest.
+    matches = [re.match(r'(\d+).*', each)
+               for each in joblib_version.split('.')]
+    return '.'.join([m.group(1) for m in matches if m is not None])
+
+
+def write_test_pickle(to_pickle, args):
+    """Dump ``to_pickle`` to a versioned test-fixture file.
+
+    The filename encodes the joblib, python and numpy versions plus the
+    compression options taken from ``args`` (an argparse namespace with
+    ``compress``, ``method`` and ``cache_size`` attributes), so fixtures
+    generated on different version combinations never collide.
+    """
+    kwargs = {}
+    compress = args.compress
+    method = args.method
+    joblib_version = get_joblib_version()
+    # e.g. sys.version_info (3, 10, ...) -> '310'
+    py_version = '{0[0]}{0[1]}'.format(sys.version_info)
+    # e.g. numpy '1.24.2' -> '124' (major+minor, dots removed)
+    numpy_version = ''.join(np.__version__.split('.')[:2])
+
+    # The game here is to generate the right filename according to the options.
+    body = '_compressed' if (compress and method == 'zlib') else ''
+    if compress:
+        if method == 'zlib':
+            # Legacy boolean form of the compress option -> .gz suffix.
+            kwargs['compress'] = True
+            extension = '.gz'
+        else:
+            kwargs['compress'] = (method, 3)
+            extension = '.pkl.{}'.format(method)
+        if args.cache_size:
+            # cache_size=0 forces companion .npy files for pickled arrays.
+            kwargs['cache_size'] = 0
+            body += '_cache_size'
+    else:
+        extension = '.pkl'
+
+    pickle_filename = 'joblib_{}{}_pickle_py{}_np{}{}'.format(
+        joblib_version, body, py_version, numpy_version, extension)
+
+    try:
+        joblib.dump(to_pickle, pickle_filename, **kwargs)
+    except Exception as e:
+        # With old python version (=< 3.3.), we can arrive there when
+        # dumping compressed pickle with LzmaFile.
+        print("Error: cannot generate file '{}' with arguments '{}'. "
+              "Error was: {}".format(pickle_filename, kwargs, e))
+    else:
+        print("File '{}' generated successfully.".format(pickle_filename))
+
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser(description="Joblib pickle data "
+ "generator.")
+ parser.add_argument('--cache_size', action="store_true",
+ help="Force creation of companion numpy "
+ "files for pickled arrays.")
+ parser.add_argument('--compress', action="store_true",
+ help="Generate compress pickles.")
+ parser.add_argument('--method', type=str, default='zlib',
+ choices=['zlib', 'gzip', 'bz2', 'xz', 'lzma', 'lz4'],
+ help="Set compression method.")
+ # We need to be specific about dtypes in particular endianness
+ # because the pickles can be generated on one architecture and
+ # the tests run on another one. See
+ # https://github.com/joblib/joblib/issues/279.
+ to_pickle = [np.arange(5, dtype=np.dtype('=3
+License-File: License.txt
+
+Provides libraries to enable third party tools using GPU profiling APIs.
diff --git a/parrot/lib/python3.10/site-packages/tests/slow/test_sft_slow.py b/parrot/lib/python3.10/site-packages/tests/slow/test_sft_slow.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6c73825d4ae7590e2f53bee89443b551084e201
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/tests/slow/test_sft_slow.py
@@ -0,0 +1,429 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import gc
+import itertools
+import tempfile
+import unittest
+
+import torch
+from accelerate.utils.memory import release_memory
+from datasets import load_dataset
+from parameterized import parameterized
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+from transformers.testing_utils import (
+ require_bitsandbytes,
+ require_peft,
+ require_torch_accelerator,
+ require_torch_multi_accelerator,
+)
+from transformers.utils import is_peft_available
+
+from trl import SFTConfig, SFTTrainer
+from trl.models.utils import setup_chat_format
+
+from ..testing_utils import require_liger_kernel
+from .testing_constants import DEVICE_MAP_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST, PACKING_OPTIONS
+
+
+if is_peft_available():
+ from peft import LoraConfig, PeftModel
+
+
+@require_torch_accelerator
+class SFTTrainerSlowTester(unittest.TestCase):
+    """Slow, accelerator-dependent integration tests for ``SFTTrainer``.
+
+    Each test is parameterized (via ``parameterized.expand``) over the
+    cartesian product of model names, packing options and — where relevant —
+    gradient-checkpointing kwargs and device maps, and runs a short (<=10
+    step) training loop on a 10% slice of the IMDB dataset.
+    """
+
+    def setUp(self):
+        # Small dataset slices keep each parameterized run fast.
+        self.train_dataset = load_dataset("stanfordnlp/imdb", split="train[:10%]")
+        self.eval_dataset = load_dataset("stanfordnlp/imdb", split="test[:10%]")
+        self.dataset_text_field = "text"
+        self.max_seq_length = 128
+        # Shared LoRA config for all PEFT-based tests.
+        self.peft_config = LoraConfig(
+            lora_alpha=16,
+            lora_dropout=0.1,
+            r=8,
+            bias="none",
+            task_type="CAUSAL_LM",
+        )
+
+    def tearDown(self):
+        # Free model/trainer references and CUDA cache between parameterized
+        # runs so successive tests do not accumulate GPU memory.
+        gc.collect()
+        torch.cuda.empty_cache()
+        gc.collect()
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
+    def test_sft_trainer_str(self, model_name, packing):
+        """
+        Simply tests if passing a simple str to `SFTTrainer` loads and runs the trainer
+        as expected.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+            )
+
+            # Model given as a string: SFTTrainer loads it internally.
+            trainer = SFTTrainer(
+                model_name,
+                args=args,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+            )
+
+            trainer.train()
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
+    def test_sft_trainer_transformers(self, model_name, packing):
+        """
+        Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer
+        as expected.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+            )
+
+            model = AutoModelForCausalLM.from_pretrained(model_name)
+            tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            trainer = SFTTrainer(
+                model,
+                args=args,
+                tokenizer=tokenizer,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+            )
+
+            trainer.train()
+
+        release_memory(model, trainer)
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
+    @require_peft
+    def test_sft_trainer_peft(self, model_name, packing):
+        """
+        Simply tests if passing a transformers model + peft config to `SFTTrainer` loads and runs the trainer
+        as expected.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                fp16=True,
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+            )
+
+            model = AutoModelForCausalLM.from_pretrained(model_name)
+            tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            trainer = SFTTrainer(
+                model,
+                args=args,
+                tokenizer=tokenizer,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+                peft_config=self.peft_config,
+            )
+
+            # Passing peft_config must wrap the model in a PeftModel.
+            assert isinstance(trainer.model, PeftModel)
+
+            trainer.train()
+
+        release_memory(model, trainer)
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
+    def test_sft_trainer_transformers_mp(self, model_name, packing):
+        """
+        Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer
+        as expected in mixed precision.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                fp16=True,  # this is sufficient to enable amp
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+            )
+
+            model = AutoModelForCausalLM.from_pretrained(model_name)
+            tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            trainer = SFTTrainer(
+                model,
+                args=args,
+                tokenizer=tokenizer,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+            )
+
+            trainer.train()
+
+        release_memory(model, trainer)
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS)))
+    def test_sft_trainer_transformers_mp_gc(self, model_name, packing, gradient_checkpointing_kwargs):
+        """
+        Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer
+        as expected in mixed precision + different scenarios of gradient_checkpointing.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+                fp16=True,  # this is sufficient to enable amp
+                gradient_checkpointing=True,
+                gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
+            )
+
+            model = AutoModelForCausalLM.from_pretrained(model_name)
+            tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            trainer = SFTTrainer(
+                model,
+                args=args,
+                tokenizer=tokenizer,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+            )
+
+            trainer.train()
+
+        release_memory(model, trainer)
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS)))
+    @require_peft
+    def test_sft_trainer_transformers_mp_gc_peft(self, model_name, packing, gradient_checkpointing_kwargs):
+        """
+        Simply tests if passing a transformers model + PEFT to `SFTTrainer` loads and runs the trainer
+        as expected in mixed precision + different scenarios of gradient_checkpointing.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+                fp16=True,  # this is sufficient to enable amp
+                gradient_checkpointing=True,
+                gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
+            )
+
+            model = AutoModelForCausalLM.from_pretrained(model_name)
+            tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            trainer = SFTTrainer(
+                model,
+                args=args,
+                tokenizer=tokenizer,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+                peft_config=self.peft_config,
+            )
+
+            assert isinstance(trainer.model, PeftModel)
+
+            trainer.train()
+
+        release_memory(model, trainer)
+
+    @parameterized.expand(
+        list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, DEVICE_MAP_OPTIONS))
+    )
+    @require_torch_multi_accelerator
+    def test_sft_trainer_transformers_mp_gc_device_map(
+        self, model_name, packing, gradient_checkpointing_kwargs, device_map
+    ):
+        """
+        Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer
+        as expected in mixed precision + different scenarios of gradient_checkpointing (single, multi-gpu, etc).
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+                fp16=True,  # this is sufficient to enable amp
+                gradient_checkpointing=True,
+                gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
+            )
+
+            # device_map spreads the model across the available accelerators.
+            model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map)
+            tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            trainer = SFTTrainer(
+                model,
+                args=args,
+                tokenizer=tokenizer,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+            )
+
+            trainer.train()
+
+        release_memory(model, trainer)
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS)))
+    @require_peft
+    @require_bitsandbytes
+    def test_sft_trainer_transformers_mp_gc_peft_qlora(self, model_name, packing, gradient_checkpointing_kwargs):
+        """
+        Simply tests if passing a transformers model + PEFT + bnb to `SFTTrainer` loads and runs the trainer
+        as expected in mixed precision + different scenarios of gradient_checkpointing.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+                fp16=True,  # this is sufficient to enable amp
+                gradient_checkpointing=True,
+                gradient_checkpointing_kwargs=gradient_checkpointing_kwargs,
+            )
+
+            # 4-bit quantization (QLoRA base model).
+            quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
+
+            model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)
+            tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            trainer = SFTTrainer(
+                model,
+                args=args,
+                tokenizer=tokenizer,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+                peft_config=self.peft_config,
+            )
+
+            assert isinstance(trainer.model, PeftModel)
+
+            trainer.train()
+
+        release_memory(model, trainer)
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
+    @require_peft
+    @require_bitsandbytes
+    def test_sft_trainer_with_chat_format_qlora(self, model_name, packing):
+        """
+        Simply tests if using setup_chat_format with a transformers model + peft + bnb config to `SFTTrainer` loads and runs the trainer
+        as expected.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            # Chat-formatted dataset instead of the plain-text IMDB slices.
+            train_dataset = load_dataset("trl-internal-testing/dolly-chatml-sft", split="train")
+
+            args = SFTConfig(
+                packing=packing,
+                max_seq_length=self.max_seq_length,
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=10,
+                fp16=True,
+            )
+
+            quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
+
+            model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config)
+            tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+            # Install the ChatML template (may resize embeddings/tokenizer).
+            model, tokenizer = setup_chat_format(model, tokenizer)
+
+            trainer = SFTTrainer(
+                model,
+                args=args,
+                tokenizer=tokenizer,
+                train_dataset=train_dataset,
+                peft_config=self.peft_config,
+            )
+
+            assert isinstance(trainer.model, PeftModel)
+
+            trainer.train()
+
+        release_memory(model, trainer)
+
+    @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS)))
+    @require_liger_kernel
+    def test_sft_trainer_with_liger(self, model_name, packing):
+        """
+        Tests if passing use_liger=True to SFTConfig loads and runs the trainer
+        with AutoLigerKernelForCausalLM as expected.
+        """
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            args = SFTConfig(
+                output_dir=tmp_dir,
+                logging_strategy="no",
+                report_to="none",
+                per_device_train_batch_size=2,
+                max_steps=2,
+                packing=packing,
+                dataset_text_field=self.dataset_text_field,
+                max_seq_length=self.max_seq_length,
+                use_liger=True,
+            )
+
+            trainer = SFTTrainer(
+                model_name,
+                args=args,
+                train_dataset=self.train_dataset,
+                eval_dataset=self.eval_dataset,
+            )
+
+            # check that the components of the trainer.model are monkey patched:
+            self.assertTrue(any("Liger" in type(module).__name__ for module in trainer.model.model.modules()))
+            trainer.train()
+
+        release_memory(trainer.model, trainer)