diff --git a/.gitattributes b/.gitattributes
index 269d67841f845401db39caaed420402c7e967111..d333fdcf1bd778e873c67d9b1ce7ff9886969cde 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1654,3 +1654,4 @@ evalkit_internvl/lib/python3.10/site-packages/imageio_ffmpeg/binaries/ffmpeg-lin
evalkit_internvl/lib/python3.10/site-packages/mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libcudart.60cfec8e.so.11.0 filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.70530407.so.11 filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/INSTALLER b/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/METADATA b/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..fd12277a0117b77f150daa0bffacc872fb6977c0
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/METADATA
@@ -0,0 +1,131 @@
+Metadata-Version: 2.1
+Name: decorator
+Version: 4.4.2
+Summary: Decorators for Humans
+Home-page: https://github.com/micheles/decorator
+Author: Michele Simionato
+Author-email: michele.simionato@gmail.com
+License: new BSD License
+Keywords: decorators generic utility
+Platform: All
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Requires-Python: >=2.6, !=3.0.*, !=3.1.*
+
+Decorators for Humans
+=====================
+
+The goal of the decorator module is to make it easy to define
+signature-preserving function decorators and decorator factories.
+It also includes an implementation of multiple dispatch and other niceties
+(please check the docs). It is released under a two-clauses
+BSD license, i.e. basically you can do whatever you want with it but I am not
+responsible.
+
+Installation
+-------------
+
+If you are lazy, just perform
+
+ ``$ pip install decorator``
+
+which will install just the module on your system.
+
+If you prefer to install the full distribution from source, including
+the documentation, clone the `GitHub repo`_ or download the tarball_, unpack it and run
+
+ ``$ pip install .``
+
+in the main directory, possibly as superuser.
+
+.. _tarball: https://pypi.org/project/decorator/#files
+.. _GitHub repo: https://github.com/micheles/decorator
+
+Testing
+--------
+
+If you have the source code installation you can run the tests with
+
+ `$ python src/tests/test.py -v`
+
+or (if you have setuptools installed)
+
+ `$ python setup.py test`
+
+Notice that you may run into trouble if in your system there
+is an older version of the decorator module; in such a case remove the
+old version. It is safe even to copy the module `decorator.py` over
+an existing one, since we kept backward-compatibility for a long time.
+
+Repository
+---------------
+
+The project is hosted on GitHub. You can look at the source here:
+
+ https://github.com/micheles/decorator
+
+Documentation
+---------------
+
+The documentation has been moved to https://github.com/micheles/decorator/blob/master/docs/documentation.md
+
+From there you can get a PDF version by simply using the print
+functionality of your browser.
+
+Here is the documentation for previous versions of the module:
+
+https://github.com/micheles/decorator/blob/4.3.2/docs/tests.documentation.rst
+https://github.com/micheles/decorator/blob/4.2.1/docs/tests.documentation.rst
+https://github.com/micheles/decorator/blob/4.1.2/docs/tests.documentation.rst
+https://github.com/micheles/decorator/blob/4.0.0/documentation.rst
+https://github.com/micheles/decorator/blob/3.4.2/documentation.rst
+
+For the impatient
+-----------------
+
+Here is an example of how to define a family of decorators tracing slow
+operations:
+
+.. code-block:: python
+
+ from decorator import decorator
+
+ @decorator
+ def warn_slow(func, timelimit=60, *args, **kw):
+ t0 = time.time()
+ result = func(*args, **kw)
+ dt = time.time() - t0
+ if dt > timelimit:
+ logging.warn('%s took %d seconds', func.__name__, dt)
+ else:
+ logging.info('%s took %d seconds', func.__name__, dt)
+ return result
+
+ @warn_slow # warn if it takes more than 1 minute
+ def preprocess_input_files(inputdir, tempdir):
+ ...
+
+ @warn_slow(timelimit=600) # warn if it takes more than 10 minutes
+ def run_calculation(tempdir, outdir):
+ ...
+
+Enjoy!
+
+
diff --git a/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/WHEEL b/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..78e6f69d1d8fe46bdd9dd3bbfdee02380aaede3b
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/decorator-4.4.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.4)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97e06ee2ec5d9f054edbbd08edd15ab85cee5246
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5fcfefca070fc0862c380b25660a57adb020452b
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a454eea2aeada7abd88b4ff2499cb84061d37e98
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66a34e4d6519891c9eb1278b34294e3bb4c13298
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4e28a74dcf6edced752f6019eb4ff6f663c8946
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba78152c2c376bce2bbe7af407f47495b6e03d92
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7cda156f0bfd8647b82362cb70492dcf3ac82dc2
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3de8abb234f36e00046be993589de4e19e627d5
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aef2c68d87b3c1ad1af7cff8de01d90cea1f57db
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de7d9ac29bf7f0811d1c3e4e23487db3397e4c12
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..284300d0e153cb6962de48255bfb48827924dad1
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a9d3ed33140723389c500909ee9223e64b2e24e
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3f580f5447da72b8041ed68047d7fa3d6c82ad9
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cebc477ce769e377bc6be51f399bd1de325d621e
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3b17bacd8880ea9d79ada496c87ff4e28399e9c
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58a8d086ff616b2ef75ab0d788d990e749f96e8d
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py
@@ -0,0 +1,18 @@
+from . import cloudpickle
+from .cloudpickle import * # noqa
+
+__doc__ = cloudpickle.__doc__
+
+__version__ = "3.0.0"
+
+__all__ = [ # noqa
+ "__version__",
+ "Pickler",
+ "CloudPickler",
+ "dumps",
+ "loads",
+ "dump",
+ "load",
+ "register_pickle_by_value",
+ "unregister_pickle_by_value",
+]
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2622d633b05094a1815b619ca1aae5a0174e9da5
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9cbfd002f95b02efdac20f8f9ea11d2f6768f188
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84d654d2f97cb9e59f0d380ab79c75d7f1950092
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb43a9676bbb11bdecf187e7f6cde51f793ff3fc
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py
@@ -0,0 +1,1487 @@
+"""Pickler class to extend the standard pickle.Pickler functionality
+
+The main objective is to make it natural to perform distributed computing on
+clusters (such as PySpark, Dask, Ray...) with interactively defined code
+(functions, classes, ...) written in notebooks or console.
+
+In particular this pickler adds the following features:
+- serialize interactively-defined or locally-defined functions, classes,
+ enums, typevars, lambdas and nested functions to compiled byte code;
+- deal with some other non-serializable objects in an ad-hoc manner where
+ applicable.
+
+This pickler is therefore meant to be used for the communication between short
+lived Python processes running the same version of Python and libraries. In
+particular, it is not meant to be used for long term storage of Python objects.
+
+It does not include an unpickler, as standard Python unpickling suffices.
+
+This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
+`_.
+
+Copyright (c) 2012-now, CloudPickle developers and contributors.
+Copyright (c) 2012, Regents of the University of California.
+Copyright (c) 2009 `PiCloud, Inc. `_.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the University of California, Berkeley nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import _collections_abc
+from collections import ChainMap, OrderedDict
+import abc
+import builtins
+import copyreg
+import dataclasses
+import dis
+from enum import Enum
+import io
+import itertools
+import logging
+import opcode
+import pickle
+from pickle import _getattribute
+import platform
+import struct
+import sys
+import threading
+import types
+import typing
+import uuid
+import warnings
+import weakref
+
+# The following import is required to be imported in the cloudpickle
+# namespace to be able to load pickle files generated with older versions of
+# cloudpickle. See: tests/test_backward_compat.py
+from types import CellType # noqa: F401
+
+
+# cloudpickle is meant for inter process communication: we expect all
+# communicating processes to run the same Python version hence we favor
+# communication speed over compatibility:
+DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
+
+# Names of modules whose resources should be treated as dynamic.
+_PICKLE_BY_VALUE_MODULES = set()
+
+# Track the provenance of reconstructed dynamic classes to make it possible to
+# reconstruct instances from the matching singleton class definition when
+# appropriate and preserve the usual "isinstance" semantics of Python objects.
+_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
+_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
+_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()
+
+PYPY = platform.python_implementation() == "PyPy"
+
+builtin_code_type = None
+if PYPY:
+ # builtin-code objects only exist in pypy
+ builtin_code_type = type(float.__new__.__code__)
+
+_extract_code_globals_cache = weakref.WeakKeyDictionary()
+
+
+def _get_or_create_tracker_id(class_def):
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
+ class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
+ if class_tracker_id is None:
+ class_tracker_id = uuid.uuid4().hex
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
+ _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
+ return class_tracker_id
+
+
+def _lookup_class_or_track(class_tracker_id, class_def):
+ if class_tracker_id is not None:
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
+ class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
+ class_tracker_id, class_def
+ )
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
+ return class_def
+
+
+def register_pickle_by_value(module):
+ """Register a module to make it functions and classes picklable by value.
+
+ By default, functions and classes that are attributes of an importable
+ module are to be pickled by reference, that is relying on re-importing
+ the attribute from the module at load time.
+
+ If `register_pickle_by_value(module)` is called, all its functions and
+ classes are subsequently to be pickled by value, meaning that they can
+ be loaded in Python processes where the module is not importable.
+
+ This is especially useful when developing a module in a distributed
+ execution environment: restarting the client Python process with the new
+ source code is enough: there is no need to re-install the new version
+ of the module on all the worker nodes nor to restart the workers.
+
+ Note: this feature is considered experimental. See the cloudpickle
+ README.md file for more details and limitations.
+ """
+ if not isinstance(module, types.ModuleType):
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
+ # In the future, cloudpickle may need a way to access any module registered
+ # for pickling by value in order to introspect relative imports inside
+ # functions pickled by value. (see
+ # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
+ # This access can be ensured by checking that module is present in
+ # sys.modules at registering time and assuming that it will still be in
+ # there when accessed during pickling. Another alternative would be to
+ # store a weakref to the module. Even though cloudpickle does not implement
+ # this introspection yet, in order to avoid a possible breaking change
+ # later, we still enforce the presence of module inside sys.modules.
+ if module.__name__ not in sys.modules:
+ raise ValueError(
+ f"{module} was not imported correctly, have you used an "
+ "`import` statement to access it?"
+ )
+ _PICKLE_BY_VALUE_MODULES.add(module.__name__)
+
+
+def unregister_pickle_by_value(module):
+ """Unregister that the input module should be pickled by value."""
+ if not isinstance(module, types.ModuleType):
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
+ if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
+ raise ValueError(f"{module} is not registered for pickle by value")
+ else:
+ _PICKLE_BY_VALUE_MODULES.remove(module.__name__)
+
+
+def list_registry_pickle_by_value():
+ return _PICKLE_BY_VALUE_MODULES.copy()
+
+
+def _is_registered_pickle_by_value(module):
+ module_name = module.__name__
+ if module_name in _PICKLE_BY_VALUE_MODULES:
+ return True
+ while True:
+ parent_name = module_name.rsplit(".", 1)[0]
+ if parent_name == module_name:
+ break
+ if parent_name in _PICKLE_BY_VALUE_MODULES:
+ return True
+ module_name = parent_name
+ return False
+
+
+def _whichmodule(obj, name):
+ """Find the module an object belongs to.
+
+ This function differs from ``pickle.whichmodule`` in two ways:
+ - it does not mangle the cases where obj's module is __main__ and obj was
+ not found in any module.
+ - Errors arising during module introspection are ignored, as those errors
+ are considered unwanted side effects.
+ """
+ module_name = getattr(obj, "__module__", None)
+
+ if module_name is not None:
+ return module_name
+ # Protect the iteration by using a copy of sys.modules against dynamic
+ # modules that trigger imports of other modules upon calls to getattr or
+ # other threads importing at the same time.
+ for module_name, module in sys.modules.copy().items():
+ # Some modules such as coverage can inject non-module objects inside
+ # sys.modules
+ if (
+ module_name == "__main__"
+ or module is None
+ or not isinstance(module, types.ModuleType)
+ ):
+ continue
+ try:
+ if _getattribute(module, name)[0] is obj:
+ return module_name
+ except Exception:
+ pass
+ return None
+
+
+def _should_pickle_by_reference(obj, name=None):
+ """Test whether an function or a class should be pickled by reference
+
+ Pickling by reference means by that the object (typically a function or a
+ class) is an attribute of a module that is assumed to be importable in the
+ target Python environment. Loading will therefore rely on importing the
+ module and then calling `getattr` on it to access the function or class.
+
+ Pickling by reference is the only option to pickle functions and classes
+ in the standard library. In cloudpickle the alternative option is to
+ pickle by value (for instance for interactively or locally defined
+ functions and classes or for attributes of modules that have been
+ explicitly registered to be pickled by value.
+ """
+ if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
+ module_and_name = _lookup_module_and_qualname(obj, name=name)
+ if module_and_name is None:
+ return False
+ module, name = module_and_name
+ return not _is_registered_pickle_by_value(module)
+
+ elif isinstance(obj, types.ModuleType):
+ # We assume that sys.modules is primarily used as a cache mechanism for
+ # the Python import machinery. Checking if a module has been added in
+ # is sys.modules therefore a cheap and simple heuristic to tell us
+ # whether we can assume that a given module could be imported by name
+ # in another Python process.
+ if _is_registered_pickle_by_value(obj):
+ return False
+ return obj.__name__ in sys.modules
+ else:
+ raise TypeError(
+ "cannot check importability of {} instances".format(type(obj).__name__)
+ )
+
+
+def _lookup_module_and_qualname(obj, name=None):
+ if name is None:
+ name = getattr(obj, "__qualname__", None)
+ if name is None: # pragma: no cover
+ # This used to be needed for Python 2.7 support but is probably not
+ # needed anymore. However we keep the __name__ introspection in case
+ # users of cloudpickle rely on this old behavior for unknown reasons.
+ name = getattr(obj, "__name__", None)
+
+ module_name = _whichmodule(obj, name)
+
+ if module_name is None:
+ # In this case, obj.__module__ is None AND obj was not found in any
+ # imported module. obj is thus treated as dynamic.
+ return None
+
+ if module_name == "__main__":
+ return None
+
+ # Note: if module_name is in sys.modules, the corresponding module is
+ # assumed importable at unpickling time. See #357
+ module = sys.modules.get(module_name, None)
+ if module is None:
+ # The main reason why obj's module would not be imported is that this
+ # module has been dynamically created, using for example
+ # types.ModuleType. The other possibility is that module was removed
+ # from sys.modules after obj was created/imported. But this case is not
+ # supported, as the standard pickle does not support it either.
+ return None
+
+ try:
+ obj2, parent = _getattribute(module, name)
+ except AttributeError:
+ # obj was not found inside the module it points to
+ return None
+ if obj2 is not obj:
+ return None
+ return module, name
+
+
+def _extract_code_globals(co):
+ """Find all globals names read or written to by codeblock co."""
+ out_names = _extract_code_globals_cache.get(co)
+ if out_names is None:
+ # We use a dict with None values instead of a set to get a
+ # deterministic order and avoid introducing non-deterministic pickle
+ # bytes as a results.
+ out_names = {name: None for name in _walk_global_ops(co)}
+
+ # Declaring a function inside another one using the "def ..." syntax
+ # generates a constant code object corresponding to the one of the
+ # nested function's As the nested function may itself need global
+ # variables, we need to introspect its code, extract its globals, (look
+ # for code object in it's co_consts attribute..) and add the result to
+ # code_globals
+ if co.co_consts:
+ for const in co.co_consts:
+ if isinstance(const, types.CodeType):
+ out_names.update(_extract_code_globals(const))
+
+ _extract_code_globals_cache[co] = out_names
+
+ return out_names
+
+
+def _find_imported_submodules(code, top_level_dependencies):
+ """Find currently imported submodules used by a function.
+
+ Submodules used by a function need to be detected and referenced for the
+ function to work correctly at depickling time. Because submodules can be
+ referenced as attribute of their parent package (``package.submodule``), we
+ need a special introspection technique that does not rely on GLOBAL-related
+ opcodes to find references of them in a code object.
+
+ Example:
+ ```
+ import concurrent.futures
+ import cloudpickle
+ def func():
+ x = concurrent.futures.ThreadPoolExecutor
+ if __name__ == '__main__':
+ cloudpickle.dumps(func)
+ ```
+ The globals extracted by cloudpickle in the function's state include the
+ concurrent package, but not its submodule (here, concurrent.futures), which
+ is the module used by func. Find_imported_submodules will detect the usage
+ of concurrent.futures. Saving this module alongside with func will ensure
+ that calling func once depickled does not fail due to concurrent.futures
+ not being imported
+ """
+
+ subimports = []
+ # check if any known dependency is an imported package
+ for x in top_level_dependencies:
+ if (
+ isinstance(x, types.ModuleType)
+ and hasattr(x, "__package__")
+ and x.__package__
+ ):
+ # check if the package has any currently loaded sub-imports
+ prefix = x.__name__ + "."
+ # A concurrent thread could mutate sys.modules,
+ # make sure we iterate over a copy to avoid exceptions
+ for name in list(sys.modules):
+ # Older versions of pytest will add a "None" module to
+ # sys.modules.
+ if name is not None and name.startswith(prefix):
+ # check whether the function can address the sub-module
+ tokens = set(name[len(prefix) :].split("."))
+ if not tokens - set(code.co_names):
+ subimports.append(sys.modules[name])
+ return subimports
+
+
+# relevant opcodes
+STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
+DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
+LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
+GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
+HAVE_ARGUMENT = dis.HAVE_ARGUMENT
+EXTENDED_ARG = dis.EXTENDED_ARG
+
+
+_BUILTIN_TYPE_NAMES = {}
+for k, v in types.__dict__.items():
+ if type(v) is type:
+ _BUILTIN_TYPE_NAMES[v] = k
+
+
+def _builtin_type(name):
+ if name == "ClassType": # pragma: no cover
+ # Backward compat to load pickle files generated with cloudpickle
+ # < 1.3 even if loading pickle files from older versions is not
+ # officially supported.
+ return type
+ return getattr(types, name)
+
+
+def _walk_global_ops(code):
+ """Yield referenced name for global-referencing instructions in code."""
+ for instr in dis.get_instructions(code):
+ op = instr.opcode
+ if op in GLOBAL_OPS:
+ yield instr.argval
+
+
+def _extract_class_dict(cls):
+ """Retrieve a copy of the dict of a class without the inherited method."""
+ clsdict = dict(cls.__dict__) # copy dict proxy to a dict
+ if len(cls.__bases__) == 1:
+ inherited_dict = cls.__bases__[0].__dict__
+ else:
+ inherited_dict = {}
+ for base in reversed(cls.__bases__):
+ inherited_dict.update(base.__dict__)
+ to_remove = []
+ for name, value in clsdict.items():
+ try:
+ base_value = inherited_dict[name]
+ if value is base_value:
+ to_remove.append(name)
+ except KeyError:
+ pass
+ for name in to_remove:
+ clsdict.pop(name)
+ return clsdict
+
+
def is_tornado_coroutine(func):
    """Return whether *func* is a Tornado coroutine function.

    Running coroutines are not supported.
    """
    warnings.warn(
        "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
        "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
        "directly instead.",
        category=DeprecationWarning,
    )
    gen = sys.modules.get("tornado.gen")
    if gen is None:
        # tornado.gen has not been imported, so func cannot be one of its
        # coroutines.
        return False
    checker = getattr(gen, "is_coroutine_function", None)
    if checker is None:
        # Tornado version is too old
        return False
    return checker(func)
+
+
def subimport(name):
    """Import the (possibly dotted) module *name* and return that exact module.

    A plain ``__import__(name)`` returns the top-level package for dotted
    names (e.g. ``__import__('os.path')`` returns ``os``), so the actual
    submodule is fetched from ``sys.modules`` after importing.
    """
    __import__(name)
    return sys.modules[name]
+
+
def dynamic_subimport(name, vars):
    """Reconstruct a dynamic (non-importable) module from its name and dict."""
    module = types.ModuleType(name)
    module.__dict__.update(vars)
    # Restore a default "__builtins__" entry: it is stripped at pickling time
    # because it may contain unpicklable objects.
    module.__dict__["__builtins__"] = builtins.__dict__
    return module
+
+
+def _get_cell_contents(cell):
+ try:
+ return cell.cell_contents
+ except ValueError:
+ # Handle empty cells explicitly with a sentinel value.
+ return _empty_cell_value
+
+
def instance(cls):
    """Class decorator binding the name to a no-argument instance of *cls*.

    Parameters
    ----------
    cls : type
        The class to instantiate.

    Returns
    -------
    instance : cls
        A new instance of ``cls``, created with no arguments.
    """
    return cls()
+
+
# Thanks to @instance, the module-level name `_empty_cell_value` is bound to
# the singleton instance, not the class.
@instance
class _empty_cell_value:
    """Sentinel for empty closures."""

    @classmethod
    def __reduce__(cls):
        # Pickling as the bare class name makes unpickling resolve back to
        # this module-level singleton, preserving identity across processes.
        return cls.__name__
+
+
+def _make_function(code, globals, name, argdefs, closure):
+ # Setting __builtins__ in globals is needed for nogil CPython.
+ globals["__builtins__"] = __builtins__
+ return types.FunctionType(code, globals, name, argdefs, closure)
+
+
def _make_empty_cell():
    """Return a fresh, empty closure cell."""
    if False:
        # trick the compiler into creating an empty cell in our lambda
        cell = None
        raise AssertionError("this route should not be executed")

    # The lambda closes over the never-assigned `cell`, so its closure holds
    # exactly one empty cell.
    return (lambda: cell).__closure__[0]
+
+
def _make_cell(value=_empty_cell_value):
    """Return a new cell, optionally pre-filled with *value*.

    The ``_empty_cell_value`` sentinel default means "leave the cell empty";
    it allows ``None`` (or any other object) to be a legitimate cell value.
    """
    cell = _make_empty_cell()
    if value is not _empty_cell_value:
        cell.cell_contents = value
    return cell
+
+
def _make_skeleton_class(
    type_constructor, name, bases, type_kwargs, class_tracker_id, extra
):
    """Build dynamic class with an empty __dict__ to be filled once memoized

    If class_tracker_id is not None, try to lookup an existing class definition
    matching that id. If none is found, track a newly reconstructed class
    definition under that id so that other instances stemming from the same
    class id will also reuse this class definition.

    The "extra" variable is meant to be a dict (or None) that can be used for
    forward compatibility shall the need arise.
    """
    # `type_constructor` is the (possibly custom) metaclass of the pickled
    # class; types.new_class runs the normal class-creation protocol with it.
    skeleton_class = types.new_class(
        name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
    )
    return _lookup_class_or_track(class_tracker_id, skeleton_class)
+
+
def _make_skeleton_enum(
    bases, name, qualname, members, module, class_tracker_id, extra
):
    """Build dynamic enum with an empty __dict__ to be filled once memoized

    The creation of the enum class is inspired by the code of
    EnumMeta._create_.

    If class_tracker_id is not None, try to lookup an existing enum definition
    matching that id. If none is found, track a newly reconstructed enum
    definition under that id so that other instances stemming from the same
    class id will also reuse this enum definition.

    The "extra" variable is meant to be a dict (or None) that can be used for
    forward compatibility shall the need arise.
    """
    # enums always inherit from their base Enum class at the last position in
    # the list of base classes:
    enum_base = bases[-1]
    metacls = enum_base.__class__
    classdict = metacls.__prepare__(name, bases)

    # Populate the metaclass-prepared namespace so metacls.__new__ can turn
    # each (name, value) pair into an enum member.
    for member_name, member_value in members.items():
        classdict[member_name] = member_value
    enum_class = metacls.__new__(metacls, name, bases, classdict)
    enum_class.__module__ = module
    enum_class.__qualname__ = qualname

    return _lookup_class_or_track(class_tracker_id, enum_class)
+
+
def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
    """Recreate a TypeVar from the attributes captured by _decompose_typevar."""
    tv = typing.TypeVar(
        name,
        *constraints,
        bound=bound,
        covariant=covariant,
        contravariant=contravariant,
    )
    # Reuse a previously reconstructed TypeVar registered under the same
    # tracker id so identity is preserved across pickles.
    return _lookup_class_or_track(class_tracker_id, tv)
+
+
def _decompose_typevar(obj):
    """Return the argument tuple _make_typevar needs to rebuild TypeVar *obj*."""
    return (
        obj.__name__,
        obj.__bound__,
        obj.__constraints__,
        obj.__covariant__,
        obj.__contravariant__,
        _get_or_create_tracker_id(obj),
    )
+
+
def _typevar_reduce(obj):
    """Reduce a TypeVar either by reference (attribute lookup) or by value."""
    # TypeVar instances require the module information hence why we
    # are not using the _should_pickle_by_reference directly
    module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)

    if module_and_name is None:
        # Not reachable through an importable module: pickle by value.
        return (_make_typevar, _decompose_typevar(obj))
    elif _is_registered_pickle_by_value(module_and_name[0]):
        # Its module was explicitly registered for pickling by value.
        return (_make_typevar, _decompose_typevar(obj))

    return (getattr, module_and_name)
+
+
+def _get_bases(typ):
+ if "__orig_bases__" in getattr(typ, "__dict__", {}):
+ # For generic types (see PEP 560)
+ # Note that simply checking `hasattr(typ, '__orig_bases__')` is not
+ # correct. Subclasses of a fully-parameterized generic class does not
+ # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
+ # will return True because it's defined in the base class.
+ bases_attr = "__orig_bases__"
+ else:
+ # For regular class objects
+ bases_attr = "__bases__"
+ return getattr(typ, bases_attr)
+
+
+def _make_dict_keys(obj, is_ordered=False):
+ if is_ordered:
+ return OrderedDict.fromkeys(obj).keys()
+ else:
+ return dict.fromkeys(obj).keys()
+
+
+def _make_dict_values(obj, is_ordered=False):
+ if is_ordered:
+ return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
+ else:
+ return {i: _ for i, _ in enumerate(obj)}.values()
+
+
+def _make_dict_items(obj, is_ordered=False):
+ if is_ordered:
+ return OrderedDict(obj).items()
+ else:
+ return obj.items()
+
+
+# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
+# -------------------------------------------------
+
+
def _class_getnewargs(obj):
    """Return the argument tuple passed to _make_skeleton_class for *obj*."""
    type_kwargs = {}
    if "__module__" in obj.__dict__:
        type_kwargs["__module__"] = obj.__module__

    # Forward a class-level `__dict__` property (if any) to the class
    # constructor through type_kwargs.
    __dict__ = obj.__dict__.get("__dict__", None)
    if isinstance(__dict__, property):
        type_kwargs["__dict__"] = __dict__

    return (
        type(obj),
        obj.__name__,
        _get_bases(obj),
        type_kwargs,
        _get_or_create_tracker_id(obj),
        None,  # the "extra" slot, reserved for forward compatibility
    )
+
+
def _enum_getnewargs(obj):
    """Return the argument tuple passed to _make_skeleton_enum for *obj*."""
    # Snapshot the enum members as a plain name -> value mapping.
    members = {e.name: e.value for e in obj}
    return (
        obj.__bases__,
        obj.__name__,
        obj.__qualname__,
        members,
        obj.__module__,
        _get_or_create_tracker_id(obj),
        None,  # the "extra" slot, reserved for forward compatibility
    )
+
+
+# COLLECTION OF OBJECTS RECONSTRUCTORS
+# ------------------------------------
def _file_reconstructor(retval):
    """Identity function used as the reconstructor callable by _file_reduce."""
    return retval
+
+
+# COLLECTION OF OBJECTS STATE GETTERS
+# -----------------------------------
+
+
def _function_getstate(func):
    """Return the (state, slotstate) pair pickled for a dynamic function."""
    # - Put func's dynamic attributes (stored in func.__dict__) in state. These
    #   attributes will be restored at unpickling time using
    #   f.__dict__.update(state)
    # - Put func's members into slotstate. Such attributes will be restored at
    #   unpickling time by iterating over slotstate and calling setattr(func,
    #   slotname, slotvalue)
    slotstate = {
        "__name__": func.__name__,
        "__qualname__": func.__qualname__,
        "__annotations__": func.__annotations__,
        "__kwdefaults__": func.__kwdefaults__,
        "__defaults__": func.__defaults__,
        "__module__": func.__module__,
        "__doc__": func.__doc__,
        "__closure__": func.__closure__,
    }

    # Only ship the globals the function's bytecode actually references.
    f_globals_ref = _extract_code_globals(func.__code__)
    f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}

    if func.__closure__ is not None:
        closure_values = list(map(_get_cell_contents, func.__closure__))
    else:
        closure_values = ()

    # Extract currently-imported submodules used by func. Storing these modules
    # in a smoke _cloudpickle_subimports attribute of the object's state will
    # trigger the side effect of importing these modules at unpickling time
    # (which is necessary for func to work correctly once depickled)
    slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
        func.__code__, itertools.chain(f_globals.values(), closure_values)
    )
    slotstate["__globals__"] = f_globals

    state = func.__dict__
    return state, slotstate
+
+
def _class_getstate(obj):
    """Return the (clsdict, slotstate) pair pickled for a dynamic class."""
    clsdict = _extract_class_dict(obj)
    clsdict.pop("__weakref__", None)

    if issubclass(type(obj), abc.ABCMeta):
        # If obj is an instance of an ABCMeta subclass, don't pickle the
        # cache/negative caches populated during isinstance/issubclass
        # checks, but pickle the list of registered subclasses of obj.
        clsdict.pop("_abc_cache", None)
        clsdict.pop("_abc_negative_cache", None)
        clsdict.pop("_abc_negative_cache_version", None)
        registry = clsdict.pop("_abc_registry", None)
        if registry is None:
            # The abc caches and registered subclasses of a
            # class are bundled into the single _abc_impl attribute
            clsdict.pop("_abc_impl", None)
            (registry, _, _, _) = abc._get_dump(obj)

            clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
        else:
            # In the above if clause, registry is a set of weakrefs -- in
            # this case, registry is a WeakSet
            clsdict["_abc_impl"] = [type_ for type_ in registry]

    if "__slots__" in clsdict:
        # pickle string length optimization: member descriptors of obj are
        # created automatically from obj's __slots__ attribute, no need to
        # save them in obj's state
        if isinstance(obj.__slots__, str):
            # __slots__ may be a single string naming one slot.
            clsdict.pop(obj.__slots__)
        else:
            for k in obj.__slots__:
                clsdict.pop(k, None)

    clsdict.pop("__dict__", None)  # unpicklable property object

    return (clsdict, {})
+
+
def _enum_getstate(obj):
    """Return the (clsdict, slotstate) pair pickled for a dynamic enum class."""
    clsdict, slotstate = _class_getstate(obj)

    members = {e.name: e.value for e in obj}
    # Cleanup the clsdict that will be passed to _make_skeleton_enum:
    # Those attributes are already handled by the metaclass.
    for attrname in [
        "_generate_next_value_",
        "_member_names_",
        "_member_map_",
        "_member_type_",
        "_value2member_map_",
    ]:
        clsdict.pop(attrname, None)
    # Member values travel through _enum_getnewargs, not through the state.
    for member in members:
        clsdict.pop(member)
    # Special handling of Enum subclasses
    return clsdict, slotstate
+
+
+# COLLECTIONS OF OBJECTS REDUCERS
+# -------------------------------
+# A reducer is a function taking a single argument (obj), and that returns a
+# tuple with all the necessary data to re-construct obj. Apart from a few
+# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
+# correctly pickle an object.
+# While many built-in objects (Exceptions objects, instances of the "object"
+# class, etc), are shipped with their own built-in reducer (invoked using
+# obj.__reduce__), some do not. The following methods were created to "fill
+# these holes".
+
+
def _code_reduce(obj):
    """code object reducer.

    The argument tuple must match the positional signature of the running
    interpreter's ``types.CodeType`` constructor, which is why a branch per
    supported CPython variant is needed.
    """
    # If you are not sure about the order of arguments, take a look at help
    # of the specific type from types, for example:
    # >>> from types import CodeType
    # >>> help(CodeType)
    if hasattr(obj, "co_exceptiontable"):
        # Python 3.11 and later: there are some new attributes
        # related to the enhanced exceptions.
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_qualname,
            obj.co_firstlineno,
            obj.co_linetable,
            obj.co_exceptiontable,
            obj.co_freevars,
            obj.co_cellvars,
        )
    elif hasattr(obj, "co_linetable"):
        # Python 3.10 and later: obj.co_lnotab is deprecated and constructor
        # expects obj.co_linetable instead.
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_linetable,
            obj.co_freevars,
            obj.co_cellvars,
        )
    elif hasattr(obj, "co_nmeta"):  # pragma: no cover
        # "nogil" Python: modified attributes from 3.9
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_framesize,
            obj.co_ndefaultargs,
            obj.co_nmeta,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_lnotab,
            obj.co_exc_handlers,
            obj.co_jump_table,
            obj.co_freevars,
            obj.co_cellvars,
            obj.co_free2reg,
            obj.co_cell2reg,
        )
    else:
        # Backward compat for 3.8 and 3.9
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_lnotab,
            obj.co_freevars,
            obj.co_cellvars,
        )
    return types.CodeType, args
+
+
def _cell_reduce(obj):
    """Cell (containing values of a function's free variables) reducer."""
    try:
        # Accessing cell_contents raises ValueError on an empty cell.
        obj.cell_contents
    except ValueError:  # cell is empty
        return _make_empty_cell, ()
    else:
        return _make_cell, (obj.cell_contents,)
+
+
+def _classmethod_reduce(obj):
+ orig_func = obj.__func__
+ return type(obj), (orig_func,)
+
+
def _file_reduce(obj):
    """Save a file.

    Only named, open, readable, non-tty text files are supported: the whole
    content is read and shipped inside an in-memory StringIO replica that is
    returned as-is at unpickling time via _file_reconstructor.
    """
    import io

    if not hasattr(obj, "name") or not hasattr(obj, "mode"):
        raise pickle.PicklingError(
            "Cannot pickle files that do not map to an actual file"
        )
    # The standard streams are pickled by reference instead of by content.
    if obj is sys.stdout:
        return getattr, (sys, "stdout")
    if obj is sys.stderr:
        return getattr, (sys, "stderr")
    if obj is sys.stdin:
        raise pickle.PicklingError("Cannot pickle standard input")
    if obj.closed:
        raise pickle.PicklingError("Cannot pickle closed files")
    if hasattr(obj, "isatty") and obj.isatty():
        raise pickle.PicklingError("Cannot pickle files that map to tty objects")
    if "r" not in obj.mode and "+" not in obj.mode:
        raise pickle.PicklingError(
            "Cannot pickle files that are not opened for reading: %s" % obj.mode
        )

    name = obj.name

    retval = io.StringIO()

    try:
        # Read the whole file
        curloc = obj.tell()
        obj.seek(0)
        contents = obj.read()
        # Restore the original position so pickling has no visible side effect.
        obj.seek(curloc)
    except OSError as e:
        raise pickle.PicklingError(
            "Cannot pickle file %s as it cannot be read" % name
        ) from e
    retval.write(contents)
    # Leave the replica at the same position as the original file.
    retval.seek(curloc)

    retval.name = name
    return _file_reconstructor, (retval,)
+
+
+def _getset_descriptor_reduce(obj):
+ return getattr, (obj.__objclass__, obj.__name__)
+
+
+def _mappingproxy_reduce(obj):
+ return types.MappingProxyType, (dict(obj),)
+
+
+def _memoryview_reduce(obj):
+ return bytes, (obj.tobytes(),)
+
+
def _module_reduce(obj):
    """Pickle a module: by reference if importable, by value otherwise."""
    if _should_pickle_by_reference(obj):
        return subimport, (obj.__name__,)
    else:
        # Some external libraries can populate the "__builtins__" entry of a
        # module's `__dict__` with unpicklable objects (see #316). For that
        # reason, we do not attempt to pickle the "__builtins__" entry, and
        # restore a default value for it at unpickling time.
        state = obj.__dict__.copy()
        state.pop("__builtins__", None)
        return dynamic_subimport, (obj.__name__, state)
+
+
+def _method_reduce(obj):
+ return (types.MethodType, (obj.__func__, obj.__self__))
+
+
+def _logger_reduce(obj):
+ return logging.getLogger, (obj.name,)
+
+
+def _root_logger_reduce(obj):
+ return logging.getLogger, ()
+
+
+def _property_reduce(obj):
+ return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
+
+
+def _weakset_reduce(obj):
+ return weakref.WeakSet, (list(obj),)
+
+
def _dynamic_class_reduce(obj):
    """Save a class that can't be referenced as a module attribute.

    This method is used to serialize classes that are defined inside
    functions, or that otherwise can't be serialized as attribute lookups
    from importable modules.

    The returned 6-tuple is consumed by save_reduce/_save_reduce_pickle5:
    (reconstructor, newargs, state, listitems, dictitems, state_setter).
    """
    if Enum is not None and issubclass(obj, Enum):
        return (
            _make_skeleton_enum,
            _enum_getnewargs(obj),
            _enum_getstate(obj),
            None,
            None,
            _class_setstate,
        )
    else:
        return (
            _make_skeleton_class,
            _class_getnewargs(obj),
            _class_getstate(obj),
            None,
            None,
            _class_setstate,
        )
+
+
def _class_reduce(obj):
    """Select the reducer depending on the dynamic nature of the class obj."""
    # The singleton types are reconstructed through type(...) of their
    # singleton instance.
    if obj is type(None):  # noqa
        return type, (None,)
    elif obj is type(Ellipsis):
        return type, (Ellipsis,)
    elif obj is type(NotImplemented):
        return type, (NotImplemented,)
    elif obj in _BUILTIN_TYPE_NAMES:
        return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
    elif not _should_pickle_by_reference(obj):
        return _dynamic_class_reduce(obj)
    # NotImplemented makes the caller fall back to the default pickling path.
    return NotImplemented
+
+
def _dict_keys_reduce(obj):
    """Pickle a dict_keys view from the bare list of keys.

    Only the keys travel: shipping the whole backing dict could leak values
    the caller never meant to serialize.
    """
    return _make_dict_keys, (list(obj),)
+
+
def _dict_values_reduce(obj):
    """Pickle a dict_values view from the bare list of values.

    Only the values travel: shipping the whole backing dict could leak keys
    the caller never meant to serialize.
    """
    return _make_dict_values, (list(obj),)
+
+
def _dict_items_reduce(obj):
    """Pickle a dict_items view via a plain-dict snapshot of its pairs."""
    return _make_dict_items, (dict(obj),)
+
+
def _odict_keys_reduce(obj):
    """Pickle an odict_keys view from the bare list of keys.

    Only the keys travel: shipping the whole backing OrderedDict could leak
    values the caller never meant to serialize.
    """
    return _make_dict_keys, (list(obj), True)
+
+
def _odict_values_reduce(obj):
    """Pickle an odict_values view from the bare list of values.

    Only the values travel: shipping the whole backing OrderedDict could leak
    keys the caller never meant to serialize.
    """
    return _make_dict_values, (list(obj), True)
+
+
def _odict_items_reduce(obj):
    """Pickle an odict_items view via a plain-dict snapshot of its pairs."""
    return _make_dict_items, (dict(obj), True)
+
+
def _dataclass_field_base_reduce(obj):
    """Pickle a dataclass field-type sentinel by its unique name."""
    return _get_dataclass_field_type_sentinel, (obj.name,)
+
+
+# COLLECTIONS OF OBJECTS STATE SETTERS
+# ------------------------------------
+# state setters are called at unpickling time, once the object is created and
+# it has to be updated to how it was at unpickling time.
+
+
def _function_setstate(obj, state):
    """Update the state of a dynamic function.

    As __closure__ and __globals__ are readonly attributes of a function, we
    cannot rely on the native setstate routine of pickle.load_build, that calls
    setattr on items of the slotstate. Instead, we have to modify them inplace.
    """
    state, slotstate = state
    obj.__dict__.update(state)

    obj_globals = slotstate.pop("__globals__")
    obj_closure = slotstate.pop("__closure__")
    # _cloudpickle_subimports is a set of submodules that must be loaded for
    # the pickled function to work correctly at unpickling time. Now that these
    # submodules are depickled (hence imported), they can be removed from the
    # object's state (the object state only served as a reference holder to
    # these submodules)
    slotstate.pop("_cloudpickle_submodules")

    obj.__globals__.update(obj_globals)
    # Guarantee a usable __builtins__ entry in the restored globals.
    obj.__globals__["__builtins__"] = __builtins__

    if obj_closure is not None:
        for i, cell in enumerate(obj_closure):
            try:
                value = cell.cell_contents
            except ValueError:  # cell is empty
                continue
            # Fill the matching (pre-created, empty) cell of obj in place.
            obj.__closure__[i].cell_contents = value

    # Remaining slotstate entries are plain writable attributes.
    for k, v in slotstate.items():
        setattr(obj, k, v)
+
+
+def _class_setstate(obj, state):
+ state, slotstate = state
+ registry = None
+ for attrname, attr in state.items():
+ if attrname == "_abc_impl":
+ registry = attr
+ else:
+ setattr(obj, attrname, attr)
+ if registry is not None:
+ for subclass in registry:
+ obj.register(subclass)
+
+ return obj
+
+
+# COLLECTION OF DATACLASS UTILITIES
+# ---------------------------------
+# There are some internal sentinel values whose identity must be preserved when
+# unpickling dataclass fields. Each sentinel value has a unique name that we can
+# use to retrieve its identity at unpickling time.
+
+
+_DATACLASSE_FIELD_TYPE_SENTINELS = {
+ dataclasses._FIELD.name: dataclasses._FIELD,
+ dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
+ dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
+}
+
+
+def _get_dataclass_field_type_sentinel(name):
+ return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
+
+
class Pickler(pickle.Pickler):
    """cloudpickle's pickler.

    Extends the stdlib pickler with reducers for objects it cannot handle
    (dynamic functions and classes, cells, code objects, loggers, files,
    dict views, ...). On CPython the function/class reducers are hooked in
    via ``reducer_override``; on PyPy the pure-Python ``dispatch`` table is
    patched instead.
    """

    # set of reducers defined and used by cloudpickle (private)
    _dispatch_table = {}
    _dispatch_table[classmethod] = _classmethod_reduce
    _dispatch_table[io.TextIOWrapper] = _file_reduce
    _dispatch_table[logging.Logger] = _logger_reduce
    _dispatch_table[logging.RootLogger] = _root_logger_reduce
    _dispatch_table[memoryview] = _memoryview_reduce
    _dispatch_table[property] = _property_reduce
    _dispatch_table[staticmethod] = _classmethod_reduce
    _dispatch_table[CellType] = _cell_reduce
    _dispatch_table[types.CodeType] = _code_reduce
    _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
    _dispatch_table[types.ModuleType] = _module_reduce
    _dispatch_table[types.MethodType] = _method_reduce
    _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
    _dispatch_table[weakref.WeakSet] = _weakset_reduce
    _dispatch_table[typing.TypeVar] = _typevar_reduce
    _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
    _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
    _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
    _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
    _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
    _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
    _dispatch_table[abc.abstractmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractproperty] = _property_reduce
    _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce

    # cloudpickle's reducers take precedence over copyreg's global table.
    dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)

    # function reducers are defined as instance methods of cloudpickle.Pickler
    # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref)
    def _dynamic_function_reduce(self, func):
        """Reduce a function that is not pickleable via attribute lookup."""
        newargs = self._function_getnewargs(func)
        state = _function_getstate(func)
        return (_make_function, newargs, state, None, None, _function_setstate)

    def _function_reduce(self, obj):
        """Reducer for function objects.

        If obj is a top-level attribute of a file-backed module, this reducer
        returns NotImplemented, making the cloudpickle.Pickler fall back to
        traditional pickle.Pickler routines to save obj. Otherwise, it reduces
        obj using a custom cloudpickle reducer designed specifically to handle
        dynamic functions.
        """
        if _should_pickle_by_reference(obj):
            return NotImplemented
        else:
            return self._dynamic_function_reduce(obj)

    def _function_getnewargs(self, func):
        """Build the newargs tuple handed to _make_function for *func*."""
        code = func.__code__

        # base_globals represents the future global namespace of func at
        # unpickling time. Looking it up and storing it in
        # cloudpickle.Pickler.globals_ref allow functions sharing the same
        # globals at pickling time to also share them once unpickled, at one
        # condition: since globals_ref is an attribute of a cloudpickle.Pickler
        # instance, and that a new cloudpickle.Pickler is created each time
        # cloudpickle.dump or cloudpickle.dumps is called, functions also need
        # to be saved within the same invocation of
        # cloudpickle.dump/cloudpickle.dumps (for example:
        # cloudpickle.dumps([f1, f2])). There is no such limitation when using
        # cloudpickle.Pickler.dump, as long as the multiple invocations are
        # bound to the same cloudpickle.Pickler instance.
        base_globals = self.globals_ref.setdefault(id(func.__globals__), {})

        if base_globals == {}:
            # Add module attributes used to resolve relative imports
            # instructions inside func.
            for k in ["__package__", "__name__", "__path__", "__file__"]:
                if k in func.__globals__:
                    base_globals[k] = func.__globals__[k]

        # Do not bind the free variables before the function is created to
        # avoid infinite recursion.
        if func.__closure__ is None:
            closure = None
        else:
            closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))

        return code, base_globals, None, None, closure

    def dump(self, obj):
        """Pickle *obj*, mapping deep-recursion RuntimeError to PicklingError."""
        try:
            return super().dump(obj)
        except RuntimeError as e:
            if len(e.args) > 0 and "recursion" in e.args[0]:
                msg = "Could not pickle object as excessively deep recursion required."
                raise pickle.PicklingError(msg) from e
            else:
                raise

    def __init__(self, file, protocol=None, buffer_callback=None):
        if protocol is None:
            protocol = DEFAULT_PROTOCOL
        super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
        # map functions __globals__ attribute ids, to ensure that functions
        # sharing the same global namespace at pickling time also share
        # their global namespace at unpickling time.
        self.globals_ref = {}
        self.proto = int(protocol)

    if not PYPY:
        # pickle.Pickler is the C implementation of the CPython pickler and
        # therefore we rely on reduce_override method to customize the pickler
        # behavior.

        # `cloudpickle.Pickler.dispatch` is only left for backward
        # compatibility - note that when using protocol 5,
        # `cloudpickle.Pickler.dispatch` is not an extension of
        # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
        # subclasses the C-implemented `pickle.Pickler`, which does not expose
        # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler`
        # used `cloudpickle.Pickler.dispatch` as a class-level attribute
        # storing all reducers implemented by cloudpickle, but the attribute
        # name was not a great choice given because it would collide with a
        # similarly named attribute in the pure-Python `pickle._Pickler`
        # implementation in the standard library.
        dispatch = dispatch_table

        # Implementation of the reducer_override callback, in order to
        # efficiently serialize dynamic functions and classes by subclassing
        # the C-implemented `pickle.Pickler`.
        # TODO: decorrelate reducer_override (which is tied to CPython's
        # implementation - would it make sense to backport it to pypy? - and
        # pickle's protocol 5 which is implementation agnostic. Currently, the
        # availability of both notions coincide on CPython's pickle, but it may
        # not be the case anymore when pypy implements protocol 5.

        def reducer_override(self, obj):
            """Type-agnostic reducing callback for function and classes.

            For performance reasons, subclasses of the C `pickle.Pickler` class
            cannot register custom reducers for functions and classes in the
            dispatch_table attribute. Reducers for such types must instead
            implemented via the special `reducer_override` method.

            Note that this method will be called for any object except a few
            builtin-types (int, lists, dicts etc.), which differs from reducers
            in the Pickler's dispatch_table, each of them being invoked for
            objects of a specific type only.

            This property comes in handy for classes: although most classes are
            instances of the ``type`` metaclass, some of them can be instances
            of other custom metaclasses (such as enum.EnumMeta for example). In
            particular, the metaclass will likely not be known in advance, and
            thus cannot be special-cased using an entry in the dispatch_table.
            reducer_override, among other things, allows us to register a
            reducer that will be called for any class, independently of its
            type.

            Notes:

            * reducer_override has the priority over dispatch_table-registered
            reducers.
            * reducer_override can be used to fix other limitations of
              cloudpickle for other types that suffered from type-specific
              reducers, such as Exceptions. See
              https://github.com/cloudpipe/cloudpickle/issues/248
            """
            t = type(obj)
            try:
                is_anyclass = issubclass(t, type)
            except TypeError:  # t is not a class (old Boost; see SF #502085)
                is_anyclass = False

            if is_anyclass:
                return _class_reduce(obj)
            elif isinstance(obj, types.FunctionType):
                return self._function_reduce(obj)
            else:
                # fallback to save_global, including the Pickler's
                # dispatch_table
                return NotImplemented

    else:
        # When reducer_override is not available, hack the pure-Python
        # Pickler's types.FunctionType and type savers. Note: the type saver
        # must override Pickler.save_global, because pickle.py contains a
        # hard-coded call to save_global when pickling meta-classes.
        dispatch = pickle.Pickler.dispatch.copy()

        def _save_reduce_pickle5(
            self,
            func,
            args,
            state=None,
            listitems=None,
            dictitems=None,
            state_setter=None,
            obj=None,
        ):
            save = self.save
            write = self.write
            # Save everything except the state, which is applied below via
            # the explicit state_setter call.
            self.save_reduce(
                func,
                args,
                state=None,
                listitems=listitems,
                dictitems=dictitems,
                obj=obj,
            )
            # backport of the Python 3.8 state_setter pickle operations
            save(state_setter)
            save(obj)  # simple BINGET opcode as obj is already memoized.
            save(state)
            write(pickle.TUPLE2)
            # Trigger a state_setter(obj, state) function call.
            write(pickle.REDUCE)
            # The purpose of state_setter is to carry-out an
            # inplace modification of obj. We do not care about what the
            # method might return, so its output is eventually removed from
            # the stack.
            write(pickle.POP)

        def save_global(self, obj, name=None, pack=struct.pack):
            """Main dispatch method.

            The name of this method is somewhat misleading: all types get
            dispatched here.
            """
            if obj is type(None):  # noqa
                return self.save_reduce(type, (None,), obj=obj)
            elif obj is type(Ellipsis):
                return self.save_reduce(type, (Ellipsis,), obj=obj)
            elif obj is type(NotImplemented):
                return self.save_reduce(type, (NotImplemented,), obj=obj)
            elif obj in _BUILTIN_TYPE_NAMES:
                return self.save_reduce(
                    _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
                )

            if name is not None:
                super().save_global(obj, name=name)
            elif not _should_pickle_by_reference(obj, name=name):
                self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
            else:
                super().save_global(obj, name=name)

        dispatch[type] = save_global

        def save_function(self, obj, name=None):
            """Registered with the dispatch to handle all function types.

            Determines what kind of function obj is (e.g. lambda, defined at
            interactive prompt, etc) and handles the pickling appropriately.
            """
            if _should_pickle_by_reference(obj, name=name):
                return super().save_global(obj, name=name)
            elif PYPY and isinstance(obj.__code__, builtin_code_type):
                return self.save_pypy_builtin_func(obj)
            else:
                return self._save_reduce_pickle5(
                    *self._dynamic_function_reduce(obj), obj=obj
                )

        def save_pypy_builtin_func(self, obj):
            """Save pypy equivalent of builtin functions.

            PyPy does not have the concept of builtin-functions. Instead,
            builtin-functions are simple function instances, but with a
            builtin-code attribute.
            Most of the time, builtin functions should be pickled by attribute.
            But PyPy has flaky support for __qualname__, so some builtin
            functions such as float.__new__ will be classified as dynamic. For
            this reason only, we created this special routine. Because
            builtin-functions are not expected to have closure or globals,
            there is no additional hack (compared the one already implemented
            in pickle) to protect ourselves from reference cycles. A simple
            (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
            also that PyPy improved their support for __qualname__ in v3.6, so
            this routing should be removed when cloudpickle supports only PyPy
            3.6 and later.
            """
            rv = (
                types.FunctionType,
                (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
                obj.__dict__,
            )
            self.save_reduce(*rv, obj=obj)

        dispatch[types.FunctionType] = save_function
+
+
+# Shorthands similar to pickle.dump/pickle.dumps
+
+
def dump(obj, file, protocol=None, buffer_callback=None):
    """Serialize obj as bytes streamed into file

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
    speed between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python (although this is not always
    guaranteed to work because cloudpickle relies on some internal
    implementation details that can change from one Python version to the
    next).
    """
    pickler = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
    pickler.dump(obj)
+
+
def dumps(obj, protocol=None, buffer_callback=None):
    """Serialize obj as a string of bytes allocated in memory

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
    speed between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python (although this is not always
    guaranteed to work because cloudpickle relies on some internal
    implementation details that can change from one Python version to the
    next).
    """
    stream = io.BytesIO()
    with stream:
        pickler = Pickler(stream, protocol=protocol, buffer_callback=buffer_callback)
        pickler.dump(obj)
        return stream.getvalue()
+
+
# Include pickles unloading functions in this namespace for convenience.
# cloudpickle does not customize unpickling, so the stdlib loaders are
# re-exported unchanged.
load, loads = pickle.load, pickle.loads

# Backward compat alias.
CloudPickler = Pickler
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..52d6732e44ebcc0053b24969943f7c3b742268bb
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
@@ -0,0 +1,13 @@
+"""Compatibility module.
+
+It can be necessary to load files generated by previous versions of cloudpickle
+that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
+namespace.
+
+See: tests/test_backward_compat.py
+"""
+from . import cloudpickle
+
+
+def __getattr__(name):
+    """Module-level ``__getattr__`` (PEP 562): forward any attribute lookup
+    to the main ``cloudpickle`` module so that legacy pickles referencing
+    ``cloudpickle.cloudpickle_fast.<symbol>`` keep resolving.
+    """
+    return getattr(cloudpickle, name)
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5886d2a62092bdc9f444d7a22058d065de567818
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__init__.py
@@ -0,0 +1,44 @@
+r"""The :mod:`loky` module manages a pool of workers that can be re-used across time.
+It provides a robust and dynamic implementation of the
+:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which
+hide the pool management under the hood.
+"""
+from concurrent.futures import (
+    ALL_COMPLETED,
+    FIRST_COMPLETED,
+    FIRST_EXCEPTION,
+    CancelledError,
+    Executor,
+    TimeoutError,
+    as_completed,
+    wait,
+)
+
+from ._base import Future
+from .backend.context import cpu_count
+from .backend.reduction import set_loky_pickler
+from .reusable_executor import get_reusable_executor
+from .cloudpickle_wrapper import wrap_non_picklable_objects
+from .process_executor import BrokenProcessPool, ProcessPoolExecutor
+
+
+# Public API: a mix of loky-specific entry points and re-exported
+# concurrent.futures names, so loky can be used as a drop-in replacement.
+__all__ = [
+    "get_reusable_executor",
+    "cpu_count",
+    "wait",
+    "as_completed",
+    "Future",
+    "Executor",
+    "ProcessPoolExecutor",
+    "BrokenProcessPool",
+    "CancelledError",
+    "TimeoutError",
+    "FIRST_COMPLETED",
+    "FIRST_EXCEPTION",
+    "ALL_COMPLETED",
+    "wrap_non_picklable_objects",
+    "set_loky_pickler",
+]
+
+
+__version__ = "3.4.1"
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cad5a2caf9d9e49436a73fe7fb3305cd95fd2277
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8614611d3c1bc96c33d76c40bd74cd0c6af28ac1
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcfd1361a9b8bc1617f0404f94aad119a4db83f2
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10a35d25c55cdfb71159dac146d7586dbd92a3f1
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/_base.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..da0abc1e7fa18363e6342a3b67410f1429e6fa10
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/_base.py
@@ -0,0 +1,28 @@
+###############################################################################
+# Modification of concurrent.futures.Future
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from concurrent/futures/_base.py (17/02/2017)
+# * Do not use yield from
+# * Use old super syntax
+#
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+from concurrent.futures import Future as _BaseFuture
+from concurrent.futures._base import LOGGER
+
+
+# To make loky._base.Future instances awaitable by concurrent.futures.wait,
+# derive our custom Future class from _BaseFuture. _invoke_callback is the only
+# modification made to this class in loky.
+# TODO investigate why using `concurrent.futures.Future` directly does not
+# always work in our test suite.
+class Future(_BaseFuture):
+    """loky Future: behaves like ``concurrent.futures.Future`` except that
+    ``_invoke_callbacks`` catches ``BaseException`` (not just ``Exception``)
+    so a failing done-callback can never propagate into the machinery that
+    completes the future.
+    """
+
+    def _invoke_callbacks(self):
+        for callback in self._done_callbacks:
+            try:
+                callback(self)
+            except BaseException:
+                # Log and swallow: callback errors are reported, never raised.
+                LOGGER.exception(f"exception calling callback for {self!r}")
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..080370a6f9341804cff330b0af1556a66b78e285
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..240c02c22415e329ebeb637a58f429557dde1671
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py
new file mode 100644
index 0000000000000000000000000000000000000000..2353c42f51a6e6c558ce70e35e1b7405e22d70ed
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py
@@ -0,0 +1,43 @@
+###############################################################################
+# Launch a subprocess using forkexec and make sure only the needed fd are
+# shared in the two process.
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+import os
+import sys
+
+
+def close_fds(keep_fds): # pragma: no cover
+    """Close all the file descriptors except those in keep_fds."""
+
+    # Make sure to keep stdout and stderr open for logging purpose
+    keep_fds = {*keep_fds, 1, 2}
+
+    # We try to retrieve all the open fds
+    try:
+        open_fds = {int(fd) for fd in os.listdir("/proc/self/fd")}
+    except FileNotFoundError:
+        # /proc is Linux-specific; on other POSIX systems fall back to
+        # treating every fd below the soft RLIMIT_NOFILE bound as open.
+        import resource
+
+        max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+        open_fds = {*range(max_nfds)}
+
+    for i in open_fds - keep_fds:
+        try:
+            os.close(i)
+        except OSError:
+            # fd was not actually open (or already closed): best effort.
+            pass
+
+
+def fork_exec(cmd, keep_fds, env=None):
+    """Fork, close every fd not in ``keep_fds``, then exec ``cmd``.
+
+    Returns the child pid in the parent. Never returns in the child: its
+    image is replaced by ``os.execve(sys.executable, cmd, ...)``. Entries in
+    ``env`` are merged over a copy of the parent's environment.
+    """
+    # copy the environment variables to set in the child process
+    env = env or {}
+    child_env = {**os.environ, **env}
+
+    pid = os.fork()
+    if pid == 0: # pragma: no cover
+        close_fds(keep_fds)
+        os.execve(sys.executable, cmd, child_env)
+    else:
+        return pid
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py
new file mode 100644
index 0000000000000000000000000000000000000000..74395be0757f0a07ef92a7b0efe1e1ea4ecdac77
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py
@@ -0,0 +1,193 @@
+###############################################################################
+# Popen for LokyProcess.
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+import os
+import sys
+import signal
+import pickle
+from io import BytesIO
+from multiprocessing import util, process
+from multiprocessing.connection import wait
+from multiprocessing.context import set_spawning_popen
+
+from . import reduction, resource_tracker, spawn
+
+
+__all__ = ["Popen"]
+
+
+#
+# Wrapper for an fd used while launching a process
+#
+
+
+class _DupFd:
+    """Wrap a file descriptor during unpickling in the child: the fd is made
+    inheritable at construction and ownership is handed over via detach().
+    """
+
+    def __init__(self, fd):
+        self.fd = reduction._mk_inheritable(fd)
+
+    def detach(self):
+        # Transfer ownership of the (now inheritable) fd to the caller.
+        return self.fd
+
+
+#
+# Start child process using subprocess.Popen
+#
+
+
+class Popen:
+    """Popen implementation for LokyProcess on POSIX.
+
+    Pickles the process object and its preparation data into an in-memory
+    buffer, launches the child via fork_exec, then feeds the pickled payload
+    to the child through a pipe.
+    """
+
+    method = "loky"
+    DupFd = _DupFd
+
+    def __init__(self, process_obj):
+        # Flush inherited streams so the child does not re-emit buffered text.
+        sys.stdout.flush()
+        sys.stderr.flush()
+        self.returncode = None
+        self._fds = []
+        self._launch(process_obj)
+
+    def duplicate_for_child(self, fd):
+        # Track the fd so _launch keeps it open across the fork/exec, and
+        # make it inheritable for the child.
+        self._fds.append(fd)
+        return reduction._mk_inheritable(fd)
+
+    def poll(self, flag=os.WNOHANG):
+        """Non-blocking (by default) check of the child's exit status.
+
+        Returns None while the child is still running, otherwise caches and
+        returns the exit code (negative signal number if signalled).
+        """
+        if self.returncode is None:
+            while True:
+                try:
+                    pid, sts = os.waitpid(self.pid, flag)
+                except OSError:
+                    # Child process not yet created. See #1731717
+                    # e.errno == errno.ECHILD == 10
+                    return None
+                else:
+                    break
+            if pid == self.pid:
+                if os.WIFSIGNALED(sts):
+                    self.returncode = -os.WTERMSIG(sts)
+                else:
+                    assert os.WIFEXITED(sts)
+                    self.returncode = os.WEXITSTATUS(sts)
+        return self.returncode
+
+    def wait(self, timeout=None):
+        """Wait (up to `timeout` seconds) for the child and return its code.
+
+        Returns None if the timeout expires before the child exits.
+        """
+        if self.returncode is None:
+            if timeout is not None:
+                if not wait([self.sentinel], timeout):
+                    return None
+            # This shouldn't block if wait() returned successfully.
+            return self.poll(os.WNOHANG if timeout == 0.0 else 0)
+        return self.returncode
+
+    def terminate(self):
+        """Send SIGTERM to the child if it is still running."""
+        if self.returncode is None:
+            try:
+                os.kill(self.pid, signal.SIGTERM)
+            except ProcessLookupError:
+                # Child already gone; nothing to do.
+                pass
+            except OSError:
+                # Re-raise only if the child did not exit shortly after.
+                if self.wait(timeout=0.1) is None:
+                    raise
+
+    def _launch(self, process_obj):
+        # Pickle the preparation data and the process object *before*
+        # creating the child, into an in-memory buffer.
+
+        tracker_fd = resource_tracker._resource_tracker.getfd()
+
+        fp = BytesIO()
+        set_spawning_popen(self)
+        try:
+            prep_data = spawn.get_preparation_data(
+                process_obj._name,
+                getattr(process_obj, "init_main_module", True),
+            )
+            reduction.dump(prep_data, fp)
+            reduction.dump(process_obj, fp)
+
+        finally:
+            set_spawning_popen(None)
+
+        try:
+            # parent_r/child_w: child's liveness sentinel pipe.
+            # child_r/parent_w: pipe carrying the pickled payload.
+            parent_r, child_w = os.pipe()
+            child_r, parent_w = os.pipe()
+            # for fd in self._fds:
+            #     _mk_inheritable(fd)
+
+            # Child re-executes this module (see the __main__ block below)
+            # and reads the pickled payload from --pipe.
+            cmd_python = [sys.executable]
+            cmd_python += ["-m", self.__module__]
+            cmd_python += ["--process-name", str(process_obj.name)]
+            cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))]
+            reduction._mk_inheritable(child_w)
+            reduction._mk_inheritable(tracker_fd)
+            self._fds += [child_r, child_w, tracker_fd]
+            if sys.version_info >= (3, 8) and os.name == "posix":
+                # Also pass down the stdlib multiprocessing resource tracker fd.
+                mp_tracker_fd = prep_data["mp_tracker_args"]["fd"]
+                self.duplicate_for_child(mp_tracker_fd)
+
+            from .fork_exec import fork_exec
+
+            pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
+            util.debug(
+                f"launched python with pid {pid} and cmd:\n{cmd_python}"
+            )
+            self.sentinel = parent_r
+
+            # Prefer the zero-copy getbuffer() when available.
+            method = "getbuffer"
+            if not hasattr(fp, method):
+                method = "getvalue"
+            with os.fdopen(parent_w, "wb") as f:
+                f.write(getattr(fp, method)())
+            self.pid = pid
+        finally:
+            # parent_r must outlive self (it is the sentinel): close it only
+            # when this Popen is finalized. The child's pipe ends are closed
+            # in the parent immediately.
+            if parent_r is not None:
+                util.Finalize(self, os.close, (parent_r,))
+            for fd in (child_r, child_w):
+                if fd is not None:
+                    os.close(fd)
+
+    @staticmethod
+    def thread_is_spawning():
+        return True
+
+
+if __name__ == "__main__":
+    # Child-side bootstrap: this module is re-executed with `-m` by
+    # Popen._launch. It unpickles the preparation data and the process
+    # object from the --pipe fd, then runs the process.
+    import argparse
+
+    parser = argparse.ArgumentParser("Command line parser")
+    parser.add_argument(
+        "--pipe", type=int, required=True, help="File handle for the pipe"
+    )
+    parser.add_argument(
+        "--process-name",
+        type=str,
+        default=None,
+        help="Identifier for debugging purpose",
+    )
+
+    args = parser.parse_args()
+
+    info = {}
+    exitcode = 1
+    try:
+        with os.fdopen(args.pipe, "rb") as from_parent:
+            # Mark the process as bootstrapping so that nested spawning
+            # attempts are detected (see spawn._check_not_importing_main).
+            process.current_process()._inheriting = True
+            try:
+                prep_data = pickle.load(from_parent)
+                spawn.prepare(prep_data)
+                process_obj = pickle.load(from_parent)
+            finally:
+                del process.current_process()._inheriting
+
+        exitcode = process_obj._bootstrap()
+    except Exception:
+        # Print a framed traceback; exitcode stays 1 to signal failure.
+        print("\n\n" + "-" * 80)
+        print(f"{args.process_name} failed with traceback: ")
+        print("-" * 80)
+        import traceback
+
+        print(traceback.format_exc())
+        print("\n" + "-" * 80)
+    finally:
+        if from_parent is not None:
+            from_parent.close()
+
+    sys.exit(exitcode)
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py
new file mode 100644
index 0000000000000000000000000000000000000000..356255094b7647be8de6998a8752dd7807b25e10
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py
@@ -0,0 +1,85 @@
+###############################################################################
+# LokyProcess implementation
+#
+# authors: Thomas Moreau and Olivier Grisel
+#
+# based on multiprocessing/process.py (17/02/2017)
+#
+import sys
+from multiprocessing.context import assert_spawning
+from multiprocessing.process import BaseProcess
+
+
+class LokyProcess(BaseProcess):
+    """Process started with the 'loky' start method.
+
+    Extends BaseProcess with an ``env`` dict of extra environment variables
+    for the child and an ``init_main_module`` flag controlling whether the
+    parent's __main__ module is re-imported in the child.
+    """
+
+    _start_method = "loky"
+
+    def __init__(
+        self,
+        group=None,
+        target=None,
+        name=None,
+        args=(),
+        kwargs={},
+        daemon=None,
+        init_main_module=False,
+        env=None,
+    ):
+        super().__init__(
+            group=group,
+            target=target,
+            name=name,
+            args=args,
+            kwargs=kwargs,
+            daemon=daemon,
+        )
+        self.env = {} if env is None else env
+        # NOTE(review): self-assignment forces the inherited authkey to be
+        # materialized as an instance attribute — confirm against BaseProcess.
+        self.authkey = self.authkey
+        self.init_main_module = init_main_module
+
+    @staticmethod
+    def _Popen(process_obj):
+        # Pick the platform-specific Popen implementation lazily.
+        if sys.platform == "win32":
+            from .popen_loky_win32 import Popen
+        else:
+            from .popen_loky_posix import Popen
+        return Popen(process_obj)
+
+
+class LokyInitMainProcess(LokyProcess):
+    """LokyProcess variant that always re-imports the parent's __main__
+    module in the child (init_main_module=True).
+    """
+
+    _start_method = "loky_init_main"
+
+    def __init__(
+        self,
+        group=None,
+        target=None,
+        name=None,
+        args=(),
+        kwargs={},
+        daemon=None,
+    ):
+        super().__init__(
+            group=group,
+            target=target,
+            name=name,
+            args=args,
+            kwargs=kwargs,
+            daemon=daemon,
+            init_main_module=True,
+        )
+
+
+#
+# We subclass bytes to avoid accidental transmission of auth keys over network
+#
+
+
+class AuthenticationKey(bytes):
+    """bytes subclass that refuses to be pickled outside of process spawning.
+
+    assert_spawning raises unless a child process is currently being
+    spawned, so the key can only cross the parent/child boundary and never
+    leak through an arbitrary pickle (e.g. over the network).
+    """
+
+    def __reduce__(self):
+        try:
+            assert_spawning(self)
+        except RuntimeError:
+            raise TypeError(
+                "Pickling an AuthenticationKey object is "
+                "disallowed for security reasons"
+            )
+        return AuthenticationKey, (bytes(self),)
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py
new file mode 100644
index 0000000000000000000000000000000000000000..5afd99b420fbc480ed5eb743333a687110a90e49
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py
@@ -0,0 +1,236 @@
+###############################################################################
+# Queue and SimpleQueue implementation for loky
+#
+# authors: Thomas Moreau, Olivier Grisel
+#
+# based on multiprocessing/queues.py (16/02/2017)
+# * Add some custom reducers for the Queues/SimpleQueue to tweak the
+# pickling process. (overload Queue._feed/SimpleQueue.put)
+#
+import os
+import sys
+import errno
+import weakref
+import threading
+from multiprocessing import util
+from multiprocessing.queues import (
+ Full,
+ Queue as mp_Queue,
+ SimpleQueue as mp_SimpleQueue,
+ _sentinel,
+)
+from multiprocessing.context import assert_spawning
+
+from .reduction import dumps
+
+
+__all__ = ["Queue", "SimpleQueue", "Full"]
+
+
+class Queue(mp_Queue):
+    """multiprocessing.Queue subclass whose feeder thread serializes items
+    with loky's customizable reducers (see .reduction.dumps) instead of the
+    default ForkingPickler.
+    """
+
+    def __init__(self, maxsize=0, reducers=None, ctx=None):
+        super().__init__(maxsize=maxsize, ctx=ctx)
+        # reducers is threaded through pickling and into the feeder thread.
+        self._reducers = reducers
+
+    # Use custom queue set/get state to be able to reduce the custom reducers
+    def __getstate__(self):
+        assert_spawning(self)
+        return (
+            self._ignore_epipe,
+            self._maxsize,
+            self._reader,
+            self._writer,
+            self._reducers,
+            self._rlock,
+            self._wlock,
+            self._sem,
+            self._opid,
+        )
+
+    def __setstate__(self, state):
+        (
+            self._ignore_epipe,
+            self._maxsize,
+            self._reader,
+            self._writer,
+            self._reducers,
+            self._rlock,
+            self._wlock,
+            self._sem,
+            self._opid,
+        ) = state
+        # The post-unpickle re-initialization hook was renamed in CPython 3.9.
+        if sys.version_info >= (3, 9):
+            self._reset()
+        else:
+            self._after_fork()
+
+    # Overload _start_thread to correctly call our custom _feed
+    def _start_thread(self):
+        util.debug("Queue._start_thread()")
+
+        # Start thread which transfers data from buffer to pipe
+        self._buffer.clear()
+        self._thread = threading.Thread(
+            target=Queue._feed,
+            args=(
+                self._buffer,
+                self._notempty,
+                self._send_bytes,
+                self._wlock,
+                self._writer.close,
+                self._reducers,
+                self._ignore_epipe,
+                self._on_queue_feeder_error,
+                self._sem,
+            ),
+            name="QueueFeederThread",
+        )
+        self._thread.daemon = True
+
+        util.debug("doing self._thread.start()")
+        self._thread.start()
+        util.debug("... done self._thread.start()")
+
+        # On process exit we will wait for data to be flushed to pipe.
+        #
+        # However, if this process created the queue then all
+        # processes which use the queue will be descendants of this
+        # process. Therefore waiting for the queue to be flushed
+        # is pointless once all the child processes have been joined.
+        created_by_this_process = self._opid == os.getpid()
+        if not self._joincancelled and not created_by_this_process:
+            self._jointhread = util.Finalize(
+                self._thread,
+                Queue._finalize_join,
+                [weakref.ref(self._thread)],
+                exitpriority=-5,
+            )
+
+        # Send sentinel to the thread queue object when garbage collected
+        self._close = util.Finalize(
+            self,
+            Queue._finalize_close,
+            [self._buffer, self._notempty],
+            exitpriority=10,
+        )
+
+    # Overload the _feed methods to use our custom pickling strategy.
+    @staticmethod
+    def _feed(
+        buffer,
+        notempty,
+        send_bytes,
+        writelock,
+        close,
+        reducers,
+        ignore_epipe,
+        onerror,
+        queue_sem,
+    ):
+        util.debug("starting thread to feed data to pipe")
+        # Bind bound-methods to locals: these run on every item in the
+        # feeder hot loop.
+        nacquire = notempty.acquire
+        nrelease = notempty.release
+        nwait = notempty.wait
+        bpopleft = buffer.popleft
+        sentinel = _sentinel
+        if sys.platform != "win32":
+            wacquire = writelock.acquire
+            wrelease = writelock.release
+        else:
+            # Writes to a win32 message-oriented pipe are atomic: no lock.
+            wacquire = None
+
+        while True:
+            try:
+                nacquire()
+                try:
+                    if not buffer:
+                        nwait()
+                finally:
+                    nrelease()
+                try:
+                    while True:
+                        obj = bpopleft()
+                        if obj is sentinel:
+                            util.debug("feeder thread got sentinel -- exiting")
+                            close()
+                            return
+
+                        # serialize the data before acquiring the lock
+                        obj_ = dumps(obj, reducers=reducers)
+                        if wacquire is None:
+                            send_bytes(obj_)
+                        else:
+                            wacquire()
+                            try:
+                                send_bytes(obj_)
+                            finally:
+                                wrelease()
+                        # Remove references early to avoid leaking memory
+                        del obj, obj_
+                except IndexError:
+                    # buffer drained (bpopleft on empty deque): wait again.
+                    pass
+            except BaseException as e:
+                if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
+                    return
+                # Since this runs in a daemon thread the resources it uses
+                # may be become unusable while the process is cleaning up.
+                # We ignore errors which happen after the process has
+                # started to cleanup.
+                if util.is_exiting():
+                    util.info(f"error in queue thread: {e}")
+                    return
+                else:
+                    # Release the slot taken by the failed item, then report.
+                    queue_sem.release()
+                    onerror(e, obj)
+
+    def _on_queue_feeder_error(self, e, obj):
+        """
+        Private API hook called when feeding data in the background thread
+        raises an exception. For overriding by concurrent.futures.
+        """
+        import traceback
+
+        traceback.print_exc()
+
+
+class SimpleQueue(mp_SimpleQueue):
+    """multiprocessing.SimpleQueue subclass whose put() serializes items
+    with loky's customizable reducers (see .reduction.dumps).
+    """
+
+    def __init__(self, reducers=None, ctx=None):
+        super().__init__(ctx=ctx)
+
+        # Add possibility to use custom reducers
+        self._reducers = reducers
+
+    def close(self):
+        # Close both pipe ends explicitly.
+        self._reader.close()
+        self._writer.close()
+
+    # Use custom queue set/get state to be able to reduce the custom reducers
+    def __getstate__(self):
+        assert_spawning(self)
+        return (
+            self._reader,
+            self._writer,
+            self._reducers,
+            self._rlock,
+            self._wlock,
+        )
+
+    def __setstate__(self, state):
+        (
+            self._reader,
+            self._writer,
+            self._reducers,
+            self._rlock,
+            self._wlock,
+        ) = state
+
+    # Overload put to use our customizable reducer
+    def put(self, obj):
+        # serialize the data before acquiring the lock
+        obj = dumps(obj, reducers=self._reducers)
+        if self._wlock is None:
+            # writes to a message oriented win32 pipe are atomic
+            self._writer.send_bytes(obj)
+        else:
+            with self._wlock:
+                self._writer.send_bytes(obj)
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py
new file mode 100644
index 0000000000000000000000000000000000000000..25204a7a729d4d5f295070cd050c17a4ed9d49b7
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py
@@ -0,0 +1,378 @@
+###############################################################################
+# Server process to keep track of unlinked resources, like folders and
+# semaphores and clean them.
+#
+# author: Thomas Moreau
+#
+# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
+# * include custom spawnv_passfds to start the process
+# * add some VERBOSE logging
+#
+# TODO: multiprocessing.resource_tracker was contributed to Python 3.8 so
+# once loky drops support for Python 3.7 it might be possible to stop
+# maintaining this loky-specific fork. As a consequence, it might also be
+# possible to stop maintaining the loky.backend.synchronize fork of
+# multiprocessing.synchronize.
+
+#
+# On Unix we run a server process which keeps track of unlinked
+# resources. The server ignores SIGINT and SIGTERM and reads from a
+# pipe. The resource_tracker implements a reference counting scheme: each time
+# a Python process anticipates the shared usage of a resource by another
+# process, it signals the resource_tracker of this shared usage, and in return,
+# the resource_tracker increments the resource's reference count by 1.
+# Similarly, when access to a resource is closed by a Python process, the
+# process notifies the resource_tracker by asking it to decrement the
+# resource's reference count by 1. When the reference count drops to 0, the
+# resource_tracker attempts to clean up the underlying resource.
+
+# Finally, every other process connected to the resource tracker has a copy of
+# the writable end of the pipe used to communicate with it, so the resource
+# tracker gets EOF when all other processes have exited. Then the
+# resource_tracker process unlinks any remaining leaked resources (with
+# reference count above 0)
+
+# For semaphores, this is important because the system only supports a limited
+# number of named semaphores, and they will not be automatically removed till
+# the next reboot. Without this resource tracker process, "killall python"
+# would probably leave unlinked semaphores.
+
+# Note that this behavior differs from CPython's resource_tracker, which only
+# implements list of shared resources, and not a proper refcounting scheme.
+# Also, CPython's resource tracker will only attempt to cleanup those shared
+# resources once all processes connected to the resource tracker have exited.
+
+
+import os
+import shutil
+import sys
+import signal
+import warnings
+import threading
+from _multiprocessing import sem_unlink
+from multiprocessing import util
+
+from . import spawn
+
+if sys.platform == "win32":
+ import _winapi
+ import msvcrt
+ from multiprocessing.reduction import duplicate
+
+
+__all__ = ["ensure_running", "register", "unregister"]
+
+_HAVE_SIGMASK = hasattr(signal, "pthread_sigmask")
+_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
+
+_CLEANUP_FUNCS = {"folder": shutil.rmtree, "file": os.unlink}
+
+if os.name == "posix":
+ _CLEANUP_FUNCS["semlock"] = sem_unlink
+
+
+VERBOSE = False
+
+
+class ResourceTracker:
+    """Client-side handle on the resource tracker process.
+
+    Lazily spawns the tracker (a separate Python process running main()
+    below), keeps the write end of its command pipe in self._fd, and sends
+    it newline-terminated ``CMD:name:rtype`` messages.
+    """
+
+    def __init__(self):
+        self._lock = threading.Lock()
+        self._fd = None
+        self._pid = None
+
+    def getfd(self):
+        # Pipe fd to hand to child processes so they share the same tracker.
+        self.ensure_running()
+        return self._fd
+
+    def ensure_running(self):
+        """Make sure that resource tracker process is running.
+
+        This can be run from any process. Usually a child process will use
+        the resource created by its parent."""
+        with self._lock:
+            if self._fd is not None:
+                # resource tracker was launched before, is it still running?
+                if self._check_alive():
+                    # => still alive
+                    return
+                # => dead, launch it again
+                os.close(self._fd)
+                if os.name == "posix":
+                    try:
+                        # At this point, the resource_tracker process has been
+                        # killed or crashed. Let's remove the process entry
+                        # from the process table to avoid zombie processes.
+                        os.waitpid(self._pid, 0)
+                    except OSError:
+                        # The process was terminated or is a child from an
+                        # ancestor of the current process.
+                        pass
+                self._fd = None
+                self._pid = None
+
+                warnings.warn(
+                    "resource_tracker: process died unexpectedly, "
+                    "relaunching. Some folders/sempahores might "
+                    "leak."
+                )
+
+            fds_to_pass = []
+            try:
+                fds_to_pass.append(sys.stderr.fileno())
+            except Exception:
+                pass
+
+            # r is inherited by the tracker (its read end); w stays here.
+            r, w = os.pipe()
+            if sys.platform == "win32":
+                _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
+                os.close(r)
+                r = _r
+
+            cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})"
+            try:
+                fds_to_pass.append(r)
+                # process will out live us, so no need to wait on pid
+                exe = spawn.get_executable()
+                args = [exe, *util._args_from_interpreter_flags(), "-c", cmd]
+                util.debug(f"launching resource tracker: {args}")
+                # bpo-33613: Register a signal mask that will block the
+                # signals. This signal mask will be inherited by the child
+                # that is going to be spawned and will protect the child from a
+                # race condition that can make the child die before it
+                # registers signal handlers for SIGINT and SIGTERM. The mask is
+                # unregistered after spawning the child.
+                try:
+                    if _HAVE_SIGMASK:
+                        signal.pthread_sigmask(
+                            signal.SIG_BLOCK, _IGNORED_SIGNALS
+                        )
+                    pid = spawnv_passfds(exe, args, fds_to_pass)
+                finally:
+                    if _HAVE_SIGMASK:
+                        signal.pthread_sigmask(
+                            signal.SIG_UNBLOCK, _IGNORED_SIGNALS
+                        )
+            except BaseException:
+                os.close(w)
+                raise
+            else:
+                self._fd = w
+                self._pid = pid
+            finally:
+                # The tracker owns the read end now; close our copy.
+                if sys.platform == "win32":
+                    _winapi.CloseHandle(r)
+                else:
+                    os.close(r)
+
+    def _check_alive(self):
+        """Check for the existence of the resource tracker process."""
+        try:
+            # A PROBE write fails with BrokenPipeError once the tracker died.
+            self._send("PROBE", "", "")
+        except BrokenPipeError:
+            return False
+        else:
+            return True
+
+    def register(self, name, rtype):
+        """Register a named resource, and increment its refcount."""
+        self.ensure_running()
+        self._send("REGISTER", name, rtype)
+
+    def unregister(self, name, rtype):
+        """Unregister a named resource with resource tracker."""
+        self.ensure_running()
+        self._send("UNREGISTER", name, rtype)
+
+    def maybe_unlink(self, name, rtype):
+        """Decrement the refcount of a resource, and delete it if it hits 0"""
+        self.ensure_running()
+        self._send("MAYBE_UNLINK", name, rtype)
+
+    def _send(self, cmd, name, rtype):
+        if len(name) > 512:
+            # posix guarantees that writes to a pipe of less than PIPE_BUF
+            # bytes are atomic, and that PIPE_BUF >= 512
+            raise ValueError("name too long")
+        msg = f"{cmd}:{name}:{rtype}\n".encode("ascii")
+        nbytes = os.write(self._fd, msg)
+        assert nbytes == len(msg)
+
+
+# Module-level singleton tracker and convenience aliases bound to it.
+_resource_tracker = ResourceTracker()
+ensure_running = _resource_tracker.ensure_running
+register = _resource_tracker.register
+maybe_unlink = _resource_tracker.maybe_unlink
+unregister = _resource_tracker.unregister
+getfd = _resource_tracker.getfd
+
+
+def main(fd, verbose=0):
+    """Run resource tracker.
+
+    Entry point of the tracker process: reads ``CMD:name:rtype`` lines from
+    the pipe fd until EOF (i.e. until every client process has exited),
+    maintaining per-resource refcounts, then cleans up whatever is left.
+    """
+    # protect the process from ^C and "killall python" etc
+    if verbose:
+        util.log_to_stderr(level=util.DEBUG)
+
+    signal.signal(signal.SIGINT, signal.SIG_IGN)
+    signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+    if _HAVE_SIGMASK:
+        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
+
+    # stdin/stdout are not needed; stderr stays open for warnings/logging.
+    for f in (sys.stdin, sys.stdout):
+        try:
+            f.close()
+        except Exception:
+            pass
+
+    if verbose:
+        util.debug("Main resource tracker is running")
+
+    # registry[rtype][name] -> refcount
+    registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()}
+    try:
+        # keep track of registered/unregistered resources
+        if sys.platform == "win32":
+            fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
+        with open(fd, "rb") as f:
+            while True:
+                line = f.readline()
+                if line == b"": # EOF
+                    break
+                try:
+                    splitted = line.strip().decode("ascii").split(":")
+                    # name can potentially contain separator symbols (for
+                    # instance folders on Windows)
+                    cmd, name, rtype = (
+                        splitted[0],
+                        ":".join(splitted[1:-1]),
+                        splitted[-1],
+                    )
+
+                    if cmd == "PROBE":
+                        # liveness check only; no state change.
+                        continue
+
+                    if rtype not in _CLEANUP_FUNCS:
+                        raise ValueError(
+                            f"Cannot register {name} for automatic cleanup: "
+                            f"unknown resource type ({rtype}). Resource type "
+                            "should be one of the following: "
+                            f"{list(_CLEANUP_FUNCS.keys())}"
+                        )
+
+                    if cmd == "REGISTER":
+                        if name not in registry[rtype]:
+                            registry[rtype][name] = 1
+                        else:
+                            registry[rtype][name] += 1
+
+                        if verbose:
+                            util.debug(
+                                "[ResourceTracker] incremented refcount of "
+                                f"{rtype} {name} "
+                                f"(current {registry[rtype][name]})"
+                            )
+                    elif cmd == "UNREGISTER":
+                        del registry[rtype][name]
+                        if verbose:
+                            util.debug(
+                                f"[ResourceTracker] unregister {name} {rtype}: "
+                                f"registry({len(registry)})"
+                            )
+                    elif cmd == "MAYBE_UNLINK":
+                        registry[rtype][name] -= 1
+                        if verbose:
+                            util.debug(
+                                "[ResourceTracker] decremented refcount of "
+                                f"{rtype} {name} "
+                                f"(current {registry[rtype][name]})"
+                            )
+
+                        if registry[rtype][name] == 0:
+                            del registry[rtype][name]
+                            try:
+                                if verbose:
+                                    util.debug(
+                                        f"[ResourceTracker] unlink {name}"
+                                    )
+                                _CLEANUP_FUNCS[rtype](name)
+                            except Exception as e:
+                                warnings.warn(
+                                    f"resource_tracker: {name}: {e!r}"
+                                )
+
+                    else:
+                        raise RuntimeError(f"unrecognized command {cmd!r}")
+                except BaseException:
+                    # Never let one bad message kill the tracker loop.
+                    try:
+                        sys.excepthook(*sys.exc_info())
+                    except BaseException:
+                        pass
+    finally:
+        # all processes have terminated; cleanup any remaining resources
+        def _unlink_resources(rtype_registry, rtype):
+            if rtype_registry:
+                try:
+                    warnings.warn(
+                        "resource_tracker: There appear to be "
+                        f"{len(rtype_registry)} leaked {rtype} objects to "
+                        "clean up at shutdown"
+                    )
+                except Exception:
+                    pass
+            for name in rtype_registry:
+                # For some reason the process which created and registered this
+                # resource has failed to unregister it. Presumably it has
+                # died. We therefore clean it up.
+                try:
+                    _CLEANUP_FUNCS[rtype](name)
+                    if verbose:
+                        util.debug(f"[ResourceTracker] unlink {name}")
+                except Exception as e:
+                    warnings.warn(f"resource_tracker: {name}: {e!r}")
+
+        for rtype, rtype_registry in registry.items():
+            if rtype == "folder":
+                continue
+            else:
+                _unlink_resources(rtype_registry, rtype)
+
+        # The default cleanup routine for folders deletes everything inside
+        # those folders recursively, which can include other resources tracked
+        # by the resource tracker). To limit the risk of the resource tracker
+        # attempting to delete twice a resource (once as part of a tracked
+        # folder, and once as a resource), we delete the folders after all
+        # other resource types.
+        if "folder" in registry:
+            _unlink_resources(registry["folder"], "folder")
+
+    if verbose:
+        util.debug("resource tracker shut down")
+
+
+#
+# Start a program with only specified fds kept open
+#
+
+
+def spawnv_passfds(path, args, passfds):
+    """Spawn ``args`` keeping only the fds in ``passfds`` open; return pid.
+
+    POSIX: delegates to fork_exec after making each fd inheritable.
+    Windows: launches via _winapi.CreateProcess with handle inheritance.
+    """
+    passfds = sorted(passfds)
+    if sys.platform != "win32":
+        errpipe_read, errpipe_write = os.pipe()
+        try:
+            from .reduction import _mk_inheritable
+            from .fork_exec import fork_exec
+
+            _pass = [_mk_inheritable(fd) for fd in passfds]
+            return fork_exec(args, _pass)
+        finally:
+            os.close(errpipe_read)
+            os.close(errpipe_write)
+    else:
+        cmd = " ".join(f'"{x}"' for x in args)
+        try:
+            _, ht, pid, _ = _winapi.CreateProcess(
+                path, cmd, None, None, True, 0, None, None, None
+            )
+            _winapi.CloseHandle(ht)
+        except BaseException:
+            # NOTE(review): if CreateProcess raises, `pid` is never bound and
+            # the `return pid` below raises NameError — confirm intent.
+            pass
+        return pid
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py
new file mode 100644
index 0000000000000000000000000000000000000000..d011c398035f4e013ef36615a56e3bf0d8519d07
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py
@@ -0,0 +1,250 @@
+###############################################################################
+# Prepares and processes the data to setup the new process environment
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/spawn.py (17/02/2017)
+# * Improve logging data
+#
+import os
+import sys
+import runpy
+import textwrap
+import types
+from multiprocessing import process, util
+
+
+if sys.platform != "win32":
+ WINEXE = False
+ WINSERVICE = False
+else:
+ import msvcrt
+ from multiprocessing.reduction import duplicate
+
+ WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False)
+ WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
+
+if WINSERVICE:
+ _python_exe = os.path.join(sys.exec_prefix, "python.exe")
+else:
+ _python_exe = sys.executable
+
+
+def get_executable():
+ return _python_exe
+
+
+def _check_not_importing_main():
+ if getattr(process.current_process(), "_inheriting", False):
+ raise RuntimeError(
+ textwrap.dedent(
+ """\
+ An attempt has been made to start a new process before the
+ current process has finished its bootstrapping phase.
+
+ This probably means that you are not using fork to start your
+ child processes and you have forgotten to use the proper idiom
+ in the main module:
+
+ if __name__ == '__main__':
+ freeze_support()
+ ...
+
+ The "freeze_support()" line can be omitted if the program
+ is not going to be frozen to produce an executable."""
+ )
+ )
+
+
+def get_preparation_data(name, init_main_module=True):
+ """Return info about parent needed by child to unpickle process object."""
+ _check_not_importing_main()
+ d = dict(
+ log_to_stderr=util._log_to_stderr,
+ authkey=bytes(process.current_process().authkey),
+ name=name,
+ sys_argv=sys.argv,
+ orig_dir=process.ORIGINAL_DIR,
+ dir=os.getcwd(),
+ )
+
+ # Send sys_path and make sure the current directory will not be changed
+ d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path]
+
+ # Make sure to pass the information if the multiprocessing logger is active
+ if util._logger is not None:
+ d["log_level"] = util._logger.getEffectiveLevel()
+ if util._logger.handlers:
+ h = util._logger.handlers[0]
+ d["log_fmt"] = h.formatter._fmt
+
+ # Tell the child how to communicate with the resource_tracker
+ from .resource_tracker import _resource_tracker
+
+ _resource_tracker.ensure_running()
+ d["tracker_args"] = {"pid": _resource_tracker._pid}
+ if sys.platform == "win32":
+ d["tracker_args"]["fh"] = msvcrt.get_osfhandle(_resource_tracker._fd)
+ else:
+ d["tracker_args"]["fd"] = _resource_tracker._fd
+
+ if sys.version_info >= (3, 8) and os.name == "posix":
+ # joblib/loky#242: allow loky processes to retrieve the resource
+ # tracker of their parent in case the child processes depickles
+ # shared_memory objects, that are still tracked by multiprocessing's
+ # resource_tracker by default.
+ # XXX: this is a workaround that may be error prone: in the future, it
+ # would be better to have loky subclass multiprocessing's shared_memory
+ # to force registration of shared_memory segments via loky's
+ # resource_tracker.
+ from multiprocessing.resource_tracker import (
+ _resource_tracker as mp_resource_tracker,
+ )
+
+ # multiprocessing's resource_tracker must be running before loky
+ # process is created (othewise the child won't be able to use it if it
+ # is created later on)
+ mp_resource_tracker.ensure_running()
+ d["mp_tracker_args"] = {
+ "fd": mp_resource_tracker._fd,
+ "pid": mp_resource_tracker._pid,
+ }
+
+ # Figure out whether to initialise main in the subprocess as a module
+ # or through direct execution (or to leave it alone entirely)
+ if init_main_module:
+ main_module = sys.modules["__main__"]
+ try:
+ main_mod_name = getattr(main_module.__spec__, "name", None)
+ except BaseException:
+ main_mod_name = None
+ if main_mod_name is not None:
+ d["init_main_from_name"] = main_mod_name
+ elif sys.platform != "win32" or (not WINEXE and not WINSERVICE):
+ main_path = getattr(main_module, "__file__", None)
+ if main_path is not None:
+ if (
+ not os.path.isabs(main_path)
+ and process.ORIGINAL_DIR is not None
+ ):
+ main_path = os.path.join(process.ORIGINAL_DIR, main_path)
+ d["init_main_from_path"] = os.path.normpath(main_path)
+
+ return d
+
+
+#
+# Prepare current process
+#
+old_main_modules = []
+
+
+def prepare(data, parent_sentinel=None):
+ """Try to get current process ready to unpickle process object."""
+ if "name" in data:
+ process.current_process().name = data["name"]
+
+ if "authkey" in data:
+ process.current_process().authkey = data["authkey"]
+
+ if "log_to_stderr" in data and data["log_to_stderr"]:
+ util.log_to_stderr()
+
+ if "log_level" in data:
+ util.get_logger().setLevel(data["log_level"])
+
+ if "log_fmt" in data:
+ import logging
+
+ util.get_logger().handlers[0].setFormatter(
+ logging.Formatter(data["log_fmt"])
+ )
+
+ if "sys_path" in data:
+ sys.path = data["sys_path"]
+
+ if "sys_argv" in data:
+ sys.argv = data["sys_argv"]
+
+ if "dir" in data:
+ os.chdir(data["dir"])
+
+ if "orig_dir" in data:
+ process.ORIGINAL_DIR = data["orig_dir"]
+
+ if "mp_tracker_args" in data:
+ from multiprocessing.resource_tracker import (
+ _resource_tracker as mp_resource_tracker,
+ )
+
+ mp_resource_tracker._fd = data["mp_tracker_args"]["fd"]
+ mp_resource_tracker._pid = data["mp_tracker_args"]["pid"]
+ if "tracker_args" in data:
+ from .resource_tracker import _resource_tracker
+
+ _resource_tracker._pid = data["tracker_args"]["pid"]
+ if sys.platform == "win32":
+ handle = data["tracker_args"]["fh"]
+ handle = duplicate(handle, source_process=parent_sentinel)
+ _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
+ else:
+ _resource_tracker._fd = data["tracker_args"]["fd"]
+
+ if "init_main_from_name" in data:
+ _fixup_main_from_name(data["init_main_from_name"])
+ elif "init_main_from_path" in data:
+ _fixup_main_from_path(data["init_main_from_path"])
+
+
+# Multiprocessing module helpers to fix up the main module in
+# spawned subprocesses
+def _fixup_main_from_name(mod_name):
+ # __main__.py files for packages, directories, zip archives, etc, run
+ # their "main only" code unconditionally, so we don't even try to
+ # populate anything in __main__, nor do we make any changes to
+ # __main__ attributes
+ current_main = sys.modules["__main__"]
+ if mod_name == "__main__" or mod_name.endswith(".__main__"):
+ return
+
+ # If this process was forked, __main__ may already be populated
+ if getattr(current_main.__spec__, "name", None) == mod_name:
+ return
+
+ # Otherwise, __main__ may contain some non-main code where we need to
+ # support unpickling it properly. We rerun it as __mp_main__ and make
+ # the normal __main__ an alias to that
+ old_main_modules.append(current_main)
+ main_module = types.ModuleType("__mp_main__")
+ main_content = runpy.run_module(
+ mod_name, run_name="__mp_main__", alter_sys=True
+ )
+ main_module.__dict__.update(main_content)
+ sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
+
+
+def _fixup_main_from_path(main_path):
+ # If this process was forked, __main__ may already be populated
+ current_main = sys.modules["__main__"]
+
+ # Unfortunately, the main ipython launch script historically had no
+ # "if __name__ == '__main__'" guard, so we work around that
+ # by treating it like a __main__.py file
+ # See https://github.com/ipython/ipython/issues/4698
+ main_name = os.path.splitext(os.path.basename(main_path))[0]
+ if main_name == "ipython":
+ return
+
+ # Otherwise, if __file__ already has the setting we expect,
+ # there's nothing more to do
+ if getattr(current_main, "__file__", None) == main_path:
+ return
+
+ # If the parent process has sent a path through rather than a module
+ # name we assume it is an executable script that may contain
+ # non-main code that needs to be executed
+ old_main_modules.append(current_main)
+ main_module = types.ModuleType("__mp_main__")
+ main_content = runpy.run_path(main_path, run_name="__mp_main__")
+ main_module.__dict__.update(main_content)
+ sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py
new file mode 100644
index 0000000000000000000000000000000000000000..18db3e34db979240b4a4a943ea6931db3091321d
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py
@@ -0,0 +1,409 @@
+###############################################################################
+# Synchronization primitives based on our SemLock implementation
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/synchronize.py (17/02/2017)
+# * Remove ctx argument for compatibility reason
+# * Registers a cleanup function with the loky resource_tracker to remove the
+# semaphore when the process dies instead.
+#
+# TODO: investigate which Python version is required to be able to use
+# multiprocessing.resource_tracker and therefore multiprocessing.synchronize
+# instead of a loky-specific fork.
+
+import os
+import sys
+import tempfile
+import threading
+import _multiprocessing
+from time import time as _time
+from multiprocessing import process, util
+from multiprocessing.context import assert_spawning
+
+from . import resource_tracker
+
+__all__ = [
+ "Lock",
+ "RLock",
+ "Semaphore",
+ "BoundedSemaphore",
+ "Condition",
+ "Event",
+]
+# Try to import the mp.synchronize module cleanly, if it fails
+# raise ImportError for platforms lacking a working sem_open implementation.
+# See issue 3770
+try:
+ from _multiprocessing import SemLock as _SemLock
+ from _multiprocessing import sem_unlink
+except ImportError:
+ raise ImportError(
+ "This platform lacks a functioning sem_open"
+ " implementation, therefore, the required"
+ " synchronization primitives needed will not"
+ " function, see issue 3770."
+ )
+
+#
+# Constants
+#
+
+RECURSIVE_MUTEX, SEMAPHORE = range(2)
+SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
+
+
+#
+# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
+#
+
+
+class SemLock:
+
+ _rand = tempfile._RandomNameSequence()
+
+ def __init__(self, kind, value, maxvalue, name=None):
+ # unlink_now is only used on win32 or when we are using fork.
+ unlink_now = False
+ if name is None:
+ # Try to find an unused name for the SemLock instance.
+ for _ in range(100):
+ try:
+ self._semlock = _SemLock(
+ kind, value, maxvalue, SemLock._make_name(), unlink_now
+ )
+ except FileExistsError: # pragma: no cover
+ pass
+ else:
+ break
+ else: # pragma: no cover
+ raise FileExistsError("cannot find name for semaphore")
+ else:
+ self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now)
+ self.name = name
+ util.debug(
+ f"created semlock with handle {self._semlock.handle} and name "
+ f'"{self.name}"'
+ )
+
+ self._make_methods()
+
+ def _after_fork(obj):
+ obj._semlock._after_fork()
+
+ util.register_after_fork(self, _after_fork)
+
+ # When the object is garbage collected or the
+ # process shuts down we unlink the semaphore name
+ resource_tracker.register(self._semlock.name, "semlock")
+ util.Finalize(
+ self, SemLock._cleanup, (self._semlock.name,), exitpriority=0
+ )
+
+ @staticmethod
+ def _cleanup(name):
+ try:
+ sem_unlink(name)
+ except FileNotFoundError:
+ # Already unlinked, possibly by user code: ignore and make sure to
+ # unregister the semaphore from the resource tracker.
+ pass
+ finally:
+ resource_tracker.unregister(name, "semlock")
+
+ def _make_methods(self):
+ self.acquire = self._semlock.acquire
+ self.release = self._semlock.release
+
+ def __enter__(self):
+ return self._semlock.acquire()
+
+ def __exit__(self, *args):
+ return self._semlock.release()
+
+ def __getstate__(self):
+ assert_spawning(self)
+ sl = self._semlock
+ h = sl.handle
+ return (h, sl.kind, sl.maxvalue, sl.name)
+
+ def __setstate__(self, state):
+ self._semlock = _SemLock._rebuild(*state)
+ util.debug(
+ f'recreated blocker with handle {state[0]!r} and name "{state[3]}"'
+ )
+ self._make_methods()
+
+ @staticmethod
+ def _make_name():
+ # OSX does not support long names for semaphores
+ return f"/loky-{os.getpid()}-{next(SemLock._rand)}"
+
+
+#
+# Semaphore
+#
+
+
+class Semaphore(SemLock):
+ def __init__(self, value=1):
+ SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)
+
+ def get_value(self):
+ if sys.platform == "darwin":
+ raise NotImplementedError("OSX does not implement sem_getvalue")
+ return self._semlock._get_value()
+
+ def __repr__(self):
+ try:
+ value = self._semlock._get_value()
+ except Exception:
+ value = "unknown"
+ return f"<{self.__class__.__name__}(value={value})>"
+
+
+#
+# Bounded semaphore
+#
+
+
+class BoundedSemaphore(Semaphore):
+ def __init__(self, value=1):
+ SemLock.__init__(self, SEMAPHORE, value, value)
+
+ def __repr__(self):
+ try:
+ value = self._semlock._get_value()
+ except Exception:
+ value = "unknown"
+ return (
+ f"<{self.__class__.__name__}(value={value}, "
+ f"maxvalue={self._semlock.maxvalue})>"
+ )
+
+
+#
+# Non-recursive lock
+#
+
+
+class Lock(SemLock):
+ def __init__(self):
+ super().__init__(SEMAPHORE, 1, 1)
+
+ def __repr__(self):
+ try:
+ if self._semlock._is_mine():
+ name = process.current_process().name
+ if threading.current_thread().name != "MainThread":
+ name = f"{name}|{threading.current_thread().name}"
+ elif self._semlock._get_value() == 1:
+ name = "None"
+ elif self._semlock._count() > 0:
+ name = "SomeOtherThread"
+ else:
+ name = "SomeOtherProcess"
+ except Exception:
+ name = "unknown"
+ return f"<{self.__class__.__name__}(owner={name})>"
+
+
+#
+# Recursive lock
+#
+
+
+class RLock(SemLock):
+ def __init__(self):
+ super().__init__(RECURSIVE_MUTEX, 1, 1)
+
+ def __repr__(self):
+ try:
+ if self._semlock._is_mine():
+ name = process.current_process().name
+ if threading.current_thread().name != "MainThread":
+ name = f"{name}|{threading.current_thread().name}"
+ count = self._semlock._count()
+ elif self._semlock._get_value() == 1:
+ name, count = "None", 0
+ elif self._semlock._count() > 0:
+ name, count = "SomeOtherThread", "nonzero"
+ else:
+ name, count = "SomeOtherProcess", "nonzero"
+ except Exception:
+ name, count = "unknown", "unknown"
+ return f"<{self.__class__.__name__}({name}, {count})>"
+
+
+#
+# Condition variable
+#
+
+
+class Condition:
+ def __init__(self, lock=None):
+ self._lock = lock or RLock()
+ self._sleeping_count = Semaphore(0)
+ self._woken_count = Semaphore(0)
+ self._wait_semaphore = Semaphore(0)
+ self._make_methods()
+
+ def __getstate__(self):
+ assert_spawning(self)
+ return (
+ self._lock,
+ self._sleeping_count,
+ self._woken_count,
+ self._wait_semaphore,
+ )
+
+ def __setstate__(self, state):
+ (
+ self._lock,
+ self._sleeping_count,
+ self._woken_count,
+ self._wait_semaphore,
+ ) = state
+ self._make_methods()
+
+ def __enter__(self):
+ return self._lock.__enter__()
+
+ def __exit__(self, *args):
+ return self._lock.__exit__(*args)
+
+ def _make_methods(self):
+ self.acquire = self._lock.acquire
+ self.release = self._lock.release
+
+ def __repr__(self):
+ try:
+ num_waiters = (
+ self._sleeping_count._semlock._get_value()
+ - self._woken_count._semlock._get_value()
+ )
+ except Exception:
+ num_waiters = "unknown"
+ return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>"
+
+ def wait(self, timeout=None):
+ assert (
+ self._lock._semlock._is_mine()
+ ), "must acquire() condition before using wait()"
+
+ # indicate that this thread is going to sleep
+ self._sleeping_count.release()
+
+ # release lock
+ count = self._lock._semlock._count()
+ for _ in range(count):
+ self._lock.release()
+
+ try:
+ # wait for notification or timeout
+ return self._wait_semaphore.acquire(True, timeout)
+ finally:
+ # indicate that this thread has woken
+ self._woken_count.release()
+
+ # reacquire lock
+ for _ in range(count):
+ self._lock.acquire()
+
+ def notify(self):
+ assert self._lock._semlock._is_mine(), "lock is not owned"
+ assert not self._wait_semaphore.acquire(False)
+
+ # to take account of timeouts since last notify() we subtract
+ # woken_count from sleeping_count and rezero woken_count
+ while self._woken_count.acquire(False):
+ res = self._sleeping_count.acquire(False)
+ assert res
+
+ if self._sleeping_count.acquire(False): # try grabbing a sleeper
+ self._wait_semaphore.release() # wake up one sleeper
+ self._woken_count.acquire() # wait for the sleeper to wake
+
+ # rezero _wait_semaphore in case a timeout just happened
+ self._wait_semaphore.acquire(False)
+
+ def notify_all(self):
+ assert self._lock._semlock._is_mine(), "lock is not owned"
+ assert not self._wait_semaphore.acquire(False)
+
+ # to take account of timeouts since last notify*() we subtract
+ # woken_count from sleeping_count and rezero woken_count
+ while self._woken_count.acquire(False):
+ res = self._sleeping_count.acquire(False)
+ assert res
+
+ sleepers = 0
+ while self._sleeping_count.acquire(False):
+ self._wait_semaphore.release() # wake up one sleeper
+ sleepers += 1
+
+ if sleepers:
+ for _ in range(sleepers):
+ self._woken_count.acquire() # wait for a sleeper to wake
+
+ # rezero wait_semaphore in case some timeouts just happened
+ while self._wait_semaphore.acquire(False):
+ pass
+
+ def wait_for(self, predicate, timeout=None):
+ result = predicate()
+ if result:
+ return result
+ if timeout is not None:
+ endtime = _time() + timeout
+ else:
+ endtime = None
+ waittime = None
+ while not result:
+ if endtime is not None:
+ waittime = endtime - _time()
+ if waittime <= 0:
+ break
+ self.wait(waittime)
+ result = predicate()
+ return result
+
+
+#
+# Event
+#
+
+
+class Event:
+ def __init__(self):
+ self._cond = Condition(Lock())
+ self._flag = Semaphore(0)
+
+ def is_set(self):
+ with self._cond:
+ if self._flag.acquire(False):
+ self._flag.release()
+ return True
+ return False
+
+ def set(self):
+ with self._cond:
+ self._flag.acquire(False)
+ self._flag.release()
+ self._cond.notify_all()
+
+ def clear(self):
+ with self._cond:
+ self._flag.acquire(False)
+
+ def wait(self, timeout=None):
+ with self._cond:
+ if self._flag.acquire(False):
+ self._flag.release()
+ else:
+ self._cond.wait(timeout)
+
+ if self._flag.acquire(False):
+ self._flag.release()
+ return True
+ return False
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..099debcb711c6695f0570861293b198047bd6093
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
@@ -0,0 +1,102 @@
+import inspect
+from functools import partial
+from joblib.externals.cloudpickle import dumps, loads
+
+
+WRAP_CACHE = {}
+
+
+class CloudpickledObjectWrapper:
+ def __init__(self, obj, keep_wrapper=False):
+ self._obj = obj
+ self._keep_wrapper = keep_wrapper
+
+ def __reduce__(self):
+ _pickled_object = dumps(self._obj)
+ if not self._keep_wrapper:
+ return loads, (_pickled_object,)
+
+ return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)
+
+ def __getattr__(self, attr):
+ # Ensure that the wrapped object can be used seemlessly as the
+ # previous object.
+ if attr not in ["_obj", "_keep_wrapper"]:
+ return getattr(self._obj, attr)
+ return getattr(self, attr)
+
+
+# Make sure the wrapped object conserves the callable property
+class CallableObjectWrapper(CloudpickledObjectWrapper):
+ def __call__(self, *args, **kwargs):
+ return self._obj(*args, **kwargs)
+
+
+def _wrap_non_picklable_objects(obj, keep_wrapper):
+ if callable(obj):
+ return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper)
+ return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper)
+
+
+def _reconstruct_wrapper(_pickled_object, keep_wrapper):
+ obj = loads(_pickled_object)
+ return _wrap_non_picklable_objects(obj, keep_wrapper)
+
+
+def _wrap_objects_when_needed(obj):
+ # Function to introspect an object and decide if it should be wrapped or
+ # not.
+ need_wrap = "__main__" in getattr(obj, "__module__", "")
+ if isinstance(obj, partial):
+ return partial(
+ _wrap_objects_when_needed(obj.func),
+ *[_wrap_objects_when_needed(a) for a in obj.args],
+ **{
+ k: _wrap_objects_when_needed(v)
+ for k, v in obj.keywords.items()
+ }
+ )
+ if callable(obj):
+ # Need wrap if the object is a function defined in a local scope of
+ # another function.
+ func_code = getattr(obj, "__code__", "")
+ need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
+
+ # Need wrap if the obj is a lambda expression
+ func_name = getattr(obj, "__name__", "")
+ need_wrap |= "" in func_name
+
+ if not need_wrap:
+ return obj
+
+ wrapped_obj = WRAP_CACHE.get(obj)
+ if wrapped_obj is None:
+ wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
+ WRAP_CACHE[obj] = wrapped_obj
+ return wrapped_obj
+
+
+def wrap_non_picklable_objects(obj, keep_wrapper=True):
+ """Wrapper for non-picklable object to use cloudpickle to serialize them.
+
+ Note that this wrapper tends to slow down the serialization process as it
+ is done with cloudpickle which is typically slower compared to pickle. The
+ proper way to solve serialization issues is to avoid defining functions and
+ objects in the main scripts and to implement __reduce__ functions for
+ complex classes.
+ """
+ # If obj is a class, create a CloudpickledClassWrapper which instantiates
+ # the object internally and wrap it directly in a CloudpickledObjectWrapper
+ if inspect.isclass(obj):
+
+ class CloudpickledClassWrapper(CloudpickledObjectWrapper):
+ def __init__(self, *args, **kwargs):
+ self._obj = obj(*args, **kwargs)
+ self._keep_wrapper = keep_wrapper
+
+ CloudpickledClassWrapper.__name__ = obj.__name__
+ return CloudpickledClassWrapper
+
+ # If obj is an instance of a class, just wrap it in a regular
+ # CloudpickledObjectWrapper
+ return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/initializers.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/initializers.py
new file mode 100644
index 0000000000000000000000000000000000000000..aea0e56c25d0d74e04788493058549a1399f8342
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/initializers.py
@@ -0,0 +1,80 @@
+import warnings
+
+
+def _viztracer_init(init_kwargs):
+ """Initialize viztracer's profiler in worker processes"""
+ from viztracer import VizTracer
+
+ tracer = VizTracer(**init_kwargs)
+ tracer.register_exit()
+ tracer.start()
+
+
+def _make_viztracer_initializer_and_initargs():
+ try:
+ import viztracer
+
+ tracer = viztracer.get_tracer()
+ if tracer is not None and getattr(tracer, "enable", False):
+ # Profiler is active: introspect its configuration to
+ # initialize the workers with the same configuration.
+ return _viztracer_init, (tracer.init_kwargs,)
+ except ImportError:
+ # viztracer is not installed: nothing to do
+ pass
+ except Exception as e:
+ # In case viztracer's API evolve, we do not want to crash loky but
+ # we want to know about it to be able to update loky.
+ warnings.warn(f"Unable to introspect viztracer state: {e}")
+ return None, ()
+
+
+class _ChainedInitializer:
+ """Compound worker initializer
+
+ This is meant to be used in conjunction with _chain_initializers to
+ produce the necessary chained_args list to be passed to __call__.
+ """
+
+ def __init__(self, initializers):
+ self._initializers = initializers
+
+ def __call__(self, *chained_args):
+ for initializer, args in zip(self._initializers, chained_args):
+ initializer(*args)
+
+
+def _chain_initializers(initializer_and_args):
+ """Convenience helper to combine a sequence of initializers.
+
+ If some initializers are None, they are filtered out.
+ """
+ filtered_initializers = []
+ filtered_initargs = []
+ for initializer, initargs in initializer_and_args:
+ if initializer is not None:
+ filtered_initializers.append(initializer)
+ filtered_initargs.append(initargs)
+
+ if not filtered_initializers:
+ return None, ()
+ elif len(filtered_initializers) == 1:
+ return filtered_initializers[0], filtered_initargs[0]
+ else:
+ return _ChainedInitializer(filtered_initializers), filtered_initargs
+
+
+def _prepare_initializer(initializer, initargs):
+ if initializer is not None and not callable(initializer):
+ raise TypeError(
+ f"initializer must be a callable, got: {initializer!r}"
+ )
+
+ # Introspect runtime to determine if we need to propagate the viztracer
+ # profiler information to the workers:
+ return _chain_initializers(
+ [
+ (initializer, initargs),
+ _make_viztracer_initializer_and_initargs(),
+ ]
+ )
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..3040719579f74ecc7d5645e4894dbad138f0a5c1
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py
@@ -0,0 +1,1314 @@
+###############################################################################
+# Re-implementation of the ProcessPoolExecutor more robust to faults
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from concurrent/futures/process_pool_executor.py (17/02/2017)
+# * Add an extra management thread to detect executor_manager_thread failures,
+# * Improve the shutdown process to avoid deadlocks,
+# * Add timeout for workers,
+# * More robust pickling process.
+#
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Implements ProcessPoolExecutor.
+
+The follow diagram and text describe the data-flow through the system:
+
+|======================= In-process =====================|== Out-of-process ==|
+
++----------+ +----------+ +--------+ +-----------+ +---------+
+| | => | Work Ids | | | | Call Q | | Process |
+| | +----------+ | | +-----------+ | Pool |
+| | | ... | | | | ... | +---------+
+| | | 6 | => | | => | 5, call() | => | |
+| | | 7 | | | | ... | | |
+| Process | | ... | | Local | +-----------+ | Process |
+| Pool | +----------+ | Worker | | #1..n |
+| Executor | | Thread | | |
+| | +----------- + | | +-----------+ | |
+| | <=> | Work Items | <=> | | <= | Result Q | <= | |
+| | +------------+ | | +-----------+ | |
+| | | 6: call() | | | | ... | | |
+| | | future | +--------+ | 4, result | | |
+| | | ... | | 3, except | | |
++----------+ +------------+ +-----------+ +---------+
+
+Executor.submit() called:
+- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
+- adds the id of the _WorkItem to the "Work Ids" queue
+
+Local worker thread:
+- reads work ids from the "Work Ids" queue and looks up the corresponding
+ WorkItem from the "Work Items" dict: if the work item has been cancelled then
+ it is simply removed from the dict, otherwise it is repackaged as a
+ _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
+ until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
+ calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
+- reads _ResultItems from "Result Q", updates the future stored in the
+ "Work Items" dict and deletes the dict entry
+
+Process #1..n:
+- reads _CallItems from "Call Q", executes the calls, and puts the resulting
+ _ResultItems in "Result Q"
+"""
+
+
+__author__ = "Thomas Moreau (thomas.moreau.2010@gmail.com)"
+
+
+import os
+import gc
+import sys
+import queue
+import struct
+import weakref
+import warnings
+import itertools
+import traceback
+import threading
+from time import time, sleep
+import multiprocessing as mp
+from functools import partial
+from pickle import PicklingError
+from concurrent.futures import Executor
+from concurrent.futures._base import LOGGER
+from concurrent.futures.process import BrokenProcessPool as _BPPException
+from multiprocessing.connection import wait
+
+from ._base import Future
+from .backend import get_context
+from .backend.context import cpu_count, _MAX_WINDOWS_WORKERS
+from .backend.queues import Queue, SimpleQueue
+from .backend.reduction import set_loky_pickler, get_loky_pickler_name
+from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
+from .initializers import _prepare_initializer
+
+
+# Mechanism to prevent infinite process spawning. When a worker of a
+# ProcessPoolExecutor nested in MAX_DEPTH Executor tries to create a new
+# Executor, a LokyRecursionError is raised
+MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10))
+_CURRENT_DEPTH = 0
+
+# Minimum time interval between two consecutive memory leak protection checks.
+_MEMORY_LEAK_CHECK_DELAY = 1.0
+
+# Number of bytes of memory usage allowed over the reference process size.
+_MAX_MEMORY_LEAK_SIZE = int(3e8)
+
+
+try:
+ from psutil import Process
+
+ _USE_PSUTIL = True
+
+ def _get_memory_usage(pid, force_gc=False):
+ if force_gc:
+ gc.collect()
+
+ mem_size = Process(pid).memory_info().rss
+ mp.util.debug(f"psutil return memory size: {mem_size}")
+ return mem_size
+
+except ImportError:
+ _USE_PSUTIL = False
+
+
+class _ThreadWakeup:
+ def __init__(self):
+ self._closed = False
+ self._reader, self._writer = mp.Pipe(duplex=False)
+
+ def close(self):
+ if not self._closed:
+ self._closed = True
+ self._writer.close()
+ self._reader.close()
+
+ def wakeup(self):
+ if not self._closed:
+ self._writer.send_bytes(b"")
+
+ def clear(self):
+ if not self._closed:
+ while self._reader.poll():
+ self._reader.recv_bytes()
+
+
+class _ExecutorFlags:
+ """necessary references to maintain executor states without preventing gc
+
+ It permits to keep the information needed by executor_manager_thread
+ and crash_detection_thread to maintain the pool without preventing the
+ garbage collection of unreferenced executors.
+ """
+
+ def __init__(self, shutdown_lock):
+
+ self.shutdown = False
+ self.broken = None
+ self.kill_workers = False
+ self.shutdown_lock = shutdown_lock
+
+ def flag_as_shutting_down(self, kill_workers=None):
+ with self.shutdown_lock:
+ self.shutdown = True
+ if kill_workers is not None:
+ self.kill_workers = kill_workers
+
+ def flag_as_broken(self, broken):
+ with self.shutdown_lock:
+ self.shutdown = True
+ self.broken = broken
+
+
+# Prior to 3.9, executor_manager_thread is created as daemon thread. This means
+# that it is not joined automatically when the interpreter is shutting down.
+# To work around this problem, an exit handler is installed to tell the
+# thread to exit when the interpreter is shutting down and then waits until
+# it finishes. The thread needs to be daemonized because the atexit hooks are
+# called after all non daemonized threads are joined.
+#
+# Starting 3.9, there exists a specific atexit hook to be called before joining
+# the threads so the executor_manager_thread does not need to be daemonized
+# anymore.
+#
+# The atexit hooks are registered when starting the first ProcessPoolExecutor
+# to avoid import having an effect on the interpreter.
+
_global_shutdown = False
_global_shutdown_lock = threading.Lock()
_threads_wakeups = weakref.WeakKeyDictionary()


def _python_exit():
    """Atexit hook: wake up and join every executor_manager_thread.

    Sets the module-wide shutdown flag so manager threads can detect that
    the interpreter is exiting, wakes each of them up, then joins them so
    the interpreter exits cleanly.
    """
    global _global_shutdown
    _global_shutdown = True

    # Materialize the list of items to avoid error due to iterating over
    # changing size dictionary.
    items = list(_threads_wakeups.items())
    if len(items) > 0:
        mp.util.debug(
            # BUG FIX: the first literal was missing its f-prefix, so the
            # text "{len(items)}" was logged verbatim instead of the count
            # (and the two literals were joined without a space).
            f"Interpreter shutting down. Waking up {len(items)} "
            f"executor_manager_thread:\n{items}"
        )

    # Wake up the executor_manager_thread's so they can detect the interpreter
    # is shutting down and exit.
    for _, (shutdown_lock, thread_wakeup) in items:
        with shutdown_lock:
            thread_wakeup.wakeup()

    # Collect the executor_manager_thread's to make sure we exit cleanly.
    for thread, _ in items:
        # This lock prevents situations where an executor is gc'ed in one
        # thread while the atexit finalizer is running in another thread. This
        # can happen when joblib is used in pypy for instance.
        with _global_shutdown_lock:
            thread.join()
+
+
# With the fork context, _thread_wakeups is propagated to children.
# Clear it after fork to avoid some situation that can cause some
# freeze when joining the workers: the child would otherwise try to wake
# up and join manager threads that only exist in the parent process.
mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear())


# Module variable to register the at_exit call; set lazily when the first
# ProcessPoolExecutor starts its manager thread.
process_pool_executor_at_exit = None

# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
+
+
class _RemoteTraceback(Exception):
    """Carry the stringified traceback of a remote error in a local one.

    Instances are attached as ``__cause__`` of exceptions re-raised in the
    parent process so the worker-side traceback remains visible.
    """

    def __init__(self, tb=None):
        # Wrap the remote traceback in triple quotes so it stands out when
        # rendered as part of a local exception chain.
        self.tb = '\n"""\n{}"""'.format(tb)

    def __str__(self):
        return self.tb
+
+
# Do not inherit from BaseException to mirror
# concurrent.futures.process._ExceptionWithTraceback
class _ExceptionWithTraceback:
    """Picklable wrapper pairing an exception with its formatted traceback."""

    def __init__(self, exc):
        tb = getattr(exc, "__traceback__", None)
        if tb is None:
            # Exception has no attached traceback: fall back to the one
            # currently being handled, if any.
            _, _, tb = sys.exc_info()
        self.exc = exc
        self.tb = "".join(traceback.format_exception(type(exc), exc, tb))

    def __reduce__(self):
        # On unpickling, rebuild the exception with the remote traceback
        # chained as its __cause__.
        return _rebuild_exc, (self.exc, self.tb)
+
+
def _rebuild_exc(exc, tb):
    """Reconstruct an unpickled exception, chaining its remote traceback."""
    exc.__cause__ = _RemoteTraceback(tb)
    return exc
+
+
class _WorkItem:
    """A submitted call (fn, args, kwargs) paired with its Future."""

    # __slots__ keeps per-item memory low: executors may hold many of these.
    __slots__ = ["future", "fn", "args", "kwargs"]

    def __init__(self, future, fn, args, kwargs):
        self.future, self.fn = future, fn
        self.args, self.kwargs = args, kwargs
+
+
class _ResultItem:
    """Outcome of a work item, sent back from a worker process.

    Exactly one of *exception*/*result* is meaningful for a given item.
    """

    def __init__(self, work_id, exception=None, result=None):
        self.work_id = work_id
        self.exception, self.result = exception, result
+
+
class _CallItem:
    """A call shipped to a worker process through the call queue."""

    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn, self.args, self.kwargs = fn, args, kwargs

        # Store the current loky_pickler so it is correctly set in the worker
        self.loky_pickler = get_loky_pickler_name()

    def __call__(self):
        # Restore the pickler configured in the parent before running the job.
        set_loky_pickler(self.loky_pickler)
        return self.fn(*self.args, **self.kwargs)

    def __repr__(self):
        return "CallItem({}, {}, {}, {})".format(
            self.work_id, self.fn, self.args, self.kwargs
        )
+
+
class _SafeQueue(Queue):
    """Safe Queue set exception to the future object linked to a job.

    When the feeder thread fails to pickle a task, the matching Future is
    failed with an informative error instead of leaving it pending forever.
    """

    def __init__(
        self,
        max_size=0,
        ctx=None,
        pending_work_items=None,
        running_work_items=None,
        thread_wakeup=None,
        reducers=None,
    ):
        # State shared with the executor: used to locate and fail the future
        # whose task could not be serialized by the feeder thread.
        self.thread_wakeup = thread_wakeup
        self.pending_work_items = pending_work_items
        self.running_work_items = running_work_items
        super().__init__(max_size, reducers=reducers, ctx=ctx)

    def _on_queue_feeder_error(self, e, obj):
        # Called from the queue's feeder thread when pickling *obj* raised *e*.
        if isinstance(obj, _CallItem):
            # format traceback only works on python3
            if isinstance(e, struct.error):
                # struct.error from send_bytes means the pickled payload
                # exceeded the pipe's size limit.
                raised_error = RuntimeError(
                    "The task could not be sent to the workers as it is too "
                    "large for `send_bytes`."
                )
            else:
                raised_error = PicklingError(
                    "Could not pickle the task to send it to the workers."
                )
            tb = traceback.format_exception(
                type(e), e, getattr(e, "__traceback__", None)
            )
            raised_error.__cause__ = _RemoteTraceback("".join(tb))
            work_item = self.pending_work_items.pop(obj.work_id, None)
            self.running_work_items.remove(obj.work_id)
            # work_item can be None if another process terminated. In this
            # case, the executor_manager_thread fails all work_items with
            # BrokenProcessPool
            if work_item is not None:
                work_item.future.set_exception(raised_error)
            del work_item
            # Wake the manager thread so it notices the failed future.
            self.thread_wakeup.wakeup()
        else:
            super()._on_queue_feeder_error(e, obj)
+
+
def _get_chunks(chunksize, *iterables):
    """Yield tuples of up to *chunksize* zipped elements from *iterables*."""
    zipped = zip(*iterables)
    # iter() with a sentinel: stop as soon as islice yields an empty tuple.
    yield from iter(lambda: tuple(itertools.islice(zipped, chunksize)), ())
+
+
def _process_chunk(fn, chunk):
    """Processes a chunk of an iterable passed to map.

    Applies the function passed to map() to every argument tuple in *chunk*
    and returns the list of results.

    This function is run in a separate process.
    """
    return list(itertools.starmap(fn, chunk))
+
+
def _sendback_result(result_queue, work_id, result=None, exception=None):
    """Safely send back the given result or exception.

    If the result itself cannot be sent (e.g. it is unpicklable), report
    that failure for the same work id instead of crashing the worker.
    """
    try:
        item = _ResultItem(work_id, result=result, exception=exception)
        result_queue.put(item)
    except BaseException as e:
        wrapped = _ExceptionWithTraceback(e)
        result_queue.put(_ResultItem(work_id, exception=wrapped))
+
+
def _process_worker(
    call_queue,
    result_queue,
    initializer,
    initargs,
    processes_management_lock,
    timeout,
    worker_exit_lock,
    current_depth,
):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A ctx.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will written
            to by the worker.
        initializer: A callable initializer, or None
        initargs: A tuple of args for the initializer
        processes_management_lock: A ctx.Lock avoiding worker timeout while
            some workers are being spawned.
        timeout: maximum time to wait for a new item in the call_queue. If that
            time is expired, the worker will shutdown.
        worker_exit_lock: Lock to avoid flagging the executor as broken on
            workers timeout.
        current_depth: Nested parallelism level, to avoid infinite spawning.
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            LOGGER.critical("Exception in initializer:", exc_info=True)
            # The parent will notice that the process stopped and
            # mark the pool broken
            return

    # set the global _CURRENT_DEPTH mechanism to limit recursive call
    global _CURRENT_DEPTH
    _CURRENT_DEPTH = current_depth
    # Baseline RSS measured after the first task; None until then.
    _process_reference_size = None
    _last_memory_leak_check = None
    pid = os.getpid()

    mp.util.debug(f"Worker started with timeout={timeout}")
    while True:
        try:
            call_item = call_queue.get(block=True, timeout=timeout)
            if call_item is None:
                mp.util.info("Shutting down worker on sentinel")
        except queue.Empty:
            mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s")
            # Only exit on timeout if no new worker is being spawned; the
            # management lock is held while processes are being started.
            if processes_management_lock.acquire(block=False):
                processes_management_lock.release()
                call_item = None
            else:
                mp.util.info("Could not acquire processes_management_lock")
                continue
        except BaseException:
            # Deserialization of the call item failed: report the remote
            # traceback to the parent and exit with a non-zero status.
            previous_tb = traceback.format_exc()
            try:
                result_queue.put(_RemoteTraceback(previous_tb))
            except BaseException:
                # If we cannot format correctly the exception, at least print
                # the traceback.
                print(previous_tb)
            mp.util.debug("Exiting with code 1")
            sys.exit(1)
        if call_item is None:
            # Notify queue management thread about worker shutdown
            result_queue.put(pid)
            # Hand-shake with the parent: it releases the lock once it has
            # acknowledged this worker's exit.
            is_clean = worker_exit_lock.acquire(True, timeout=30)

            # Early notify any loky executor running in this worker process
            # (nested parallelism) that this process is about to shutdown to
            # avoid a deadlock waiting indefinitely for the worker to finish.
            _python_exit()

            if is_clean:
                mp.util.debug("Exited cleanly")
            else:
                mp.util.info("Main process did not release worker_exit")
            return
        try:
            r = call_item()
        except BaseException as e:
            exc = _ExceptionWithTraceback(e)
            result_queue.put(_ResultItem(call_item.work_id, exception=exc))
        else:
            _sendback_result(result_queue, call_item.work_id, result=r)
            del r

        # Free the resource as soon as possible, to avoid holding onto
        # open files or shared memory that is not needed anymore
        del call_item

        if _USE_PSUTIL:
            if _process_reference_size is None:
                # Make reference measurement after the first call
                _process_reference_size = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                continue
            if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
                mem_usage = _get_memory_usage(pid)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # Memory usage stays within bounds: everything is fine.
                    continue

                # Check again memory usage; this time take the measurement
                # after a forced garbage collection to break any reference
                # cycles.
                mem_usage = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # The GC managed to free the memory: everything is fine.
                    continue

                # The process is leaking memory: let the main process
                # know that we need to start a new worker.
                mp.util.info("Memory leak detected: shutting down worker")
                result_queue.put(pid)
                with worker_exit_lock:
                    mp.util.debug("Exit due to memory leak")
                return
        else:
            # if psutil is not installed, trigger gc.collect events
            # regularly to limit potential memory leaks due to reference cycles
            if _last_memory_leak_check is None or (
                time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY
            ):
                gc.collect()
                _last_memory_leak_check = time()
+
+
class _ExecutorManagerThread(threading.Thread):
    """Manages the communication between this process and the worker processes.

    The manager is run in a local thread.

    Args:
        executor: A reference to the ProcessPoolExecutor that owns
            this thread. A weakref will be owned by the manager as well as
            references to internal objects used to introspect the state of
            the executor.
    """

    def __init__(self, executor):
        # Store references to necessary internals of the executor.

        # A _ThreadWakeup to allow waking up the executor_manager_thread from
        # the main Thread and avoid deadlocks caused by permanently
        # locked queues.
        self.thread_wakeup = executor._executor_manager_thread_wakeup
        self.shutdown_lock = executor._shutdown_lock

        # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used
        # to determine if the ProcessPoolExecutor has been garbage collected
        # and that the manager can exit.
        # When the executor gets garbage collected, the weakref callback
        # will wake up the queue management thread so that it can terminate
        # if there is no pending work item.
        def weakref_cb(
            _,
            thread_wakeup=self.thread_wakeup,
            shutdown_lock=self.shutdown_lock,
        ):
            if mp is not None:
                # At this point, the multiprocessing module can already be
                # garbage collected. We only log debug info when still
                # possible.
                mp.util.debug(
                    "Executor collected: triggering callback for"
                    " QueueManager wakeup"
                )
            with shutdown_lock:
                thread_wakeup.wakeup()

        self.executor_reference = weakref.ref(executor, weakref_cb)

        # The flags of the executor
        self.executor_flags = executor._flags

        # A list of the ctx.Process instances used as workers.
        self.processes = executor._processes

        # A ctx.Queue that will be filled with _CallItems derived from
        # _WorkItems for processing by the process workers.
        self.call_queue = executor._call_queue

        # A ctx.SimpleQueue of _ResultItems generated by the process workers.
        self.result_queue = executor._result_queue

        # A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        self.work_ids_queue = executor._work_ids

        # A dict mapping work ids to _WorkItems e.g.
        # {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        self.pending_work_items = executor._pending_work_items

        # A list of the work_ids that are currently running
        self.running_work_items = executor._running_work_items

        # A lock to avoid concurrent shutdown of workers on timeout and spawn
        # of new processes or shut down
        self.processes_management_lock = executor._processes_management_lock

        super().__init__(name="ExecutorManagerThread")
        if sys.version_info < (3, 9):
            # Pre-3.9 there is no threading._register_atexit, so the thread
            # must be daemonized to not block interpreter exit (see module
            # comment above _python_exit).
            self.daemon = True

    def run(self):
        # Main loop for the executor manager thread: dispatch pending work,
        # process results, and watch for breakage or shutdown requests.

        while True:
            self.add_call_item_to_queue()

            result_item, is_broken, bpe = self.wait_result_broken_or_wakeup()

            if is_broken:
                self.terminate_broken(bpe)
                return
            if result_item is not None:
                self.process_result_item(result_item)
                # Delete reference to result_item to avoid keeping references
                # while waiting on new results.
                del result_item

            if self.is_shutting_down():
                self.flag_executor_shutting_down()

                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not self.pending_work_items:
                    self.join_executor_internals()
                    return

    def add_call_item_to_queue(self):
        # Fills call_queue with _WorkItems from pending_work_items.
        # This function never blocks.
        while True:
            if self.call_queue.full():
                return
            try:
                work_id = self.work_ids_queue.get(block=False)
            except queue.Empty:
                return
            else:
                work_item = self.pending_work_items[work_id]

                if work_item.future.set_running_or_notify_cancel():
                    self.running_work_items += [work_id]
                    self.call_queue.put(
                        _CallItem(
                            work_id,
                            work_item.fn,
                            work_item.args,
                            work_item.kwargs,
                        ),
                        block=True,
                    )
                else:
                    # The future was cancelled before it could start running.
                    del self.pending_work_items[work_id]
                    continue

    def wait_result_broken_or_wakeup(self):
        # Wait for a result to be ready in the result_queue while checking
        # that all worker processes are still running, or for a wake up
        # signal being sent. The wake up signals come either from new tasks
        # being submitted, from the executor being shutdown/gc-ed, or from the
        # shutdown of the python interpreter.
        result_reader = self.result_queue._reader
        wakeup_reader = self.thread_wakeup._reader
        readers = [result_reader, wakeup_reader]
        worker_sentinels = [p.sentinel for p in list(self.processes.values())]
        ready = wait(readers + worker_sentinels)

        bpe = None
        is_broken = True
        result_item = None
        if result_reader in ready:
            try:
                result_item = result_reader.recv()
                if isinstance(result_item, _RemoteTraceback):
                    # A worker could not deserialize a task (see
                    # _process_worker's BaseException handler).
                    bpe = BrokenProcessPool(
                        "A task has failed to un-serialize. Please ensure that"
                        " the arguments of the function are all picklable."
                    )
                    bpe.__cause__ = result_item
                else:
                    is_broken = False
            except BaseException as e:
                bpe = BrokenProcessPool(
                    "A result has failed to un-serialize. Please ensure that "
                    "the objects returned by the function are always "
                    "picklable."
                )
                tb = traceback.format_exception(
                    type(e), e, getattr(e, "__traceback__", None)
                )
                bpe.__cause__ = _RemoteTraceback("".join(tb))

        elif wakeup_reader in ready:
            # This is simply a wake-up event that might either trigger putting
            # more tasks in the queue or trigger the clean up of resources.
            is_broken = False
        else:
            # A worker has terminated and we don't know why, set the state of
            # the executor as broken
            exit_codes = ""
            if sys.platform != "win32":
                # In Windows, introspecting terminated workers exitcodes seems
                # unstable, therefore they are not appended in the exception
                # message.
                exit_codes = (
                    "\nThe exit codes of the workers are "
                    f"{get_exitcodes_terminated_worker(self.processes)}"
                )
            mp.util.debug(
                "A worker unexpectedly terminated. Workers that "
                "might have caused the breakage: "
                + str(
                    {
                        p.name: p.exitcode
                        for p in list(self.processes.values())
                        if p is not None and p.sentinel in ready
                    }
                )
            )
            bpe = TerminatedWorkerError(
                "A worker process managed by the executor was unexpectedly "
                "terminated. This could be caused by a segmentation fault "
                "while calling the function or by an excessive memory usage "
                "causing the Operating System to kill the worker.\n"
                f"{exit_codes}"
            )

        # Drain the wakeup pipe so the next wait() blocks again.
        self.thread_wakeup.clear()

        return result_item, is_broken, bpe

    def process_result_item(self, result_item):
        # Process the received result_item. This can be either the PID of a
        # worker that exited gracefully or a _ResultItem

        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID, either on request
            # by the executor.shutdown method or by the timeout of the worker
            # itself: we should not mark the executor as broken.
            with self.processes_management_lock:
                p = self.processes.pop(result_item, None)

            # p can be None if the executor is concurrently shutting down.
            if p is not None:
                # Complete the hand-shake started in _process_worker so the
                # worker can exit, then reap it.
                p._worker_exit_lock.release()
                mp.util.debug(
                    f"joining {p.name} when processing {p.pid} as result_item"
                )
                p.join()
                del p

            # Make sure the executor has the right number of workers, even if
            # a worker timed out while some jobs were submitted. If some work
            # is pending or there are less processes than running items, we
            # need to start a new Process and raise a warning.
            n_pending = len(self.pending_work_items)
            n_running = len(self.running_work_items)
            if n_pending - n_running > 0 or n_running > len(self.processes):
                executor = self.executor_reference()
                if (
                    executor is not None
                    and len(self.processes) < executor._max_workers
                ):
                    warnings.warn(
                        "A worker stopped while some jobs were given to the "
                        "executor. This can be caused by a too short worker "
                        "timeout or by a memory leak.",
                        UserWarning,
                    )
                    with executor._processes_management_lock:
                        executor._adjust_process_count()
                executor = None
        else:
            # Received a _ResultItem so mark the future as completed.
            work_item = self.pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
                self.running_work_items.remove(result_item.work_id)

    def is_shutting_down(self):
        # Check whether we should start shutting down the executor.
        executor = self.executor_reference()
        # No more work items can be added if:
        # - The interpreter is shutting down OR
        # - The executor that owns this thread is not broken AND
        #   * The executor that owns this worker has been collected OR
        #   * The executor that owns this worker has been shutdown.
        # If the executor is broken, it should be detected in the next loop.
        return _global_shutdown or (
            (executor is None or self.executor_flags.shutdown)
            and not self.executor_flags.broken
        )

    def terminate_broken(self, bpe):
        # Terminate the executor because it is in a broken state. The bpe
        # argument can be used to display more information on the error that
        # led the executor into becoming broken.

        # Mark the process pool broken so that submits fail right now.
        self.executor_flags.flag_as_broken(bpe)

        # Mark pending tasks as failed.
        for work_item in self.pending_work_items.values():
            work_item.future.set_exception(bpe)
            # Delete references to object. See issue16284
            del work_item
        self.pending_work_items.clear()

        # Terminate remaining workers forcibly: the queues or their
        # locks may be in a dirty state and block forever.
        self.kill_workers(reason="broken executor")

        # clean up resources
        self.join_executor_internals()

    def flag_executor_shutting_down(self):
        # Flag the executor as shutting down and cancel remaining tasks if
        # requested as early as possible if it is not gc-ed yet.
        self.executor_flags.flag_as_shutting_down()

        # Cancel pending work items if requested.
        if self.executor_flags.kill_workers:
            while self.pending_work_items:
                _, work_item = self.pending_work_items.popitem()
                work_item.future.set_exception(
                    ShutdownExecutorError(
                        "The Executor was shutdown with `kill_workers=True` "
                        "before this job could complete."
                    )
                )
                del work_item

            # Kill the remaining workers forcibly to not waste time joining
            # them
            self.kill_workers(reason="executor shutting down")

    def kill_workers(self, reason=""):
        # Terminate the remaining workers using SIGKILL. This function also
        # terminates descendant workers of the children in case there is some
        # nested parallelism.
        while self.processes:
            _, p = self.processes.popitem()
            mp.util.debug(f"terminate process {p.name}, reason: {reason}")
            try:
                kill_process_tree(p)
            except ProcessLookupError:  # pragma: no cover
                pass

    def shutdown_workers(self):
        # shutdown all workers in self.processes

        # Create a list to avoid RuntimeError due to concurrent modification of
        # processes. nb_children_alive is thus an upper bound. Also release the
        # processes' _worker_exit_lock to accelerate the shutdown procedure, as
        # there is no need for hand-shake here.
        with self.processes_management_lock:
            n_children_to_stop = 0
            for p in list(self.processes.values()):
                mp.util.debug(f"releasing worker exit lock on {p.name}")
                p._worker_exit_lock.release()
                n_children_to_stop += 1

        mp.util.debug(f"found {n_children_to_stop} processes to stop")

        # Send the right number of sentinels, to make sure all children are
        # properly terminated. Do it with a mechanism that avoid hanging on
        # Full queue when all workers have already been shutdown.
        n_sentinels_sent = 0
        cooldown_time = 0.001
        while (
            n_sentinels_sent < n_children_to_stop
            and self.get_n_children_alive() > 0
        ):
            for _ in range(n_children_to_stop - n_sentinels_sent):
                try:
                    self.call_queue.put_nowait(None)
                    n_sentinels_sent += 1
                except queue.Full as e:
                    if cooldown_time > 5.0:
                        mp.util.info(
                            "failed to send all sentinels and exit with error."
                            f"\ncall_queue size={self.call_queue._maxsize}; "
                            f" full is {self.call_queue.full()}; "
                        )
                        raise e
                    mp.util.info(
                        "full call_queue prevented to send all sentinels at "
                        "once, waiting..."
                    )
                    sleep(cooldown_time)
                    # Exponential back-off while the queue stays full.
                    cooldown_time *= 1.2
                    break

        mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue")

    def join_executor_internals(self):
        # Tear down queues, wakeup channel and worker processes, in an order
        # that cannot deadlock.
        self.shutdown_workers()

        # Release the queue's resources as soon as possible. Flag the feeder
        # thread for clean exit to avoid having the crash detection thread flag
        # the Executor as broken during the shutdown. This is safe as either:
        # * We don't need to communicate with the workers anymore
        # * There is nothing left in the Queue buffer except None sentinels
        mp.util.debug("closing call_queue")
        self.call_queue.close()
        self.call_queue.join_thread()

        # Closing result_queue
        mp.util.debug("closing result_queue")
        self.result_queue.close()

        mp.util.debug("closing thread_wakeup")
        with self.shutdown_lock:
            self.thread_wakeup.close()

        # If .join() is not called on the created processes then
        # some ctx.Queue methods may deadlock on macOS.
        with self.processes_management_lock:
            mp.util.debug(f"joining {len(self.processes)} processes")
            n_joined_processes = 0
            while True:
                try:
                    pid, p = self.processes.popitem()
                    mp.util.debug(f"joining process {p.name} with pid {pid}")
                    p.join()
                    n_joined_processes += 1
                except KeyError:
                    break

        mp.util.debug(
            "executor management thread clean shutdown of "
            f"{n_joined_processes} workers"
        )

    def get_n_children_alive(self):
        # This is an upper bound on the number of children alive.
        with self.processes_management_lock:
            return sum(p.is_alive() for p in list(self.processes.values()))
+
+
# Cached outcome of the semaphore-limit probe: checked once per process.
_system_limits_checked = False
_system_limited = None


def _check_system_limits():
    """Raise NotImplementedError if the OS provides too few semaphores."""
    global _system_limits_checked, _system_limited
    if _system_limits_checked and _system_limited:
        # Re-raise the cached verdict without probing the system again.
        raise NotImplementedError(_system_limited)
    _system_limits_checked = True
    try:
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    # -1 means the limit is undetermined (bounded by available memory only);
    # POSIX guarantees a minimum of 256 semaphores otherwise.
    if nsems_max == -1 or nsems_max >= 256:
        return
    _system_limited = (
        f"system provides too few semaphores ({nsems_max} available, "
        "256 necessary)"
    )
    raise NotImplementedError(_system_limited)
+
+
def _chain_from_iterable_of_lists(iterable):
    """
    Specialized implementation of itertools.chain.from_iterable.
    Each item in *iterable* should be a list. This function is
    careful not to keep references to yielded objects.
    """
    for chunk in iterable:
        # Reverse once, then pop from the end: each yielded object is
        # dropped from the list before control returns to the consumer.
        chunk.reverse()
        while chunk:
            yield chunk.pop()
+
+
def _check_max_depth(context):
    """Raise LokyRecursionError when nested-process depth would be exceeded.

    With the 'fork' start method, nesting is capped at depth 1; otherwise the
    limit is the module-level MAX_DEPTH (settable via LOKY_MAX_DEPTH).
    """
    # Limit the maximal recursion level
    global _CURRENT_DEPTH
    if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0:
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            "MAX_DEPTH=1. It is not possible to increase this limit when "
            "using the 'fork' start method."
        )

    if MAX_DEPTH > 0 and _CURRENT_DEPTH + 1 > MAX_DEPTH:
        # Typo fix: "intendend" -> "intended" in the user-facing message.
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            f"MAX_DEPTH={MAX_DEPTH}. If this is intended, you can change "
            "this limit with the LOKY_MAX_DEPTH environment variable."
        )
+
+
class LokyRecursionError(RuntimeError):
    """Raised when too many levels of nested loky processes are spawned."""
+
+
class BrokenProcessPool(_BPPException):
    """
    Raised when the executor is broken while a future was in the running state.
    The cause can be an error raised when unpickling the task in the worker
    process or when unpickling the result value in the parent process. It can
    also be caused by a worker process being terminated unexpectedly.
    """
+
+
class TerminatedWorkerError(BrokenProcessPool):
    """
    Raised when a process in a ProcessPoolExecutor terminated abruptly
    while a future was in the running state (e.g. segfault or OOM kill).
    """
+
+
# Alias for backward compatibility (for code written for loky 1.1.4 and
# earlier). Do not use in new code.
BrokenExecutor = BrokenProcessPool
+
+
class ShutdownExecutorError(RuntimeError):
    """Raised when submitting to, or waiting on, a shut-down executor.

    A ProcessPoolExecutor raises this when it is shut down while a future
    was still in the running or pending state.
    """
+
+
class ProcessPoolExecutor(Executor):
    """Executor running each submitted call in a separate worker process."""

    _at_exit = None

    def __init__(
        self,
        max_workers=None,
        job_reducers=None,
        result_reducers=None,
        timeout=None,
        context=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: int, optional (default: cpu_count())
                The maximum number of processes that can be used to execute the
                given calls. If None or not given then as many worker processes
                will be created as the number of CPUs the current process
                can use.
            job_reducers, result_reducers: dict(type: reducer_func)
                Custom reducer for pickling the jobs and the results from the
                Executor. If only `job_reducers` is provided, `result_reducer`
                will use the same reducers
            timeout: int, optional (default: None)
                Idle workers exit after timeout seconds. If a new job is
                submitted after the timeout, the executor will start enough
                new Python processes to make sure the pool of workers is full.
            context: A multiprocessing context to launch the workers. This
                object should provide SimpleQueue, Queue and Process.
            initializer: A callable used to initialize worker processes.
            initargs: A tuple of arguments to pass to the initializer.
            env: A dict of environment variables to overwrite in the child
                process. The environment variables are set before any module is
                loaded. Note that this only works with the loky context.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = cpu_count()
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")
            self._max_workers = max_workers

        if (
            sys.platform == "win32"
            and self._max_workers > _MAX_WINDOWS_WORKERS
        ):
            warnings.warn(
                f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} "
                "due to limitations of the operating system."
            )
            self._max_workers = _MAX_WINDOWS_WORKERS

        if context is None:
            context = get_context()
        self._context = context
        self._env = env

        self._initializer, self._initargs = _prepare_initializer(
            initializer, initargs
        )
        _check_max_depth(self._context)

        if result_reducers is None:
            result_reducers = job_reducers

        # Timeout after which idle workers shut themselves down.
        self._timeout = timeout

        # Management thread, started lazily on first submit.
        # NOTE: the original code assigned both this attribute and
        # self._processes twice; the redundant duplicate assignments have
        # been removed.
        self._executor_manager_thread = None

        # Internal variables of the ProcessPoolExecutor
        self._processes = {}  # map of pids to worker processes
        self._queue_count = 0
        self._pending_work_items = {}
        self._running_work_items = []
        self._work_ids = queue.Queue()
        self._processes_management_lock = self._context.Lock()
        self._shutdown_lock = threading.Lock()

        # _ThreadWakeup is a communication channel used to interrupt the wait
        # of the main loop of executor_manager_thread from another thread (e.g.
        # when calling executor.submit or executor.shutdown). We do not use the
        # _result_queue to send wakeup signals to the executor_manager_thread
        # as it could result in a deadlock if a worker process dies with the
        # _result_queue write lock still acquired.
        #
        # _shutdown_lock must be locked to access _ThreadWakeup.wakeup.
        self._executor_manager_thread_wakeup = _ThreadWakeup()

        # Flag to hold the state of the Executor. This permits to introspect
        # the Executor state even once it has been garbage collected.
        self._flags = _ExecutorFlags(self._shutdown_lock)

        # Finally setup the queues for interprocess communication
        self._setup_queues(job_reducers, result_reducers)

        mp.util.debug("ProcessPoolExecutor is setup")

    def _setup_queues(self, job_reducers, result_reducers, queue_size=None):
        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        if queue_size is None:
            queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS
        self._call_queue = _SafeQueue(
            max_size=queue_size,
            pending_work_items=self._pending_work_items,
            running_work_items=self._running_work_items,
            thread_wakeup=self._executor_manager_thread_wakeup,
            reducers=job_reducers,
            ctx=self._context,
        )
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True

        self._result_queue = SimpleQueue(
            reducers=result_reducers, ctx=self._context
        )

    def _start_executor_manager_thread(self):
        # Start the manager thread once; subsequent calls are no-ops.
        if self._executor_manager_thread is None:
            mp.util.debug("_start_executor_manager_thread called")

            # Start the processes so that their sentinels are known.
            self._executor_manager_thread = _ExecutorManagerThread(self)
            self._executor_manager_thread.start()

            # register this executor in a mechanism that ensures it will wakeup
            # when the interpreter is exiting.
            _threads_wakeups[self._executor_manager_thread] = (
                self._shutdown_lock,
                self._executor_manager_thread_wakeup,
            )

            global process_pool_executor_at_exit
            if process_pool_executor_at_exit is None:
                # Ensure that the _python_exit function will be called before
                # the multiprocessing.Queue._close finalizers which have an
                # exitpriority of 10.

                if sys.version_info < (3, 9):
                    process_pool_executor_at_exit = mp.util.Finalize(
                        None, _python_exit, exitpriority=20
                    )
                else:
                    process_pool_executor_at_exit = threading._register_atexit(
                        _python_exit
                    )

    def _adjust_process_count(self):
        # Spawn new workers until the pool is full.
        while len(self._processes) < self._max_workers:
            worker_exit_lock = self._context.BoundedSemaphore(1)
            args = (
                self._call_queue,
                self._result_queue,
                self._initializer,
                self._initargs,
                self._processes_management_lock,
                self._timeout,
                worker_exit_lock,
                _CURRENT_DEPTH + 1,
            )
            # Held until the parent acknowledges the worker's exit (see
            # _process_worker's hand-shake on shutdown).
            worker_exit_lock.acquire()
            try:
                # Try to spawn the process with some environment variable to
                # overwrite but it only works with the loky context for now.
                p = self._context.Process(
                    target=_process_worker, args=args, env=self._env
                )
            except TypeError:
                p = self._context.Process(target=_process_worker, args=args)
            p._worker_exit_lock = worker_exit_lock
            p.start()
            self._processes[p.pid] = p
            mp.util.debug(
                f"Adjusted process count to {self._max_workers}: "
                f"{[(p.name, pid) for pid, p in self._processes.items()]}"
            )

    def _ensure_executor_running(self):
        """ensures all workers and management thread are running"""
        with self._processes_management_lock:
            if len(self._processes) != self._max_workers:
                self._adjust_process_count()
            self._start_executor_manager_thread()

    def submit(self, fn, *args, **kwargs):
        with self._flags.shutdown_lock:
            if self._flags.broken is not None:
                raise self._flags.broken
            if self._flags.shutdown:
                raise ShutdownExecutorError(
                    "cannot schedule new futures after shutdown"
                )

            # Cannot submit a new calls once the interpreter is shutting down.
            # This check avoids spawning new processes at exit.
            if _global_shutdown:
                raise RuntimeError(
                    "cannot schedule new futures after interpreter shutdown"
                )

            f = Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._executor_manager_thread_wakeup.wakeup()

            self._ensure_executor_running()
            return f

    submit.__doc__ = Executor.submit.__doc__

    def map(self, fn, *iterables, **kwargs):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: If greater than one, the iterables will be chopped into
                chunks of size chunksize and submitted to the process pool.
                If set to one, the items in the list will be sent one at a
                time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        timeout = kwargs.get("timeout", None)
        chunksize = kwargs.get("chunksize", 1)
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")

        results = super().map(
            partial(_process_chunk, fn),
            _get_chunks(chunksize, *iterables),
            timeout=timeout,
        )
        return _chain_from_iterable_of_lists(results)

    def shutdown(self, wait=True, kill_workers=False):
        mp.util.debug(f"shutting down executor {self}")

        self._flags.flag_as_shutting_down(kill_workers)
        executor_manager_thread = self._executor_manager_thread
        executor_manager_thread_wakeup = self._executor_manager_thread_wakeup

        if executor_manager_thread_wakeup is not None:
            # Wake up queue management thread
            with self._shutdown_lock:
                self._executor_manager_thread_wakeup.wakeup()

        if executor_manager_thread is not None and wait:
            # This lock avoids concurrent join if the interpreter
            # is shutting down.
            with _global_shutdown_lock:
                executor_manager_thread.join()
                _threads_wakeups.pop(executor_manager_thread, None)

        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._executor_manager_thread = None
        self._executor_manager_thread_wakeup = None
        self._call_queue = None
        self._result_queue = None
        self._processes_management_lock = None

    shutdown.__doc__ = Executor.shutdown.__doc__
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad016fd389762a1c458200ffe7b310239da3a3f3
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py
@@ -0,0 +1,285 @@
+###############################################################################
+# Reusable ProcessPoolExecutor
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+import time
+import warnings
+import threading
+import multiprocessing as mp
+
+from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
+from .backend.context import cpu_count
+from .backend import get_context
+
+__all__ = ["get_reusable_executor"]
+
# Singleton executor and id management.
# These module-level globals are read/updated under ``_executor_lock``.
_executor_lock = threading.RLock()  # guards singleton creation, reuse and ids
_next_executor_id = 0  # monotonic counter handed out by _get_next_executor_id
_executor = None  # current singleton executor instance (None until created)
_executor_kwargs = None  # kwargs used to build _executor, for reuse checks
+
+
def _get_next_executor_id():
    """Return a unique, monotonically increasing executor id.

    Successive calls hand out consecutive integers under the module-level
    lock, so concurrent callers never receive the same id. The ids exist to
    help debug and test automated instance creation.
    """
    global _next_executor_id
    with _executor_lock:
        current_id = _next_executor_id
        _next_executor_id = current_id + 1
        return current_id
+
+
def get_reusable_executor(
    max_workers=None,
    context=None,
    timeout=10,
    kill_workers=False,
    reuse="auto",
    job_reducers=None,
    result_reducers=None,
    initializer=None,
    initargs=(),
    env=None,
):
    """Return the current ReusableExecutor instance.

    Start a new instance if it has not been started already or if the previous
    instance was left in a broken state.

    If the previous instance does not have the requested number of workers, the
    executor is dynamically resized to adjust the number of workers prior to
    returning.

    Reusing a singleton instance spares the overhead of starting new worker
    processes and importing common python packages each time.

    ``max_workers`` controls the maximum number of tasks that can be running in
    parallel in worker processes. By default this is set to the number of
    CPUs on the host.

    Setting ``timeout`` (in seconds) makes idle workers automatically shutdown
    so as to release system resources. New workers are respawned upon
    submission of new tasks so that ``max_workers`` are available to accept the
    newly submitted tasks. Setting ``timeout`` to around 100 times the time
    required to spawn new processes and import packages in them (on the order
    of 100ms) ensures that the overhead of spawning workers is negligible.

    Setting ``kill_workers=True`` makes it possible to forcibly interrupt
    previously spawned jobs to get a new instance of the reusable executor
    with new constructor argument values.

    The ``job_reducers`` and ``result_reducers`` are used to customize the
    pickling of tasks and results sent to the executor.

    When provided, the ``initializer`` is run first in newly spawned
    processes with argument ``initargs``.

    The environment variables in the child process are a copy of the values in
    the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and
    ``VAL`` are string literals to overwrite the environment variable ``ENV``
    in the child processes to value ``VAL``. The environment variables are set
    in the children before any module is loaded. This only works with the
    ``loky`` context.
    """
    # The module-level singleton is created/updated inside the classmethod
    # below. Bind the result to a distinct local name: the previous code used
    # a local called ``_executor``, which shadowed the module global of the
    # same name without rebinding it (no ``global`` statement) and was
    # misleading to readers.
    executor, _is_reused = _ReusablePoolExecutor.get_reusable_executor(
        max_workers=max_workers,
        context=context,
        timeout=timeout,
        kill_workers=kill_workers,
        reuse=reuse,
        job_reducers=job_reducers,
        result_reducers=result_reducers,
        initializer=initializer,
        initargs=initargs,
        env=env,
    )
    return executor
+
+
class _ReusablePoolExecutor(ProcessPoolExecutor):
    """Process pool executor managed as a module-level reusable singleton.

    Instances are created and reused through the
    :meth:`get_reusable_executor` classmethod, and can be resized in place
    via :meth:`_resize`. The ``submit_resize_lock`` passed to the constructor
    serializes submissions with resizing so that no job is submitted while
    the worker count is being adjusted.
    """

    def __init__(
        self,
        submit_resize_lock,
        max_workers=None,
        context=None,
        timeout=None,
        executor_id=0,
        job_reducers=None,
        result_reducers=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        # All pool-related arguments are forwarded verbatim to
        # ProcessPoolExecutor; only executor_id and the shared lock are kept
        # on this subclass.
        super().__init__(
            max_workers=max_workers,
            context=context,
            timeout=timeout,
            job_reducers=job_reducers,
            result_reducers=result_reducers,
            initializer=initializer,
            initargs=initargs,
            env=env,
        )
        # Monotonic id (see _get_next_executor_id) used in debug messages.
        self.executor_id = executor_id
        # Lock shared with the module; taken by both submit() and _resize().
        self._submit_resize_lock = submit_resize_lock

    @classmethod
    def get_reusable_executor(
        cls,
        max_workers=None,
        context=None,
        timeout=10,
        kill_workers=False,
        reuse="auto",
        job_reducers=None,
        result_reducers=None,
        initializer=None,
        initargs=(),
        env=None,
    ):
        """Return ``(executor, is_reused)`` for the module-level singleton.

        A new instance is created when none exists yet, when the previous one
        is broken or shut down, or when the constructor arguments changed and
        ``reuse`` does not force reuse. Otherwise the existing instance is
        resized to ``max_workers`` and returned with ``is_reused=True``.
        """
        with _executor_lock:
            global _executor, _executor_kwargs
            executor = _executor

            if max_workers is None:
                # With reuse=True, keep the current worker count when an
                # executor already exists; otherwise default to the CPU count.
                if reuse is True and executor is not None:
                    max_workers = executor._max_workers
                else:
                    max_workers = cpu_count()
            elif max_workers <= 0:
                raise ValueError(
                    f"max_workers must be greater than 0, got {max_workers}."
                )

            if isinstance(context, str):
                context = get_context(context)
            # The 'fork' start method is explicitly rejected. The reason is
            # not stated here — presumably it is incompatible with reusing a
            # long-lived pool (NOTE(review): confirm against loky docs).
            if context is not None and context.get_start_method() == "fork":
                raise ValueError(
                    "Cannot use reusable executor with the 'fork' context"
                )

            # These kwargs (note: not max_workers) are what decides, under
            # reuse="auto", whether the previous instance can be reused.
            kwargs = dict(
                context=context,
                timeout=timeout,
                job_reducers=job_reducers,
                result_reducers=result_reducers,
                initializer=initializer,
                initargs=initargs,
                env=env,
            )
            if executor is None:
                # No singleton yet: build one and record its kwargs.
                is_reused = False
                mp.util.debug(
                    f"Create a executor with max_workers={max_workers}."
                )
                executor_id = _get_next_executor_id()
                _executor_kwargs = kwargs
                _executor = executor = cls(
                    _executor_lock,
                    max_workers=max_workers,
                    executor_id=executor_id,
                    **kwargs,
                )
            else:
                if reuse == "auto":
                    # Reuse only if the constructor arguments are unchanged.
                    reuse = kwargs == _executor_kwargs
                if (
                    executor._flags.broken
                    or executor._flags.shutdown
                    or not reuse
                ):
                    if executor._flags.broken:
                        reason = "broken"
                    elif executor._flags.shutdown:
                        reason = "shutdown"
                    else:
                        reason = "arguments have changed"
                    mp.util.debug(
                        "Creating a new executor with max_workers="
                        f"{max_workers} as the previous instance cannot be "
                        f"reused ({reason})."
                    )
                    # Dispose of the stale instance and clear the singleton
                    # state before rebuilding.
                    executor.shutdown(wait=True, kill_workers=kill_workers)
                    _executor = executor = _executor_kwargs = None
                    # Recursive call to build a new instance
                    return cls.get_reusable_executor(
                        max_workers=max_workers, **kwargs
                    )
                else:
                    mp.util.debug(
                        "Reusing existing executor with "
                        f"max_workers={executor._max_workers}."
                    )
                    is_reused = True
                    # Adjust the worker count in place if it differs.
                    executor._resize(max_workers)

            return executor, is_reused

    def submit(self, fn, *args, **kwargs):
        # Serialize submissions with _resize() through the shared lock so a
        # job is never submitted while the pool is being resized.
        with self._submit_resize_lock:
            return super().submit(fn, *args, **kwargs)

    def _resize(self, max_workers):
        """Resize the pool in place to ``max_workers`` worker processes."""
        with self._submit_resize_lock:
            if max_workers is None:
                raise ValueError("Trying to resize with max_workers=None")
            elif max_workers == self._max_workers:
                # Already at the requested size: nothing to do.
                return

            if self._executor_manager_thread is None:
                # If the executor_manager_thread has not been started
                # then no processes have been spawned and we can just
                # update _max_workers and return
                self._max_workers = max_workers
                return

            # Block (with a warning) until pending jobs have completed.
            self._wait_job_completion()

            # Some process might have returned due to timeout so check how many
            # children are still alive. Use the _process_management_lock to
            # ensure that no process are spawned or timeout during the resize.
            with self._processes_management_lock:
                processes = list(self._processes.values())
                nb_children_alive = sum(p.is_alive() for p in processes)
                self._max_workers = max_workers
                # One None per worker in excess of the new target —
                # presumably the worker stop sentinel (NOTE(review): confirm
                # against process_executor's worker loop).
                for _ in range(max_workers, nb_children_alive):
                    self._call_queue.put(None)
            # Busy-wait until the excess workers have exited, unless the pool
            # has been flagged as broken in the meantime.
            while (
                len(self._processes) > max_workers and not self._flags.broken
            ):
                time.sleep(1e-3)

            # Spawn new workers if the pool grew, then wait until all
            # recorded processes are up.
            self._adjust_process_count()
            processes = list(self._processes.values())
            while not all(p.is_alive() for p in processes):
                time.sleep(1e-3)

    def _wait_job_completion(self):
        """Wait for the cache to be empty before resizing the pool."""
        # Issue a warning to the user about the bad effect of this usage.
        if self._pending_work_items:
            warnings.warn(
                "Trying to resize an executor with running jobs: "
                "waiting for jobs completion before resizing.",
                UserWarning,
            )
            mp.util.debug(
                f"Executor {self.executor_id} waiting for jobs completion "
                "before resizing"
            )
        # Wait for the completion of the jobs
        while self._pending_work_items:
            time.sleep(1e-3)

    def _setup_queues(self, job_reducers, result_reducers):
        """Create the call/result queues with a size-independent capacity."""
        # As this executor can be resized, use a large queue size to avoid
        # underestimating capacity and introducing overhead
        queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS
        super()._setup_queues(
            job_reducers, result_reducers, queue_size=queue_size
        )
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz
new file mode 100644
index 0000000000000000000000000000000000000000..3fd32f71887ddd0c94d06c8a77afccc322fed583
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py27_np17.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1f4e8cccfca94f25ae744d1f050b0734f663263ba38ed0642181404b348b17b
+size 757
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz
new file mode 100644
index 0000000000000000000000000000000000000000..7cd1fcc9dc7a04d7ac251d3b1bbf973609b947b8
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py33_np18.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9e215780f978ce693e48110ead23652e1c6de1c2189172232690198f7088788
+size 792
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz
new file mode 100644
index 0000000000000000000000000000000000000000..878decdcad534f6d2cdd14a487c207f8c6133261
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py35_np19.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a56c3fc6e0db3a4102aaed4a19fd4e154eecd956f30b6bf9179897844ed3c01e
+size 790
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz
new file mode 100644
index 0000000000000000000000000000000000000000..7812497bc95e5894c8e880736bfb06aa22bb2fae
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efb146d450c6d061d06affb56f17384e7f64cbab9b516fcc6c4d3f8869b3e707
+size 712
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..d6cf697b1c1c752d4d8a78d702a70042ad047ce9
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e064c2eecfdc58d552844467da7bd56eca596098322bfd266a7e1312abdd5735
+size 1068
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.bz2 b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.bz2
new file mode 100644
index 0000000000000000000000000000000000000000..317981559c4a9987aa099efeb68e4359c08d71ec
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py33_np18.pkl.bz2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e86d6f6ecfe2626cf691827ac38a81d64ec3ebb527c5432eb344b8496781b45a
+size 1000
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..f22c25bdb59d15a3771104dff6dfebe564e98add
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cbe456f5b91f5a3cb8e386838f276c30335432a351426686187761d5c34168b
+size 1068
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.bz2 b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.bz2
new file mode 100644
index 0000000000000000000000000000000000000000..80818a8baa1e2481b62bed06bb2b95f4a614cc3a
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.bz2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2af67ea667c1f5315ddcab06bfa447005863c1c0fd88bb7e04a0b8acb9a54b
+size 1021
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..360af38dc3a9bde47e3b18b144dc1c5257e7daca
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97b9ef2e896104321d3c5ce73b3de504788c38f04f08c8b56d7a29d6d1520a96
+size 1068
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz
new file mode 100644
index 0000000000000000000000000000000000000000..cec2871b09ae347e07c81eb55e7979300748ccd1
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py35_np19.pkl.xz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02cf30d8b196c303662b2dd035d2a58caeb762ae3a82345ffd1274961e7f5aa0
+size 752
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_compressed_pickle_py36_np111.gz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_compressed_pickle_py36_np111.gz
new file mode 100644
index 0000000000000000000000000000000000000000..f2e65e202609648f0a5464ae5b78b9f9fba8dd6e
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_compressed_pickle_py36_np111.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d56ae75c3a83a0d10f60e657d50e56af6e3addbf2f555e9fc385a6e52e1b32de
+size 800
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..4dda21d9ad4ce279b8474ecce9697e3290e96bfa
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e6b0e171782d5fd5a61d1844dc946eb27c5f6b2e8075d436b23808433142ebc
+size 1068
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.bz2 b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.bz2
new file mode 100644
index 0000000000000000000000000000000000000000..895dd324d574d9b2298833317a76f3794209bbb3
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.bz2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc8db259be742ca2ff36067277f5e4a03e6d78883ddee238da65a7c7d79ef804
+size 991
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.xz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.xz
new file mode 100644
index 0000000000000000000000000000000000000000..c7607dcdb2b09e7a50acc3239cc585974e7a09e6
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.11.0_pickle_py36_np111.pkl.xz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd787f35b3197418d8c7bca77c9dc5ca47b6f22cd24524b3ccd074cf90f893d6
+size 752
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz
new file mode 100644
index 0000000000000000000000000000000000000000..1238376dd6ac2e166bf56f263862afe56b866da3
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py27_np16.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34bb43aefa365c81f42af51402f84ea8c7a85c48c65b422e4e4fe8b2ee57883c
+size 658
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py34_np19.gz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py34_np19.gz
new file mode 100644
index 0000000000000000000000000000000000000000..0720a70aee276c37f9457817922ae60b67600d47
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py34_np19.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f33bd8a21a41b729b05dac5deeb0e868f218a092b0e3fe5988094cf167217f6
+size 673
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz
new file mode 100644
index 0000000000000000000000000000000000000000..0720a70aee276c37f9457817922ae60b67600d47
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_compressed_pickle_py35_np19.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f33bd8a21a41b729b05dac5deeb0e868f218a092b0e3fe5988094cf167217f6
+size 673
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..f7ca0addc6d032e93d0b530a2b42a583fb0d4b81
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9da8a3764db121e29d21ade67c9c3426598e76d88deae44cd7238983af8cef73
+size 670
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_02.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_02.npy
new file mode 100644
index 0000000000000000000000000000000000000000..f00f08fbeeda280fa3ce00069c313c5412a33eca
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_02.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1
+size 120
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_03.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_03.npy
new file mode 100644
index 0000000000000000000000000000000000000000..ccc84c361de2569ed5cb91967f9063efcd84dd14
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_03.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0c45ae2a289841cbeba2443b7ebaa3b31c0a9e9dcc73294aca5729da0092405
+size 236
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy
new file mode 100644
index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np16.pkl_04.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05
+size 104
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_01.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_01.npy
new file mode 100644
index 0000000000000000000000000000000000000000..15574a4193ad4ad724b2b8053c701a82efa78fd5
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_01.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809
+size 120
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_02.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_02.npy
new file mode 100644
index 0000000000000000000000000000000000000000..f00f08fbeeda280fa3ce00069c313c5412a33eca
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_02.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1
+size 120
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_03.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_03.npy
new file mode 100644
index 0000000000000000000000000000000000000000..ccc84c361de2569ed5cb91967f9063efcd84dd14
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_03.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0c45ae2a289841cbeba2443b7ebaa3b31c0a9e9dcc73294aca5729da0092405
+size 236
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_04.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_04.npy
new file mode 100644
index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py27_np17.pkl_04.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05
+size 104
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..e739b6d035cdf110063dbb8b2cdceb116e187019
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3d4cbc690d3ce9e5323a714ea546f32c01ab1710285c420184f6cdf4b26fc25
+size 691
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy
new file mode 100644
index 0000000000000000000000000000000000000000..15574a4193ad4ad724b2b8053c701a82efa78fd5
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_01.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809
+size 120
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_02.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_02.npy
new file mode 100644
index 0000000000000000000000000000000000000000..f00f08fbeeda280fa3ce00069c313c5412a33eca
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py33_np18.pkl_02.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1
+size 120
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_01.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_01.npy
new file mode 100644
index 0000000000000000000000000000000000000000..15574a4193ad4ad724b2b8053c701a82efa78fd5
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_01.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0efbd7d9ce7eec3a6e0a0db41e795e0396cca3d6b037dad6c61b464843d28809
+size 120
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_02.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_02.npy
new file mode 100644
index 0000000000000000000000000000000000000000..f00f08fbeeda280fa3ce00069c313c5412a33eca
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_02.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1
+size 120
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy
new file mode 100644
index 0000000000000000000000000000000000000000..e9b5e77c73268dfff541b576126f06fc6fed3d59
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py34_np19.pkl_04.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ecbe244294ba93e08479b16c1b9a9411e3569ff660ed0459dca1d241381df05
+size 104
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..93417ab8e94e4542a24211ad514948f9d1b80a3a
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59f0d522a29c333ce1d60480b2121fcc1a08a5d2dd650b86efdc987f991fa4ea
+size 691
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy
new file mode 100644
index 0000000000000000000000000000000000000000..f00f08fbeeda280fa3ce00069c313c5412a33eca
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1
+size 120
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_03.npy b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_03.npy
new file mode 100644
index 0000000000000000000000000000000000000000..73976395be90d4b2b2d955c79a90721e16cebc82
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_03.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ede9a64a52b25d7db30950956c978ec0b3932b7d14acd5abc63216e64babde7
+size 307
diff --git a/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz
new file mode 100644
index 0000000000000000000000000000000000000000..e3125fe0fd4709dbd0067e67a06a3f24073934ad
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2361f589b31d2863627edcb96612280ae5c0a59c9496d89dab7de493670f93b
+size 802
diff --git a/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/INSTALLER b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/RECORD b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..2e7580b310656dbf4c36dcfeb9b3f2828795013f
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/RECORD
@@ -0,0 +1,58 @@
+omegaconf-2.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+omegaconf-2.3.0.dist-info/LICENSE,sha256=sxN17V1cvn7C0MTBTfivp6jlUBu-JfcLbLxGFoX0uRo,1518
+omegaconf-2.3.0.dist-info/METADATA,sha256=qy3L8OPnDMddyh2JT4lT596LZR3C3CjwjzUL-leHuts,3865
+omegaconf-2.3.0.dist-info/RECORD,,
+omegaconf-2.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+omegaconf-2.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+omegaconf-2.3.0.dist-info/top_level.txt,sha256=gKo5sjlMnML5r9-l1eLYBTS1xZ-xAr67mlBSgL9yvSA,25
+omegaconf/__init__.py,sha256=lynwiDHEFBIHMqrBN3HRh0KJevzIap6MyULFNkMmlBc,1170
+omegaconf/__pycache__/__init__.cpython-310.pyc,,
+omegaconf/__pycache__/_impl.cpython-310.pyc,,
+omegaconf/__pycache__/_utils.cpython-310.pyc,,
+omegaconf/__pycache__/base.cpython-310.pyc,,
+omegaconf/__pycache__/basecontainer.cpython-310.pyc,,
+omegaconf/__pycache__/dictconfig.cpython-310.pyc,,
+omegaconf/__pycache__/errors.cpython-310.pyc,,
+omegaconf/__pycache__/grammar_parser.cpython-310.pyc,,
+omegaconf/__pycache__/grammar_visitor.cpython-310.pyc,,
+omegaconf/__pycache__/listconfig.cpython-310.pyc,,
+omegaconf/__pycache__/nodes.cpython-310.pyc,,
+omegaconf/__pycache__/omegaconf.cpython-310.pyc,,
+omegaconf/__pycache__/version.cpython-310.pyc,,
+omegaconf/_impl.py,sha256=CJfcRt8K20BiMMW5u3q8wuQ9hAeAmuYlFxctzPgTIgQ,2877
+omegaconf/_utils.py,sha256=bw8Jpra0B8YkHFrwODrBcKPvgQTyU0pEpskLl7CDBRg,32564
+omegaconf/base.py,sha256=rHWuLw8Iiew_SNJIUkLZy4EfyRb60hyS9X9Hl4OuBvs,32462
+omegaconf/basecontainer.py,sha256=FWL2PZWRb7wP-AGpeN651l_uVnNhZvzzLjaUdDlJiB8,33318
+omegaconf/dictconfig.py,sha256=IPtUPHoak025Q-gvDVqMdS497Xb6tjcsawrHXv4iQ2Y,27855
+omegaconf/errors.py,sha256=ectbX8Kd-5FCJMTBIh6jshqvCnCiXMFGfFQJYGIereQ,3595
+omegaconf/grammar/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+omegaconf/grammar/__pycache__/__init__.cpython-310.pyc,,
+omegaconf/grammar/gen/OmegaConfGrammarLexer.py,sha256=eoog1aofOltw-u61QFeCWZ3A9oa6vK2tjTaX4wmJY14,21689
+omegaconf/grammar/gen/OmegaConfGrammarParser.py,sha256=X0uJc0OieVB_f9yKRZw4OfwRBPOGoiyDMjhz8OKPvZ4,63429
+omegaconf/grammar/gen/OmegaConfGrammarParserListener.py,sha256=ncibh-Nhg68pdJPdMaqeFTP876wvo0lpJqPXhaOmjZU,5843
+omegaconf/grammar/gen/OmegaConfGrammarParserVisitor.py,sha256=Amvi244RtYFebNSJCbOwsqdvQW-QgdDKTku0rXnkD5Y,3617
+omegaconf/grammar/gen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+omegaconf/grammar/gen/__pycache__/OmegaConfGrammarLexer.cpython-310.pyc,,
+omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParser.cpython-310.pyc,,
+omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParserListener.cpython-310.pyc,,
+omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParserVisitor.cpython-310.pyc,,
+omegaconf/grammar/gen/__pycache__/__init__.cpython-310.pyc,,
+omegaconf/grammar_parser.py,sha256=7gvIGsqtsgl5e0XOWhI3Ur58FoIk3iBaX_M8JdyzpYM,5511
+omegaconf/grammar_visitor.py,sha256=L6LOomG24GcPA1-aYzNOmKLup5kYirHTuyp857V7T7A,16055
+omegaconf/listconfig.py,sha256=LyNRrn84BqNiz-TpwrDILat8HhlVwwM6zFmYCTUAdIo,24671
+omegaconf/nodes.py,sha256=1XRIC8sfEoaV4PT_Vm7Z75JMsIc30kKTuJHGyZ2aM0w,17479
+omegaconf/omegaconf.py,sha256=hFeVL0k6PNRqGE7ZUpI2p6sSGPkc9IEvsqr3MNdgU_s,39048
+omegaconf/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+omegaconf/resolvers/__init__.py,sha256=zE-8WJwipGC2cXPq12CGAz49m82F1D1cKMwuV3EoT7k,60
+omegaconf/resolvers/__pycache__/__init__.cpython-310.pyc,,
+omegaconf/resolvers/oc/__init__.py,sha256=gLniKaouCmkm1ZTQOzS1F5jFlCMw7eL-ae6PhaMxNjg,3548
+omegaconf/resolvers/oc/__pycache__/__init__.cpython-310.pyc,,
+omegaconf/resolvers/oc/__pycache__/dict.cpython-310.pyc,,
+omegaconf/resolvers/oc/dict.py,sha256=OL7iZ_aBIt7kYXxhMiFxSyskiX-xXBvGI_DnBbdxqVs,2324
+omegaconf/version.py,sha256=9OuwPWmsrNRBQYmeipe5XnywKY4jw-ZQBrjbo5C5Md4,442
+pydevd_plugins/__init__.py,sha256=byyA3PmxQsWCnqpQzFy44heJq59wOqHr5bSt6knm5vY,180
+pydevd_plugins/__pycache__/__init__.cpython-310.pyc,,
+pydevd_plugins/extensions/__init__.py,sha256=byyA3PmxQsWCnqpQzFy44heJq59wOqHr5bSt6knm5vY,180
+pydevd_plugins/extensions/__pycache__/__init__.cpython-310.pyc,,
+pydevd_plugins/extensions/__pycache__/pydevd_plugin_omegaconf.cpython-310.pyc,,
+pydevd_plugins/extensions/pydevd_plugin_omegaconf.py,sha256=Ac1SCniO-22CsRsMAXhFyyoVSN2R-MUWsOujm-601Ak,4251
diff --git a/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/REQUESTED b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/WHEEL b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/top_level.txt b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fcd2b36b800878631de7b82d0218569415c6a323
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/omegaconf-2.3.0.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+omegaconf
+pydevd_plugins
diff --git a/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.70530407.so.11 b/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.70530407.so.11
new file mode 100644
index 0000000000000000000000000000000000000000..c2b3447fcdad30da1866f840b9e48734384b3557
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/torchvision.libs/libnvjpeg.70530407.so.11
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62748b186c6fd8f20cf31c3da1604d0236c6003a6a246d4b76147663ac8d8862
+size 5690112
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32eb6b0be5baee28afba254f75c1426332788fa5
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..914730270604cf8e1d761823dc6e5fb881e2fe14
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_base.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3356b6d5989e86318ebb2c805c9d33568eea3d51
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/__pycache__/_variance_threshold.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..ede6fa9a21c3488b569ead317fc86f32cf12d1bc
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_mutual_info.py
@@ -0,0 +1,580 @@
+# Authors: The scikit-learn developers
+# SPDX-License-Identifier: BSD-3-Clause
+
+from numbers import Integral
+
+import numpy as np
+from scipy.sparse import issparse
+from scipy.special import digamma
+
+from ..metrics.cluster import mutual_info_score
+from ..neighbors import KDTree, NearestNeighbors
+from ..preprocessing import scale
+from ..utils import check_random_state
+from ..utils._param_validation import Interval, StrOptions, validate_params
+from ..utils.multiclass import check_classification_targets
+from ..utils.parallel import Parallel, delayed
+from ..utils.validation import check_array, check_X_y
+
+
+def _compute_mi_cc(x, y, n_neighbors):
+ """Compute mutual information between two continuous variables.
+
+ Parameters
+ ----------
+ x, y : ndarray, shape (n_samples,)
+ Samples of two continuous random variables, must have an identical
+ shape.
+
+ n_neighbors : int
+ Number of nearest neighbors to search for each point, see [1]_.
+
+ Returns
+ -------
+ mi : float
+ Estimated mutual information in nat units. If it turned out to be
+ negative it is replaced by 0.
+
+ Notes
+ -----
+ True mutual information can't be negative. If its estimate by a numerical
+ method is negative, it means (providing the method is adequate) that the
+ mutual information is close to 0 and replacing it by 0 is a reasonable
+ strategy.
+
+ References
+ ----------
+ .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
+ information". Phys. Rev. E 69, 2004.
+ """
+ n_samples = x.size
+
+ x = x.reshape((-1, 1))
+ y = y.reshape((-1, 1))
+ xy = np.hstack((x, y))
+
+ # Here we rely on NearestNeighbors to select the fastest algorithm.
+ nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors)
+
+ nn.fit(xy)
+ radius = nn.kneighbors()[0]
+ radius = np.nextafter(radius[:, -1], 0)
+
+ # KDTree is explicitly fit to allow for the querying of number of
+ # neighbors within a specified radius
+ kd = KDTree(x, metric="chebyshev")
+ nx = kd.query_radius(x, radius, count_only=True, return_distance=False)
+ nx = np.array(nx) - 1.0
+
+ kd = KDTree(y, metric="chebyshev")
+ ny = kd.query_radius(y, radius, count_only=True, return_distance=False)
+ ny = np.array(ny) - 1.0
+
+ mi = (
+ digamma(n_samples)
+ + digamma(n_neighbors)
+ - np.mean(digamma(nx + 1))
+ - np.mean(digamma(ny + 1))
+ )
+
+ return max(0, mi)
+
+
+def _compute_mi_cd(c, d, n_neighbors):
+ """Compute mutual information between continuous and discrete variables.
+
+ Parameters
+ ----------
+ c : ndarray, shape (n_samples,)
+ Samples of a continuous random variable.
+
+ d : ndarray, shape (n_samples,)
+ Samples of a discrete random variable.
+
+ n_neighbors : int
+ Number of nearest neighbors to search for each point, see [1]_.
+
+ Returns
+ -------
+ mi : float
+ Estimated mutual information in nat units. If it turned out to be
+ negative it is replaced by 0.
+
+ Notes
+ -----
+ True mutual information can't be negative. If its estimate by a numerical
+ method is negative, it means (providing the method is adequate) that the
+ mutual information is close to 0 and replacing it by 0 is a reasonable
+ strategy.
+
+ References
+ ----------
+ .. [1] B. C. Ross "Mutual Information between Discrete and Continuous
+ Data Sets". PLoS ONE 9(2), 2014.
+ """
+ n_samples = c.shape[0]
+ c = c.reshape((-1, 1))
+
+ radius = np.empty(n_samples)
+ label_counts = np.empty(n_samples)
+ k_all = np.empty(n_samples)
+ nn = NearestNeighbors()
+ for label in np.unique(d):
+ mask = d == label
+ count = np.sum(mask)
+ if count > 1:
+ k = min(n_neighbors, count - 1)
+ nn.set_params(n_neighbors=k)
+ nn.fit(c[mask])
+ r = nn.kneighbors()[0]
+ radius[mask] = np.nextafter(r[:, -1], 0)
+ k_all[mask] = k
+ label_counts[mask] = count
+
+ # Ignore points with unique labels.
+ mask = label_counts > 1
+ n_samples = np.sum(mask)
+ label_counts = label_counts[mask]
+ k_all = k_all[mask]
+ c = c[mask]
+ radius = radius[mask]
+
+ kd = KDTree(c)
+ m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)
+ m_all = np.array(m_all)
+
+ mi = (
+ digamma(n_samples)
+ + np.mean(digamma(k_all))
+ - np.mean(digamma(label_counts))
+ - np.mean(digamma(m_all))
+ )
+
+ return max(0, mi)
+
+
+def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):
+ """Compute mutual information between two variables.
+
+ This is a simple wrapper which selects a proper function to call based on
+ whether `x` and `y` are discrete or not.
+ """
+ if x_discrete and y_discrete:
+ return mutual_info_score(x, y)
+ elif x_discrete and not y_discrete:
+ return _compute_mi_cd(y, x, n_neighbors)
+ elif not x_discrete and y_discrete:
+ return _compute_mi_cd(x, y, n_neighbors)
+ else:
+ return _compute_mi_cc(x, y, n_neighbors)
+
+
+def _iterate_columns(X, columns=None):
+ """Iterate over columns of a matrix.
+
+ Parameters
+ ----------
+ X : ndarray or csc_matrix, shape (n_samples, n_features)
+ Matrix over which to iterate.
+
+ columns : iterable or None, default=None
+ Indices of columns to iterate over. If None, iterate over all columns.
+
+ Yields
+ ------
+ x : ndarray, shape (n_samples,)
+ Columns of `X` in dense format.
+ """
+ if columns is None:
+ columns = range(X.shape[1])
+
+ if issparse(X):
+ for i in columns:
+ x = np.zeros(X.shape[0])
+ start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]
+ x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]
+ yield x
+ else:
+ for i in columns:
+ yield X[:, i]
+
+
+def _estimate_mi(
+ X,
+ y,
+ *,
+ discrete_features="auto",
+ discrete_target=False,
+ n_neighbors=3,
+ copy=True,
+ random_state=None,
+ n_jobs=None,
+):
+ """Estimate mutual information between the features and the target.
+
+ Parameters
+ ----------
+ X : array-like or sparse matrix, shape (n_samples, n_features)
+ Feature matrix.
+
+ y : array-like of shape (n_samples,)
+ Target vector.
+
+ discrete_features : {'auto', bool, array-like}, default='auto'
+ If bool, then determines whether to consider all features discrete
+ or continuous. If array, then it should be either a boolean mask
+ with shape (n_features,) or array with indices of discrete features.
+ If 'auto', it is assigned to False for dense `X` and to True for
+ sparse `X`.
+
+ discrete_target : bool, default=False
+ Whether to consider `y` as a discrete variable.
+
+ n_neighbors : int, default=3
+ Number of neighbors to use for MI estimation for continuous variables,
+ see [1]_ and [2]_. Higher values reduce variance of the estimation, but
+ could introduce a bias.
+
+ copy : bool, default=True
+ Whether to make a copy of the given data. If set to False, the initial
+ data will be overwritten.
+
+ random_state : int, RandomState instance or None, default=None
+ Determines random number generation for adding small noise to
+ continuous variables in order to remove repeated values.
+ Pass an int for reproducible results across multiple function calls.
+ See :term:`Glossary `.
+
+ n_jobs : int, default=None
+ The number of jobs to use for computing the mutual information.
+ The parallelization is done on the columns of `X`.
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+ ``-1`` means using all processors. See :term:`Glossary `
+ for more details.
+
+ .. versionadded:: 1.5
+
+
+ Returns
+ -------
+ mi : ndarray, shape (n_features,)
+ Estimated mutual information between each feature and the target in
+ nat units. A negative value will be replaced by 0.
+
+ References
+ ----------
+ .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
+ information". Phys. Rev. E 69, 2004.
+ .. [2] B. C. Ross "Mutual Information between Discrete and Continuous
+ Data Sets". PLoS ONE 9(2), 2014.
+ """
+ X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target)
+ n_samples, n_features = X.shape
+
+ if isinstance(discrete_features, (str, bool)):
+ if isinstance(discrete_features, str):
+ if discrete_features == "auto":
+ discrete_features = issparse(X)
+ else:
+ raise ValueError("Invalid string value for discrete_features.")
+ discrete_mask = np.empty(n_features, dtype=bool)
+ discrete_mask.fill(discrete_features)
+ else:
+ discrete_features = check_array(discrete_features, ensure_2d=False)
+ if discrete_features.dtype != "bool":
+ discrete_mask = np.zeros(n_features, dtype=bool)
+ discrete_mask[discrete_features] = True
+ else:
+ discrete_mask = discrete_features
+
+ continuous_mask = ~discrete_mask
+ if np.any(continuous_mask) and issparse(X):
+ raise ValueError("Sparse matrix `X` can't have continuous features.")
+
+ rng = check_random_state(random_state)
+ if np.any(continuous_mask):
+ X = X.astype(np.float64, copy=copy)
+ X[:, continuous_mask] = scale(
+ X[:, continuous_mask], with_mean=False, copy=False
+ )
+
+ # Add small noise to continuous features as advised in Kraskov et. al.
+ means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))
+ X[:, continuous_mask] += (
+ 1e-10
+ * means
+ * rng.standard_normal(size=(n_samples, np.sum(continuous_mask)))
+ )
+
+ if not discrete_target:
+ y = scale(y, with_mean=False)
+ y += (
+ 1e-10
+ * np.maximum(1, np.mean(np.abs(y)))
+ * rng.standard_normal(size=n_samples)
+ )
+
+ mi = Parallel(n_jobs=n_jobs)(
+ delayed(_compute_mi)(x, y, discrete_feature, discrete_target, n_neighbors)
+ for x, discrete_feature in zip(_iterate_columns(X), discrete_mask)
+ )
+
+ return np.array(mi)
+
+
+@validate_params(
+ {
+ "X": ["array-like", "sparse matrix"],
+ "y": ["array-like"],
+ "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
+ "copy": ["boolean"],
+ "random_state": ["random_state"],
+ "n_jobs": [Integral, None],
+ },
+ prefer_skip_nested_validation=True,
+)
+def mutual_info_regression(
+ X,
+ y,
+ *,
+ discrete_features="auto",
+ n_neighbors=3,
+ copy=True,
+ random_state=None,
+ n_jobs=None,
+):
+ """Estimate mutual information for a continuous target variable.
+
+ Mutual information (MI) [1]_ between two random variables is a non-negative
+ value, which measures the dependency between the variables. It is equal
+ to zero if and only if two random variables are independent, and higher
+ values mean higher dependency.
+
+ The function relies on nonparametric methods based on entropy estimation
+ from k-nearest neighbors distances as described in [2]_ and [3]_. Both
+ methods are based on the idea originally proposed in [4]_.
+
+ It can be used for univariate features selection, read more in the
+ :ref:`User Guide `.
+
+ Parameters
+ ----------
+ X : array-like or sparse matrix, shape (n_samples, n_features)
+ Feature matrix.
+
+ y : array-like of shape (n_samples,)
+ Target vector.
+
+ discrete_features : {'auto', bool, array-like}, default='auto'
+ If bool, then determines whether to consider all features discrete
+ or continuous. If array, then it should be either a boolean mask
+ with shape (n_features,) or array with indices of discrete features.
+ If 'auto', it is assigned to False for dense `X` and to True for
+ sparse `X`.
+
+ n_neighbors : int, default=3
+ Number of neighbors to use for MI estimation for continuous variables,
+ see [2]_ and [3]_. Higher values reduce variance of the estimation, but
+ could introduce a bias.
+
+ copy : bool, default=True
+ Whether to make a copy of the given data. If set to False, the initial
+ data will be overwritten.
+
+ random_state : int, RandomState instance or None, default=None
+ Determines random number generation for adding small noise to
+ continuous variables in order to remove repeated values.
+ Pass an int for reproducible results across multiple function calls.
+ See :term:`Glossary `.
+
+ n_jobs : int, default=None
+ The number of jobs to use for computing the mutual information.
+ The parallelization is done on the columns of `X`.
+
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+ ``-1`` means using all processors. See :term:`Glossary `
+ for more details.
+
+ .. versionadded:: 1.5
+
+ Returns
+ -------
+ mi : ndarray, shape (n_features,)
+ Estimated mutual information between each feature and the target in
+ nat units.
+
+ Notes
+ -----
+ 1. The term "discrete features" is used instead of naming them
+ "categorical", because it describes the essence more accurately.
+ For example, pixel intensities of an image are discrete features
+ (but hardly categorical) and you will get better results if mark them
+ as such. Also note, that treating a continuous variable as discrete and
+ vice versa will usually give incorrect results, so be attentive about
+ that.
+ 2. True mutual information can't be negative. If its estimate turns out
+ to be negative, it is replaced by zero.
+
+ References
+ ----------
+ .. [1] `Mutual Information
+ `_
+ on Wikipedia.
+ .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
+ information". Phys. Rev. E 69, 2004.
+ .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
+ Data Sets". PLoS ONE 9(2), 2014.
+ .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
+ of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
+
+ Examples
+ --------
+ >>> from sklearn.datasets import make_regression
+ >>> from sklearn.feature_selection import mutual_info_regression
+ >>> X, y = make_regression(
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
+ ... )
+ >>> mutual_info_regression(X, y)
+ array([0.1..., 2.6... , 0.0...])
+ """
+ return _estimate_mi(
+ X,
+ y,
+ discrete_features=discrete_features,
+ discrete_target=False,
+ n_neighbors=n_neighbors,
+ copy=copy,
+ random_state=random_state,
+ n_jobs=n_jobs,
+ )
+
+
+@validate_params(
+ {
+ "X": ["array-like", "sparse matrix"],
+ "y": ["array-like"],
+ "discrete_features": [StrOptions({"auto"}), "boolean", "array-like"],
+ "n_neighbors": [Interval(Integral, 1, None, closed="left")],
+ "copy": ["boolean"],
+ "random_state": ["random_state"],
+ "n_jobs": [Integral, None],
+ },
+ prefer_skip_nested_validation=True,
+)
+def mutual_info_classif(
+ X,
+ y,
+ *,
+ discrete_features="auto",
+ n_neighbors=3,
+ copy=True,
+ random_state=None,
+ n_jobs=None,
+):
+ """Estimate mutual information for a discrete target variable.
+
+ Mutual information (MI) [1]_ between two random variables is a non-negative
+ value, which measures the dependency between the variables. It is equal
+ to zero if and only if two random variables are independent, and higher
+ values mean higher dependency.
+
+ The function relies on nonparametric methods based on entropy estimation
+ from k-nearest neighbors distances as described in [2]_ and [3]_. Both
+ methods are based on the idea originally proposed in [4]_.
+
+ It can be used for univariate features selection, read more in the
+ :ref:`User Guide `.
+
+ Parameters
+ ----------
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
+ Feature matrix.
+
+ y : array-like of shape (n_samples,)
+ Target vector.
+
+ discrete_features : 'auto', bool or array-like, default='auto'
+ If bool, then determines whether to consider all features discrete
+ or continuous. If array, then it should be either a boolean mask
+ with shape (n_features,) or array with indices of discrete features.
+ If 'auto', it is assigned to False for dense `X` and to True for
+ sparse `X`.
+
+ n_neighbors : int, default=3
+ Number of neighbors to use for MI estimation for continuous variables,
+ see [2]_ and [3]_. Higher values reduce variance of the estimation, but
+ could introduce a bias.
+
+ copy : bool, default=True
+ Whether to make a copy of the given data. If set to False, the initial
+ data will be overwritten.
+
+ random_state : int, RandomState instance or None, default=None
+ Determines random number generation for adding small noise to
+ continuous variables in order to remove repeated values.
+ Pass an int for reproducible results across multiple function calls.
+ See :term:`Glossary `.
+
+ n_jobs : int, default=None
+ The number of jobs to use for computing the mutual information.
+ The parallelization is done on the columns of `X`.
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+ ``-1`` means using all processors. See :term:`Glossary `
+ for more details.
+
+ .. versionadded:: 1.5
+
+ Returns
+ -------
+ mi : ndarray, shape (n_features,)
+ Estimated mutual information between each feature and the target in
+ nat units.
+
+ Notes
+ -----
+ 1. The term "discrete features" is used instead of naming them
+ "categorical", because it describes the essence more accurately.
+ For example, pixel intensities of an image are discrete features
+ (but hardly categorical) and you will get better results if mark them
+ as such. Also note, that treating a continuous variable as discrete and
+ vice versa will usually give incorrect results, so be attentive about
+ that.
+ 2. True mutual information can't be negative. If its estimate turns out
+ to be negative, it is replaced by zero.
+
+ References
+ ----------
+ .. [1] `Mutual Information
+ `_
+ on Wikipedia.
+ .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
+ information". Phys. Rev. E 69, 2004.
+ .. [3] B. C. Ross "Mutual Information between Discrete and Continuous
+ Data Sets". PLoS ONE 9(2), 2014.
+ .. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
+ of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16
+
+ Examples
+ --------
+ >>> from sklearn.datasets import make_classification
+ >>> from sklearn.feature_selection import mutual_info_classif
+ >>> X, y = make_classification(
+ ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
+ ... shuffle=False, random_state=42
+ ... )
+ >>> mutual_info_classif(X, y)
+ array([0.58..., 0.10..., 0.19..., 0.09... , 0. ,
+ 0. , 0. , 0. , 0. , 0. ])
+ """
+ check_classification_targets(y)
+ return _estimate_mi(
+ X,
+ y,
+ discrete_features=discrete_features,
+ discrete_target=True,
+ n_neighbors=n_neighbors,
+ copy=copy,
+ random_state=random_state,
+ n_jobs=n_jobs,
+ )
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py
new file mode 100644
index 0000000000000000000000000000000000000000..80cf1fb171cc00e7fb12398c6185dba7b2be6dd6
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_sequential.py
@@ -0,0 +1,362 @@
+"""
+Sequential feature selection
+"""
+
+# Authors: The scikit-learn developers
+# SPDX-License-Identifier: BSD-3-Clause
+
+from numbers import Integral, Real
+
+import numpy as np
+
+from ..base import BaseEstimator, MetaEstimatorMixin, _fit_context, clone, is_classifier
+from ..metrics import check_scoring, get_scorer_names
+from ..model_selection import check_cv, cross_val_score
+from ..utils._metadata_requests import (
+ MetadataRouter,
+ MethodMapping,
+ _raise_for_params,
+ _routing_enabled,
+ process_routing,
+)
+from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
+from ..utils._tags import get_tags
+from ..utils.validation import check_is_fitted, validate_data
+from ._base import SelectorMixin
+
+
+class SequentialFeatureSelector(SelectorMixin, MetaEstimatorMixin, BaseEstimator):
+ """Transformer that performs Sequential Feature Selection.
+
+ This Sequential Feature Selector adds (forward selection) or
+ removes (backward selection) features to form a feature subset in a
+ greedy fashion. At each stage, this estimator chooses the best feature to
+ add or remove based on the cross-validation score of an estimator. In
+ the case of unsupervised learning, this Sequential Feature Selector
+ looks only at the features (X), not the desired outputs (y).
+
+ Read more in the :ref:`User Guide <sequential_feature_selection>`.
+
+ .. versionadded:: 0.24
+
+ Parameters
+ ----------
+ estimator : estimator instance
+ An unfitted estimator.
+
+ n_features_to_select : "auto", int or float, default="auto"
+ If `"auto"`, the behaviour depends on the `tol` parameter:
+
+ - if `tol` is not `None`, then features are selected while the score
+ change does not exceed `tol`.
+ - otherwise, half of the features are selected.
+
+ If integer, the parameter is the absolute number of features to select.
+ If float between 0 and 1, it is the fraction of features to select.
+
+ .. versionadded:: 1.1
+ The option `"auto"` was added in version 1.1.
+
+ .. versionchanged:: 1.3
+ The default changed from `"warn"` to `"auto"` in 1.3.
+
+ tol : float, default=None
+ If the score is not incremented by at least `tol` between two
+ consecutive feature additions or removals, stop adding or removing.
+
+ `tol` can be negative when removing features using `direction="backward"`.
+ `tol` is required to be strictly positive when doing forward selection.
+ It can be useful to reduce the number of features at the cost of a small
+ decrease in the score.
+
+ `tol` is enabled only when `n_features_to_select` is `"auto"`.
+
+ .. versionadded:: 1.1
+
+ direction : {'forward', 'backward'}, default='forward'
+ Whether to perform forward selection or backward selection.
+
+ scoring : str or callable, default=None
+ A single str (see :ref:`scoring_parameter`) or a callable
+ (see :ref:`scoring_callable`) to evaluate the predictions on the test set.
+
+ NOTE that when using a custom scorer, it should return a single
+ value.
+
+ If None, the estimator's score method is used.
+
+ cv : int, cross-validation generator or an iterable, default=None
+ Determines the cross-validation splitting strategy.
+ Possible inputs for cv are:
+
+ - None, to use the default 5-fold cross validation,
+ - integer, to specify the number of folds in a `(Stratified)KFold`,
+ - :term:`CV splitter`,
+ - An iterable yielding (train, test) splits as arrays of indices.
+
+ For integer/None inputs, if the estimator is a classifier and ``y`` is
+ either binary or multiclass,
+ :class:`~sklearn.model_selection.StratifiedKFold` is used. In all other
+ cases, :class:`~sklearn.model_selection.KFold` is used. These splitters
+ are instantiated with `shuffle=False` so the splits will be the same
+ across calls.
+
+ Refer :ref:`User Guide <cross_validation>` for the various
+ cross-validation strategies that can be used here.
+
+ n_jobs : int, default=None
+ Number of jobs to run in parallel. When evaluating a new feature to
+ add or remove, the cross-validation procedure is parallel over the
+ folds.
+ ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
+ ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
+ for more details.
+
+ Attributes
+ ----------
+ n_features_in_ : int
+ Number of features seen during :term:`fit`. Only defined if the
+ underlying estimator exposes such an attribute when fit.
+
+ .. versionadded:: 0.24
+
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
+ Names of features seen during :term:`fit`. Defined only when `X`
+ has feature names that are all strings.
+
+ .. versionadded:: 1.0
+
+ n_features_to_select_ : int
+ The number of features that were selected.
+
+ support_ : ndarray of shape (n_features,), dtype=bool
+ The mask of selected features.
+
+ See Also
+ --------
+ GenericUnivariateSelect : Univariate feature selector with configurable
+ strategy.
+ RFE : Recursive feature elimination based on importance weights.
+ RFECV : Recursive feature elimination based on importance weights, with
+ automatic selection of the number of features.
+ SelectFromModel : Feature selection based on thresholds of importance
+ weights.
+
+ Examples
+ --------
+ >>> from sklearn.feature_selection import SequentialFeatureSelector
+ >>> from sklearn.neighbors import KNeighborsClassifier
+ >>> from sklearn.datasets import load_iris
+ >>> X, y = load_iris(return_X_y=True)
+ >>> knn = KNeighborsClassifier(n_neighbors=3)
+ >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3)
+ >>> sfs.fit(X, y)
+ SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3),
+ n_features_to_select=3)
+ >>> sfs.get_support()
+ array([ True, False, True, True])
+ >>> sfs.transform(X).shape
+ (150, 3)
+ """
+
+ _parameter_constraints: dict = {
+ "estimator": [HasMethods(["fit"])],
+ "n_features_to_select": [
+ StrOptions({"auto"}),
+ Interval(RealNotInt, 0, 1, closed="right"),
+ Interval(Integral, 0, None, closed="neither"),
+ ],
+ "tol": [None, Interval(Real, None, None, closed="neither")],
+ "direction": [StrOptions({"forward", "backward"})],
+ "scoring": [None, StrOptions(set(get_scorer_names())), callable],
+ "cv": ["cv_object"],
+ "n_jobs": [None, Integral],
+ }
+
+ def __init__(
+ self,
+ estimator,
+ *,
+ n_features_to_select="auto",
+ tol=None,
+ direction="forward",
+ scoring=None,
+ cv=5,
+ n_jobs=None,
+ ):
+ self.estimator = estimator
+ self.n_features_to_select = n_features_to_select
+ self.tol = tol
+ self.direction = direction
+ self.scoring = scoring
+ self.cv = cv
+ self.n_jobs = n_jobs
+
+ @_fit_context(
+ # SequentialFeatureSelector.estimator is not validated yet
+ prefer_skip_nested_validation=False
+ )
+ def fit(self, X, y=None, **params):
+ """Learn the features to select from X.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ Training vectors, where `n_samples` is the number of samples and
+ `n_features` is the number of predictors.
+
+ y : array-like of shape (n_samples,), default=None
+ Target values. This parameter may be ignored for
+ unsupervised learning.
+
+ **params : dict, default=None
+ Parameters to be passed to the underlying `estimator`, `cv`
+ and `scorer` objects.
+
+ .. versionadded:: 1.6
+
+ Only available if `enable_metadata_routing=True`,
+ which can be set by using
+ ``sklearn.set_config(enable_metadata_routing=True)``.
+ See :ref:`Metadata Routing User Guide <metadata_routing>` for
+ more details.
+
+ Returns
+ -------
+ self : object
+ Returns the instance itself.
+ """
+ _raise_for_params(params, self, "fit")
+ tags = self.__sklearn_tags__()
+ X = validate_data(
+ self,
+ X,
+ accept_sparse="csc",
+ ensure_min_features=2,
+ ensure_all_finite=not tags.input_tags.allow_nan,
+ )
+ n_features = X.shape[1]
+
+ if self.n_features_to_select == "auto":
+ if self.tol is not None:
+ # With auto feature selection, `n_features_to_select_` will be updated
+ # to `support_.sum()` after features are selected.
+ self.n_features_to_select_ = n_features - 1
+ else:
+ self.n_features_to_select_ = n_features // 2
+ elif isinstance(self.n_features_to_select, Integral):
+ if self.n_features_to_select >= n_features:
+ raise ValueError("n_features_to_select must be < n_features.")
+ self.n_features_to_select_ = self.n_features_to_select
+ elif isinstance(self.n_features_to_select, Real):
+ self.n_features_to_select_ = int(n_features * self.n_features_to_select)
+
+ if self.tol is not None and self.tol < 0 and self.direction == "forward":
+ raise ValueError(
+ "tol must be strictly positive when doing forward selection"
+ )
+
+ cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
+
+ cloned_estimator = clone(self.estimator)
+
+ # the current mask corresponds to the set of features:
+ # - that we have already *selected* if we do forward selection
+ # - that we have already *excluded* if we do backward selection
+ current_mask = np.zeros(shape=n_features, dtype=bool)
+ n_iterations = (
+ self.n_features_to_select_
+ if self.n_features_to_select == "auto" or self.direction == "forward"
+ else n_features - self.n_features_to_select_
+ )
+
+ old_score = -np.inf
+ is_auto_select = self.tol is not None and self.n_features_to_select == "auto"
+
+ # We only need to verify the routing here and not use the routed params
+ # because internally the actual routing will also take place inside the
+ # `cross_val_score` function.
+ if _routing_enabled():
+ process_routing(self, "fit", **params)
+ for _ in range(n_iterations):
+ new_feature_idx, new_score = self._get_best_new_feature_score(
+ cloned_estimator, X, y, cv, current_mask, **params
+ )
+ if is_auto_select and ((new_score - old_score) < self.tol):
+ break
+
+ old_score = new_score
+ current_mask[new_feature_idx] = True
+
+ if self.direction == "backward":
+ current_mask = ~current_mask
+
+ self.support_ = current_mask
+ self.n_features_to_select_ = self.support_.sum()
+
+ return self
+
+ def _get_best_new_feature_score(self, estimator, X, y, cv, current_mask, **params):
+ # Return the best new feature and its score to add to the current_mask,
+ # i.e. return the best new feature and its score to add (resp. remove)
+ # when doing forward selection (resp. backward selection).
+ # Feature will be added if the current score and past score are greater
+ # than tol when n_feature is auto,
+ candidate_feature_indices = np.flatnonzero(~current_mask)
+ scores = {}
+ for feature_idx in candidate_feature_indices:
+ candidate_mask = current_mask.copy()
+ candidate_mask[feature_idx] = True
+ if self.direction == "backward":
+ candidate_mask = ~candidate_mask
+ X_new = X[:, candidate_mask]
+ scores[feature_idx] = cross_val_score(
+ estimator,
+ X_new,
+ y,
+ cv=cv,
+ scoring=self.scoring,
+ n_jobs=self.n_jobs,
+ params=params,
+ ).mean()
+ new_feature_idx = max(scores, key=lambda feature_idx: scores[feature_idx])
+ return new_feature_idx, scores[new_feature_idx]
+
+ def _get_support_mask(self):
+ check_is_fitted(self)
+ return self.support_
+
+ def __sklearn_tags__(self):
+ tags = super().__sklearn_tags__()
+ tags.input_tags.allow_nan = get_tags(self.estimator).input_tags.allow_nan
+ tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse
+ return tags
+
+ def get_metadata_routing(self):
+ """Get metadata routing of this object.
+
+ Please check :ref:`User Guide <metadata_routing>` on how the routing
+ mechanism works.
+
+ .. versionadded:: 1.6
+
+ Returns
+ -------
+ routing : MetadataRouter
+ A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
+ routing information.
+ """
+ router = MetadataRouter(owner=self.__class__.__name__)
+ router.add(
+ estimator=self.estimator,
+ method_mapping=MethodMapping().add(caller="fit", callee="fit"),
+ )
+ router.add(
+ splitter=check_cv(self.cv, classifier=is_classifier(self.estimator)),
+ method_mapping=MethodMapping().add(caller="fit", callee="split"),
+ )
+ router.add(
+ scorer=check_scoring(self.estimator, scoring=self.scoring),
+ method_mapping=MethodMapping().add(caller="fit", callee="score"),
+ )
+ return router
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py
new file mode 100644
index 0000000000000000000000000000000000000000..855ba5ad70f12d26a56828a1ff31fd6edf1fcc59
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/_univariate_selection.py
@@ -0,0 +1,1172 @@
+"""Univariate features selection."""
+
+# Authors: The scikit-learn developers
+# SPDX-License-Identifier: BSD-3-Clause
+
+
+import warnings
+from numbers import Integral, Real
+
+import numpy as np
+from scipy import special, stats
+from scipy.sparse import issparse
+
+from ..base import BaseEstimator, _fit_context
+from ..preprocessing import LabelBinarizer
+from ..utils import as_float_array, check_array, check_X_y, safe_mask, safe_sqr
+from ..utils._param_validation import Interval, StrOptions, validate_params
+from ..utils.extmath import row_norms, safe_sparse_dot
+from ..utils.validation import check_is_fitted, validate_data
+from ._base import SelectorMixin
+
+
+def _clean_nans(scores):
+ """
+ Fixes Issue #1240: NaNs can't be properly compared, so change them to the
+ smallest value of scores's dtype. -inf seems to be unreliable.
+ """
+ # XXX where should this function be called? fit? scoring functions
+ # themselves?
+ scores = as_float_array(scores, copy=True)
+ scores[np.isnan(scores)] = np.finfo(scores.dtype).min
+ return scores
+
+
+######################################################################
+# Scoring functions
+
+
+# The following function is a rewriting of scipy.stats.f_oneway
+# Contrary to the scipy.stats.f_oneway implementation it does not
+# copy the data while keeping the inputs unchanged.
+def f_oneway(*args):
+ """Perform a 1-way ANOVA.
+
+ The one-way ANOVA tests the null hypothesis that 2 or more groups have
+ the same population mean. The test is applied to samples from two or
+ more groups, possibly with differing sizes.
+
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
+
+ Parameters
+ ----------
+ *args : {array-like, sparse matrix}
+ Sample1, sample2... The sample measurements should be given as
+ arguments.
+
+ Returns
+ -------
+ f_statistic : float
+ The computed F-value of the test.
+ p_value : float
+ The associated p-value from the F-distribution.
+
+ Notes
+ -----
+ The ANOVA test has important assumptions that must be satisfied in order
+ for the associated p-value to be valid.
+
+ 1. The samples are independent
+ 2. Each sample is from a normally distributed population
+ 3. The population standard deviations of the groups are all equal. This
+ property is known as homoscedasticity.
+
+ If these assumptions are not true for a given set of data, it may still be
+ possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
+ with some loss of power.
+
+ The algorithm is from Heiman[2], pp.394-7.
+
+ See ``scipy.stats.f_oneway`` that should give the same results while
+ being less efficient.
+
+ References
+ ----------
+ .. [1] Lowry, Richard. "Concepts and Applications of Inferential
+ Statistics". Chapter 14.
+ http://vassarstats.net/textbook
+
+ .. [2] Heiman, G.W. Research Methods in Statistics. 2002.
+ """
+ n_classes = len(args)
+ args = [as_float_array(a) for a in args]
+ n_samples_per_class = np.array([a.shape[0] for a in args])
+ n_samples = np.sum(n_samples_per_class)
+ ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
+ sums_args = [np.asarray(a.sum(axis=0)) for a in args]
+ square_of_sums_alldata = sum(sums_args) ** 2
+ square_of_sums_args = [s**2 for s in sums_args]
+ sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
+ ssbn = 0.0
+ for k, _ in enumerate(args):
+ ssbn += square_of_sums_args[k] / n_samples_per_class[k]
+ ssbn -= square_of_sums_alldata / float(n_samples)
+ sswn = sstot - ssbn
+ dfbn = n_classes - 1
+ dfwn = n_samples - n_classes
+ msb = ssbn / float(dfbn)
+ msw = sswn / float(dfwn)
+ constant_features_idx = np.where(msw == 0.0)[0]
+ if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size:
+ warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)
+ f = msb / msw
+ # flatten matrix to vector in sparse case
+ f = np.asarray(f).ravel()
+ prob = special.fdtrc(dfbn, dfwn, f)
+ return f, prob
+
+
+@validate_params(
+ {
+ "X": ["array-like", "sparse matrix"],
+ "y": ["array-like"],
+ },
+ prefer_skip_nested_validation=True,
+)
+def f_classif(X, y):
+ """Compute the ANOVA F-value for the provided sample.
+
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
+
+ Parameters
+ ----------
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
+ The set of regressors that will be tested sequentially.
+
+ y : array-like of shape (n_samples,)
+ The target vector.
+
+ Returns
+ -------
+ f_statistic : ndarray of shape (n_features,)
+ F-statistic for each feature.
+
+ p_values : ndarray of shape (n_features,)
+ P-values associated with the F-statistic.
+
+ See Also
+ --------
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
+ f_regression : F-value between label/feature for regression tasks.
+
+ Examples
+ --------
+ >>> from sklearn.datasets import make_classification
+ >>> from sklearn.feature_selection import f_classif
+ >>> X, y = make_classification(
+ ... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
+ ... shuffle=False, random_state=42
+ ... )
+ >>> f_statistic, p_values = f_classif(X, y)
+ >>> f_statistic
+ array([2.2...e+02, 7.0...e-01, 1.6...e+00, 9.3...e-01,
+ 5.4...e+00, 3.2...e-01, 4.7...e-02, 5.7...e-01,
+ 7.5...e-01, 8.9...e-02])
+ >>> p_values
+ array([7.1...e-27, 4.0...e-01, 1.9...e-01, 3.3...e-01,
+ 2.2...e-02, 5.7...e-01, 8.2...e-01, 4.5...e-01,
+ 3.8...e-01, 7.6...e-01])
+ """
+ X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"])
+ args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
+ return f_oneway(*args)
+
+
+def _chisquare(f_obs, f_exp):
+ """Fast replacement for scipy.stats.chisquare.
+
+ Version from https://github.com/scipy/scipy/pull/2525 with additional
+ optimizations.
+ """
+ f_obs = np.asarray(f_obs, dtype=np.float64)
+
+ k = len(f_obs)
+ # Reuse f_obs for chi-squared statistics
+ chisq = f_obs
+ chisq -= f_exp
+ chisq **= 2
+ with np.errstate(invalid="ignore"):
+ chisq /= f_exp
+ chisq = chisq.sum(axis=0)
+ return chisq, special.chdtrc(k - 1, chisq)
+
+
+@validate_params(
+ {
+ "X": ["array-like", "sparse matrix"],
+ "y": ["array-like"],
+ },
+ prefer_skip_nested_validation=True,
+)
+def chi2(X, y):
+ """Compute chi-squared stats between each non-negative feature and class.
+
+ This score can be used to select the `n_features` features with the
+ highest values for the test chi-squared statistic from X, which must
+ contain only **non-negative integer feature values** such as booleans or frequencies
+ (e.g., term counts in document classification), relative to the classes.
+
+ If some of your features are continuous, you need to bin them, for
+ example by using :class:`~sklearn.preprocessing.KBinsDiscretizer`.
+
+ Recall that the chi-square test measures dependence between stochastic
+ variables, so using this function "weeds out" the features that are the
+ most likely to be independent of class and therefore irrelevant for
+ classification.
+
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
+
+ Parameters
+ ----------
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
+ Sample vectors.
+
+ y : array-like of shape (n_samples,)
+ Target vector (class labels).
+
+ Returns
+ -------
+ chi2 : ndarray of shape (n_features,)
+ Chi2 statistics for each feature.
+
+ p_values : ndarray of shape (n_features,)
+ P-values for each feature.
+
+ See Also
+ --------
+ f_classif : ANOVA F-value between label/feature for classification tasks.
+ f_regression : F-value between label/feature for regression tasks.
+
+ Notes
+ -----
+ Complexity of this algorithm is O(n_classes * n_features).
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from sklearn.feature_selection import chi2
+ >>> X = np.array([[1, 1, 3],
+ ... [0, 1, 5],
+ ... [5, 4, 1],
+ ... [6, 6, 2],
+ ... [1, 4, 0],
+ ... [0, 0, 0]])
+ >>> y = np.array([1, 1, 0, 0, 2, 2])
+ >>> chi2_stats, p_values = chi2(X, y)
+ >>> chi2_stats
+ array([15.3..., 6.5 , 8.9...])
+ >>> p_values
+ array([0.0004..., 0.0387..., 0.0116... ])
+ """
+
+ # XXX: we might want to do some of the following in logspace instead for
+ # numerical stability.
+ # Converting X to float allows getting better performance for the
+ # safe_sparse_dot call made below.
+ X = check_array(X, accept_sparse="csr", dtype=(np.float64, np.float32))
+ if np.any((X.data if issparse(X) else X) < 0):
+ raise ValueError("Input X must be non-negative.")
+
+ # Use a sparse representation for Y by default to reduce memory usage when
+ # y has many unique classes.
+ Y = LabelBinarizer(sparse_output=True).fit_transform(y)
+ if Y.shape[1] == 1:
+ Y = Y.toarray()
+ Y = np.append(1 - Y, Y, axis=1)
+
+ observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
+
+ if issparse(observed):
+ # convert back to a dense array before calling _chisquare
+ # XXX: could _chisquare be reimplement to accept sparse matrices for
+ # cases where both n_classes and n_features are large (and X is
+ # sparse)?
+ observed = observed.toarray()
+
+ feature_count = X.sum(axis=0).reshape(1, -1)
+ class_prob = Y.mean(axis=0).reshape(1, -1)
+ expected = np.dot(class_prob.T, feature_count)
+
+ return _chisquare(observed, expected)
+
+
+@validate_params(
+ {
+ "X": ["array-like", "sparse matrix"],
+ "y": ["array-like"],
+ "center": ["boolean"],
+ "force_finite": ["boolean"],
+ },
+ prefer_skip_nested_validation=True,
+)
+def r_regression(X, y, *, center=True, force_finite=True):
+ """Compute Pearson's r for each features and the target.
+
+ Pearson's r is also known as the Pearson correlation coefficient.
+
+ Linear model for testing the individual effect of each of many regressors.
+ This is a scoring function to be used in a feature selection procedure, not
+ a free standing feature selection procedure.
+
+ The cross correlation between each regressor and the target is computed
+ as::
+
+ E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
+
+ For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
+
+ .. versionadded:: 1.0
+
+ Parameters
+ ----------
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
+ The data matrix.
+
+ y : array-like of shape (n_samples,)
+ The target vector.
+
+ center : bool, default=True
+ Whether or not to center the data matrix `X` and the target vector `y`.
+ By default, `X` and `y` will be centered.
+
+ force_finite : bool, default=True
+ Whether or not to force the Pearson's R correlation to be finite.
+ In the particular case where some features in `X` or the target `y`
+ are constant, the Pearson's R correlation is not defined. When
+ `force_finite=False`, a correlation of `np.nan` is returned to
+ acknowledge this case. When `force_finite=True`, this value will be
+ forced to a minimal correlation of `0.0`.
+
+ .. versionadded:: 1.1
+
+ Returns
+ -------
+ correlation_coefficient : ndarray of shape (n_features,)
+ Pearson's R correlation coefficients of features.
+
+ See Also
+ --------
+ f_regression: Univariate linear regression tests returning f-statistic
+ and p-values.
+ mutual_info_regression: Mutual information for a continuous target.
+ f_classif: ANOVA F-value between label/feature for classification tasks.
+ chi2: Chi-squared stats of non-negative features for classification tasks.
+
+ Examples
+ --------
+ >>> from sklearn.datasets import make_regression
+ >>> from sklearn.feature_selection import r_regression
+ >>> X, y = make_regression(
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
+ ... )
+ >>> r_regression(X, y)
+ array([-0.15..., 1. , -0.22...])
+ """
+ X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"], dtype=np.float64)
+ n_samples = X.shape[0]
+
+ # Compute centered values
+ # Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
+ # need not center X
+ if center:
+ y = y - np.mean(y)
+ # TODO: for Scipy <= 1.10, `isspmatrix(X)` returns `True` for sparse arrays.
+ # Here, we check the output of the `.mean` operation that returns a `np.matrix`
+ # for sparse matrices while a `np.array` for dense and sparse arrays.
+ # We can reconsider using `isspmatrix` when the minimum version is
+ # SciPy >= 1.11
+ X_means = X.mean(axis=0)
+ X_means = X_means.getA1() if isinstance(X_means, np.matrix) else X_means
+ # Compute the scaled standard deviations via moments
+ X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means**2)
+ else:
+ X_norms = row_norms(X.T)
+
+ correlation_coefficient = safe_sparse_dot(y, X)
+ with np.errstate(divide="ignore", invalid="ignore"):
+ correlation_coefficient /= X_norms
+ correlation_coefficient /= np.linalg.norm(y)
+
+ if force_finite and not np.isfinite(correlation_coefficient).all():
+ # case where the target or some features are constant
+ # the correlation coefficient(s) is/are set to the minimum (i.e. 0.0)
+ nan_mask = np.isnan(correlation_coefficient)
+ correlation_coefficient[nan_mask] = 0.0
+ return correlation_coefficient
+
+
+@validate_params(
+ {
+ "X": ["array-like", "sparse matrix"],
+ "y": ["array-like"],
+ "center": ["boolean"],
+ "force_finite": ["boolean"],
+ },
+ prefer_skip_nested_validation=True,
+)
+def f_regression(X, y, *, center=True, force_finite=True):
+ """Univariate linear regression tests returning F-statistic and p-values.
+
+ Quick linear model for testing the effect of a single regressor,
+ sequentially for many regressors.
+
+ This is done in 2 steps:
+
+ 1. The cross correlation between each regressor and the target is computed
+ using :func:`r_regression` as::
+
+ E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
+
+ 2. It is converted to an F score and then to a p-value.
+
+ :func:`f_regression` is derived from :func:`r_regression` and will rank
+ features in the same order if all the features are positively correlated
+ with the target.
+
+ Note however that contrary to :func:`f_regression`, :func:`r_regression`
+ values lie in [-1, 1] and can thus be negative. :func:`f_regression` is
+ therefore recommended as a feature selection criterion to identify
+ potentially predictive feature for a downstream classifier, irrespective of
+ the sign of the association with the target variable.
+
+ Furthermore :func:`f_regression` returns p-values while
+ :func:`r_regression` does not.
+
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
+
+ Parameters
+ ----------
+ X : {array-like, sparse matrix} of shape (n_samples, n_features)
+ The data matrix.
+
+ y : array-like of shape (n_samples,)
+ The target vector.
+
+ center : bool, default=True
+ Whether or not to center the data matrix `X` and the target vector `y`.
+ By default, `X` and `y` will be centered.
+
+ force_finite : bool, default=True
+ Whether or not to force the F-statistics and associated p-values to
+ be finite. There are two cases where the F-statistic is expected to not
+ be finite:
+
+ - when the target `y` or some features in `X` are constant. In this
+ case, the Pearson's R correlation is not defined leading to obtain
+ `np.nan` values in the F-statistic and p-value. When
+ `force_finite=True`, the F-statistic is set to `0.0` and the
+ associated p-value is set to `1.0`.
+ - when a feature in `X` is perfectly correlated (or
+ anti-correlated) with the target `y`. In this case, the F-statistic
+ is expected to be `np.inf`. When `force_finite=True`, the F-statistic
+ is set to `np.finfo(dtype).max` and the associated p-value is set to
+ `0.0`.
+
+ .. versionadded:: 1.1
+
+ Returns
+ -------
+ f_statistic : ndarray of shape (n_features,)
+ F-statistic for each feature.
+
+ p_values : ndarray of shape (n_features,)
+ P-values associated with the F-statistic.
+
+ See Also
+ --------
+ r_regression: Pearson's R between label/feature for regression tasks.
+ f_classif: ANOVA F-value between label/feature for classification tasks.
+ chi2: Chi-squared stats of non-negative features for classification tasks.
+ SelectKBest: Select features based on the k highest scores.
+ SelectFpr: Select features based on a false positive rate test.
+ SelectFdr: Select features based on an estimated false discovery rate.
+ SelectFwe: Select features based on family-wise error rate.
+ SelectPercentile: Select features based on percentile of the highest
+ scores.
+
+ Examples
+ --------
+ >>> from sklearn.datasets import make_regression
+ >>> from sklearn.feature_selection import f_regression
+ >>> X, y = make_regression(
+ ... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
+ ... )
+ >>> f_statistic, p_values = f_regression(X, y)
+ >>> f_statistic
+ array([1.2...+00, 2.6...+13, 2.6...+00])
+ >>> p_values
+ array([2.7..., 1.5..., 1.0...])
+ """
+ correlation_coefficient = r_regression(
+ X, y, center=center, force_finite=force_finite
+ )
+ deg_of_freedom = y.size - (2 if center else 1)
+
+ corr_coef_squared = correlation_coefficient**2
+
+ with np.errstate(divide="ignore", invalid="ignore"):
+ f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom
+ p_values = stats.f.sf(f_statistic, 1, deg_of_freedom)
+
+ if force_finite and not np.isfinite(f_statistic).all():
+ # case where there is a perfect (anti-)correlation
+ # f-statistics can be set to the maximum and p-values to zero
+ mask_inf = np.isinf(f_statistic)
+ f_statistic[mask_inf] = np.finfo(f_statistic.dtype).max
+ # case where the target or some features are constant
+ # f-statistics would be minimum and thus p-values large
+ mask_nan = np.isnan(f_statistic)
+ f_statistic[mask_nan] = 0.0
+ p_values[mask_nan] = 1.0
+ return f_statistic, p_values
+
+
+######################################################################
+# Base classes
+
+
+class _BaseFilter(SelectorMixin, BaseEstimator):
+ """Initialize the univariate feature selection.
+
+ Parameters
+ ----------
+ score_func : callable
+ Function taking two arrays X and y, and returning a pair of arrays
+ (scores, pvalues) or a single array with scores.
+ """
+
+ _parameter_constraints: dict = {"score_func": [callable]}
+
+ def __init__(self, score_func):
+ self.score_func = score_func
+
+ @_fit_context(prefer_skip_nested_validation=True)
+ def fit(self, X, y=None):
+ """Run score function on (X, y) and get the appropriate features.
+
+ Parameters
+ ----------
+ X : array-like of shape (n_samples, n_features)
+ The training input samples.
+
+ y : array-like of shape (n_samples,) or None
+ The target values (class labels in classification, real numbers in
+ regression). If the selector is unsupervised then `y` can be set to `None`.
+
+ Returns
+ -------
+ self : object
+ Returns the instance itself.
+ """
+ if y is None:
+ X = validate_data(self, X, accept_sparse=["csr", "csc"])
+ else:
+ X, y = validate_data(
+ self, X, y, accept_sparse=["csr", "csc"], multi_output=True
+ )
+
+ self._check_params(X, y)
+ score_func_ret = self.score_func(X, y)
+ if isinstance(score_func_ret, (list, tuple)):
+ self.scores_, self.pvalues_ = score_func_ret
+ self.pvalues_ = np.asarray(self.pvalues_)
+ else:
+ self.scores_ = score_func_ret
+ self.pvalues_ = None
+
+ self.scores_ = np.asarray(self.scores_)
+
+ return self
+
+ def _check_params(self, X, y):
+ pass
+
+ def __sklearn_tags__(self):
+ tags = super().__sklearn_tags__()
+ tags.target_tags.required = True
+ tags.input_tags.sparse = True
+ return tags
+
+
+######################################################################
+# Specific filters
+######################################################################
+class SelectPercentile(_BaseFilter):
+ """Select features according to a percentile of the highest scores.
+
+ Read more in the :ref:`User Guide <univariate_feature_selection>`.
+
+ Parameters
+ ----------
+ score_func : callable, default=f_classif
+ Function taking two arrays X and y, and returning a pair of arrays
+ (scores, pvalues) or a single array with scores.
+ Default is f_classif (see below "See Also"). The default function only
+ works with classification tasks.
+
+ .. versionadded:: 0.18
+
+ percentile : int, default=10
+ Percent of features to keep.
+
+ Attributes
+ ----------
+ scores_ : array-like of shape (n_features,)
+ Scores of features.
+
+ pvalues_ : array-like of shape (n_features,)
+ p-values of feature scores, None if `score_func` returned only scores.
+
+ n_features_in_ : int
+ Number of features seen during :term:`fit`.
+
+ .. versionadded:: 0.24
+
+ feature_names_in_ : ndarray of shape (`n_features_in_`,)
+ Names of features seen during :term:`fit`. Defined only when `X`
+ has feature names that are all strings.
+
+ .. versionadded:: 1.0
+
+ See Also
+ --------
+ f_classif : ANOVA F-value between label/feature for classification tasks.
+ mutual_info_classif : Mutual information for a discrete target.
+ chi2 : Chi-squared stats of non-negative features for classification tasks.
+ f_regression : F-value between label/feature for regression tasks.
+ mutual_info_regression : Mutual information for a continuous target.
+ SelectKBest : Select features based on the k highest scores.
+ SelectFpr : Select features based on a false positive rate test.
+ SelectFdr : Select features based on an estimated false discovery rate.
+ SelectFwe : Select features based on family-wise error rate.
+ GenericUnivariateSelect : Univariate feature selector with configurable
+ mode.
+
+ Notes
+ -----
+ Ties between features with equal scores will be broken in an unspecified
+ way.
+
+ This filter supports unsupervised feature selection that only requests `X` for
+ computing the scores.
+
+ Examples
+ --------
+ >>> from sklearn.datasets import load_digits
+ >>> from sklearn.feature_selection import SelectPercentile, chi2
+ >>> X, y = load_digits(return_X_y=True)
+ >>> X.shape
+ (1797, 64)
+ >>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y)
+ >>> X_new.shape
+ (1797, 7)
+ """
+
+ _parameter_constraints: dict = {
+ **_BaseFilter._parameter_constraints,
+ "percentile": [Interval(Real, 0, 100, closed="both")],
+ }
+
+ def __init__(self, score_func=f_classif, *, percentile=10):
+ super().__init__(score_func=score_func)
+ self.percentile = percentile
+
+ def _get_support_mask(self):
+ check_is_fitted(self)
+
+ # Cater for NaNs
+ if self.percentile == 100:
+ return np.ones(len(self.scores_), dtype=bool)
+ elif self.percentile == 0:
+ return np.zeros(len(self.scores_), dtype=bool)
+
+ scores = _clean_nans(self.scores_)
+ threshold = np.percentile(scores, 100 - self.percentile)
+ mask = scores > threshold
+ ties = np.where(scores == threshold)[0]
+ if len(ties):
+ max_feats = int(len(scores) * self.percentile / 100)
+ kept_ties = ties[: max_feats - mask.sum()]
+ mask[kept_ties] = True
+ return mask
+
+ def __sklearn_tags__(self):
+ tags = super().__sklearn_tags__()
+ tags.target_tags.required = False
+ return tags
+
+
class SelectKBest(_BaseFilter):
    """Select features according to the k highest scores.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y and returning either a pair of
        arrays (scores, pvalues) or a single array of scores. The default,
        f_classif, is only suitable for classification tasks.

    k : int or "all", default=10
        Number of top features to select. The "all" option bypasses
        selection, for use in a parameter search.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores; None if `score_func` returned only
        scores.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

    See Also
    --------
    f_classif : ANOVA F-value between label/feature for classification tasks.
    SelectPercentile : Select features based on percentile of the highest
        scores.
    SelectFpr : Select features based on a false positive rate test.
    SelectFdr : Select features based on an estimated false discovery rate.
    SelectFwe : Select features based on family-wise error rate.
    GenericUnivariateSelect : Univariate feature selector with configurable
        mode.

    Notes
    -----
    Ties between features with equal scores are broken in an unspecified
    way. Unsupervised feature selection (score functions that only need
    `X`) is supported by passing `y=None`.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.feature_selection import SelectKBest, chi2
    >>> X, y = load_digits(return_X_y=True)
    >>> X.shape
    (1797, 64)
    >>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y)
    >>> X_new.shape
    (1797, 20)
    """

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "k": [StrOptions({"all"}), Interval(Integral, 0, None, closed="left")],
    }

    def __init__(self, score_func=f_classif, *, k=10):
        super().__init__(score_func=score_func)
        self.k = k

    def _check_params(self, X, y):
        # Warn (rather than fail) when more features are requested than
        # exist; `_get_support_mask` then simply keeps everything.
        if not isinstance(self.k, str) and self.k > X.shape[1]:
            warnings.warn(
                f"k={self.k} is greater than n_features={X.shape[1]}. "
                "All the features will be returned."
            )

    def _get_support_mask(self):
        check_is_fitted(self)

        if self.k == "all":
            return np.ones(self.scores_.shape, dtype=bool)
        if self.k == 0:
            return np.zeros(self.scores_.shape, dtype=bool)

        cleaned = _clean_nans(self.scores_)
        support = np.zeros(cleaned.shape, dtype=bool)
        # Request a stable sort so ties resolve deterministically. Mergesort
        # takes more memory (~40MB per megafeature on x86-64).
        top_k = np.argsort(cleaned, kind="mergesort")[-self.k :]
        support[top_k] = True
        return support

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.target_tags.required = False
        return tags
+
+
class SelectFpr(_BaseFilter):
    """Filter: Select the pvalues below alpha based on a FPR test.

    FPR (False Positive Rate) test: controls the total amount of false
    detections by keeping only features whose p-value is below ``alpha``.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y and returning a pair of arrays
        (scores, pvalues). The default, f_classif, only works with
        classification tasks.

    alpha : float, default=5e-2
        Features with p-values less than `alpha` are selected.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

    See Also
    --------
    f_classif : ANOVA F-value between label/feature for classification tasks.
    SelectPercentile : Select features based on percentile of the highest
        scores.
    SelectKBest : Select features based on the k highest scores.
    SelectFdr : Select features based on an estimated false discovery rate.
    SelectFwe : Select features based on family-wise error rate.
    GenericUnivariateSelect : Univariate feature selector with configurable
        mode.

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.feature_selection import SelectFpr, chi2
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> X.shape
    (569, 30)
    >>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y)
    >>> X_new.shape
    (569, 16)
    """

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "alpha": [Interval(Real, 0, 1, closed="both")],
    }

    def __init__(self, score_func=f_classif, *, alpha=5e-2):
        super().__init__(score_func=score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self)

        # Keep every feature whose (uncorrected) p-value is below alpha.
        return self.pvalues_ < self.alpha
+
+
class SelectFdr(_BaseFilter):
    """Filter: Select the p-values for an estimated false discovery rate.

    Uses the Benjamini-Hochberg procedure; ``alpha`` is an upper bound on
    the expected false discovery rate.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y and returning a pair of arrays
        (scores, pvalues). The default, f_classif, only works with
        classification tasks.

    alpha : float, default=5e-2
        The highest uncorrected p-value for features to keep.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

    See Also
    --------
    f_classif : ANOVA F-value between label/feature for classification tasks.
    SelectPercentile : Select features based on percentile of the highest
        scores.
    SelectKBest : Select features based on the k highest scores.
    SelectFpr : Select features based on a false positive rate test.
    SelectFwe : Select features based on family-wise error rate.
    GenericUnivariateSelect : Univariate feature selector with configurable
        mode.

    References
    ----------
    https://en.wikipedia.org/wiki/False_discovery_rate

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.feature_selection import SelectFdr, chi2
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> X.shape
    (569, 30)
    >>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y)
    >>> X_new.shape
    (569, 16)
    """

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "alpha": [Interval(Real, 0, 1, closed="both")],
    }

    def __init__(self, score_func=f_classif, *, alpha=5e-2):
        super().__init__(score_func=score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self)

        n_features = len(self.pvalues_)
        sorted_p = np.sort(self.pvalues_)
        # Benjamini-Hochberg: the i-th smallest p-value passes when it is
        # at most alpha * i / n_features; keep everything up to the largest
        # passing p-value.
        passing = sorted_p[
            sorted_p <= float(self.alpha) / n_features * np.arange(1, n_features + 1)
        ]
        if passing.size == 0:
            return np.zeros_like(self.pvalues_, dtype=bool)
        return self.pvalues_ <= passing.max()
+
+
class SelectFwe(_BaseFilter):
    """Filter: Select the p-values corresponding to Family-wise error rate.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y and returning a pair of arrays
        (scores, pvalues). The default, f_classif, only works with
        classification tasks.

    alpha : float, default=5e-2
        The highest uncorrected p-value for features to keep.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

    See Also
    --------
    f_classif : ANOVA F-value between label/feature for classification tasks.
    SelectPercentile : Select features based on percentile of the highest
        scores.
    SelectKBest : Select features based on the k highest scores.
    SelectFpr : Select features based on a false positive rate test.
    SelectFdr : Select features based on an estimated false discovery rate.
    GenericUnivariateSelect : Univariate feature selector with configurable
        mode.

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.feature_selection import SelectFwe, chi2
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> X.shape
    (569, 30)
    >>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y)
    >>> X_new.shape
    (569, 15)
    """

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "alpha": [Interval(Real, 0, 1, closed="both")],
    }

    def __init__(self, score_func=f_classif, *, alpha=5e-2):
        super().__init__(score_func=score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self)

        # Bonferroni correction: divide alpha by the number of tests.
        return self.pvalues_ < self.alpha / len(self.pvalues_)
+
+
+######################################################################
+# Generic filter
+######################################################################
+
+
+# TODO this class should fit on either p-values or scores,
+# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
    """Univariate feature selector with configurable strategy.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable, default=f_classif
        Function taking two arrays X and y and returning a pair of arrays
        (scores, pvalues). For modes 'percentile' or 'kbest' it can return
        a single array scores.

    mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'
        Feature selection mode. The `'percentile'` and `'kbest'` modes
        support unsupervised feature selection (when `y` is `None`).

    param : "all", float or int, default=1e-5
        Parameter of the corresponding mode.

    Attributes
    ----------
    scores_ : array-like of shape (n_features,)
        Scores of features.

    pvalues_ : array-like of shape (n_features,)
        p-values of feature scores; None if `score_func` returned scores
        only.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

    See Also
    --------
    f_classif : ANOVA F-value between label/feature for classification tasks.
    SelectPercentile : Select features based on percentile of the highest
        scores.
    SelectKBest : Select features based on the k highest scores.
    SelectFpr : Select features based on a false positive rate test.
    SelectFdr : Select features based on an estimated false discovery rate.
    SelectFwe : Select features based on family-wise error rate.

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.feature_selection import GenericUnivariateSelect, chi2
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> X.shape
    (569, 30)
    >>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=20)
    >>> X_new = transformer.fit_transform(X, y)
    >>> X_new.shape
    (569, 20)
    """

    # Maps each mode name to the concrete selector class that implements it.
    _selection_modes: dict = {
        "percentile": SelectPercentile,
        "k_best": SelectKBest,
        "fpr": SelectFpr,
        "fdr": SelectFdr,
        "fwe": SelectFwe,
    }

    _parameter_constraints: dict = {
        **_BaseFilter._parameter_constraints,
        "mode": [StrOptions(set(_selection_modes.keys()))],
        "param": [Interval(Real, 0, None, closed="left"), StrOptions({"all"})],
    }

    def __init__(self, score_func=f_classif, *, mode="percentile", param=1e-5):
        super().__init__(score_func=score_func)
        self.mode = mode
        self.param = param

    def _make_selector(self):
        """Instantiate the concrete selector for the configured mode."""
        selector = self._selection_modes[self.mode](score_func=self.score_func)

        # Each concrete selector exposes exactly one parameter besides
        # `score_func` (percentile, k, alpha, ...); set it by name.
        extra_params = selector._get_param_names()
        extra_params.remove("score_func")
        selector.set_params(**{extra_params[0]: self.param})

        return selector

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.transformer_tags.preserves_dtype = ["float64", "float32"]
        return tags

    def _check_params(self, X, y):
        # Delegate parameter validation to the concrete selector.
        self._make_selector()._check_params(X, y)

    def _get_support_mask(self):
        check_is_fitted(self)

        # Transfer the fitted statistics onto a fresh concrete selector and
        # let it decide which features survive.
        selector = self._make_selector()
        selector.pvalues_ = self.pvalues_
        selector.scores_ = self.scores_
        return selector._get_support_mask()
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..316d5c86b1ba7d3f2fddb20474daf5b943b775d1
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/__pycache__/test_from_model.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py
new file mode 100644
index 0000000000000000000000000000000000000000..c50def36f1b6c281e6c96019355b901bf4326a38
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_chi2.py
@@ -0,0 +1,93 @@
+"""
+Tests for chi2, currently the only feature selection function designed
+specifically to work with sparse matrices.
+"""
+
+import warnings
+
+import numpy as np
+import pytest
+import scipy.stats
+
+from sklearn.feature_selection import SelectKBest, chi2
+from sklearn.feature_selection._univariate_selection import _chisquare
+from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal
+from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS
+
# Toy dataset shared by the tests below:
# feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2], [9, 1, 1], [6, 1, 2], [0, 1, 2]]
y = [0, 1, 2, 2]
+
+
def mkchi2(k):
    """Build a ``SelectKBest`` selector that scores features with chi2."""
    return SelectKBest(chi2, k=k)
+
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_chi2(csr_container):
    # Test chi2 feature extraction on dense and sparse input.

    # Local name `selector` avoids shadowing the imported `chi2` score
    # function; the original also fitted the k=1 selector twice in a row.
    selector = mkchi2(k=1).fit(X, y)
    assert_array_equal(selector.get_support(indices=True), [0])
    assert_array_equal(selector.transform(X), np.array(X)[:, [0]])

    selector = mkchi2(k=2).fit(X, y)
    assert_array_equal(sorted(selector.get_support(indices=True)), [0, 2])

    Xsp = csr_container(X, dtype=np.float64)
    selector = mkchi2(k=2).fit(Xsp, y)
    assert_array_equal(sorted(selector.get_support(indices=True)), [0, 2])
    Xtrans = selector.transform(Xsp)
    assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2])

    # == doesn't work on scipy.sparse matrices, so compare dense copies.
    Xtrans = Xtrans.toarray()
    Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
    assert_array_almost_equal(Xtrans, Xtrans2)
+
+
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_chi2_coo(coo_container):
    # chi2 must accept a COO matrix (as returned by CountVectorizer and
    # DictVectorizer); finishing without an exception is the whole test.
    mkchi2(k=2).fit_transform(coo_container(X), y)
+
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_chi2_negative(csr_container):
    # chi2 must raise a ValueError on negative values in X, for list,
    # ndarray and sparse input alike.
    X_neg, y_neg = [[0, 1], [-1e-20, 1]], [0, 1]
    for X_variant in (X_neg, np.array(X_neg), csr_container(X_neg)):
        with pytest.raises(ValueError):
            chi2(X_variant, y_neg)
+
+
def test_chi2_unused_feature():
    # A feature that never occurs should score NaN, and computing it must
    # not emit a "divide by zero" runtime warning.
    with warnings.catch_warnings(record=True) as warned:
        warnings.simplefilter("always")
        chi, p = chi2([[1, 0], [0, 0]], [1, 0])
        for w in warned:
            if "divide by zero" in repr(w):
                raise AssertionError("Found unexpected warning %s" % w)
    assert_array_equal(chi, [1, np.nan])
    assert_array_equal(p[1], np.nan)
+
+
def test_chisquare():
    # The in-house _chisquare must match scipy.stats.chisquare.
    observed = np.array([[2.0, 2.0], [1.0, 1.0]])
    expected = np.array([[1.5, 1.5], [1.5, 1.5]])
    # Call SciPy first: our implementation overwrites its `obs` argument.
    chi_scp, p_scp = scipy.stats.chisquare(observed, expected)
    chi_our, p_our = _chisquare(observed, expected)

    assert_array_almost_equal(chi_scp, chi_our)
    assert_array_almost_equal(p_scp, p_our)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7bffec5159bfc7ba8faf452a218d5147906419c
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_feature_select.py
@@ -0,0 +1,1018 @@
+"""
+Todo: cross-check the F-value with stats model
+"""
+
+import itertools
+import warnings
+
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+from scipy import sparse, stats
+
+from sklearn.datasets import load_iris, make_classification, make_regression
+from sklearn.feature_selection import (
+ GenericUnivariateSelect,
+ SelectFdr,
+ SelectFpr,
+ SelectFwe,
+ SelectKBest,
+ SelectPercentile,
+ chi2,
+ f_classif,
+ f_oneway,
+ f_regression,
+ mutual_info_classif,
+ mutual_info_regression,
+ r_regression,
+)
+from sklearn.utils import safe_mask
+from sklearn.utils._testing import (
+ _convert_container,
+ assert_almost_equal,
+ assert_array_almost_equal,
+ assert_array_equal,
+ ignore_warnings,
+)
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+##############################################################################
+# Test the score functions
+
+
def test_f_oneway_vs_scipy_stats():
    # Our f_oneway must agree with scipy.stats.f_oneway.
    rng = np.random.RandomState(0)
    sample_a = rng.randn(10, 3)
    sample_b = 1 + rng.randn(10, 3)
    f_ref, pv_ref = stats.f_oneway(sample_a, sample_b)
    f_ours, pv_ours = f_oneway(sample_a, sample_b)
    assert np.allclose(f_ref, f_ours)
    assert np.allclose(pv_ref, pv_ours)
+
+
def test_f_oneway_ints():
    # Smoke test: f_oneway on integers must not raise casting errors with
    # recent numpys.
    rng = np.random.RandomState(0)
    data = rng.randint(10, size=(10, 10))
    labels = np.arange(10)
    f_int, p_int = f_oneway(data, labels)

    # And it must give (almost) the same result as with floats.
    f_float, p_float = f_oneway(data.astype(float), labels)
    assert_array_almost_equal(f_float, f_int, decimal=4)
    assert_array_almost_equal(p_float, p_int, decimal=4)
+
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_f_classif(csr_container):
    # The F test should yield meaningful results on a simple simulated
    # classification problem, identically for dense and sparse input.
    X, y = make_classification(
        n_samples=200,
        n_features=20,
        n_informative=3,
        n_redundant=2,
        n_repeated=0,
        n_classes=8,
        n_clusters_per_class=1,
        flip_y=0.0,
        class_sep=10,
        shuffle=False,
        random_state=0,
    )

    f_stat, p_vals = f_classif(X, y)
    f_stat_sp, p_vals_sp = f_classif(csr_container(X), y)
    assert (f_stat > 0).all()
    assert (p_vals > 0).all()
    assert (p_vals < 1).all()
    # The first five features are informative, the rest are not.
    assert (p_vals[:5] < 0.05).all()
    assert (p_vals[5:] > 1.0e-4).all()
    assert_array_almost_equal(f_stat_sp, f_stat)
    assert_array_almost_equal(p_vals_sp, p_vals)
+
+
@pytest.mark.parametrize("center", [True, False])
def test_r_regression(center):
    X, y = make_regression(
        n_samples=2000, n_features=20, n_informative=5, shuffle=False, random_state=0
    )

    # Correlation coefficients must be valid and identical for sparse input.
    dense_coeffs = r_regression(X, y, center=center)
    assert (-1 < dense_coeffs).all()
    assert (dense_coeffs < 1).all()

    sparse_coeffs = r_regression(_convert_container(X, "sparse"), y, center=center)
    assert_allclose(sparse_coeffs, dense_coeffs)

    # Cross-check against numpy's correlation matrix: the last column of
    # corrcoef([X | y]) holds corr(feature_i, y).
    stacked = np.hstack((X, y[:, np.newaxis]))
    np_coeffs = np.corrcoef(stacked, rowvar=False)[:-1, -1]
    assert_array_almost_equal(np_coeffs, dense_coeffs, decimal=3)
+
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_f_regression(csr_container):
    # The F test should yield meaningful results on a simple simulated
    # regression problem.
    X, y = make_regression(
        n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
    )

    f_stat, p_vals = f_regression(X, y)
    assert (f_stat > 0).all()
    assert (p_vals > 0).all()
    assert (p_vals < 1).all()
    # The first five features are informative, the rest are not.
    assert (p_vals[:5] < 0.05).all()
    assert (p_vals[5:] > 1.0e-4).all()

    # Dense and sparse input must agree, both with and without centering.
    for center in (True, False):
        f_stat, p_vals = f_regression(X, y, center=center)
        f_stat_sp, p_vals_sp = f_regression(csr_container(X), y, center=center)
        assert_allclose(f_stat_sp, f_stat)
        assert_allclose(p_vals_sp, p_vals)
+
+
def test_f_regression_input_dtype():
    # f_regression must return the same values for any numeric dtype of y.
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    y = np.arange(10).astype(int)

    F1, pv1 = f_regression(X, y)
    F2, pv2 = f_regression(X, y.astype(float))
    # The original used `assert_allclose(F1, F2, 5)`, passing 5 positionally
    # as `rtol` and making the check vacuous; compare to 5 decimals instead.
    assert_array_almost_equal(F1, F2, decimal=5)
    assert_array_almost_equal(pv1, pv2, decimal=5)
+
+
def test_f_regression_center():
    # 'center' changes the degrees of freedom: when both variates are
    # centered, the F-scores with and without centering differ by exactly
    # the factor (n - 1) / (n - 2).
    X = np.arange(-5, 6).reshape(-1, 1)  # X has zero mean
    n_samples = X.size
    Y = np.ones(n_samples)
    Y[::2] *= -1.0
    Y[0] = 0.0  # make the mean of Y null

    F_centered, _ = f_regression(X, Y, center=True)
    F_raw, _ = f_regression(X, Y, center=False)
    assert_allclose(F_centered * (n_samples - 1.0) / (n_samples - 2.0), F_raw)
    assert_almost_equal(F_raw[0], 0.232558139)  # value from statsmodels OLS
+
+
@pytest.mark.parametrize(
    "X, y, expected_corr_coef, force_finite",
    [
        (
            # A feature in X is constant - forcing finite
            np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
            np.array([0, 1, 1, 0]),
            np.array([0.0, 0.32075]),
            True,
        ),
        (
            # The target y is constant - forcing finite
            np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
            np.array([0, 0, 0, 0]),
            np.array([0.0, 0.0]),
            True,
        ),
        (
            # A feature in X is constant - not forcing finite
            np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
            np.array([0, 1, 1, 0]),
            np.array([np.nan, 0.32075]),
            False,
        ),
        (
            # The target y is constant - not forcing finite
            np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
            np.array([0, 0, 0, 0]),
            np.array([np.nan, np.nan]),
            False,
        ),
    ],
)
def test_r_regression_force_finite(X, y, expected_corr_coef, force_finite):
    """Check the behaviour of `force_finite` for some corner cases with `r_regression`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/15672
    """
    # Escalate RuntimeWarning to an error: the degenerate inputs above would
    # normally trigger divide-by-zero warnings, and none may leak out.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        corr_coef = r_regression(X, y, force_finite=force_finite)
    np.testing.assert_array_almost_equal(corr_coef, expected_corr_coef)
+
+
@pytest.mark.parametrize(
    "X, y, expected_f_statistic, expected_p_values, force_finite",
    [
        (
            # A feature in X is constant - forcing finite
            np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
            np.array([0, 1, 1, 0]),
            np.array([0.0, 0.2293578]),
            np.array([1.0, 0.67924985]),
            True,
        ),
        (
            # The target y is constant - forcing finite
            np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
            np.array([0, 0, 0, 0]),
            np.array([0.0, 0.0]),
            np.array([1.0, 1.0]),
            True,
        ),
        (
            # Feature in X correlated with y - forcing finite
            np.array([[0, 1], [1, 0], [2, 10], [3, 4]]),
            np.array([0, 1, 2, 3]),
            np.array([np.finfo(np.float64).max, 0.845433]),
            np.array([0.0, 0.454913]),
            True,
        ),
        (
            # Feature in X anti-correlated with y - forcing finite
            np.array([[3, 1], [2, 0], [1, 10], [0, 4]]),
            np.array([0, 1, 2, 3]),
            np.array([np.finfo(np.float64).max, 0.845433]),
            np.array([0.0, 0.454913]),
            True,
        ),
        (
            # A feature in X is constant - not forcing finite
            np.array([[2, 1], [2, 0], [2, 10], [2, 4]]),
            np.array([0, 1, 1, 0]),
            np.array([np.nan, 0.2293578]),
            np.array([np.nan, 0.67924985]),
            False,
        ),
        (
            # The target y is constant - not forcing finite
            np.array([[5, 1], [3, 0], [2, 10], [8, 4]]),
            np.array([0, 0, 0, 0]),
            np.array([np.nan, np.nan]),
            np.array([np.nan, np.nan]),
            False,
        ),
        (
            # Feature in X correlated with y - not forcing finite
            np.array([[0, 1], [1, 0], [2, 10], [3, 4]]),
            np.array([0, 1, 2, 3]),
            np.array([np.inf, 0.845433]),
            np.array([0.0, 0.454913]),
            False,
        ),
        (
            # Feature in X anti-correlated with y - not forcing finite
            np.array([[3, 1], [2, 0], [1, 10], [0, 4]]),
            np.array([0, 1, 2, 3]),
            np.array([np.inf, 0.845433]),
            np.array([0.0, 0.454913]),
            False,
        ),
    ],
)
def test_f_regression_corner_case(
    X, y, expected_f_statistic, expected_p_values, force_finite
):
    """Check the behaviour of `force_finite` for some corner cases with `f_regression`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/15672
    """
    # Escalate RuntimeWarning to an error: the degenerate inputs above would
    # normally trigger divide-by-zero warnings, and none may leak out.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        f_statistic, p_values = f_regression(X, y, force_finite=force_finite)
    np.testing.assert_array_almost_equal(f_statistic, expected_f_statistic)
    np.testing.assert_array_almost_equal(p_values, expected_p_values)
+
+
def test_f_classif_multi_class():
    # The F test should yield meaningful results on a simple simulated
    # multi-class classification problem.
    X, y = make_classification(
        n_samples=200,
        n_features=20,
        n_informative=3,
        n_redundant=2,
        n_repeated=0,
        n_classes=8,
        n_clusters_per_class=1,
        flip_y=0.0,
        class_sep=10,
        shuffle=False,
        random_state=0,
    )

    f_stat, p_vals = f_classif(X, y)
    assert (f_stat > 0).all()
    assert (p_vals > 0).all()
    assert (p_vals < 1).all()
    # The first five features are informative, the rest are not.
    assert (p_vals[:5] < 0.05).all()
    assert (p_vals[5:] > 1.0e-4).all()
+
+
def test_select_percentile_classif():
    # SelectPercentile and GenericUnivariateSelect in "percentile" mode
    # must agree and recover the informative features of a simple
    # simulated classification problem.
    X, y = make_classification(
        n_samples=200,
        n_features=20,
        n_informative=3,
        n_redundant=2,
        n_repeated=0,
        n_classes=8,
        n_clusters_per_class=1,
        flip_y=0.0,
        class_sep=10,
        shuffle=False,
        random_state=0,
    )

    selector = SelectPercentile(f_classif, percentile=25)
    X_selected = selector.fit(X, y).transform(X)
    X_selected_generic = (
        GenericUnivariateSelect(f_classif, mode="percentile", param=25)
        .fit(X, y)
        .transform(X)
    )
    assert_array_equal(X_selected, X_selected_generic)

    # Exactly the first five (informative + redundant) features survive.
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
+
+
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_select_percentile_classif_sparse(csr_container):
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the percentile heuristic
    X, y = make_classification(
        n_samples=200,
        n_features=20,
        n_informative=3,
        n_redundant=2,
        n_repeated=0,
        n_classes=8,
        n_clusters_per_class=1,
        flip_y=0.0,
        class_sep=10,
        shuffle=False,
        random_state=0,
    )
    # Same scenario as the dense test above, but fed as a CSR sparse matrix.
    X = csr_container(X)
    univariate_filter = SelectPercentile(f_classif, percentile=25)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = (
        GenericUnivariateSelect(f_classif, mode="percentile", param=25)
        .fit(X, y)
        .transform(X)
    )
    assert_array_equal(X_r.toarray(), X_r2.toarray())
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)

    # inverse_transform must stay sparse and restore the original shape with
    # the selected columns intact.
    X_r2inv = univariate_filter.inverse_transform(X_r2)
    assert sparse.issparse(X_r2inv)
    support_mask = safe_mask(X_r2inv, support)
    assert X_r2inv.shape == X.shape
    assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
    # Check other columns are empty
    assert X_r2inv.nnz == X_r.nnz
+
+
+##############################################################################
+# Test univariate selection in classification settings
+
+
def test_select_kbest_classif():
    """SelectKBest and GenericUnivariateSelect (k_best mode) agree and keep
    exactly the 5 informative/redundant features."""
    X, y = make_classification(
        n_samples=200, n_features=20, n_informative=3, n_redundant=2,
        n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0,
        class_sep=10, shuffle=False, random_state=0,
    )

    selector = SelectKBest(f_classif, k=5)
    X_sel = selector.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(f_classif, mode="k_best", param=5)
    X_sel_generic = generic.fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel_generic)

    expected_mask = np.zeros(20)
    expected_mask[:5] = 1
    assert_array_equal(selector.get_support(), expected_mask)
+
+
def test_select_kbest_all():
    """`k="all"` must keep every feature, also through GenericUnivariateSelect."""
    X, y = make_classification(
        n_samples=20, n_features=10, shuffle=False, random_state=0
    )

    selector = SelectKBest(f_classif, k="all")
    X_all = selector.fit(X, y).transform(X)
    assert_array_equal(X, X_all)
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/24949
    generic = GenericUnivariateSelect(f_classif, mode="k_best", param="all")
    X_all_generic = generic.fit(X, y).transform(X)
    assert_array_equal(X_all, X_all_generic)
+
+
@pytest.mark.parametrize("dtype_in", [np.float32, np.float64])
def test_select_kbest_zero(dtype_in):
    """`k=0` selects nothing, warns on transform, and preserves the dtype."""
    X, y = make_classification(
        n_samples=20, n_features=10, shuffle=False, random_state=0
    )
    X = X.astype(dtype_in)

    selector = SelectKBest(f_classif, k=0)
    selector.fit(X, y)
    assert_array_equal(selector.get_support(), np.zeros(10, dtype=bool))
    with pytest.warns(UserWarning, match="No features were selected"):
        X_empty = selector.transform(X)
    assert X_empty.shape == (20, 0)
    assert X_empty.dtype == dtype_in
+
+
def test_select_heuristics_classif():
    """fdr/fpr/fwe heuristics all match SelectFwe on an easy classification
    problem and keep the 5 informative/redundant features."""
    X, y = make_classification(
        n_samples=200, n_features=20, n_informative=3, n_redundant=2,
        n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0,
        class_sep=10, shuffle=False, random_state=0,
    )

    fwe_filter = SelectFwe(f_classif, alpha=0.01)
    X_fwe = fwe_filter.fit(X, y).transform(X)
    expected_mask = np.zeros(20)
    expected_mask[:5] = 1
    for mode in ["fdr", "fpr", "fwe"]:
        generic = GenericUnivariateSelect(f_classif, mode=mode, param=0.01)
        assert_array_equal(X_fwe, generic.fit(X, y).transform(X))
        assert_allclose(fwe_filter.get_support(), expected_mask)
+
+
+##############################################################################
+# Test univariate selection in regression settings
+
+
def assert_best_scores_kept(score_filter):
    """Assert that the features kept by *score_filter* carry the top scores."""
    all_scores = score_filter.scores_
    mask = score_filter.get_support()
    n_kept = mask.sum()
    kept_sorted = np.sort(all_scores[mask])
    top_sorted = np.sort(all_scores)[-n_kept:]
    assert_allclose(kept_sorted, top_sorted)
+
+
def test_select_percentile_regression():
    """Percentile selection on regression data keeps the informative features
    and inverse_transform round-trips (including dtype preservation)."""
    X, y = make_regression(
        n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
    )

    selector = SelectPercentile(f_regression, percentile=25)
    X_sel = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    generic = GenericUnivariateSelect(f_regression, mode="percentile", param=25)
    assert_array_equal(X_sel, generic.fit(X, y).transform(X))

    mask = selector.get_support()
    expected_mask = np.zeros(20)
    expected_mask[:5] = 1
    assert_array_equal(mask, expected_mask)

    X_zeroed = X.copy()
    X_zeroed[:, np.logical_not(mask)] = 0
    assert_array_equal(X_zeroed, selector.inverse_transform(X_sel))
    # inverse_transform must respect the input dtype
    assert_array_equal(
        X_zeroed.astype(bool), selector.inverse_transform(X_sel.astype(bool))
    )
+
+
def test_select_percentile_regression_full():
    """percentile=100 must keep all features."""
    X, y = make_regression(
        n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
    )

    selector = SelectPercentile(f_regression, percentile=100)
    X_sel = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    generic = GenericUnivariateSelect(f_regression, mode="percentile", param=100)
    assert_array_equal(X_sel, generic.fit(X, y).transform(X))
    assert_array_equal(selector.get_support(), np.ones(20))
+
+
def test_select_kbest_regression():
    """SelectKBest keeps the informative features on a noisy regression task
    and agrees with GenericUnivariateSelect in k_best mode."""
    X, y = make_regression(
        n_samples=200, n_features=20, n_informative=5, shuffle=False,
        random_state=0, noise=10,
    )

    selector = SelectKBest(f_regression, k=5)
    X_sel = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    generic = GenericUnivariateSelect(f_regression, mode="k_best", param=5)
    assert_array_equal(X_sel, generic.fit(X, y).transform(X))

    expected_mask = np.zeros(20)
    expected_mask[:5] = 1
    assert_array_equal(selector.get_support(), expected_mask)
+
+
def test_select_heuristics_regression():
    """fdr/fpr/fwe heuristics match SelectFpr on noisy regression data and
    keep all informative features with few false positives."""
    X, y = make_regression(
        n_samples=200, n_features=20, n_informative=5, shuffle=False,
        random_state=0, noise=10,
    )

    fpr_filter = SelectFpr(f_regression, alpha=0.01)
    X_fpr = fpr_filter.fit(X, y).transform(X)
    for mode in ["fdr", "fpr", "fwe"]:
        generic = GenericUnivariateSelect(f_regression, mode=mode, param=0.01)
        assert_array_equal(X_fpr, generic.fit(X, y).transform(X))
        mask = fpr_filter.get_support()
        assert_array_equal(mask[:5], np.ones(5, dtype=bool))
        # Noise features: tolerate at most 2 false positives.
        assert np.sum(mask[5:] == 1) < 3
+
+
def test_boundary_case_ch2():
    """On this tiny dataset every chi2-based strategy keeps only feature 0."""
    X = np.array([[10, 20], [20, 20], [20, 30]])
    y = np.array([[1], [0], [0]])
    scores, pvalues = chi2(X, y)
    assert_array_almost_equal(scores, np.array([4.0, 0.71428571]))
    assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))

    # All five selection strategies must agree on the single best feature.
    for selector in (
        SelectFdr(chi2, alpha=0.1),
        SelectKBest(chi2, k=1),
        SelectPercentile(chi2, percentile=50),
        SelectFpr(chi2, alpha=0.1),
        SelectFwe(chi2, alpha=0.1),
    ):
        selector.fit(X, y)
        assert_array_equal(selector.get_support(), np.array([True, False]))
+
+
@pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1])
@pytest.mark.parametrize("n_informative", [1, 5, 10])
def test_select_fdr_regression(alpha, n_informative):
    # Test that fdr heuristic actually has low FDR.
    def single_fdr(alpha, n_informative, random_state):
        """Fit SelectFdr on one random draw and return its empirical FDR."""
        X, y = make_regression(
            n_samples=150,
            n_features=20,
            n_informative=n_informative,
            shuffle=False,
            random_state=random_state,
            noise=10,
        )

        with warnings.catch_warnings(record=True):
            # Warnings can be raised when no features are selected
            # (low alpha or very noisy data)
            univariate_filter = SelectFdr(f_regression, alpha=alpha)
            X_r = univariate_filter.fit(X, y).transform(X)
            X_r2 = (
                GenericUnivariateSelect(f_regression, mode="fdr", param=alpha)
                .fit(X, y)
                .transform(X)
            )

        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        # Features after index n_informative are pure noise by construction
        # (shuffle=False), so any selection there is a false positive.
        num_false_positives = np.sum(support[n_informative:] == 1)
        num_true_positives = np.sum(support[:n_informative] == 1)

        if num_false_positives == 0:
            return 0.0
        false_discovery_rate = num_false_positives / (
            num_true_positives + num_false_positives
        )
        return false_discovery_rate

    # As per Benjamini-Hochberg, the expected false discovery rate
    # should be lower than alpha:
    # FDR = E(FP / (TP + FP)) <= alpha
    false_discovery_rate = np.mean(
        [single_fdr(alpha, n_informative, random_state) for random_state in range(100)]
    )
    assert alpha >= false_discovery_rate

    # Make sure that the empirical false discovery rate increases
    # with alpha:
    if false_discovery_rate != 0:
        assert false_discovery_rate > alpha / 10
+
+
def test_select_fwe_regression():
    """The fwe heuristic keeps the informative features with at most one
    false positive, and matches GenericUnivariateSelect in fwe mode."""
    X, y = make_regression(
        n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0
    )

    fwe_filter = SelectFwe(f_regression, alpha=0.01)
    X_fwe = fwe_filter.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(f_regression, mode="fwe", param=0.01)
    assert_array_equal(X_fwe, generic.fit(X, y).transform(X))

    mask = fwe_filter.get_support()
    assert_array_equal(mask[:5], np.ones(5, dtype=bool))
    assert np.sum(mask[5:] == 1) < 2
+
+
def test_selectkbest_tiebreaking():
    """SelectKBest returns exactly k features even when scores are tied.

    Prior to 0.11, SelectKBest would return more features than requested.
    """
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    def dummy_score(X, y):
        # Score each feature by its value in the single sample.
        return X[0], X[0]

    for X in Xs:
        for k in (1, 2):
            sel = SelectKBest(dummy_score, k=k)
            X_k = ignore_warnings(sel.fit_transform)([X], y)
            assert X_k.shape[1] == k
            assert_best_scores_kept(sel)
+
+
def test_selectpercentile_tiebreaking():
    """SelectPercentile keeps the right feature count when scores are tied."""
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    def dummy_score(X, y):
        # Score each feature by its value in the single sample.
        return X[0], X[0]

    for X in Xs:
        for percentile, n_expected in ((34, 1), (67, 2)):
            sel = SelectPercentile(dummy_score, percentile=percentile)
            X_p = ignore_warnings(sel.fit_transform)([X], y)
            assert X_p.shape[1] == n_expected
            assert_best_scores_kept(sel)
+
+
def test_tied_pvalues():
    """k-best and percentile handle tied chi2 p-values deterministically.

    chi2 returns identical p-values but distinct scores for these features;
    the smallest-scored feature (9998) must never be kept.
    """
    X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
    y = [0, 1]

    for perm in itertools.permutations((0, 1, 2)):
        X = X0[:, perm]
        for selector in (
            SelectKBest(chi2, k=2),
            SelectPercentile(chi2, percentile=67),
        ):
            Xt = selector.fit_transform(X, y)
            assert Xt.shape == (2, 2)
            assert 9998 not in Xt
+
+
def test_scorefunc_multilabel():
    """k-best and percentile selection work with multilabel targets + chi2."""
    X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
    y = [[1, 1], [0, 1], [1, 0]]

    for selector in (
        SelectKBest(chi2, k=2),
        SelectPercentile(chi2, percentile=67),
    ):
        Xt = selector.fit_transform(X, y)
        assert Xt.shape == (3, 2)
        # The all-zero third column must be dropped.
        assert 0 not in Xt
+
+
def test_tied_scores():
    """SelectKBest sorts stably, so ties keep the right-most features."""
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]

    for k in (1, 2, 3):
        selector = SelectKBest(chi2, k=k).fit(X_train, y_train)
        X_test = selector.transform([[0, 1, 2]])
        assert_array_equal(X_test[0], np.arange(3)[-k:])
+
+
def test_nans():
    """SelectKBest/SelectPercentile cope with NaN scores.

    The first feature is constant, which makes f_classif (ANOVA) produce a
    NaN for it; the selectors must still pick the two valid features.
    """
    X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]]
    y = [1, 0, 1]

    for selector in (
        SelectKBest(f_classif, k=2),
        SelectPercentile(f_classif, percentile=67),
    ):
        ignore_warnings(selector.fit)(X, y)
        assert_array_equal(selector.get_support(indices=True), np.array([1, 2]))
+
+
def test_invalid_k():
    """Requesting more features than exist warns and keeps everything."""
    X = [[0, 1, 0], [0, -1, -1], [0, 0.5, 0.5]]
    y = [1, 0, 1]
    msg = "k=4 is greater than n_features=3. All the features will be returned."

    for selector in (
        SelectKBest(k=4),
        GenericUnivariateSelect(mode="k_best", param=4),
    ):
        with pytest.warns(UserWarning, match=msg):
            selector.fit(X, y)
+
+
def test_f_classif_constant_feature():
    """f_classif warns when a feature is constant across all samples."""
    X, y = make_classification(n_samples=10, n_features=5)
    X[:, 0] = 2.0  # force the first feature to be constant
    with pytest.warns(UserWarning):
        f_classif(X, y)
+
+
def test_no_feature_selected():
    """Strict selectors reject every feature of random data and warn on
    transform, returning an empty (n_samples, 0) array."""
    rng = np.random.RandomState(0)
    # Uncorrelated random data: a strict univariate test should reject all.
    X = rng.rand(40, 10)
    y = rng.randint(0, 4, size=40)

    for selector_cls, kwargs in (
        (SelectFwe, {"alpha": 0.01}),
        (SelectFdr, {"alpha": 0.01}),
        (SelectFpr, {"alpha": 0.01}),
        (SelectPercentile, {"percentile": 0}),
        (SelectKBest, {"k": 0}),
    ):
        selector = selector_cls(**kwargs).fit(X, y)
        assert_array_equal(selector.get_support(), np.zeros(10))
        with pytest.warns(UserWarning, match="No features were selected"):
            X_empty = selector.transform(X)
        assert X_empty.shape == (40, 0)
+
+
def test_mutual_info_classif():
    """mutual_info_classif selects the informative features in both KBest
    and Percentile modes and matches GenericUnivariateSelect."""
    X, y = make_classification(
        n_samples=100, n_features=5, n_informative=1, n_redundant=1,
        n_repeated=0, n_classes=2, n_clusters_per_class=1, flip_y=0.0,
        class_sep=10, shuffle=False, random_state=0,
    )
    expected_mask = np.zeros(5)
    expected_mask[:2] = 1

    # KBest mode.
    selector = SelectKBest(mutual_info_classif, k=2)
    X_sel = selector.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(mutual_info_classif, mode="k_best", param=2)
    assert_array_equal(X_sel, generic.fit(X, y).transform(X))
    assert_array_equal(selector.get_support(), expected_mask)

    # Percentile mode.
    selector = SelectPercentile(mutual_info_classif, percentile=40)
    X_sel = selector.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(
        mutual_info_classif, mode="percentile", param=40
    )
    assert_array_equal(X_sel, generic.fit(X, y).transform(X))
    assert_array_equal(selector.get_support(), expected_mask)
+
+
def test_mutual_info_regression():
    """mutual_info_regression selects the informative features in both
    KBest and Percentile modes and matches GenericUnivariateSelect."""
    X, y = make_regression(
        n_samples=100, n_features=10, n_informative=2, shuffle=False,
        random_state=0, noise=10,
    )
    expected_mask = np.zeros(10)
    expected_mask[:2] = 1

    # KBest mode.
    selector = SelectKBest(mutual_info_regression, k=2)
    X_sel = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    generic = GenericUnivariateSelect(mutual_info_regression, mode="k_best", param=2)
    assert_array_equal(X_sel, generic.fit(X, y).transform(X))
    assert_array_equal(selector.get_support(), expected_mask)

    # Percentile mode.
    selector = SelectPercentile(mutual_info_regression, percentile=20)
    X_sel = selector.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(
        mutual_info_regression, mode="percentile", param=20
    )
    assert_array_equal(X_sel, generic.fit(X, y).transform(X))
    assert_array_equal(selector.get_support(), expected_mask)
+
+
def test_dataframe_output_dtypes():
    """Check that the output dataframe dtypes are the same as the input.

    Non-regression test for gh-24860.
    """
    pd = pytest.importorskip("pandas")

    X, y = load_iris(return_X_y=True, as_frame=True)
    # Mix dtypes on purpose: float32, float64 and a pandas Categorical.
    X = X.astype(
        {
            "petal length (cm)": np.float32,
            "petal width (cm)": np.float64,
        }
    )
    X["petal_width_binned"] = pd.cut(X["petal width (cm)"], bins=10)

    column_order = X.columns

    def selector(X, y):
        # Deterministic per-column ranking so the top-3 columns are known.
        ranking = {
            "sepal length (cm)": 1,
            "sepal width (cm)": 2,
            "petal length (cm)": 3,
            "petal width (cm)": 4,
            "petal_width_binned": 5,
        }
        return np.asarray([ranking[name] for name in column_order])

    univariate_filter = SelectKBest(selector, k=3).set_output(transform="pandas")
    output = univariate_filter.fit_transform(X, y)

    assert_array_equal(
        output.columns, ["petal length (cm)", "petal width (cm)", "petal_width_binned"]
    )
    # Each selected column must keep its original dtype.
    for name, dtype in output.dtypes.items():
        assert dtype == X.dtypes[name]
+
+
@pytest.mark.parametrize(
    "selector",
    [
        SelectKBest(k=4),
        SelectPercentile(percentile=80),
        GenericUnivariateSelect(mode="k_best", param=4),
        GenericUnivariateSelect(mode="percentile", param=80),
    ],
)
def test_unsupervised_filter(selector):
    """Check support for unsupervised feature selection for the filter that could
    require only `X`.
    """
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)

    # Score function that ignores `y`; the last feature scores lowest.
    def score_func(X, y=None):
        return np.array([1, 1, 1, 1, 0])

    selector.set_params(score_func=score_func)
    # Both fit + transform and fit_transform must work without `y`.
    selector.fit(X)
    X_trans = selector.transform(X)
    assert_allclose(X_trans, X[:, :4])
    X_trans = selector.fit_transform(X)
    assert_allclose(X_trans, X[:, :4])
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..8008b8c0280855d7dd2de0eab2179a4aebec07ea
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_from_model.py
@@ -0,0 +1,690 @@
+import re
+import warnings
+from unittest.mock import Mock
+
+import numpy as np
+import pytest
+
+from sklearn import datasets
+from sklearn.base import BaseEstimator
+from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression
+from sklearn.datasets import make_friedman1
+from sklearn.decomposition import PCA
+from sklearn.ensemble import HistGradientBoostingClassifier, RandomForestClassifier
+from sklearn.exceptions import NotFittedError
+from sklearn.feature_selection import SelectFromModel
+from sklearn.linear_model import (
+ ElasticNet,
+ ElasticNetCV,
+ Lasso,
+ LassoCV,
+ LinearRegression,
+ LogisticRegression,
+ PassiveAggressiveClassifier,
+ SGDClassifier,
+)
+from sklearn.pipeline import make_pipeline
+from sklearn.svm import LinearSVC
+from sklearn.utils._testing import (
+ MinimalClassifier,
+ assert_allclose,
+ assert_array_almost_equal,
+ assert_array_equal,
+ skip_if_32bit,
+)
+
+
class NaNTag(BaseEstimator):
    """Estimator stub whose tags advertise NaN support."""

    def __sklearn_tags__(self):
        t = super().__sklearn_tags__()
        t.input_tags.allow_nan = True
        return t
+
+
class NoNaNTag(BaseEstimator):
    """Estimator stub whose tags declare that NaNs are not supported."""

    def __sklearn_tags__(self):
        t = super().__sklearn_tags__()
        t.input_tags.allow_nan = False
        return t
+
+
class NaNTagRandomForest(RandomForestClassifier):
    """RandomForestClassifier variant whose tags advertise NaN support."""

    def __sklearn_tags__(self):
        t = super().__sklearn_tags__()
        t.input_tags.allow_nan = True
        return t
+
+
# Shared test fixtures: the iris dataset and a seeded RNG used across tests.
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
+
+
def test_invalid_input():
    """Unparseable threshold strings raise ValueError at transform time."""
    clf = SGDClassifier(
        alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None
    )
    for bad_threshold in ("gobbledigook", ".5 * gobbledigook"):
        model = SelectFromModel(clf, threshold=bad_threshold)
        model.fit(data, y)
        with pytest.raises(ValueError):
            model.transform(data)
+
+
def test_input_estimator_unchanged():
    """SelectFromModel fits a clone, leaving the passed estimator untouched."""
    estimator = RandomForestClassifier()
    transformer = SelectFromModel(estimator=estimator)
    transformer.fit(data, y)
    assert transformer.estimator is estimator
+
+
@pytest.mark.parametrize(
    "max_features, err_type, err_msg",
    [
        (
            data.shape[1] + 1,
            ValueError,
            "max_features ==",
        ),
        (
            lambda X: 1.5,
            TypeError,
            "max_features must be an instance of int, not float.",
        ),
        (
            lambda X: data.shape[1] + 1,
            ValueError,
            "max_features ==",
        ),
        (
            lambda X: -1,
            ValueError,
            "max_features ==",
        ),
    ],
)
def test_max_features_error(max_features, err_type, err_msg):
    """Invalid `max_features` values or callables raise the expected errors."""
    # Escape the message so it can be used as a regex by pytest.raises.
    err_msg = re.escape(err_msg)
    clf = RandomForestClassifier(n_estimators=5, random_state=0)

    transformer = SelectFromModel(
        estimator=clf, max_features=max_features, threshold=-np.inf
    )
    with pytest.raises(err_type, match=err_msg):
        transformer.fit(data, y)
+
+
@pytest.mark.parametrize("max_features", [0, 2, data.shape[1], None])
def test_inferred_max_features_integer(max_features):
    """Check max_features_ and output shape for integer max_features."""
    clf = RandomForestClassifier(n_estimators=5, random_state=0)
    transformer = SelectFromModel(
        estimator=clf, max_features=max_features, threshold=-np.inf
    )
    X_trans = transformer.fit_transform(data, y)
    if max_features is None:
        # Unset: the fitted attribute is absent and all features survive.
        assert not hasattr(transformer, "max_features_")
        assert X_trans.shape[1] == data.shape[1]
    else:
        assert transformer.max_features_ == max_features
        assert X_trans.shape[1] == transformer.max_features_
+
+
@pytest.mark.parametrize(
    "max_features",
    [lambda X: 1, lambda X: X.shape[1], lambda X: min(X.shape[1], 10000)],
)
def test_inferred_max_features_callable(max_features):
    """Check max_features_ and output shape for callable max_features."""
    clf = RandomForestClassifier(n_estimators=5, random_state=0)
    selector = SelectFromModel(
        estimator=clf, max_features=max_features, threshold=-np.inf
    )
    X_trans = selector.fit_transform(data, y)
    # The fitted attribute must equal the callable evaluated on the data.
    assert selector.max_features_ == max_features(data)
    assert X_trans.shape[1] == selector.max_features_
+
+
@pytest.mark.parametrize("max_features", [lambda X: round(len(X[0]) / 2), 2])
def test_max_features_array_like(max_features):
    """max_features also works when X is a plain nested list."""
    X = [
        [0.87, -1.34, 0.31],
        [-2.79, -0.02, -0.85],
        [-1.34, -0.48, -2.55],
        [1.92, 1.48, 0.65],
    ]
    y = [0, 1, 0, 1]

    selector = SelectFromModel(
        estimator=RandomForestClassifier(n_estimators=5, random_state=0),
        max_features=max_features,
        threshold=-np.inf,
    )
    X_trans = selector.fit_transform(X, y)
    assert X_trans.shape[1] == selector.max_features_
+
+
@pytest.mark.parametrize(
    "max_features",
    [lambda X: min(X.shape[1], 10000), lambda X: X.shape[1], lambda X: 1],
)
def test_max_features_callable_data(max_features):
    """Tests that the callable passed to `fit` is called on X."""
    clf = RandomForestClassifier(n_estimators=50, random_state=0)
    spy = Mock(side_effect=max_features)
    transformer = SelectFromModel(estimator=clf, max_features=spy, threshold=-np.inf)
    transformer.fit_transform(data, y)
    # The callable must have been invoked with the training data itself.
    spy.assert_called_with(data)
+
+
class FixedImportanceEstimator(BaseEstimator):
    """Stub estimator exposing caller-supplied `feature_importances_`.

    Lets tests control feature importances exactly instead of relying on a
    fitted model.
    """

    def __init__(self, importances):
        # Stored as-is; converted to an array at fit time (sklearn
        # convention: __init__ does no validation).
        self.importances = importances

    def fit(self, X, y=None):
        self.feature_importances_ = np.array(self.importances)
        # Follow the scikit-learn convention of returning the fitted
        # estimator so `fit` calls can be chained.
        return self
+
+
def test_max_features():
    # Test max_features parameter using various values
    X, y = datasets.make_classification(
        n_samples=1000,
        n_features=10,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        shuffle=False,
        random_state=0,
    )
    max_features = X.shape[1]
    est = RandomForestClassifier(n_estimators=50, random_state=0)

    # max_features == n_features must be equivalent to no max_features cap.
    transformer1 = SelectFromModel(estimator=est, threshold=-np.inf)
    transformer2 = SelectFromModel(
        estimator=est, max_features=max_features, threshold=-np.inf
    )
    X_new1 = transformer1.fit_transform(X, y)
    X_new2 = transformer2.fit_transform(X, y)
    assert_allclose(X_new1, X_new2)

    # Test max_features against actual model.
    transformer1 = SelectFromModel(estimator=Lasso(alpha=0.025, random_state=42))
    X_new1 = transformer1.fit_transform(X, y)
    scores1 = np.abs(transformer1.estimator_.coef_)
    # mergesort is stable, so ties keep a deterministic order.
    candidate_indices1 = np.argsort(-scores1, kind="mergesort")

    # For every feature budget, the top-ranked candidates must agree with the
    # unconstrained fit (same coefficients, same ranking).
    for n_features in range(1, X_new1.shape[1] + 1):
        transformer2 = SelectFromModel(
            estimator=Lasso(alpha=0.025, random_state=42),
            max_features=n_features,
            threshold=-np.inf,
        )
        X_new2 = transformer2.fit_transform(X, y)
        scores2 = np.abs(transformer2.estimator_.coef_)
        candidate_indices2 = np.argsort(-scores2, kind="mergesort")
        assert_allclose(
            X[:, candidate_indices1[:n_features]], X[:, candidate_indices2[:n_features]]
        )
    assert_allclose(transformer1.estimator_.coef_, transformer2.estimator_.coef_)
+
+
def test_max_features_tiebreak():
    """max_features breaks ties in feature importances deterministically,
    always keeping the lowest-indexed features first."""
    X, y = datasets.make_classification(
        n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
        n_repeated=0, shuffle=False, random_state=0,
    )

    importances = np.array([4, 4, 4, 4, 3, 3, 3, 2, 2, 1])
    for n_features in range(1, X.shape[1] + 1):
        transformer = SelectFromModel(
            FixedImportanceEstimator(importances),
            max_features=n_features,
            threshold=-np.inf,
        )
        X_new = transformer.fit_transform(X, y)
        picked = np.where(transformer._get_support_mask())[0]
        assert_array_equal(picked, np.arange(n_features))
        assert X_new.shape[1] == n_features
+
+
def test_threshold_and_max_features():
    """When both are given, the stricter of threshold/max_features wins."""
    X, y = datasets.make_classification(
        n_samples=1000,
        n_features=10,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        shuffle=False,
        random_state=0,
    )
    est = RandomForestClassifier(n_estimators=50, random_state=0)

    # max_features only.
    transformer1 = SelectFromModel(estimator=est, max_features=3, threshold=-np.inf)
    X_new1 = transformer1.fit_transform(X, y)

    # threshold only.
    transformer2 = SelectFromModel(estimator=est, threshold=0.04)
    X_new2 = transformer2.fit_transform(X, y)

    # Both: the resulting feature count is the minimum of the two.
    transformer3 = SelectFromModel(estimator=est, max_features=3, threshold=0.04)
    X_new3 = transformer3.fit_transform(X, y)
    assert X_new3.shape[1] == min(X_new1.shape[1], X_new2.shape[1])
    # Transform a row of column indices to recover which columns were kept.
    selected_indices = transformer3.transform(np.arange(X.shape[1])[np.newaxis, :])
    assert_allclose(X_new3, X[:, selected_indices[0]])
+
+
@skip_if_32bit
def test_feature_importances():
    """String thresholds "mean"/"median" select by the matching statistic."""
    X, y = datasets.make_classification(
        n_samples=1000,
        n_features=10,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        shuffle=False,
        random_state=0,
    )

    est = RandomForestClassifier(n_estimators=50, random_state=0)
    for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
        transformer = SelectFromModel(estimator=est, threshold=threshold)
        transformer.fit(X, y)
        assert hasattr(transformer.estimator_, "feature_importances_")

        X_new = transformer.transform(X)
        assert X_new.shape[1] < X.shape[1]
        importances = transformer.estimator_.feature_importances_

        # The string threshold must behave like the corresponding numpy
        # aggregate applied to the importances.
        feature_mask = np.abs(importances) > func(importances)
        assert_array_almost_equal(X_new, X[:, feature_mask])
+
+
def test_sample_weight():
    """sample_weight must be forwarded to the wrapped estimator's fit."""
    X, y = datasets.make_classification(
        n_samples=100, n_features=10, n_informative=3, n_redundant=0,
        n_repeated=0, shuffle=False, random_state=0,
    )

    sample_weight = np.ones(y.shape)
    sample_weight[y == 1] *= 100  # heavily up-weight the positive class

    est = LogisticRegression(random_state=0, fit_intercept=False)
    transformer = SelectFromModel(estimator=est)

    transformer.fit(X, y, sample_weight=None)
    unweighted_mask = transformer._get_support_mask()
    transformer.fit(X, y, sample_weight=sample_weight)
    weighted_mask = transformer._get_support_mask()
    # Weighting changes which features are selected...
    assert not np.all(weighted_mask == unweighted_mask)
    # ...but rescaling all weights by a constant does not.
    transformer.fit(X, y, sample_weight=3 * sample_weight)
    reweighted_mask = transformer._get_support_mask()
    assert np.all(weighted_mask == reweighted_mask)
+
+
@pytest.mark.parametrize(
    "estimator",
    [
        Lasso(alpha=0.1, random_state=42),
        LassoCV(random_state=42),
        ElasticNet(l1_ratio=1, random_state=42),
        ElasticNetCV(l1_ratio=[1], random_state=42),
    ],
)
def test_coef_default_threshold(estimator):
    """Lasso-family estimators get the implicit 1e-5 coefficient threshold."""
    X, y = datasets.make_classification(
        n_samples=100, n_features=10, n_informative=3, n_redundant=0,
        n_repeated=0, shuffle=False, random_state=0,
    )

    transformer = SelectFromModel(estimator=estimator)
    transformer.fit(X, y)
    X_new = transformer.transform(X)
    # For the Lasso and related models, the threshold defaults to 1e-5.
    kept = np.abs(transformer.estimator_.coef_) > 1e-5
    assert_array_almost_equal(X_new, X[:, kept])
+
+
@skip_if_32bit
def test_2d_coef():
    """Multi-class coef_ matrices are reduced per-feature via `norm_order`."""
    X, y = datasets.make_classification(
        n_samples=1000,
        n_features=10,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        shuffle=False,
        random_state=0,
        n_classes=4,
    )

    est = LogisticRegression()
    for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
        for order in [1, 2, np.inf]:
            # Fit SelectFromModel a multi-class problem
            transformer = SelectFromModel(
                estimator=LogisticRegression(), threshold=threshold, norm_order=order
            )
            transformer.fit(X, y)
            assert hasattr(transformer.estimator_, "coef_")
            X_new = transformer.transform(X)
            assert X_new.shape[1] < X.shape[1]

            # Manually check that the norm is correctly performed
            est.fit(X, y)
            # coef_ has shape (n_classes, n_features); the per-feature
            # importance is the column norm of the given order.
            importances = np.linalg.norm(est.coef_, axis=0, ord=order)
            feature_mask = importances > func(importances)
            assert_array_almost_equal(X_new, X[:, feature_mask])
+
+
def test_partial_fit():
    """partial_fit reuses the fitted sub-estimator instead of recreating it."""
    est = PassiveAggressiveClassifier(
        random_state=0, shuffle=False, max_iter=5, tol=None
    )
    transformer = SelectFromModel(estimator=est)
    transformer.partial_fit(data, y, classes=np.unique(y))
    old_model = transformer.estimator_
    transformer.partial_fit(data, y, classes=np.unique(y))
    new_model = transformer.estimator_
    # Two partial_fit calls must update the same estimator object in place.
    assert old_model is new_model

    # Two epochs of partial_fit are equivalent to one fit on doubled data.
    X_transform = transformer.transform(data)
    transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
    assert_array_almost_equal(X_transform, transformer.transform(data))

    # check that if est doesn't have partial_fit, neither does SelectFromModel
    transformer = SelectFromModel(estimator=RandomForestClassifier())
    assert not hasattr(transformer, "partial_fit")
+
+
+def test_calling_fit_reinitializes():
+ est = LinearSVC(random_state=0)
+ transformer = SelectFromModel(estimator=est)
+ transformer.fit(data, y)
+ transformer.set_params(estimator__C=100)
+ transformer.fit(data, y)
+ assert transformer.estimator_.C == 100
+
+
+def test_prefit():
+    # Test all possible combinations of the prefit parameter.
+
+    # Passing a prefit parameter with the selected model
+    # and fitting a unfit model with prefit=False should give same results.
+    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
+    model = SelectFromModel(clf)
+    model.fit(data, y)
+    X_transform = model.transform(data)
+    clf.fit(data, y)
+    model = SelectFromModel(clf, prefit=True)
+    assert_array_almost_equal(model.transform(data), X_transform)
+    # Even with prefit=True, fit must store a clone, not the original object.
+    model.fit(data, y)
+    assert model.estimator_ is not clf
+
+    # Check that the model is rewritten if prefit=False and a fitted model is
+    # passed
+    model = SelectFromModel(clf, prefit=False)
+    model.fit(data, y)
+    assert_array_almost_equal(model.transform(data), X_transform)
+
+    # Check that passing an unfitted estimator with `prefit=True` raises a
+    # `ValueError`
+    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
+    model = SelectFromModel(clf, prefit=True)
+    err_msg = "When `prefit=True`, `estimator` is expected to be a fitted estimator."
+    # All three entry points must reject the unfitted prefit estimator.
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.fit(data, y)
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.partial_fit(data, y)
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.transform(data)
+
+    # Check that the internal parameters of prefitted model are not changed
+    # when calling `fit` or `partial_fit` with `prefit=True`
+    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, tol=None).fit(data, y)
+    model = SelectFromModel(clf, prefit=True)
+    model.fit(data, y)
+    assert_allclose(model.estimator_.coef_, clf.coef_)
+    model.partial_fit(data, y)
+    assert_allclose(model.estimator_.coef_, clf.coef_)
+
+
+def test_prefit_max_features():
+    """Check the interaction between `prefit` and `max_features`."""
+    # case 1: an error should be raised at `transform` if `fit` was not called to
+    # validate the attributes
+    estimator = RandomForestClassifier(n_estimators=5, random_state=0)
+    estimator.fit(data, y)
+    # A callable `max_features` can only be resolved during `fit`.
+    model = SelectFromModel(estimator, prefit=True, max_features=lambda X: X.shape[1])
+
+    err_msg = (
+        "When `prefit=True` and `max_features` is a callable, call `fit` "
+        "before calling `transform`."
+    )
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.transform(data)
+
+    # case 2: `max_features` is not validated and different from an integer
+    # FIXME: we cannot validate the upper bound of the attribute at transform
+    # and we should force calling `fit` if we intend to force the attribute
+    # to have such an upper bound.
+    max_features = 2.5
+    model.set_params(max_features=max_features)
+    with pytest.raises(ValueError, match="`max_features` must be an integer"):
+        model.transform(data)
+
+
+def test_prefit_get_feature_names_out():
+    """Check the interaction between prefit and the feature names."""
+    clf = RandomForestClassifier(n_estimators=2, random_state=0)
+    clf.fit(data, y)
+    model = SelectFromModel(clf, prefit=True, max_features=1)
+
+    # Before `fit`, the selector itself is unfitted even though the wrapped
+    # estimator is, so asking for output names must raise NotFittedError.
+    name = type(model).__name__
+    err_msg = (
+        f"This {name} instance is not fitted yet. Call 'fit' with "
+        "appropriate arguments before using this estimator."
+    )
+    with pytest.raises(NotFittedError, match=err_msg):
+        model.get_feature_names_out()
+
+    # After fitting, exactly one feature name is returned (max_features=1).
+    model.fit(data, y)
+    feature_names = model.get_feature_names_out()
+    assert feature_names == ["x3"]
+
+
+def test_threshold_string():
+    # A string threshold such as "0.5*mean" is parsed and applied to the
+    # estimator's feature importances.
+    est = RandomForestClassifier(n_estimators=50, random_state=0)
+    model = SelectFromModel(est, threshold="0.5*mean")
+    model.fit(data, y)
+    X_transform = model.transform(data)
+
+    # Calculate the threshold from the estimator directly.
+    est.fit(data, y)
+    threshold = 0.5 * np.mean(est.feature_importances_)
+    mask = est.feature_importances_ > threshold
+    assert_array_almost_equal(X_transform, data[:, mask])
+
+
+def test_threshold_without_refitting():
+    # Test that the threshold can be set without refitting the model.
+    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=0, tol=None)
+    model = SelectFromModel(clf, threshold="0.1 * mean")
+    model.fit(data, y)
+    X_transform = model.transform(data)
+
+    # Set a higher threshold to filter out more features.
+    # Mutating the attribute directly (no set_params/fit) must be honored by
+    # the next transform call.
+    model.threshold = "1.0 * mean"
+    assert X_transform.shape[1] > model.transform(data).shape[1]
+
+
+def test_fit_accepts_nan_inf():
+ # Test that fit doesn't check for np.inf and np.nan values.
+ clf = HistGradientBoostingClassifier(random_state=0)
+
+ model = SelectFromModel(estimator=clf)
+
+ nan_data = data.copy()
+ nan_data[0] = np.nan
+ nan_data[1] = np.inf
+
+ model.fit(data, y)
+
+
+def test_transform_accepts_nan_inf():
+    # Test that transform doesn't check for np.inf and np.nan values.
+    clf = NaNTagRandomForest(n_estimators=100, random_state=0)
+    nan_data = data.copy()
+
+    model = SelectFromModel(estimator=clf)
+    model.fit(nan_data, y)
+
+    # NaN/inf are injected only after fitting, so this exercises the
+    # validation done in `transform` alone.
+    nan_data[0] = np.nan
+    nan_data[1] = np.inf
+
+    model.transform(nan_data)
+
+
+def test_allow_nan_tag_comes_from_estimator():
+    # SelectFromModel must forward the wrapped estimator's `allow_nan` input
+    # tag rather than declare its own.
+    allow_nan_est = NaNTag()
+    model = SelectFromModel(estimator=allow_nan_est)
+    assert model.__sklearn_tags__().input_tags.allow_nan is True
+
+    no_nan_est = NoNaNTag()
+    model = SelectFromModel(estimator=no_nan_est)
+    assert model.__sklearn_tags__().input_tags.allow_nan is False
+
+
+def _pca_importances(pca_estimator):
+ return np.abs(pca_estimator.explained_variance_)
+
+
+@pytest.mark.parametrize(
+    "estimator, importance_getter",
+    [
+        (
+            make_pipeline(PCA(random_state=0), LogisticRegression()),
+            "named_steps.logisticregression.coef_",
+        ),
+        (PCA(random_state=0), _pca_importances),
+    ],
+)
+def test_importance_getter(estimator, importance_getter):
+    # `importance_getter` may be a dotted attribute path (for pipelines) or a
+    # callable; both must drive the selection.
+    selector = SelectFromModel(
+        estimator, threshold="mean", importance_getter=importance_getter
+    )
+    selector.fit(data, y)
+    assert selector.transform(data).shape[1] == 1
+
+
+@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression])
+def test_select_from_model_pls(PLSEstimator):
+    """Check the behaviour of SelectFromModel with PLS estimators.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/12410
+    """
+    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
+    estimator = PLSEstimator(n_components=1)
+    # Selection followed by the same estimator must still fit and score well.
+    model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y)
+    assert model.score(X, y) > 0.5
+
+
+def test_estimator_does_not_support_feature_names():
+    """SelectFromModel works with estimators that do not support feature_names_in_.
+
+    Non-regression test for #21949.
+    """
+    pytest.importorskip("pandas")
+    X, y = datasets.load_iris(as_frame=True, return_X_y=True)
+    all_feature_names = set(X.columns)
+
+    def importance_getter(estimator):
+        # Deterministic importances independent of the (minimal) estimator.
+        return np.arange(X.shape[1])
+
+    selector = SelectFromModel(
+        MinimalClassifier(), importance_getter=importance_getter
+    ).fit(X, y)
+
+    # selector learns the feature names itself
+    assert_array_equal(selector.feature_names_in_, X.columns)
+
+    feature_names_out = set(selector.get_feature_names_out())
+    assert feature_names_out < all_feature_names
+
+    # Transforming a DataFrame slice must not emit a feature-name warning.
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", UserWarning)
+
+        selector.transform(X.iloc[1:3])
+
+
+@pytest.mark.parametrize(
+    "error, err_msg, max_features",
+    (
+        [ValueError, "max_features == 10, must be <= 4", 10],
+        [ValueError, "max_features == 5, must be <= 4", lambda x: x.shape[1] + 1],
+    ),
+)
+def test_partial_fit_validate_max_features(error, err_msg, max_features):
+    """Test that partial_fit from SelectFromModel validates `max_features`."""
+    # Both an out-of-range integer and a callable that resolves out of range
+    # must be rejected.
+    X, y = datasets.make_classification(
+        n_samples=100,
+        n_features=4,
+        random_state=0,
+    )
+
+    with pytest.raises(error, match=err_msg):
+        SelectFromModel(
+            estimator=SGDClassifier(), max_features=max_features
+        ).partial_fit(X, y, classes=[0, 1])
+
+
+@pytest.mark.parametrize("as_frame", [True, False])
+def test_partial_fit_validate_feature_names(as_frame):
+    """Test that partial_fit from SelectFromModel validates `feature_names_in_`."""
+    pytest.importorskip("pandas")
+    X, y = datasets.load_iris(as_frame=as_frame, return_X_y=True)
+
+    selector = SelectFromModel(estimator=SGDClassifier(), max_features=4).partial_fit(
+        X, y, classes=[0, 1, 2]
+    )
+    # `feature_names_in_` must exist only when the input is a DataFrame.
+    if as_frame:
+        assert_array_equal(selector.feature_names_in_, X.columns)
+    else:
+        assert not hasattr(selector, "feature_names_in_")
+
+
+def test_from_model_estimator_attribute_error():
+    """Check that we raise the proper AttributeError when the estimator
+    does not implement the `partial_fit` method, which is decorated with
+    `available_if`.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/28108
+    """
+    # `LinearRegression` does not implement 'partial_fit' and should raise an
+    # AttributeError
+    from_model = SelectFromModel(estimator=LinearRegression())
+
+    outer_msg = "This 'SelectFromModel' has no attribute 'partial_fit'"
+    inner_msg = "'LinearRegression' object has no attribute 'partial_fit'"
+    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
+        from_model.fit(data, y).partial_fit(data)
+    # The estimator-level AttributeError must be chained as the cause.
+    assert isinstance(exec_info.value.__cause__, AttributeError)
+    assert inner_msg in str(exec_info.value.__cause__)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae11de2fadf59ae888e22338b8843fb238f99dc3
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_rfe.py
@@ -0,0 +1,723 @@
+"""
+Testing Recursive feature elimination
+"""
+
+from operator import attrgetter
+
+import numpy as np
+import pytest
+from joblib import parallel_backend
+from numpy.testing import assert_allclose, assert_array_almost_equal, assert_array_equal
+
+from sklearn.base import BaseEstimator, ClassifierMixin, is_classifier
+from sklearn.compose import TransformedTargetRegressor
+from sklearn.cross_decomposition import CCA, PLSCanonical, PLSRegression
+from sklearn.datasets import load_iris, make_classification, make_friedman1
+from sklearn.ensemble import RandomForestClassifier
+from sklearn.feature_selection import RFE, RFECV
+from sklearn.impute import SimpleImputer
+from sklearn.linear_model import LinearRegression, LogisticRegression
+from sklearn.metrics import get_scorer, make_scorer, zero_one_loss
+from sklearn.model_selection import GroupKFold, cross_val_score
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.svm import SVC, SVR, LinearSVR
+from sklearn.utils import check_random_state
+from sklearn.utils._testing import ignore_warnings
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+
+class MockClassifier(ClassifierMixin, BaseEstimator):
+    """
+    Dummy classifier to test recursive feature elimination
+    """
+
+    def __init__(self, foo_param=0):
+        self.foo_param = foo_param
+
+    def fit(self, X, y):
+        # Minimal fit: every feature gets the same coefficient so RFE's
+        # ranking is driven purely by elimination order, not importances.
+        assert len(X) == len(y)
+        self.coef_ = np.ones(X.shape[1], dtype=np.float64)
+        self.classes_ = sorted(set(y))
+        return self
+
+    def predict(self, T):
+        return np.ones(T.shape[0])
+
+    # All prediction-like methods share the same trivial implementation.
+    predict_proba = predict
+    decision_function = predict
+    transform = predict
+
+    def score(self, X=None, y=None):
+        return 0.0
+
+    def get_params(self, deep=True):
+        return {"foo_param": self.foo_param}
+
+    def set_params(self, **params):
+        # Parameters are intentionally ignored; the mock is effectively static.
+        return self
+
+    def __sklearn_tags__(self):
+        # Advertise NaN support so the mock can be used in NaN/inf tests.
+        tags = super().__sklearn_tags__()
+        tags.input_tags.allow_nan = True
+        return tags
+
+
+def test_rfe_features_importance():
+    # RFE driven by `feature_importances_` (forest) should select the same
+    # features as RFE driven by `coef_` (linear SVC) on this easy problem.
+    generator = check_random_state(0)
+    iris = load_iris()
+    # Add some irrelevant features. Random seed is set to make sure that
+    # irrelevant features are always irrelevant.
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = iris.target
+
+    clf = RandomForestClassifier(n_estimators=20, random_state=generator, max_depth=2)
+    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
+    rfe.fit(X, y)
+    assert len(rfe.ranking_) == X.shape[1]
+
+    clf_svc = SVC(kernel="linear")
+    rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
+    rfe_svc.fit(X, y)
+
+    # Check if the supports are equal
+    assert_array_equal(rfe.get_support(), rfe_svc.get_support())
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_rfe(csr_container):
+    # Dense and sparse inputs must produce identical selections; the 4 kept
+    # features should be exactly the original iris columns.
+    generator = check_random_state(0)
+    iris = load_iris()
+    # Add some irrelevant features. Random seed is set to make sure that
+    # irrelevant features are always irrelevant.
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    X_sparse = csr_container(X)
+    y = iris.target
+
+    # dense model
+    clf = SVC(kernel="linear")
+    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
+    rfe.fit(X, y)
+    X_r = rfe.transform(X)
+    clf.fit(X_r, y)
+    assert len(rfe.ranking_) == X.shape[1]
+
+    # sparse model
+    clf_sparse = SVC(kernel="linear")
+    rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
+    rfe_sparse.fit(X_sparse, y)
+    X_r_sparse = rfe_sparse.transform(X_sparse)
+
+    assert X_r.shape == iris.data.shape
+    assert_array_almost_equal(X_r[:10], iris.data[:10])
+
+    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
+    assert rfe.score(X, y) == clf.score(iris.data, iris.target)
+    assert_array_almost_equal(X_r, X_r_sparse.toarray())
+
+
+def test_RFE_fit_score_params():
+    # Make sure RFE passes the metadata down to fit and score methods of the
+    # underlying estimator
+    class TestEstimator(BaseEstimator, ClassifierMixin):
+        # Raises unless the `prop` metadata is forwarded by RFE.
+        def fit(self, X, y, prop=None):
+            if prop is None:
+                raise ValueError("fit: prop cannot be None")
+            self.svc_ = SVC(kernel="linear").fit(X, y)
+            self.coef_ = self.svc_.coef_
+            return self
+
+        def score(self, X, y, prop=None):
+            if prop is None:
+                raise ValueError("score: prop cannot be None")
+            return self.svc_.score(X, y)
+
+    X, y = load_iris(return_X_y=True)
+    # Missing metadata must surface as the estimator's own error.
+    with pytest.raises(ValueError, match="fit: prop cannot be None"):
+        RFE(estimator=TestEstimator()).fit(X, y)
+    with pytest.raises(ValueError, match="score: prop cannot be None"):
+        RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y)
+
+    # Providing the metadata to both fit and score must succeed.
+    RFE(estimator=TestEstimator()).fit(X, y, prop="foo").score(X, y, prop="foo")
+
+
+def test_rfe_percent_n_features():
+    # test that the results are the same
+    # `n_features_to_select` given as a fraction (0.4 of 10) must behave like
+    # the equivalent absolute count (4).
+    generator = check_random_state(0)
+    iris = load_iris()
+    # Add some irrelevant features. Random seed is set to make sure that
+    # irrelevant features are always irrelevant.
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = iris.target
+    # there are 10 features in the data. We select 40%.
+    clf = SVC(kernel="linear")
+    rfe_num = RFE(estimator=clf, n_features_to_select=4, step=0.1)
+    rfe_num.fit(X, y)
+
+    rfe_perc = RFE(estimator=clf, n_features_to_select=0.4, step=0.1)
+    rfe_perc.fit(X, y)
+
+    assert_array_equal(rfe_perc.ranking_, rfe_num.ranking_)
+    assert_array_equal(rfe_perc.support_, rfe_num.support_)
+
+
+def test_rfe_mockclassifier():
+    # RFE must run end-to-end with a minimal duck-typed classifier.
+    generator = check_random_state(0)
+    iris = load_iris()
+    # Add some irrelevant features. Random seed is set to make sure that
+    # irrelevant features are always irrelevant.
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = iris.target
+
+    # dense model
+    clf = MockClassifier()
+    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
+    rfe.fit(X, y)
+    X_r = rfe.transform(X)
+    clf.fit(X_r, y)
+    assert len(rfe.ranking_) == X.shape[1]
+    assert X_r.shape == iris.data.shape
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_rfecv(csr_container):
+    # End-to-end RFECV checks: dense/sparse parity, custom loss, scorer
+    # objects, tie-breaking towards fewer features, and step > 1 / step < 1.
+    generator = check_random_state(0)
+    iris = load_iris()
+    # Add some irrelevant features. Random seed is set to make sure that
+    # irrelevant features are always irrelevant.
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = list(iris.target)  # regression test: list should be supported
+
+    # Test using the score function
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1)
+    rfecv.fit(X, y)
+    # non-regression test for missing worst feature:
+
+    # With step=1 there is one cv_results_ entry per candidate subset size.
+    for key in rfecv.cv_results_.keys():
+        assert len(rfecv.cv_results_[key]) == X.shape[1]
+
+    assert len(rfecv.ranking_) == X.shape[1]
+    X_r = rfecv.transform(X)
+
+    # All the noisy variable were filtered out
+    assert_array_equal(X_r, iris.data)
+
+    # same in sparse
+    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1)
+    X_sparse = csr_container(X)
+    rfecv_sparse.fit(X_sparse, y)
+    X_r_sparse = rfecv_sparse.transform(X_sparse)
+    assert_array_equal(X_r_sparse.toarray(), iris.data)
+
+    # Test using a customized loss function
+    scoring = make_scorer(zero_one_loss, greater_is_better=False)
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring)
+    ignore_warnings(rfecv.fit)(X, y)
+    X_r = rfecv.transform(X)
+    assert_array_equal(X_r, iris.data)
+
+    # Test using a scorer
+    scorer = get_scorer("accuracy")
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer)
+    rfecv.fit(X, y)
+    X_r = rfecv.transform(X)
+    assert_array_equal(X_r, iris.data)
+
+    # Test fix on cv_results_
+    def test_scorer(estimator, X, y):
+        # Constant score forces a tie across all subset sizes.
+        return 1.0
+
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer)
+    rfecv.fit(X, y)
+
+    # In the event of cross validation score ties, the expected behavior of
+    # RFECV is to return the FEWEST features that maximize the CV score.
+    # Because test_scorer always returns 1.0 in this example, RFECV should
+    # reduce the dimensionality to a single feature (i.e. n_features_ = 1)
+    assert rfecv.n_features_ == 1
+
+    # Same as the first two tests, but with step=2
+    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2)
+    rfecv.fit(X, y)
+
+    # step=2 over 10 features yields 6 candidate subset sizes.
+    for key in rfecv.cv_results_.keys():
+        assert len(rfecv.cv_results_[key]) == 6
+
+    assert len(rfecv.ranking_) == X.shape[1]
+    X_r = rfecv.transform(X)
+    assert_array_equal(X_r, iris.data)
+
+    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2)
+    X_sparse = csr_container(X)
+    rfecv_sparse.fit(X_sparse, y)
+    X_r_sparse = rfecv_sparse.transform(X_sparse)
+    assert_array_equal(X_r_sparse.toarray(), iris.data)
+
+    # Verifying that steps < 1 don't blow up.
+    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=0.2)
+    X_sparse = csr_container(X)
+    rfecv_sparse.fit(X_sparse, y)
+    X_r_sparse = rfecv_sparse.transform(X_sparse)
+    assert_array_equal(X_r_sparse.toarray(), iris.data)
+
+
+def test_rfecv_mockclassifier():
+    # RFECV must run end-to-end with a minimal duck-typed classifier.
+    generator = check_random_state(0)
+    iris = load_iris()
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = list(iris.target)  # regression test: list should be supported
+
+    # Test using the score function
+    rfecv = RFECV(estimator=MockClassifier(), step=1)
+    rfecv.fit(X, y)
+    # non-regression test for missing worst feature:
+
+    for key in rfecv.cv_results_.keys():
+        assert len(rfecv.cv_results_[key]) == X.shape[1]
+
+    assert len(rfecv.ranking_) == X.shape[1]
+
+
+def test_rfecv_verbose_output():
+ # Check verbose=1 is producing an output.
+ import sys
+ from io import StringIO
+
+ sys.stdout = StringIO()
+
+ generator = check_random_state(0)
+ iris = load_iris()
+ X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+ y = list(iris.target)
+
+ rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1)
+ rfecv.fit(X, y)
+
+ verbose_output = sys.stdout
+ verbose_output.seek(0)
+ assert len(verbose_output.readline()) > 0
+
+
+def test_rfecv_cv_results_size(global_random_seed):
+    # The number of cv_results_ entries must match the number of candidate
+    # subset sizes implied by `step` and `min_features_to_select`.
+    generator = check_random_state(global_random_seed)
+    iris = load_iris()
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = list(iris.target)  # regression test: list should be supported
+
+    # Non-regression test for varying combinations of step and
+    # min_features_to_select.
+    for step, min_features_to_select in [[2, 1], [2, 2], [3, 3]]:
+        rfecv = RFECV(
+            estimator=MockClassifier(),
+            step=step,
+            min_features_to_select=min_features_to_select,
+        )
+        rfecv.fit(X, y)
+
+        # One entry per elimination round, plus one for the full feature set.
+        score_len = np.ceil((X.shape[1] - min_features_to_select) / step) + 1
+
+        for key in rfecv.cv_results_.keys():
+            assert len(rfecv.cv_results_[key]) == score_len
+
+        assert len(rfecv.ranking_) == X.shape[1]
+        assert rfecv.n_features_ >= min_features_to_select
+
+
+def test_rfe_estimator_tags():
+    # RFE wrapping a classifier must itself be recognized as a classifier,
+    # which in turn makes cross_val_score use stratified CV.
+    rfe = RFE(SVC(kernel="linear"))
+    assert is_classifier(rfe)
+    # make sure that cross-validation is stratified
+    iris = load_iris()
+    score = cross_val_score(rfe, iris.data, iris.target)
+    assert score.min() > 0.7
+
+
+def test_rfe_min_step(global_random_seed):
+    # A fractional `step` must always eliminate at least one feature per
+    # round, even when floor(step * n_features) == 0.
+    n_features = 10
+    X, y = make_friedman1(
+        n_samples=50, n_features=n_features, random_state=global_random_seed
+    )
+    n_samples, n_features = X.shape
+    estimator = SVR(kernel="linear")
+
+    # Test when floor(step * n_features) <= 0
+    selector = RFE(estimator, step=0.01)
+    sel = selector.fit(X, y)
+    assert sel.support_.sum() == n_features // 2
+
+    # Test when step is between (0,1) and floor(step * n_features) > 0
+    selector = RFE(estimator, step=0.20)
+    sel = selector.fit(X, y)
+    assert sel.support_.sum() == n_features // 2
+
+    # Test when step is an integer
+    selector = RFE(estimator, step=5)
+    sel = selector.fit(X, y)
+    assert sel.support_.sum() == n_features // 2
+
+
+def test_number_of_subsets_of_features(global_random_seed):
+    # In RFE, 'number_of_subsets_of_features'
+    # = the number of iterations in '_fit'
+    # = max(ranking_)
+    # = 1 + (n_features + step - n_features_to_select - 1) // step
+    # After optimization #4534, this number
+    # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
+    # This test case is to test their equivalence, refer to #4534 and #3824
+
+    def formula1(n_features, n_features_to_select, step):
+        # Pre-#4534 integer-arithmetic formulation.
+        return 1 + ((n_features + step - n_features_to_select - 1) // step)
+
+    def formula2(n_features, n_features_to_select, step):
+        # Post-#4534 ceiling formulation; must agree with formula1.
+        return 1 + np.ceil((n_features - n_features_to_select) / float(step))
+
+    # RFE
+    # Case 1, n_features - n_features_to_select is divisible by step
+    # Case 2, n_features - n_features_to_select is not divisible by step
+    n_features_list = [11, 11]
+    n_features_to_select_list = [3, 3]
+    step_list = [2, 3]
+    for n_features, n_features_to_select, step in zip(
+        n_features_list, n_features_to_select_list, step_list
+    ):
+        generator = check_random_state(global_random_seed)
+        X = generator.normal(size=(100, n_features))
+        y = generator.rand(100).round()
+        rfe = RFE(
+            estimator=SVC(kernel="linear"),
+            n_features_to_select=n_features_to_select,
+            step=step,
+        )
+        rfe.fit(X, y)
+        # this number also equals to the maximum of ranking_
+        assert np.max(rfe.ranking_) == formula1(n_features, n_features_to_select, step)
+        assert np.max(rfe.ranking_) == formula2(n_features, n_features_to_select, step)
+
+    # In RFECV, 'fit' calls 'RFE._fit'
+    # 'number_of_subsets_of_features' of RFE
+    # = the size of each score in 'cv_results_' of RFECV
+    # = the number of iterations of the for loop before optimization #4534
+
+    # RFECV, n_features_to_select = 1
+    # Case 1, n_features - 1 is divisible by step
+    # Case 2, n_features - 1 is not divisible by step
+
+    n_features_to_select = 1
+    n_features_list = [11, 10]
+    step_list = [2, 2]
+    for n_features, step in zip(n_features_list, step_list):
+        generator = check_random_state(global_random_seed)
+        X = generator.normal(size=(100, n_features))
+        y = generator.rand(100).round()
+        rfecv = RFECV(estimator=SVC(kernel="linear"), step=step)
+        rfecv.fit(X, y)
+
+        for key in rfecv.cv_results_.keys():
+            assert len(rfecv.cv_results_[key]) == formula1(
+                n_features, n_features_to_select, step
+            )
+            assert len(rfecv.cv_results_[key]) == formula2(
+                n_features, n_features_to_select, step
+            )
+
+
+def test_rfe_cv_n_jobs(global_random_seed):
+    # Parallel (n_jobs=2) RFECV must reproduce the sequential results.
+    generator = check_random_state(global_random_seed)
+    iris = load_iris()
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = iris.target
+
+    rfecv = RFECV(estimator=SVC(kernel="linear"))
+    rfecv.fit(X, y)
+    rfecv_ranking = rfecv.ranking_
+
+    rfecv_cv_results_ = rfecv.cv_results_
+
+    rfecv.set_params(n_jobs=2)
+    rfecv.fit(X, y)
+    assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
+
+    assert rfecv_cv_results_.keys() == rfecv.cv_results_.keys()
+    for key in rfecv_cv_results_.keys():
+        assert rfecv_cv_results_[key] == pytest.approx(rfecv.cv_results_[key])
+
+
+def test_rfe_cv_groups():
+    # RFECV must forward `groups` to a group-aware CV splitter.
+    generator = check_random_state(0)
+    iris = load_iris()
+    number_groups = 4
+    groups = np.floor(np.linspace(0, number_groups, len(iris.target)))
+    X = iris.data
+    y = (iris.target > 0).astype(int)
+
+    est_groups = RFECV(
+        estimator=RandomForestClassifier(random_state=generator),
+        step=1,
+        scoring="accuracy",
+        cv=GroupKFold(n_splits=2),
+    )
+    est_groups.fit(X, y, groups=groups)
+    assert est_groups.n_features_ > 0
+
+
+@pytest.mark.parametrize(
+    "importance_getter", [attrgetter("regressor_.coef_"), "regressor_.coef_"]
+)
+@pytest.mark.parametrize("selector, expected_n_features", [(RFE, 5), (RFECV, 4)])
+def test_rfe_wrapped_estimator(importance_getter, selector, expected_n_features):
+    # Non-regression test for
+    # https://github.com/scikit-learn/scikit-learn/issues/15312
+    # `importance_getter` (string path or attrgetter) reaches the coef_ of
+    # the regressor nested inside TransformedTargetRegressor.
+    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
+    estimator = LinearSVR(random_state=0)
+
+    log_estimator = TransformedTargetRegressor(
+        regressor=estimator, func=np.log, inverse_func=np.exp
+    )
+
+    selector = selector(log_estimator, importance_getter=importance_getter)
+    sel = selector.fit(X, y)
+    assert sel.support_.sum() == expected_n_features
+
+
+@pytest.mark.parametrize(
+    "importance_getter, err_type",
+    [
+        ("auto", ValueError),
+        ("random", AttributeError),
+        (lambda x: x.importance, AttributeError),
+    ],
+)
+@pytest.mark.parametrize("Selector", [RFE, RFECV])
+def test_rfe_importance_getter_validation(importance_getter, err_type, Selector):
+    # Invalid importance getters ("auto" on a wrapped estimator, a missing
+    # attribute path, or a callable hitting a missing attribute) must raise.
+    X, y = make_friedman1(n_samples=50, n_features=10, random_state=42)
+    estimator = LinearSVR()
+    log_estimator = TransformedTargetRegressor(
+        regressor=estimator, func=np.log, inverse_func=np.exp
+    )
+
+    with pytest.raises(err_type):
+        model = Selector(log_estimator, importance_getter=importance_getter)
+        model.fit(X, y)
+
+
+@pytest.mark.parametrize("cv", [None, 5])
+def test_rfe_allow_nan_inf_in_x(cv):
+    # RFE/RFECV must not reject NaN/inf inputs when the wrapped estimator
+    # (MockClassifier) advertises NaN support via its tags.
+    iris = load_iris()
+    X = iris.data
+    y = iris.target
+
+    # add nan and inf value to X
+    X[0][0] = np.nan
+    X[0][1] = np.inf
+
+    clf = MockClassifier()
+    if cv is not None:
+        rfe = RFECV(estimator=clf, cv=cv)
+    else:
+        rfe = RFE(estimator=clf)
+    rfe.fit(X, y)
+    rfe.transform(X)
+
+
+def test_w_pipeline_2d_coef_():
+    # RFE can select features from a pipeline by reaching the final step's
+    # 2D `coef_` through a dotted importance_getter path.
+    pipeline = make_pipeline(StandardScaler(), LogisticRegression())
+
+    data, y = load_iris(return_X_y=True)
+    sfm = RFE(
+        pipeline,
+        n_features_to_select=2,
+        importance_getter="named_steps.logisticregression.coef_",
+    )
+
+    sfm.fit(data, y)
+    assert sfm.transform(data).shape[1] == 2
+
+
+def test_rfecv_std_and_mean(global_random_seed):
+    # mean_test_score/std_test_score must equal the statistics recomputed
+    # from the per-split columns of cv_results_.
+    generator = check_random_state(global_random_seed)
+    iris = load_iris()
+    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
+    y = iris.target
+
+    rfecv = RFECV(estimator=SVC(kernel="linear"))
+    rfecv.fit(X, y)
+    split_keys = [key for key in rfecv.cv_results_.keys() if "split" in key]
+    cv_scores = np.asarray([rfecv.cv_results_[key] for key in split_keys])
+    expected_mean = np.mean(cv_scores, axis=0)
+    expected_std = np.std(cv_scores, axis=0)
+
+    assert_allclose(rfecv.cv_results_["mean_test_score"], expected_mean)
+    assert_allclose(rfecv.cv_results_["std_test_score"], expected_std)
+
+
+@pytest.mark.parametrize(
+    ["min_features_to_select", "n_features", "step", "cv_results_n_features"],
+    [
+        [1, 4, 1, np.array([1, 2, 3, 4])],
+        [1, 5, 1, np.array([1, 2, 3, 4, 5])],
+        [1, 4, 2, np.array([1, 2, 4])],
+        [1, 5, 2, np.array([1, 3, 5])],
+        [1, 4, 3, np.array([1, 4])],
+        [1, 5, 3, np.array([1, 2, 5])],
+        [1, 4, 4, np.array([1, 4])],
+        [1, 5, 4, np.array([1, 5])],
+        [4, 4, 2, np.array([4])],
+        [4, 5, 1, np.array([4, 5])],
+        [4, 5, 2, np.array([4, 5])],
+    ],
+)
+def test_rfecv_cv_results_n_features(
+    min_features_to_select,
+    n_features,
+    step,
+    cv_results_n_features,
+):
+    # cv_results_["n_features"] must list the exact candidate subset sizes
+    # implied by step/min_features_to_select, and every cv_results_ column
+    # must have that same length.
+    X, y = make_classification(
+        n_samples=20, n_features=n_features, n_informative=n_features, n_redundant=0
+    )
+    rfecv = RFECV(
+        estimator=SVC(kernel="linear"),
+        step=step,
+        min_features_to_select=min_features_to_select,
+    )
+    rfecv.fit(X, y)
+    assert_array_equal(rfecv.cv_results_["n_features"], cv_results_n_features)
+    assert all(
+        len(value) == len(rfecv.cv_results_["n_features"])
+        for value in rfecv.cv_results_.values()
+    )
+
+
+@pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
+def test_multioutput(ClsRFE):
+ X = np.random.normal(size=(10, 3))
+ y = np.random.randint(2, size=(10, 2))
+ clf = RandomForestClassifier(n_estimators=5)
+ rfe_test = ClsRFE(clf)
+ rfe_test.fit(X, y)
+
+
+@pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
+def test_pipeline_with_nans(ClsRFE):
+    """Check that RFE works with pipeline that accept nans.
+
+    Non-regression test for gh-21743.
+    """
+    X, y = load_iris(return_X_y=True)
+    X[0, 0] = np.nan
+
+    # The imputer in the pipeline handles the NaN before the classifier.
+    pipe = make_pipeline(
+        SimpleImputer(),
+        StandardScaler(),
+        LogisticRegression(),
+    )
+
+    fs = ClsRFE(
+        estimator=pipe,
+        importance_getter="named_steps.logisticregression.coef_",
+    )
+    fs.fit(X, y)
+
+
+@pytest.mark.parametrize("ClsRFE", [RFE, RFECV])
+@pytest.mark.parametrize("PLSEstimator", [CCA, PLSCanonical, PLSRegression])
+def test_rfe_pls(ClsRFE, PLSEstimator):
+    """Check the behaviour of RFE with PLS estimators.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/12410
+    """
+    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
+    estimator = PLSEstimator(n_components=1)
+    # Fitting and scoring must work despite PLS's 2D coef_ layout.
+    selector = ClsRFE(estimator, step=1).fit(X, y)
+    assert selector.score(X, y) > 0.5
+
+
+def test_rfe_estimator_attribute_error():
+    """Check that we raise the proper AttributeError when the estimator
+    does not implement the `decision_function` method, which is decorated with
+    `available_if`.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/28108
+    """
+    iris = load_iris()
+
+    # `LinearRegression` does not implement 'decision_function' and should raise an
+    # AttributeError
+    rfe = RFE(estimator=LinearRegression())
+
+    outer_msg = "This 'RFE' has no attribute 'decision_function'"
+    inner_msg = "'LinearRegression' object has no attribute 'decision_function'"
+    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
+        rfe.fit(iris.data, iris.target).decision_function(iris.data)
+    # The estimator-level AttributeError must be chained as the cause.
+    assert isinstance(exec_info.value.__cause__, AttributeError)
+    assert inner_msg in str(exec_info.value.__cause__)
+
+
+@pytest.mark.parametrize(
+    "ClsRFE, param", [(RFE, "n_features_to_select"), (RFECV, "min_features_to_select")]
+)
+def test_rfe_n_features_to_select_warning(ClsRFE, param):
+    """Check if the correct warning is raised when trying to initialize a RFE
+    object with a n_features_to_select attribute larger than the number of
+    features present in the X variable that is passed to the fit method
+    """
+    X, y = make_classification(n_features=20, random_state=0)
+
+    with pytest.warns(UserWarning, match=f"{param}=21 > n_features=20"):
+        # Create RFE/RFECV with n_features_to_select/min_features_to_select
+        # larger than the number of features present in the X variable
+        clsrfe = ClsRFE(estimator=LogisticRegression(), **{param: 21})
+        clsrfe.fit(X, y)
+
+
+def test_rfe_with_sample_weight():
+    """Test that `RFE` works correctly with sample weights."""
+    X, y = make_classification(random_state=0)
+    n_samples = X.shape[0]
+
+    # Assign the first half of the samples with twice the weight
+    sample_weight = np.ones_like(y)
+    sample_weight[: n_samples // 2] = 2
+
+    # Duplicate the first half of the data samples to replicate the effect
+    # of sample weights for comparison
+    X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
+    y2 = np.concatenate([y, y[: n_samples // 2]])
+
+    estimator = SVC(kernel="linear")
+
+    # Weight 2 on a sample must be equivalent to duplicating that sample.
+    rfe_sw = RFE(estimator=estimator, step=0.1)
+    rfe_sw.fit(X, y, sample_weight=sample_weight)
+
+    rfe = RFE(estimator=estimator, step=0.1)
+    rfe.fit(X2, y2)
+
+    assert_array_equal(rfe_sw.ranking_, rfe.ranking_)
+
+    # Also verify that when sample weights are not doubled the results
+    # are different from the duplicated data
+    rfe_sw_2 = RFE(estimator=estimator, step=0.1)
+    sample_weight_2 = np.ones_like(y)
+    rfe_sw_2.fit(X, y, sample_weight=sample_weight_2)
+
+    assert not np.array_equal(rfe_sw_2.ranking_, rfe.ranking_)
+
+
+def test_rfe_with_joblib_threading_backend(global_random_seed):
+    # RFECV with n_jobs must yield identical rankings under the joblib
+    # threading backend as under the default backend.
+    X, y = make_classification(random_state=global_random_seed)
+
+    clf = LogisticRegression()
+    rfe = RFECV(
+        estimator=clf,
+        n_jobs=2,
+    )
+
+    rfe.fit(X, y)
+    ranking_ref = rfe.ranking_
+
+    with parallel_backend("threading"):
+        rfe.fit(X, y)
+
+    assert_array_equal(ranking_ref, rfe.ranking_)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py
new file mode 100644
index 0000000000000000000000000000000000000000..b98d5b400b84eaa68440c0dbc3891b99372444a2
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_sequential.py
@@ -0,0 +1,332 @@
+import numpy as np
+import pytest
+from numpy.testing import assert_array_equal
+
+from sklearn.cluster import KMeans
+from sklearn.datasets import make_blobs, make_classification, make_regression
+from sklearn.ensemble import HistGradientBoostingRegressor
+from sklearn.feature_selection import SequentialFeatureSelector
+from sklearn.linear_model import LinearRegression
+from sklearn.model_selection import LeaveOneGroupOut, cross_val_score
+from sklearn.neighbors import KNeighborsClassifier
+from sklearn.pipeline import make_pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.utils.fixes import CSR_CONTAINERS
+
+
+def test_bad_n_features_to_select():
+ n_features = 5
+ X, y = make_regression(n_features=n_features)
+ sfs = SequentialFeatureSelector(LinearRegression(), n_features_to_select=n_features)
+ with pytest.raises(ValueError, match="n_features_to_select must be < n_features"):
+ sfs.fit(X, y)
+
+
+@pytest.mark.parametrize("direction", ("forward", "backward"))
+@pytest.mark.parametrize("n_features_to_select", (1, 5, 9, "auto"))
+def test_n_features_to_select(direction, n_features_to_select):
+ # Make sure n_features_to_select is respected
+
+ n_features = 10
+ X, y = make_regression(n_features=n_features, random_state=0)
+ sfs = SequentialFeatureSelector(
+ LinearRegression(),
+ n_features_to_select=n_features_to_select,
+ direction=direction,
+ cv=2,
+ )
+ sfs.fit(X, y)
+
+ if n_features_to_select == "auto":
+ n_features_to_select = n_features // 2
+
+ assert sfs.get_support(indices=True).shape[0] == n_features_to_select
+ assert sfs.n_features_to_select_ == n_features_to_select
+ assert sfs.transform(X).shape[1] == n_features_to_select
+
+
+@pytest.mark.parametrize("direction", ("forward", "backward"))
+def test_n_features_to_select_auto(direction):
+ """Check the behaviour of `n_features_to_select="auto"` with different
+ values for the parameter `tol`.
+ """
+
+ n_features = 10
+ tol = 1e-3
+ X, y = make_regression(n_features=n_features, random_state=0)
+ sfs = SequentialFeatureSelector(
+ LinearRegression(),
+ n_features_to_select="auto",
+ tol=tol,
+ direction=direction,
+ cv=2,
+ )
+ sfs.fit(X, y)
+
+ max_features_to_select = n_features - 1
+
+ assert sfs.get_support(indices=True).shape[0] <= max_features_to_select
+ assert sfs.n_features_to_select_ <= max_features_to_select
+ assert sfs.transform(X).shape[1] <= max_features_to_select
+ assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_
+
+
+@pytest.mark.parametrize("direction", ("forward", "backward"))
+def test_n_features_to_select_stopping_criterion(direction):
+ """Check the behaviour stopping criterion for feature selection
+ depending on the values of `n_features_to_select` and `tol`.
+
+ When `direction` is `'forward'`, select a new features at random
+ among those not currently selected in selector.support_,
+ build a new version of the data that includes all the features
+ in selector.support_ + this newly selected feature.
+ And check that the cross-validation score of the model trained on
+ this new dataset variant is lower than the model with
+ the selected forward selected features or at least does not improve
+ by more than the tol margin.
+
+ When `direction` is `'backward'`, instead of adding a new feature
+ to selector.support_, try to remove one of those selected features at random
+ And check that the cross-validation score is either decreasing or
+ not improving by more than the tol margin.
+ """
+
+ X, y = make_regression(n_features=50, n_informative=10, random_state=0)
+
+ tol = 1e-3
+
+ sfs = SequentialFeatureSelector(
+ LinearRegression(),
+ n_features_to_select="auto",
+ tol=tol,
+ direction=direction,
+ cv=2,
+ )
+ sfs.fit(X, y)
+ selected_X = sfs.transform(X)
+
+ rng = np.random.RandomState(0)
+
+ added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True)))
+ added_X = np.hstack(
+ [
+ selected_X,
+ (X[:, rng.choice(added_candidates)])[:, np.newaxis],
+ ]
+ )
+
+ removed_candidate = rng.choice(list(range(sfs.n_features_to_select_)))
+ removed_X = np.delete(selected_X, removed_candidate, axis=1)
+
+ plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean()
+ sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean()
+ added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean()
+ removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean()
+
+ assert sfs_cv_score >= plain_cv_score
+
+ if direction == "forward":
+ assert (sfs_cv_score - added_cv_score) <= tol
+ assert (sfs_cv_score - removed_cv_score) >= tol
+ else:
+ assert (added_cv_score - sfs_cv_score) <= tol
+ assert (removed_cv_score - sfs_cv_score) <= tol
+
+
+@pytest.mark.parametrize("direction", ("forward", "backward"))
+@pytest.mark.parametrize(
+ "n_features_to_select, expected",
+ (
+ (0.1, 1),
+ (1.0, 10),
+ (0.5, 5),
+ ),
+)
+def test_n_features_to_select_float(direction, n_features_to_select, expected):
+ # Test passing a float as n_features_to_select
+ X, y = make_regression(n_features=10)
+ sfs = SequentialFeatureSelector(
+ LinearRegression(),
+ n_features_to_select=n_features_to_select,
+ direction=direction,
+ cv=2,
+ )
+ sfs.fit(X, y)
+ assert sfs.n_features_to_select_ == expected
+
+
+@pytest.mark.parametrize("seed", range(10))
+@pytest.mark.parametrize("direction", ("forward", "backward"))
+@pytest.mark.parametrize(
+ "n_features_to_select, expected_selected_features",
+ [
+ (2, [0, 2]), # f1 is dropped since it has no predictive power
+ (1, [2]), # f2 is more predictive than f0 so it's kept
+ ],
+)
+def test_sanity(seed, direction, n_features_to_select, expected_selected_features):
+ # Basic sanity check: 3 features, only f0 and f2 are correlated with the
+ # target, f2 having a stronger correlation than f0. We expect f1 to be
+ # dropped, and f2 to always be selected.
+
+ rng = np.random.RandomState(seed)
+ n_samples = 100
+ X = rng.randn(n_samples, 3)
+ y = 3 * X[:, 0] - 10 * X[:, 2]
+
+ sfs = SequentialFeatureSelector(
+ LinearRegression(),
+ n_features_to_select=n_features_to_select,
+ direction=direction,
+ cv=2,
+ )
+ sfs.fit(X, y)
+ assert_array_equal(sfs.get_support(indices=True), expected_selected_features)
+
+
+@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
+def test_sparse_support(csr_container):
+ # Make sure sparse data is supported
+
+ X, y = make_regression(n_features=10)
+ X = csr_container(X)
+ sfs = SequentialFeatureSelector(
+ LinearRegression(), n_features_to_select="auto", cv=2
+ )
+ sfs.fit(X, y)
+ sfs.transform(X)
+
+
+def test_nan_support():
+ # Make sure nans are OK if the underlying estimator supports nans
+
+ rng = np.random.RandomState(0)
+ n_samples, n_features = 40, 4
+ X, y = make_regression(n_samples, n_features, random_state=0)
+ nan_mask = rng.randint(0, 2, size=(n_samples, n_features), dtype=bool)
+ X[nan_mask] = np.nan
+ sfs = SequentialFeatureSelector(
+ HistGradientBoostingRegressor(), n_features_to_select="auto", cv=2
+ )
+ sfs.fit(X, y)
+ sfs.transform(X)
+
+ with pytest.raises(ValueError, match="Input X contains NaN"):
+ # LinearRegression does not support nans
+ SequentialFeatureSelector(
+ LinearRegression(), n_features_to_select="auto", cv=2
+ ).fit(X, y)
+
+
+def test_pipeline_support():
+ # Make sure that pipelines can be passed into SFS and that SFS can be
+ # passed into a pipeline
+
+ n_samples, n_features = 50, 3
+ X, y = make_regression(n_samples, n_features, random_state=0)
+
+ # pipeline in SFS
+ pipe = make_pipeline(StandardScaler(), LinearRegression())
+ sfs = SequentialFeatureSelector(pipe, n_features_to_select="auto", cv=2)
+ sfs.fit(X, y)
+ sfs.transform(X)
+
+ # SFS in pipeline
+ sfs = SequentialFeatureSelector(
+ LinearRegression(), n_features_to_select="auto", cv=2
+ )
+ pipe = make_pipeline(StandardScaler(), sfs)
+ pipe.fit(X, y)
+ pipe.transform(X)
+
+
+@pytest.mark.parametrize("n_features_to_select", (2, 3))
+def test_unsupervised_model_fit(n_features_to_select):
+ # Make sure that models without classification labels are not being
+ # validated
+
+ X, y = make_blobs(n_features=4)
+ sfs = SequentialFeatureSelector(
+ KMeans(n_init=1),
+ n_features_to_select=n_features_to_select,
+ )
+ sfs.fit(X)
+ assert sfs.transform(X).shape[1] == n_features_to_select
+
+
+@pytest.mark.parametrize("y", ("no_validation", 1j, 99.9, np.nan, 3))
+def test_no_y_validation_model_fit(y):
+ # Make sure that other non-conventional y labels are not accepted
+
+ X, clusters = make_blobs(n_features=6)
+ sfs = SequentialFeatureSelector(
+ KMeans(),
+ n_features_to_select=3,
+ )
+
+ with pytest.raises((TypeError, ValueError)):
+ sfs.fit(X, y)
+
+
+def test_forward_neg_tol_error():
+ """Check that we raise an error when tol<0 and direction='forward'"""
+ X, y = make_regression(n_features=10, random_state=0)
+ sfs = SequentialFeatureSelector(
+ LinearRegression(),
+ n_features_to_select="auto",
+ direction="forward",
+ tol=-1e-3,
+ )
+
+ with pytest.raises(ValueError, match="tol must be strictly positive"):
+ sfs.fit(X, y)
+
+
+def test_backward_neg_tol():
+ """Check that SequentialFeatureSelector works negative tol
+
+ non-regression test for #25525
+ """
+ X, y = make_regression(n_features=10, random_state=0)
+ lr = LinearRegression()
+ initial_score = lr.fit(X, y).score(X, y)
+
+ sfs = SequentialFeatureSelector(
+ lr,
+ n_features_to_select="auto",
+ direction="backward",
+ tol=-1e-3,
+ )
+ Xr = sfs.fit_transform(X, y)
+ new_score = lr.fit(Xr, y).score(Xr, y)
+
+ assert 0 < sfs.get_support().sum() < X.shape[1]
+ assert new_score < initial_score
+
+
+def test_cv_generator_support():
+ """Check that no exception raised when cv is generator
+
+ non-regression test for #25957
+ """
+ X, y = make_classification(random_state=0)
+
+ groups = np.zeros_like(y, dtype=int)
+ groups[y.size // 2 :] = 1
+
+ cv = LeaveOneGroupOut()
+ splits = cv.split(X, y, groups=groups)
+
+ knc = KNeighborsClassifier(n_neighbors=5)
+
+ sfs = SequentialFeatureSelector(knc, n_features_to_select=5, cv=splits)
+ sfs.fit(X, y)
+
+
+def test_fit_rejects_params_with_no_routing_enabled():
+ X, y = make_classification(random_state=42)
+ est = LinearRegression()
+ sfs = SequentialFeatureSelector(estimator=est)
+
+ with pytest.raises(ValueError, match="is only supported if"):
+ sfs.fit(X, y, sample_weight=np.ones_like(y))
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py
new file mode 100644
index 0000000000000000000000000000000000000000..45e66cb338a4b7a5a410db669a13f6f9213451dc
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/feature_selection/tests/test_variance_threshold.py
@@ -0,0 +1,72 @@
+import numpy as np
+import pytest
+
+from sklearn.feature_selection import VarianceThreshold
+from sklearn.utils._testing import assert_array_equal
+from sklearn.utils.fixes import BSR_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
+
+data = [[0, 1, 2, 3, 4], [0, 2, 2, 3, 5], [1, 1, 2, 4, 0]]
+
+data2 = [[-0.13725701]] * 10
+
+
+@pytest.mark.parametrize(
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
+)
+def test_zero_variance(sparse_container):
+ # Test VarianceThreshold with default setting, zero variance.
+ X = data if sparse_container is None else sparse_container(data)
+ sel = VarianceThreshold().fit(X)
+ assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True))
+
+
+def test_zero_variance_value_error():
+ # Test VarianceThreshold with default setting, zero variance, error cases.
+ with pytest.raises(ValueError):
+ VarianceThreshold().fit([[0, 1, 2, 3]])
+ with pytest.raises(ValueError):
+ VarianceThreshold().fit([[0, 1], [0, 1]])
+
+
+@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
+def test_variance_threshold(sparse_container):
+ # Test VarianceThreshold with custom variance.
+ X = data if sparse_container is None else sparse_container(data)
+ X = VarianceThreshold(threshold=0.4).fit_transform(X)
+ assert (len(data), 1) == X.shape
+
+
+@pytest.mark.skipif(
+ np.var(data2) == 0,
+ reason=(
+ "This test is not valid for this platform, "
+ "as it relies on numerical instabilities."
+ ),
+)
+@pytest.mark.parametrize(
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
+)
+def test_zero_variance_floating_point_error(sparse_container):
+ # Test that VarianceThreshold(0.0).fit eliminates features that have
+ # the same value in every sample, even when floating point errors
+ # cause np.var not to be 0 for the feature.
+ # See #13691
+ X = data2 if sparse_container is None else sparse_container(data2)
+ msg = "No feature in X meets the variance threshold 0.00000"
+ with pytest.raises(ValueError, match=msg):
+ VarianceThreshold().fit(X)
+
+
+@pytest.mark.parametrize(
+ "sparse_container", [None] + BSR_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
+)
+def test_variance_nan(sparse_container):
+ arr = np.array(data, dtype=np.float64)
+ # add single NaN and feature should still be included
+ arr[0, 0] = np.nan
+ # make all values in feature NaN and feature should be rejected
+ arr[:, 1] = np.nan
+
+ X = arr if sparse_container is None else sparse_container(arr)
+ sel = VarianceThreshold().fit(X)
+ assert_array_equal([0, 3, 4], sel.get_support(indices=True))
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..062fe7938ed09785d0f8340e4d042603c18ba483
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_approximation.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..98503652df6f04ba39f3119387be23ab3d267867
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/metadata_routing_common.py
@@ -0,0 +1,544 @@
+import inspect
+from collections import defaultdict
+from functools import partial
+
+import numpy as np
+from numpy.testing import assert_array_equal
+
+from sklearn.base import (
+ BaseEstimator,
+ ClassifierMixin,
+ MetaEstimatorMixin,
+ RegressorMixin,
+ TransformerMixin,
+ clone,
+)
+from sklearn.metrics._scorer import _Scorer, mean_squared_error
+from sklearn.model_selection import BaseCrossValidator
+from sklearn.model_selection._split import GroupsConsumerMixin
+from sklearn.utils._metadata_requests import (
+ SIMPLE_METHODS,
+)
+from sklearn.utils.metadata_routing import (
+ MetadataRouter,
+ MethodMapping,
+ process_routing,
+)
+from sklearn.utils.multiclass import _check_partial_fit_first_call
+
+
+def record_metadata(obj, record_default=True, **kwargs):
+ """Utility function to store passed metadata to a method of obj.
+
+ If record_default is False, kwargs whose values are "default" are skipped.
+ This is so that checks on keyword arguments whose default was not changed
+ are skipped.
+
+ """
+ stack = inspect.stack()
+ callee = stack[1].function
+ caller = stack[2].function
+ if not hasattr(obj, "_records"):
+ obj._records = defaultdict(lambda: defaultdict(list))
+ if not record_default:
+ kwargs = {
+ key: val
+ for key, val in kwargs.items()
+ if not isinstance(val, str) or (val != "default")
+ }
+ obj._records[callee][caller].append(kwargs)
+
+
+def check_recorded_metadata(obj, method, parent, split_params=tuple(), **kwargs):
+ """Check whether the expected metadata is passed to the object's method.
+
+ Parameters
+ ----------
+ obj : estimator object
+ sub-estimator to check routed params for
+ method : str
+ sub-estimator's method where metadata is routed to, or otherwise in
+ the context of metadata routing referred to as 'callee'
+ parent : str
+ the parent method which should have called `method`, or otherwise in
+ the context of metadata routing referred to as 'caller'
+ split_params : tuple, default=empty
+ specifies any parameters which are to be checked as being a subset
+ of the original values
+ **kwargs : dict
+ passed metadata
+ """
+ all_records = (
+ getattr(obj, "_records", dict()).get(method, dict()).get(parent, list())
+ )
+ for record in all_records:
+ # first check that the names of the metadata passed are the same as
+ # expected. The names are stored as keys in `record`.
+ assert set(kwargs.keys()) == set(
+ record.keys()
+ ), f"Expected {kwargs.keys()} vs {record.keys()}"
+ for key, value in kwargs.items():
+ recorded_value = record[key]
+ # The following condition is used to check for any specified parameters
+ # being a subset of the original values
+ if key in split_params and recorded_value is not None:
+ assert np.isin(recorded_value, value).all()
+ else:
+ if isinstance(recorded_value, np.ndarray):
+ assert_array_equal(recorded_value, value)
+ else:
+ assert (
+ recorded_value is value
+ ), f"Expected {recorded_value} vs {value}. Method: {method}"
+
+
+record_metadata_not_default = partial(record_metadata, record_default=False)
+
+
+def assert_request_is_empty(metadata_request, exclude=None):
+ """Check if a metadata request dict is empty.
+
+ One can exclude a method or a list of methods from the check using the
+ ``exclude`` parameter. If metadata_request is a MetadataRouter, then
+ ``exclude`` can be of the form ``{"object" : [method, ...]}``.
+ """
+ if isinstance(metadata_request, MetadataRouter):
+ for name, route_mapping in metadata_request:
+ if exclude is not None and name in exclude:
+ _exclude = exclude[name]
+ else:
+ _exclude = None
+ assert_request_is_empty(route_mapping.router, exclude=_exclude)
+ return
+
+ exclude = [] if exclude is None else exclude
+ for method in SIMPLE_METHODS:
+ if method in exclude:
+ continue
+ mmr = getattr(metadata_request, method)
+ props = [
+ prop
+ for prop, alias in mmr.requests.items()
+ if isinstance(alias, str) or alias is not None
+ ]
+ assert not props
+
+
+def assert_request_equal(request, dictionary):
+ for method, requests in dictionary.items():
+ mmr = getattr(request, method)
+ assert mmr.requests == requests
+
+ empty_methods = [method for method in SIMPLE_METHODS if method not in dictionary]
+ for method in empty_methods:
+ assert not len(getattr(request, method).requests)
+
+
+class _Registry(list):
+ # This list is used to get a reference to the sub-estimators, which are not
+ # necessarily stored on the metaestimator. We need to override __deepcopy__
+ # because the sub-estimators are probably cloned, which would result in a
+ # new copy of the list, but we need copy and deep copy both to return the
+ # same instance.
+ def __deepcopy__(self, memo):
+ return self
+
+ def __copy__(self):
+ return self
+
+
+class ConsumingRegressor(RegressorMixin, BaseEstimator):
+ """A regressor consuming metadata.
+
+ Parameters
+ ----------
+ registry : list, default=None
+ If a list, the estimator will append itself to the list in order to have
+ a reference to the estimator later on. Since that reference is not
+ required in all tests, registration can be skipped by leaving this value
+ as None.
+ """
+
+ def __init__(self, registry=None):
+ self.registry = registry
+
+ def partial_fit(self, X, y, sample_weight="default", metadata="default"):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return self
+
+ def fit(self, X, y, sample_weight="default", metadata="default"):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return self
+
+ def predict(self, X, y=None, sample_weight="default", metadata="default"):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return np.zeros(shape=(len(X),))
+
+ def score(self, X, y, sample_weight="default", metadata="default"):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return 1
+
+
+class NonConsumingClassifier(ClassifierMixin, BaseEstimator):
+ """A classifier which accepts no metadata on any method."""
+
+ def __init__(self, alpha=0.0):
+ self.alpha = alpha
+
+ def fit(self, X, y):
+ self.classes_ = np.unique(y)
+ self.coef_ = np.ones_like(X)
+ return self
+
+ def partial_fit(self, X, y, classes=None):
+ return self
+
+ def decision_function(self, X):
+ return self.predict(X)
+
+ def predict(self, X):
+ y_pred = np.empty(shape=(len(X),))
+ y_pred[: len(X) // 2] = 0
+ y_pred[len(X) // 2 :] = 1
+ return y_pred
+
+ def predict_proba(self, X):
+ # dummy probabilities to support predict_proba
+ y_proba = np.empty(shape=(len(X), 2))
+ y_proba[: len(X) // 2, :] = np.asarray([1.0, 0.0])
+ y_proba[len(X) // 2 :, :] = np.asarray([0.0, 1.0])
+ return y_proba
+
+ def predict_log_proba(self, X):
+ # dummy probabilities to support predict_log_proba
+ return self.predict_proba(X)
+
+
+class NonConsumingRegressor(RegressorMixin, BaseEstimator):
+ """A classifier which accepts no metadata on any method."""
+
+ def fit(self, X, y):
+ return self
+
+ def partial_fit(self, X, y):
+ return self
+
+ def predict(self, X):
+ return np.ones(len(X)) # pragma: no cover
+
+
+class ConsumingClassifier(ClassifierMixin, BaseEstimator):
+ """A classifier consuming metadata.
+
+ Parameters
+ ----------
+ registry : list, default=None
+ If a list, the estimator will append itself to the list in order to have
+ a reference to the estimator later on. Since that reference is not
+ required in all tests, registration can be skipped by leaving this value
+ as None.
+
+ alpha : float, default=0
+ This parameter is only used to test the ``*SearchCV`` objects, and
+ doesn't do anything.
+ """
+
+ def __init__(self, registry=None, alpha=0.0):
+ self.alpha = alpha
+ self.registry = registry
+
+ def partial_fit(
+ self, X, y, classes=None, sample_weight="default", metadata="default"
+ ):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ _check_partial_fit_first_call(self, classes)
+ return self
+
+ def fit(self, X, y, sample_weight="default", metadata="default"):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+
+ self.classes_ = np.unique(y)
+ self.coef_ = np.ones_like(X)
+ return self
+
+ def predict(self, X, sample_weight="default", metadata="default"):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ y_score = np.empty(shape=(len(X),), dtype="int8")
+ y_score[len(X) // 2 :] = 0
+ y_score[: len(X) // 2] = 1
+ return y_score
+
+ def predict_proba(self, X, sample_weight="default", metadata="default"):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ y_proba = np.empty(shape=(len(X), 2))
+ y_proba[: len(X) // 2, :] = np.asarray([1.0, 0.0])
+ y_proba[len(X) // 2 :, :] = np.asarray([0.0, 1.0])
+ return y_proba
+
+ def predict_log_proba(self, X, sample_weight="default", metadata="default"):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return np.zeros(shape=(len(X), 2))
+
+ def decision_function(self, X, sample_weight="default", metadata="default"):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ y_score = np.empty(shape=(len(X),))
+ y_score[len(X) // 2 :] = 0
+ y_score[: len(X) // 2] = 1
+ return y_score
+
+ def score(self, X, y, sample_weight="default", metadata="default"):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return 1
+
+
+class ConsumingTransformer(TransformerMixin, BaseEstimator):
+ """A transformer which accepts metadata on fit and transform.
+
+ Parameters
+ ----------
+ registry : list, default=None
+ If a list, the estimator will append itself to the list in order to have
+ a reference to the estimator later on. Since that reference is not
+ required in all tests, registration can be skipped by leaving this value
+ as None.
+ """
+
+ def __init__(self, registry=None):
+ self.registry = registry
+
+ def fit(self, X, y=None, sample_weight="default", metadata="default"):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ self.fitted_ = True
+ return self
+
+ def transform(self, X, sample_weight="default", metadata="default"):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return X + 1
+
+ def fit_transform(self, X, y, sample_weight="default", metadata="default"):
+ # implementing ``fit_transform`` is necessary since
+ # ``TransformerMixin.fit_transform`` doesn't route any metadata to
+ # ``transform``, while here we want ``transform`` to receive
+ # ``sample_weight`` and ``metadata``.
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return self.fit(X, y, sample_weight=sample_weight, metadata=metadata).transform(
+ X, sample_weight=sample_weight, metadata=metadata
+ )
+
+ def inverse_transform(self, X, sample_weight=None, metadata=None):
+ record_metadata_not_default(
+ self, sample_weight=sample_weight, metadata=metadata
+ )
+ return X - 1
+
+
+class ConsumingNoFitTransformTransformer(BaseEstimator):
+ """A metadata consuming transformer that doesn't inherit from
+ TransformerMixin, and thus doesn't implement `fit_transform`. Note that
+ TransformerMixin's `fit_transform` doesn't route metadata to `transform`."""
+
+ def __init__(self, registry=None):
+ self.registry = registry
+
+ def fit(self, X, y=None, sample_weight=None, metadata=None):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata(self, sample_weight=sample_weight, metadata=metadata)
+
+ return self
+
+ def transform(self, X, sample_weight=None, metadata=None):
+ record_metadata(self, sample_weight=sample_weight, metadata=metadata)
+ return X
+
+
+class ConsumingScorer(_Scorer):
+ def __init__(self, registry=None):
+ super().__init__(
+ score_func=mean_squared_error, sign=1, kwargs={}, response_method="predict"
+ )
+ self.registry = registry
+
+ def _score(self, method_caller, clf, X, y, **kwargs):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata_not_default(self, **kwargs)
+
+ sample_weight = kwargs.get("sample_weight", None)
+ return super()._score(method_caller, clf, X, y, sample_weight=sample_weight)
+
+
+class ConsumingSplitter(GroupsConsumerMixin, BaseCrossValidator):
+ def __init__(self, registry=None):
+ self.registry = registry
+
+ def split(self, X, y=None, groups="default", metadata="default"):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata_not_default(self, groups=groups, metadata=metadata)
+
+ split_index = len(X) // 2
+ train_indices = list(range(0, split_index))
+ test_indices = list(range(split_index, len(X)))
+ yield test_indices, train_indices
+ yield train_indices, test_indices
+
+ def get_n_splits(self, X=None, y=None, groups=None, metadata=None):
+ return 2
+
+ def _iter_test_indices(self, X=None, y=None, groups=None):
+ split_index = len(X) // 2
+ train_indices = list(range(0, split_index))
+ test_indices = list(range(split_index, len(X)))
+ yield test_indices
+ yield train_indices
+
+
+class MetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator):
+ """A meta-regressor which is only a router."""
+
+ def __init__(self, estimator):
+ self.estimator = estimator
+
+ def fit(self, X, y, **fit_params):
+ params = process_routing(self, "fit", **fit_params)
+ self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
+
+ def get_metadata_routing(self):
+ router = MetadataRouter(owner=self.__class__.__name__).add(
+ estimator=self.estimator,
+ method_mapping=MethodMapping().add(caller="fit", callee="fit"),
+ )
+ return router
+
+
+class WeightedMetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator):
+ """A meta-regressor which is also a consumer."""
+
+ def __init__(self, estimator, registry=None):
+ self.estimator = estimator
+ self.registry = registry
+
+ def fit(self, X, y, sample_weight=None, **fit_params):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata(self, sample_weight=sample_weight)
+ params = process_routing(self, "fit", sample_weight=sample_weight, **fit_params)
+ self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
+ return self
+
+ def predict(self, X, **predict_params):
+ params = process_routing(self, "predict", **predict_params)
+ return self.estimator_.predict(X, **params.estimator.predict)
+
+ def get_metadata_routing(self):
+ router = (
+ MetadataRouter(owner=self.__class__.__name__)
+ .add_self_request(self)
+ .add(
+ estimator=self.estimator,
+ method_mapping=MethodMapping()
+ .add(caller="fit", callee="fit")
+ .add(caller="predict", callee="predict"),
+ )
+ )
+ return router
+
+
+class WeightedMetaClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator):
+ """A meta-estimator which also consumes sample_weight itself in ``fit``."""
+
+ def __init__(self, estimator, registry=None):
+ self.estimator = estimator
+ self.registry = registry
+
+ def fit(self, X, y, sample_weight=None, **kwargs):
+ if self.registry is not None:
+ self.registry.append(self)
+
+ record_metadata(self, sample_weight=sample_weight)
+ params = process_routing(self, "fit", sample_weight=sample_weight, **kwargs)
+ self.estimator_ = clone(self.estimator).fit(X, y, **params.estimator.fit)
+ return self
+
+ def get_metadata_routing(self):
+ router = (
+ MetadataRouter(owner=self.__class__.__name__)
+ .add_self_request(self)
+ .add(
+ estimator=self.estimator,
+ method_mapping=MethodMapping().add(caller="fit", callee="fit"),
+ )
+ )
+ return router
+
+
+class MetaTransformer(MetaEstimatorMixin, TransformerMixin, BaseEstimator):
+ """A simple meta-transformer."""
+
+ def __init__(self, transformer):
+ self.transformer = transformer
+
+ def fit(self, X, y=None, **fit_params):
+ params = process_routing(self, "fit", **fit_params)
+ self.transformer_ = clone(self.transformer).fit(X, y, **params.transformer.fit)
+ return self
+
+ def transform(self, X, y=None, **transform_params):
+ params = process_routing(self, "transform", **transform_params)
+ return self.transformer_.transform(X, **params.transformer.transform)
+
+ def get_metadata_routing(self):
+ return MetadataRouter(owner=self.__class__.__name__).add(
+ transformer=self.transformer,
+ method_mapping=MethodMapping()
+ .add(caller="fit", callee="fit")
+ .add(caller="transform", callee="transform"),
+ )
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/test_base.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..b65baa78802bc4ec81e7194f42fddec282eeafd5
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/test_base.py
@@ -0,0 +1,994 @@
+# Authors: The scikit-learn developers
+# SPDX-License-Identifier: BSD-3-Clause
+
+import pickle
+import re
+import warnings
+
+import numpy as np
+import pytest
+import scipy.sparse as sp
+from numpy.testing import assert_allclose
+
+import sklearn
+from sklearn import config_context, datasets
+from sklearn.base import (
+ BaseEstimator,
+ OutlierMixin,
+ TransformerMixin,
+ clone,
+ is_classifier,
+ is_clusterer,
+ is_outlier_detector,
+ is_regressor,
+)
+from sklearn.cluster import KMeans
+from sklearn.decomposition import PCA
+from sklearn.ensemble import IsolationForest
+from sklearn.exceptions import InconsistentVersionWarning
+from sklearn.model_selection import GridSearchCV
+from sklearn.pipeline import Pipeline
+from sklearn.preprocessing import StandardScaler
+from sklearn.svm import SVC, SVR
+from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
+from sklearn.utils._mocking import MockDataFrame
+from sklearn.utils._set_output import _get_output_config
+from sklearn.utils._testing import (
+ _convert_container,
+ assert_array_equal,
+)
+from sklearn.utils.validation import _check_n_features, validate_data
+
+
+#############################################################################
+# A few test classes
class MyEstimator(BaseEstimator):
    """Minimal estimator with two constructor params, used by the clone tests."""

    def __init__(self, l1=0, empty=None):
        self.empty = empty
        self.l1 = l1
+
+
class K(BaseEstimator):
    """Nested estimator used to exercise ``a__d``-style nested params."""

    def __init__(self, c=None, d=None):
        self.d = d
        self.c = c
+
+
class T(BaseEstimator):
    """Outer estimator holding two sub-params, used by repr/get_params tests."""

    def __init__(self, a=None, b=None):
        self.b = b
        self.a = a
+
+
class NaNTag(BaseEstimator):
    """Estimator whose tags advertise NaN support."""

    def __sklearn_tags__(self):
        new_tags = super().__sklearn_tags__()
        new_tags.input_tags.allow_nan = True
        return new_tags
+
+
class NoNaNTag(BaseEstimator):
    """Estimator whose tags explicitly reject NaN input."""

    def __sklearn_tags__(self):
        new_tags = super().__sklearn_tags__()
        new_tags.input_tags.allow_nan = False
        return new_tags
+
+
class OverrideTag(NaNTag):
    """Subclass flipping the parent's allow_nan tag back to False."""

    def __sklearn_tags__(self):
        new_tags = super().__sklearn_tags__()
        new_tags.input_tags.allow_nan = False
        return new_tags
+
+
class DiamondOverwriteTag(NaNTag, NoNaNTag):
    # Diamond inheritance: NaNTag comes first in the MRO, so its tag
    # override runs last and allow_nan ends up True (see test_tag_inheritance).
    pass
+
+
class InheritDiamondOverwriteTag(DiamondOverwriteTag):
    # Inherits DiamondOverwriteTag's tag resolution unchanged.
    pass
+
+
class ModifyInitParams(BaseEstimator):
    """Deprecated behavior.
    Equal parameters but with a type cast.
    Doesn't fulfill a is a
    """

    def __init__(self, a=np.array([0])):
        # Copying the argument breaks the "store params untouched" contract,
        # so clone() must reject this estimator (see test_clone_buggy).
        self.a = a.copy()
+
+
+class Buggy(BaseEstimator):
+ "A buggy estimator that does not set its parameters right."
+
+ def __init__(self, a=None):
+ self.a = 1
+
+
class NoEstimator:
    """Object with fit/predict but no get_params: not a scikit-learn estimator."""

    def __init__(self):
        pass

    def fit(self, X=None, y=None):
        # Fluent no-op fit.
        return self

    def predict(self, X=None):
        # Deliberately predicts nothing.
        return None
+
+
+class VargEstimator(BaseEstimator):
+ """scikit-learn estimators shouldn't have vargs."""
+
+ def __init__(self, *vargs):
+ pass
+
+
+#############################################################################
+# The tests
+
+
def test_clone():
    """clone must return a distinct object with equal parameters."""
    from sklearn.feature_selection import SelectFpr, f_classif

    original = SelectFpr(f_classif, alpha=0.1)
    duplicate = clone(original)
    assert duplicate is not original
    assert duplicate.get_params() == original.get_params()

    # Array-valued parameters must not break cloning either.
    original = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    duplicate = clone(original)
    assert duplicate is not original
+
+
def test_clone_2():
    """clone must drop attributes that were set after construction."""
    from sklearn.feature_selection import SelectFpr, f_classif

    original = SelectFpr(f_classif, alpha=0.1)
    original.own_attribute = "test"
    duplicate = clone(original)
    # Only constructor parameters survive a clone.
    assert not hasattr(duplicate, "own_attribute")
+
+
def test_clone_buggy():
    """clone must raise on estimators breaking the get_params contract."""
    # __init__ that ignores its argument -> params don't round-trip.
    broken = Buggy()
    broken.a = 2
    with pytest.raises(RuntimeError):
        clone(broken)

    # Object without get_params at all.
    with pytest.raises(TypeError):
        clone(NoEstimator())

    # *args in __init__ cannot be introspected.
    with pytest.raises(RuntimeError):
        clone(VargEstimator())

    # __init__ that copies its argument (not the same object).
    with pytest.raises(RuntimeError):
        clone(ModifyInitParams())
+
+
def test_clone_empty_array():
    """Cloning must survive empty-array and sparse-matrix parameters."""
    est = MyEstimator(empty=np.array([]))
    assert_array_equal(est.empty, clone(est).empty)

    est = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    assert_array_equal(est.empty.data, clone(est).empty.data)
+
+
def test_clone_nan():
    """A np.nan parameter must be shared (not copied) by clone."""
    est = MyEstimator(empty=np.nan)
    assert clone(est).empty is est.empty
+
+
def test_clone_dict():
    """clone of a dict clones the contained estimators element-wise."""
    original = {"a": MyEstimator()}
    duplicate = clone(original)
    assert duplicate["a"] is not original["a"]
+
+
def test_clone_sparse_matrices():
    """Every scipy ``*_matrix`` class must round-trip through clone."""
    for attr_name in dir(sp):
        if not attr_name.endswith("_matrix"):
            continue
        matrix_cls = getattr(sp, attr_name)
        # Skip helper callables; keep only actual classes.
        if type(matrix_cls) is not type:
            continue
        est = MyEstimator(empty=matrix_cls(np.eye(5)))
        duplicate = clone(est)
        assert duplicate.empty.__class__ is est.empty.__class__
        assert_array_equal(duplicate.empty.toarray(), est.empty.toarray())
+
+
def test_clone_estimator_types():
    """Class-valued (not instance) parameters are shared, not copied."""
    est = MyEstimator(empty=MyEstimator)
    assert clone(est).empty is est.empty
+
+
def test_clone_class_rather_than_instance():
    """Passing a class (not an instance) to clone raises a helpful TypeError."""
    expected = "You should provide an instance of scikit-learn estimator"
    with pytest.raises(TypeError, match=expected):
        clone(MyEstimator)
+
+
def test_repr():
    """Smoke-test estimator reprs, including truncation of long param lists."""
    repr(MyEstimator())
    nested = T(K(), K())
    assert repr(nested) == "T(a=K(), b=K())"

    # A huge parameter list is truncated to a fixed-length repr.
    long_est = T(a=["long_params"] * 1000)
    assert len(repr(long_est)) == 485
+
+
def test_str():
    """Smoke-test that str() on a bare estimator does not raise."""
    str(MyEstimator())
+
+
def test_get_params():
    """Nested params appear only with deep=True and can be set via ``__``."""
    est = T(K(), K)

    assert "a__d" in est.get_params(deep=True)
    assert "a__d" not in est.get_params(deep=False)

    est.set_params(a__d=2)
    assert est.a.d == 2

    # Unknown nested parameter names must be rejected.
    with pytest.raises(ValueError):
        est.set_params(a__a=2)
+
+
# TODO(1.8): Remove this test when the deprecation is removed
def test_is_estimator_type_class():
    """Passing classes (not instances) to is_* helpers warns but still works."""
    deprecation_msg = "passing a class to.*is deprecated"
    for checker, klass in [
        (is_classifier, SVC),
        (is_regressor, SVR),
        (is_clusterer, KMeans),
        (is_outlier_detector, IsolationForest),
    ]:
        with pytest.warns(FutureWarning, match=deprecation_msg):
            assert checker(klass)
+
+
@pytest.mark.parametrize(
    "estimator, expected_result",
    [
        # Classifiers, bare and wrapped in meta-estimators / pipelines.
        (SVC(), True),
        (GridSearchCV(SVC(), {"C": [0.1, 1]}), True),
        (Pipeline([("svc", SVC())]), True),
        (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), True),
        # Regressors, equally wrapped, must not be reported as classifiers.
        (SVR(), False),
        (GridSearchCV(SVR(), {"C": [0.1, 1]}), False),
        (Pipeline([("svr", SVR())]), False),
        (Pipeline([("svr_cv", GridSearchCV(SVR(), {"C": [0.1, 1]}))]), False),
    ],
)
def test_is_classifier(estimator, expected_result):
    # is_classifier must look through meta-estimators at the final estimator.
    assert is_classifier(estimator) == expected_result
+
+
@pytest.mark.parametrize(
    "estimator, expected_result",
    [
        # Regressors, bare and wrapped in meta-estimators / pipelines.
        (SVR(), True),
        (GridSearchCV(SVR(), {"C": [0.1, 1]}), True),
        (Pipeline([("svr", SVR())]), True),
        (Pipeline([("svr_cv", GridSearchCV(SVR(), {"C": [0.1, 1]}))]), True),
        # Classifiers, equally wrapped, must not be reported as regressors.
        (SVC(), False),
        (GridSearchCV(SVC(), {"C": [0.1, 1]}), False),
        (Pipeline([("svc", SVC())]), False),
        (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), False),
    ],
)
def test_is_regressor(estimator, expected_result):
    # is_regressor must look through meta-estimators at the final estimator.
    assert is_regressor(estimator) == expected_result
+
+
@pytest.mark.parametrize(
    "estimator, expected_result",
    [
        # Clusterers, bare and wrapped in meta-estimators / pipelines.
        (KMeans(), True),
        (GridSearchCV(KMeans(), {"n_clusters": [3, 8]}), True),
        (Pipeline([("km", KMeans())]), True),
        (Pipeline([("km_cv", GridSearchCV(KMeans(), {"n_clusters": [3, 8]}))]), True),
        # Classifiers, equally wrapped, must not be reported as clusterers.
        (SVC(), False),
        (GridSearchCV(SVC(), {"C": [0.1, 1]}), False),
        (Pipeline([("svc", SVC())]), False),
        (Pipeline([("svc_cv", GridSearchCV(SVC(), {"C": [0.1, 1]}))]), False),
    ],
)
def test_is_clusterer(estimator, expected_result):
    # is_clusterer must look through meta-estimators at the final estimator.
    assert is_clusterer(estimator) == expected_result
+
+
def test_set_params():
    """set_params validates nested estimator parameter names."""
    pipe = Pipeline([("svc", SVC())])

    # Unknown parameter on the inner estimator.
    with pytest.raises(ValueError):
        pipe.set_params(svc__stupid_param=True)

    # Unknown step name on the pipeline itself.
    with pytest.raises(ValueError):
        pipe.set_params(svm__stupid_param=True)

    # we don't currently catch if the things in pipeline are estimators
    # bad_pipeline = Pipeline([("bad", NoEstimator())])
    # with pytest.raises(AttributeError):
    #     bad_pipeline.set_params(bad__stupid_param=True)
+
+
def test_set_params_passes_all_parameters():
    # Make sure all parameters are passed together to set_params
    # of nested estimator. Regression test for #9944

    class TestDecisionTree(DecisionTreeClassifier):
        def set_params(self, **kwargs):
            super().set_params(**kwargs)
            # expected_kwargs is in test scope: the closure reads the
            # variable assigned below, at the time set_params is dispatched.
            assert kwargs == expected_kwargs
            return self

    expected_kwargs = {"max_depth": 5, "min_samples_leaf": 2}
    # Both meta-estimators must forward the two nested params in ONE call.
    for est in [
        Pipeline([("estimator", TestDecisionTree())]),
        GridSearchCV(TestDecisionTree(), {}),
    ]:
        est.set_params(estimator__max_depth=5, estimator__min_samples_leaf=2)
+
+
def test_set_params_updates_valid_params():
    """Replacing the estimator and setting its params in one call must work."""
    # Regression test: C must land on the new SVC, not on the old tree.
    search = GridSearchCV(DecisionTreeClassifier(), {})
    search.set_params(estimator=SVC(), estimator__C=42.0)
    assert search.estimator.C == 42.0
+
+
@pytest.mark.parametrize(
    "tree,dataset",
    [
        (
            DecisionTreeClassifier(max_depth=2, random_state=0),
            datasets.make_classification(random_state=0),
        ),
        (
            DecisionTreeRegressor(max_depth=2, random_state=0),
            datasets.make_regression(random_state=0),
        ),
    ],
)
def test_score_sample_weight(tree, dataset):
    """score() must honor sample_weight for classifiers and regressors."""
    rng = np.random.RandomState(0)
    # check that the score with and without sample weights are different
    X, y = dataset

    tree.fit(X, y)
    # generate random sample weights
    sample_weight = rng.randint(1, 10, size=len(y))
    score_unweighted = tree.score(X, y)
    score_weighted = tree.score(X, y, sample_weight=sample_weight)
    msg = "Unweighted and weighted scores are unexpectedly equal"
    assert score_unweighted != score_weighted, msg
+
+
def test_clone_pandas_dataframe():
    """clone must copy a dataframe-like param so it stays element-wise equal."""

    class DummyEstimator(TransformerMixin, BaseEstimator):
        """This is a dummy class for generating numerical features

        This feature extractor extracts numerical features from pandas data
        frame.

        Parameters
        ----------

        df: pandas data frame
            The pandas data frame parameter.

        Notes
        -----
        """

        def __init__(self, df=None, scalar_param=1):
            self.df = df
            self.scalar_param = scalar_param

        def fit(self, X, y=None):
            pass

        def transform(self, X):
            pass

    # build and clone estimator
    d = np.arange(10)
    df = MockDataFrame(d)
    e = DummyEstimator(df, scalar_param=1)
    cloned_e = clone(e)

    # the test
    # MockDataFrame == returns an element-wise frame; reduce via .values.all().
    assert (e.df == cloned_e.df).values.all()
    assert e.scalar_param == cloned_e.scalar_param
+
+
def test_clone_protocol():
    """Checks that clone works with `__sklearn_clone__` protocol."""

    class FrozenEstimator(BaseEstimator):
        # Wraps an already-fitted estimator; fitting and cloning are no-ops.
        def __init__(self, fitted_estimator):
            self.fitted_estimator = fitted_estimator

        def __getattr__(self, name):
            # Delegate unknown attribute lookups to the wrapped estimator.
            return getattr(self.fitted_estimator, name)

        def __sklearn_clone__(self):
            # clone() hands back the object itself instead of a fresh copy.
            return self

        def fit(self, *args, **kwargs):
            # Ignore refitting requests entirely.
            return self

        def fit_transform(self, *args, **kwargs):
            # Transform only; the wrapped estimator is never refitted.
            return self.fitted_estimator.transform(*args, **kwargs)

    X = np.array([[-1, -1], [-2, -1], [-3, -2]])
    pca = PCA().fit(X)
    components = pca.components_

    frozen_pca = FrozenEstimator(pca)
    assert_allclose(frozen_pca.components_, components)

    # Calling PCA methods such as `get_feature_names_out` still works
    assert_array_equal(frozen_pca.get_feature_names_out(), pca.get_feature_names_out())

    # Fitting on a new data does not alter `components_`
    X_new = np.asarray([[-1, 2], [3, 4], [1, 2]])
    frozen_pca.fit(X_new)
    assert_allclose(frozen_pca.components_, components)

    # `fit_transform` does not alter state
    frozen_pca.fit_transform(X_new)
    assert_allclose(frozen_pca.components_, components)

    # Cloning estimator is a no-op
    clone_frozen_pca = clone(frozen_pca)
    assert clone_frozen_pca is frozen_pca
    assert_allclose(clone_frozen_pca.components_, components)
+
+
def test_pickle_version_warning_is_not_raised_with_matching_version():
    """Unpickling under the same sklearn version must emit no warning."""
    iris = datasets.load_iris()
    original = DecisionTreeClassifier().fit(iris.data, iris.target)
    payload = pickle.dumps(original)
    assert b"_sklearn_version" in payload

    # Escalate any warning to an error: unpickling must stay silent.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        restored = pickle.loads(payload)

    # The restored classifier must predict identically to the original.
    score_of_original = original.score(iris.data, iris.target)
    score_of_restored = restored.score(iris.data, iris.target)
    assert score_of_original == score_of_restored
+
+
class TreeBadVersion(DecisionTreeClassifier):
    # Pickles itself with a bogus version string so unpickling triggers
    # the version-mismatch warning.
    def __getstate__(self):
        return dict(self.__dict__.items(), _sklearn_version="something")
+
+
+pickle_error_message = (
+ "Trying to unpickle estimator {estimator} from "
+ "version {old_version} when using version "
+ "{current_version}. This might "
+ "lead to breaking code or invalid results. "
+ "Use at your own risk."
+)
+
+
def test_pickle_version_warning_is_issued_upon_different_version():
    """Unpickling a model pickled under another sklearn version must warn."""
    iris = datasets.load_iris()
    tree = TreeBadVersion().fit(iris.data, iris.target)
    tree_pickle_other = pickle.dumps(tree)
    message = pickle_error_message.format(
        estimator="TreeBadVersion",
        old_version="something",
        current_version=sklearn.__version__,
    )
    with pytest.warns(UserWarning, match=message) as warning_record:
        pickle.loads(tree_pickle_other)

    # The warning is the dedicated InconsistentVersionWarning subclass and
    # carries the estimator name plus both version strings as attributes.
    message = warning_record.list[0].message
    assert isinstance(message, InconsistentVersionWarning)
    assert message.estimator_name == "TreeBadVersion"
    assert message.original_sklearn_version == "something"
    assert message.current_sklearn_version == sklearn.__version__
+
+
class TreeNoVersion(DecisionTreeClassifier):
    # Drops _sklearn_version from the pickle, mimicking pre-0.18 pickles.
    def __getstate__(self):
        return self.__dict__
+
+
def test_pickle_version_warning_is_issued_when_no_version_info_in_pickle():
    """Pickles lacking version metadata are treated as pre-0.18 and warn."""
    iris = datasets.load_iris()
    # TreeNoVersion has no getstate, like pre-0.18
    tree = TreeNoVersion().fit(iris.data, iris.target)

    tree_pickle_noversion = pickle.dumps(tree)
    assert b"_sklearn_version" not in tree_pickle_noversion
    message = pickle_error_message.format(
        estimator="TreeNoVersion",
        old_version="pre-0.18",
        current_version=sklearn.__version__,
    )
    # check we got the warning about using pre-0.18 pickle
    with pytest.warns(UserWarning, match=message):
        pickle.loads(tree_pickle_noversion)
+
+
def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator():
    """No version warning for estimators whose module is outside sklearn."""
    iris = datasets.load_iris()
    tree = TreeNoVersion().fit(iris.data, iris.target)
    tree_pickle_noversion = pickle.dumps(tree)
    # Capture the backup BEFORE entering ``try``: if this assignment lived
    # inside the try and an earlier statement raised, the ``finally`` restore
    # would itself fail with NameError and mask the real error.
    module_backup = TreeNoVersion.__module__
    try:
        # Pretend the class comes from a third-party package.
        TreeNoVersion.__module__ = "notsklearn"

        with warnings.catch_warnings():
            # Escalate any warning to an error: unpickling must stay silent.
            warnings.simplefilter("error")

            pickle.loads(tree_pickle_noversion)
    finally:
        TreeNoVersion.__module__ = module_backup
+
+
class DontPickleAttributeMixin:
    """Mixin that blanks ``_attribute_not_pickled`` when (un)pickling."""

    def __getstate__(self):
        # Copy the instance dict and blank the private attribute.
        state = dict(self.__dict__)
        state["_attribute_not_pickled"] = None
        return state

    def __setstate__(self, state):
        # Mark the instance so tests can tell __setstate__ actually ran.
        state["_restored"] = True
        self.__dict__.update(state)
+
+
class MultiInheritanceEstimator(DontPickleAttributeMixin, BaseEstimator):
    """Estimator picking up __getstate__/__setstate__ from the mixin."""

    def __init__(self, attribute_pickled=5):
        self._attribute_not_pickled = None
        self.attribute_pickled = attribute_pickled
+
+
def test_pickling_when_getstate_is_overwritten_by_mixin():
    """The mixin's __getstate__/__setstate__ must win over BaseEstimator's."""
    est = MultiInheritanceEstimator()
    est._attribute_not_pickled = "this attribute should not be pickled"

    restored = pickle.loads(pickle.dumps(est))
    assert restored.attribute_pickled == 5
    assert restored._attribute_not_pickled is None
    assert restored._restored
+
+
def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn():
    """Mixin state handling also applies when the class pretends to live
    outside scikit-learn."""
    estimator = MultiInheritanceEstimator()
    text = "this attribute should not be pickled"
    estimator._attribute_not_pickled = text
    # Capture the backup BEFORE entering ``try``: in the original, both the
    # estimator and ``old_mod`` were created inside the try, so a failure on
    # those lines would make the ``finally`` restore raise NameError and
    # mask the real error.
    old_mod = type(estimator).__module__
    try:
        type(estimator).__module__ = "notsklearn"

        serialized = estimator.__getstate__()
        assert serialized == {"_attribute_not_pickled": None, "attribute_pickled": 5}

        serialized["attribute_pickled"] = 4
        estimator.__setstate__(serialized)
        assert estimator.attribute_pickled == 4
        assert estimator._restored
    finally:
        type(estimator).__module__ = old_mod
+
+
class SingleInheritanceEstimator(BaseEstimator):
    """Overrides __getstate__ directly, building on BaseEstimator's state."""

    def __init__(self, attribute_pickled=5):
        self.attribute_pickled = attribute_pickled
        self._attribute_not_pickled = None

    def __getstate__(self):
        # Start from BaseEstimator's state dict, then blank the private field
        # so it never ends up in the pickle payload.
        state = super().__getstate__()
        state["_attribute_not_pickled"] = None
        return state
+
+
def test_pickling_works_when_getstate_is_overwritten_in_the_child_class():
    """A direct __getstate__ override must keep the private field out too."""
    est = SingleInheritanceEstimator()
    est._attribute_not_pickled = "this attribute should not be pickled"

    restored = pickle.loads(pickle.dumps(est))
    assert restored.attribute_pickled == 5
    assert restored._attribute_not_pickled is None
+
+
def test_tag_inheritance():
    # test that changing tags by inheritance is not allowed
    assert NaNTag().__sklearn_tags__().input_tags.allow_nan
    assert not NoNaNTag().__sklearn_tags__().input_tags.allow_nan

    # A subclass may still override the tag explicitly.
    assert not OverrideTag().__sklearn_tags__().input_tags.allow_nan

    # Diamond inheritance: the first base in the MRO wins (NaNTag -> True),
    # and a further subclass inherits that resolution unchanged.
    assert DiamondOverwriteTag().__sklearn_tags__().input_tags.allow_nan
    assert InheritDiamondOverwriteTag().__sklearn_tags__().input_tags.allow_nan
+
+
def test_raises_on_get_params_non_attribute():
    """get_params raises AttributeError when __init__ doesn't store a param."""

    class MyEstimator(BaseEstimator):
        def __init__(self, param=5):
            # Deliberately does NOT set self.param.
            pass

        def fit(self, X, y=None):
            return self

    est = MyEstimator()
    msg = "'MyEstimator' object has no attribute 'param'"

    with pytest.raises(AttributeError, match=msg):
        est.get_params()
+
+
def test_repr_mimebundle_():
    """The display config flag controls whether an HTML payload is emitted."""
    est = DecisionTreeClassifier()

    bundle = est._repr_mimebundle_()
    assert "text/plain" in bundle
    assert "text/html" in bundle

    # With display="text" only the plain-text payload remains.
    with config_context(display="text"):
        bundle = est._repr_mimebundle_()
        assert "text/plain" in bundle
        assert "text/html" not in bundle
+
+
+def test_repr_html_wraps():
+ # Checks the display configuration flag controls the html output
+ tree = DecisionTreeClassifier()
+
+ output = tree._repr_html_()
+ assert "