content: string, lengths 1 to 103k
path: string, lengths 8 to 216
filename: string, lengths 2 to 179
language: string, 15 classes
size_bytes: int64, 2 to 189k
quality_score: float64, 0.5 to 0.95
complexity: float64, 0 to 1
documentation_ratio: float64, 0 to 1
repository: string, 5 classes
stars: int64, 0 to 1k
created_date: date string, 2023-07-10 19:21:08 to 2025-07-09 19:11:45
license: string, 4 classes
is_test: bool, 2 classes
file_hash: string, length 32
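The fields above describe one record per file. As a minimal sketch, the first row below (the filters.cpython-313.pyc entry) maps onto a plain Python dict like this; every value is copied from that row:

```python
# One dataset row expressed as a plain dict; keys follow the schema above,
# values are copied verbatim from the filters.cpython-313.pyc row below.
record = {
    "content": "\n\n",  # rendered as the placeholder "\n\n" in the dump; the .pyc payload is not captured
    "path": r".venv\Lib\site-packages\jinja2\__pycache__\filters.cpython-313.pyc",
    "filename": "filters.cpython-313.pyc",
    "language": "Other",
    "size_bytes": 71_380,
    "quality_score": 0.75,
    "complexity": 0.063477,
    "documentation_ratio": 0.01105,
    "repository": "python-kit",
    "stars": 383,
    "created_date": "2024-06-07T18:03:37.370500",
    "license": "BSD-3-Clause",
    "is_test": False,
    "file_hash": "4f9a9d62327aa2781f3dc8978a4fab40",
}
```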
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\filters.cpython-313.pyc
filters.cpython-313.pyc
Other
71,380
0.75
0.063477
0.01105
python-kit
383
2024-06-07T18:03:37.370500
BSD-3-Clause
false
4f9a9d62327aa2781f3dc8978a4fab40
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\idtracking.cpython-313.pyc
idtracking.cpython-313.pyc
Other
19,411
0.95
0.043103
0
vue-tools
768
2023-08-17T19:15:24.486573
MIT
false
7d94b452c60e2f6b16cefb5d658dcfb6
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\lexer.cpython-313.pyc
lexer.cpython-313.pyc
Other
32,502
0.95
0.035088
0.003049
python-kit
293
2024-01-11T22:49:58.884605
Apache-2.0
false
ac6e2a64d488d580c473b14fa6e6ea19
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\loaders.cpython-313.pyc
loaders.cpython-313.pyc
Other
32,360
0.95
0.086514
0
node-utils
95
2024-01-16T07:46:24.712474
GPL-3.0
false
d83444dfa12b218a337399bd582188cb
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\meta.cpython-313.pyc
meta.cpython-313.pyc
Other
5,585
0.95
0.107692
0
vue-tools
854
2024-01-01T06:42:47.784178
GPL-3.0
false
95f9077443ea98b643c97ed6d3dc6a5f
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\nativetypes.cpython-313.pyc
nativetypes.cpython-313.pyc
Other
7,168
0.8
0
0
python-kit
899
2024-02-01T03:58:23.181916
BSD-3-Clause
false
8976b6a8b4383f4736361604b454ed1d
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\nodes.cpython-313.pyc
nodes.cpython-313.pyc
Other
59,949
0.75
0.118123
0.007092
awesome-app
374
2024-07-26T08:00:52.578307
GPL-3.0
false
3014a56de10e5a5bfb13e7a6bc659316
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\optimizer.cpython-313.pyc
optimizer.cpython-313.pyc
Other
2,777
0.8
0
0
react-lib
405
2025-01-19T12:06:54.125511
MIT
false
f4f0dde46f844a5d62ebcd50520563bd
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\parser.cpython-313.pyc
parser.cpython-313.pyc
Other
61,740
0.75
0.065076
0.009281
awesome-app
784
2025-02-15T05:06:28.015549
MIT
false
358666185203780b4d687a8774545e54
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\runtime.cpython-313.pyc
runtime.cpython-313.pyc
Other
48,951
0.95
0.066532
0.008639
awesome-app
653
2024-10-01T10:55:01.166294
BSD-3-Clause
false
7810b3504b82fb55ae47487c103db1c9
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\sandbox.cpython-313.pyc
sandbox.cpython-313.pyc
Other
18,229
0.95
0.077626
0
python-kit
754
2024-07-02T02:15:08.753154
BSD-3-Clause
false
5a0943a3c47e291d64357d0b3f2496ca
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\tests.cpython-313.pyc
tests.cpython-313.pyc
Other
8,916
0.8
0.186335
0
node-utils
902
2023-08-25T09:42:15.023319
BSD-3-Clause
true
9b31a33165d47b237058a49ec70da1b4
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
34,864
0.95
0.109865
0.005089
python-kit
987
2023-08-06T07:33:11.161242
GPL-3.0
false
901725c671ddb6727fa1d0b376d25610
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\visitor.cpython-313.pyc
visitor.cpython-313.pyc
Other
5,453
0.95
0.269841
0
python-kit
419
2025-07-08T11:18:56.538672
BSD-3-Clause
false
c853cb4437510eb47f6feb3c1963ffba
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\_identifier.cpython-313.pyc
_identifier.cpython-313.pyc
Other
2,129
0.7
0
0.5
python-kit
242
2024-06-24T20:42:31.135262
Apache-2.0
false
4e90c8ebc323752fba1357a8ec03fd61
\n\n
.venv\Lib\site-packages\jinja2\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,650
0.8
0
0
awesome-app
443
2024-11-25T07:31:31.706860
Apache-2.0
false
b1112d033e7797b3bfff6b86fd8f4705
[babel.extractors]\njinja2=jinja2.ext:babel_extract[i18n]\n\n
.venv\Lib\site-packages\jinja2-3.1.6.dist-info\entry_points.txt
entry_points.txt
Other
58
0.5
0
0
python-kit
261
2024-01-28T15:43:34.538767
BSD-3-Clause
false
27ee54d73799b15fbbfc380f02f07957
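The entry_points.txt row above registers jinja2's Babel extractor under the babel.extractors group; a minimal lookup sketch (assumes Python 3.10+ and an installed jinja2):

```python
from importlib.metadata import entry_points

# List every extractor registered in the "babel.extractors" group,
# which is where jinja2's entry_points.txt places jinja2.ext:babel_extract.
for ep in entry_points(group="babel.extractors"):
    print(ep.name, ep.value)
```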
pip\n
.venv\Lib\site-packages\jinja2-3.1.6.dist-info\INSTALLER
INSTALLER
Other
4
0.5
0
0
react-lib
983
2025-05-05T09:28:12.867442
MIT
false
365c9bfeb7d89244f2ce01c1de44cb85
Metadata-Version: 2.4\nName: Jinja2\nVersion: 3.1.6\nSummary: A very fast and expressive template engine.\nMaintainer-email: Pallets <contact@palletsprojects.com>\nRequires-Python: >=3.7\nDescription-Content-Type: text/markdown\nClassifier: Development Status :: 5 - Production/Stable\nClassifier: Environment :: Web Environment\nClassifier: Intended Audience :: Developers\nClassifier: License :: OSI Approved :: BSD License\nClassifier: Operating System :: OS Independent\nClassifier: Programming Language :: Python\nClassifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content\nClassifier: Topic :: Text Processing :: Markup :: HTML\nClassifier: Typing :: Typed\nLicense-File: LICENSE.txt\nRequires-Dist: MarkupSafe>=2.0\nRequires-Dist: Babel>=2.7 ; extra == "i18n"\nProject-URL: Changes, https://jinja.palletsprojects.com/changes/\nProject-URL: Chat, https://discord.gg/pallets\nProject-URL: Documentation, https://jinja.palletsprojects.com/\nProject-URL: Donate, https://palletsprojects.com/donate\nProject-URL: Source, https://github.com/pallets/jinja/\nProvides-Extra: i18n\n\n# Jinja\n\nJinja is a fast, expressive, extensible templating engine. Special\nplaceholders in the template allow writing code similar to Python\nsyntax. Then the template is passed data to render the final document.\n\nIt includes:\n\n- Template inheritance and inclusion.\n- Define and import macros within templates.\n- HTML templates can use autoescaping to prevent XSS from untrusted\n user input.\n- A sandboxed environment can safely render untrusted templates.\n- AsyncIO support for generating templates and calling async\n functions.\n- I18N support with Babel.\n- Templates are compiled to optimized Python code just-in-time and\n cached, or can be compiled ahead-of-time.\n- Exceptions point to the correct line in templates to make debugging\n easier.\n- Extensible filters, tests, functions, and even syntax.\n\nJinja's philosophy is that while application logic belongs in Python if\npossible, it shouldn't make the template designer's job difficult by\nrestricting functionality too much.\n\n\n## In A Nutshell\n\n```jinja\n{% extends "base.html" %}\n{% block title %}Members{% endblock %}\n{% block content %}\n <ul>\n {% for user in users %}\n <li><a href="{{ user.url }}">{{ user.username }}</a></li>\n {% endfor %}\n </ul>\n{% endblock %}\n```\n\n## Donate\n\nThe Pallets organization develops and supports Jinja and other popular\npackages. In order to grow the community of contributors and users, and\nallow the maintainers to devote more time to the projects, [please\ndonate today][].\n\n[please donate today]: https://palletsprojects.com/donate\n\n## Contributing\n\nSee our [detailed contributing documentation][contrib] for many ways to\ncontribute, including reporting issues, requesting features, asking or answering\nquestions, and making PRs.\n\n[contrib]: https://palletsprojects.com/contributing/\n\n
.venv\Lib\site-packages\jinja2-3.1.6.dist-info\METADATA
METADATA
Other
2,871
0.95
0.059524
0.057971
python-kit
148
2023-09-10T21:31:08.851496
Apache-2.0
false
5ea26c9dd2016d2772bbae7c23bfe30d
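The METADATA row above embeds Jinja's README; a minimal rendering sketch of the "In A Nutshell" pattern it shows, using the public jinja2 API (assumes jinja2 is installed):

```python
from jinja2 import Template

# Render one list item in the style of the README example.
template = Template('<li><a href="{{ user.url }}">{{ user.username }}</a></li>')
print(template.render(user={"url": "/u/alice", "username": "alice"}))
```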
jinja2-3.1.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\njinja2-3.1.6.dist-info/METADATA,sha256=aMVUj7Z8QTKhOJjZsx7FDGvqKr3ZFdkh8hQ1XDpkmcg,2871\njinja2-3.1.6.dist-info/RECORD,,\njinja2-3.1.6.dist-info/WHEEL,sha256=_2ozNFCLWc93bK4WKHCO-eDUENDlo-dgc9cU3qokYO4,82\njinja2-3.1.6.dist-info/entry_points.txt,sha256=OL85gYU1eD8cuPlikifFngXpeBjaxl6rIJ8KkC_3r-I,58\njinja2-3.1.6.dist-info/licenses/LICENSE.txt,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475\njinja2/__init__.py,sha256=xxepO9i7DHsqkQrgBEduLtfoz2QCuT6_gbL4XSN1hbU,1928\njinja2/__pycache__/__init__.cpython-313.pyc,,\njinja2/__pycache__/_identifier.cpython-313.pyc,,\njinja2/__pycache__/async_utils.cpython-313.pyc,,\njinja2/__pycache__/bccache.cpython-313.pyc,,\njinja2/__pycache__/compiler.cpython-313.pyc,,\njinja2/__pycache__/constants.cpython-313.pyc,,\njinja2/__pycache__/debug.cpython-313.pyc,,\njinja2/__pycache__/defaults.cpython-313.pyc,,\njinja2/__pycache__/environment.cpython-313.pyc,,\njinja2/__pycache__/exceptions.cpython-313.pyc,,\njinja2/__pycache__/ext.cpython-313.pyc,,\njinja2/__pycache__/filters.cpython-313.pyc,,\njinja2/__pycache__/idtracking.cpython-313.pyc,,\njinja2/__pycache__/lexer.cpython-313.pyc,,\njinja2/__pycache__/loaders.cpython-313.pyc,,\njinja2/__pycache__/meta.cpython-313.pyc,,\njinja2/__pycache__/nativetypes.cpython-313.pyc,,\njinja2/__pycache__/nodes.cpython-313.pyc,,\njinja2/__pycache__/optimizer.cpython-313.pyc,,\njinja2/__pycache__/parser.cpython-313.pyc,,\njinja2/__pycache__/runtime.cpython-313.pyc,,\njinja2/__pycache__/sandbox.cpython-313.pyc,,\njinja2/__pycache__/tests.cpython-313.pyc,,\njinja2/__pycache__/utils.cpython-313.pyc,,\njinja2/__pycache__/visitor.cpython-313.pyc,,\njinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958\njinja2/async_utils.py,sha256=vK-PdsuorOMnWSnEkT3iUJRIkTnYgO2T6MnGxDgHI5o,2834\njinja2/bccache.py,sha256=gh0qs9rulnXo0PhX5jTJy2UHzI8wFnQ63o_vw7nhzRg,14061\njinja2/compiler.py,sha256=9RpCQl5X88BHllJiPsHPh295Hh0uApvwFJNQuutULeM,74131\njinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433\njinja2/debug.py,sha256=CnHqCDHd-BVGvti_8ZsTolnXNhA3ECsY-6n_2pwU8Hw,6297\njinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267\njinja2/environment.py,sha256=9nhrP7Ch-NbGX00wvyr4yy-uhNHq2OCc60ggGrni_fk,61513\njinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071\njinja2/ext.py,sha256=5PF5eHfh8mXAIxXHHRB2xXbXohi8pE3nHSOxa66uS7E,31875\njinja2/filters.py,sha256=PQ_Egd9n9jSgtnGQYyF4K5j2nYwhUIulhPnyimkdr-k,55212\njinja2/idtracking.py,sha256=-ll5lIp73pML3ErUYiIJj7tdmWxcH_IlDv3yA_hiZYo,10555\njinja2/lexer.py,sha256=LYiYio6br-Tep9nPcupWXsPEtjluw3p1mU-lNBVRUfk,29786\njinja2/loaders.py,sha256=wIrnxjvcbqh5VwW28NSkfotiDq8qNCxIOSFbGUiSLB4,24055\njinja2/meta.py,sha256=OTDPkaFvU2Hgvx-6akz7154F8BIWaRmvJcBFvwopHww,4397\njinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210\njinja2/nodes.py,sha256=m1Duzcr6qhZI8JQ6VyJgUNinjAf5bQzijSmDnMsvUx8,34579\njinja2/optimizer.py,sha256=rJnCRlQ7pZsEEmMhsQDgC_pKyDHxP5TPS6zVPGsgcu8,1651\njinja2/parser.py,sha256=lLOFy3sEmHc5IaEHRiH1sQVnId2moUQzhyeJZTtdY30,40383\njinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0\njinja2/runtime.py,sha256=gDk-GvdriJXqgsGbHgrcKTP0Yp6zPXzhzrIpCFH3jAU,34249\njinja2/sandbox.py,sha256=Mw2aitlY2I8la7FYhcX2YG9BtUYcLnD0Gh3d29cDWrY,15009\njinja2/tests.py,sha256=VLsBhVFnWg-PxSBz1MhRnNWgP1ovXk3neO1FLQMeC9Q,5926\njinja2/utils.py,sha256=rRp3o9e7ZKS4fyrWRbELyLcpuGVTFcnooaOa1qx_FIk,24129\njinja2/visi
tor.py,sha256=EcnL1PIwf_4RVCOMxsRNuR8AXHbS1qfAdMOE2ngKJz4,3557\n
.venv\Lib\site-packages\jinja2-3.1.6.dist-info\RECORD
RECORD
Other
3,619
0.7
0
0
node-utils
296
2024-07-31T23:50:17.381696
MIT
false
04ed7f4d5e2e54d0a7ffa0dbb4a8405c
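The RECORD row above lists `path,sha256=<digest>,size` entries where the digest is URL-safe base64 without padding; a minimal verification sketch (the file path is hypothetical and assumed to exist locally):

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Return a file's sha256 digest in wheel RECORD form: urlsafe base64, no '=' padding."""
    digest = hashlib.sha256(open(path, "rb").read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Compare the result against the digest column of a RECORD entry, e.g.
# "jinja2/_identifier.py,sha256=_zYctNKzRqlk...,1958" (path hypothetical here):
# print(record_hash("jinja2/_identifier.py"))
```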
Wheel-Version: 1.0\nGenerator: flit 3.11.0\nRoot-Is-Purelib: true\nTag: py3-none-any\n
.venv\Lib\site-packages\jinja2-3.1.6.dist-info\WHEEL
WHEEL
Other
82
0.5
0
0
node-utils
675
2024-11-04T05:37:21.448072
GPL-3.0
false
a349dd2fc1d41151594f70d2b51f7544
Copyright 2007 Pallets\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n
.venv\Lib\site-packages\jinja2-3.1.6.dist-info\licenses\LICENSE.txt
LICENSE.txt
Other
1,475
0.7
0
0
node-utils
464
2024-09-16T14:17:51.116708
BSD-3-Clause
false
5dc88300786f1c214c1e9827a5229462
"""\nBackports of fixes for joblib dependencies\n"""\n\nimport os\nimport re\nimport time\nfrom multiprocessing import util\nfrom os.path import basename\n\n\nclass Version:\n """Backport from deprecated distutils\n\n We maintain this backport to avoid introducing a new dependency on\n `packaging`.\n\n We might rexplore this choice in the future if all major Python projects\n introduce a dependency on packaging anyway.\n """\n\n def __init__(self, vstring=None):\n if vstring:\n self.parse(vstring)\n\n def __repr__(self):\n return "%s ('%s')" % (self.__class__.__name__, str(self))\n\n def __eq__(self, other):\n c = self._cmp(other)\n if c is NotImplemented:\n return c\n return c == 0\n\n def __lt__(self, other):\n c = self._cmp(other)\n if c is NotImplemented:\n return c\n return c < 0\n\n def __le__(self, other):\n c = self._cmp(other)\n if c is NotImplemented:\n return c\n return c <= 0\n\n def __gt__(self, other):\n c = self._cmp(other)\n if c is NotImplemented:\n return c\n return c > 0\n\n def __ge__(self, other):\n c = self._cmp(other)\n if c is NotImplemented:\n return c\n return c >= 0\n\n\nclass LooseVersion(Version):\n """Backport from deprecated distutils\n\n We maintain this backport to avoid introducing a new dependency on\n `packaging`.\n\n We might rexplore this choice in the future if all major Python projects\n introduce a dependency on packaging anyway.\n """\n\n component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)\n\n def __init__(self, vstring=None):\n if vstring:\n self.parse(vstring)\n\n def parse(self, vstring):\n # I've given up on thinking I can reconstruct the version string\n # from the parsed tuple -- so I just store the string here for\n # use by __str__\n self.vstring = vstring\n components = [x for x in self.component_re.split(vstring) if x and x != "."]\n for i, obj in enumerate(components):\n try:\n components[i] = int(obj)\n except ValueError:\n pass\n\n self.version = components\n\n def __str__(self):\n return self.vstring\n\n def __repr__(self):\n return "LooseVersion ('%s')" % str(self)\n\n def _cmp(self, other):\n if isinstance(other, str):\n other = LooseVersion(other)\n elif not isinstance(other, LooseVersion):\n return NotImplemented\n\n if self.version == other.version:\n return 0\n if self.version < other.version:\n return -1\n if self.version > other.version:\n return 1\n\n\ntry:\n import numpy as np\n\n def make_memmap(\n filename,\n dtype="uint8",\n mode="r+",\n offset=0,\n shape=None,\n order="C",\n unlink_on_gc_collect=False,\n ):\n """Custom memmap constructor compatible with numpy.memmap.\n\n This function:\n - is a backport the numpy memmap offset fix (See\n https://github.com/numpy/numpy/pull/8443 for more details.\n The numpy fix is available starting numpy 1.13)\n - adds ``unlink_on_gc_collect``, which specifies explicitly whether\n the process re-constructing the memmap owns a reference to the\n underlying file. 
If set to True, it adds a finalizer to the\n newly-created memmap that sends a maybe_unlink request for the\n memmaped file to resource_tracker.\n """\n util.debug(\n "[MEMMAP READ] creating a memmap (shape {}, filename {}, pid {})".format(\n shape, basename(filename), os.getpid()\n )\n )\n\n mm = np.memmap(\n filename, dtype=dtype, mode=mode, offset=offset, shape=shape, order=order\n )\n if LooseVersion(np.__version__) < "1.13":\n mm.offset = offset\n if unlink_on_gc_collect:\n from ._memmapping_reducer import add_maybe_unlink_finalizer\n\n add_maybe_unlink_finalizer(mm)\n return mm\nexcept ImportError:\n\n def make_memmap(\n filename,\n dtype="uint8",\n mode="r+",\n offset=0,\n shape=None,\n order="C",\n unlink_on_gc_collect=False,\n ):\n raise NotImplementedError(\n "'joblib.backports.make_memmap' should not be used "\n "if numpy is not installed."\n )\n\n\nif os.name == "nt":\n # https://github.com/joblib/joblib/issues/540\n access_denied_errors = (5, 13)\n from os import replace\n\n def concurrency_safe_rename(src, dst):\n """Renames ``src`` into ``dst`` overwriting ``dst`` if it exists.\n\n On Windows os.replace can yield permission errors if executed by two\n different processes.\n """\n max_sleep_time = 1\n total_sleep_time = 0\n sleep_time = 0.001\n while total_sleep_time < max_sleep_time:\n try:\n replace(src, dst)\n break\n except Exception as exc:\n if getattr(exc, "winerror", None) in access_denied_errors:\n time.sleep(sleep_time)\n total_sleep_time += sleep_time\n sleep_time *= 2\n else:\n raise\n else:\n raise\nelse:\n from os import replace as concurrency_safe_rename # noqa\n
.venv\Lib\site-packages\joblib\backports.py
backports.py
Python
5,450
0.95
0.251282
0.025
node-utils
8
2024-12-09T23:35:33.989433
Apache-2.0
false
afb9442bfdcfc4aa18622148f61bacb4
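The backports.py row above carries joblib's LooseVersion backport; a minimal comparison sketch (assumes joblib is installed):

```python
from joblib.backports import LooseVersion

# Version strings are split into integer and string components and compared element-wise.
assert LooseVersion("1.13") < LooseVersion("1.20.1")
assert LooseVersion("0.19") == LooseVersion("0.19")
print(LooseVersion("1.20.1").version)  # [1, 20, 1]
```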
"""Classes and functions for managing compressors."""\n\nimport io\nimport zlib\n\nfrom joblib.backports import LooseVersion\n\ntry:\n from threading import RLock\nexcept ImportError:\n from dummy_threading import RLock\n\ntry:\n import bz2\nexcept ImportError:\n bz2 = None\n\ntry:\n import lz4\n from lz4.frame import LZ4FrameFile\nexcept ImportError:\n lz4 = None\n\ntry:\n import lzma\nexcept ImportError:\n lzma = None\n\n\nLZ4_NOT_INSTALLED_ERROR = (\n "LZ4 is not installed. Install it with pip: https://python-lz4.readthedocs.io/"\n)\n\n# Registered compressors\n_COMPRESSORS = {}\n\n# Magic numbers of supported compression file formats.\n_ZFILE_PREFIX = b"ZF" # used with pickle files created before 0.9.3.\n_ZLIB_PREFIX = b"\x78"\n_GZIP_PREFIX = b"\x1f\x8b"\n_BZ2_PREFIX = b"BZ"\n_XZ_PREFIX = b"\xfd\x37\x7a\x58\x5a"\n_LZMA_PREFIX = b"\x5d\x00"\n_LZ4_PREFIX = b"\x04\x22\x4d\x18"\n\n\ndef register_compressor(compressor_name, compressor, force=False):\n """Register a new compressor.\n\n Parameters\n ----------\n compressor_name: str.\n The name of the compressor.\n compressor: CompressorWrapper\n An instance of a 'CompressorWrapper'.\n """\n global _COMPRESSORS\n if not isinstance(compressor_name, str):\n raise ValueError(\n "Compressor name should be a string, '{}' given.".format(compressor_name)\n )\n\n if not isinstance(compressor, CompressorWrapper):\n raise ValueError(\n "Compressor should implement the CompressorWrapper "\n "interface, '{}' given.".format(compressor)\n )\n\n if compressor.fileobj_factory is not None and (\n not hasattr(compressor.fileobj_factory, "read")\n or not hasattr(compressor.fileobj_factory, "write")\n or not hasattr(compressor.fileobj_factory, "seek")\n or not hasattr(compressor.fileobj_factory, "tell")\n ):\n raise ValueError(\n "Compressor 'fileobj_factory' attribute should "\n "implement the file object interface, '{}' given.".format(\n compressor.fileobj_factory\n )\n )\n\n if compressor_name in _COMPRESSORS and not force:\n raise ValueError("Compressor '{}' already registered.".format(compressor_name))\n\n _COMPRESSORS[compressor_name] = compressor\n\n\nclass CompressorWrapper:\n """A wrapper around a compressor file object.\n\n Attributes\n ----------\n obj: a file-like object\n The object must implement the buffer interface and will be used\n internally to compress/decompress the data.\n prefix: bytestring\n A bytestring corresponding to the magic number that identifies the\n file format associated to the compressor.\n extension: str\n The file extension used to automatically select this compressor during\n a dump to a file.\n """\n\n def __init__(self, obj, prefix=b"", extension=""):\n self.fileobj_factory = obj\n self.prefix = prefix\n self.extension = extension\n\n def compressor_file(self, fileobj, compresslevel=None):\n """Returns an instance of a compressor file object."""\n if compresslevel is None:\n return self.fileobj_factory(fileobj, "wb")\n else:\n return self.fileobj_factory(fileobj, "wb", compresslevel=compresslevel)\n\n def decompressor_file(self, fileobj):\n """Returns an instance of a decompressor file object."""\n return self.fileobj_factory(fileobj, "rb")\n\n\nclass BZ2CompressorWrapper(CompressorWrapper):\n prefix = _BZ2_PREFIX\n extension = ".bz2"\n\n def __init__(self):\n if bz2 is not None:\n self.fileobj_factory = bz2.BZ2File\n else:\n self.fileobj_factory = None\n\n def _check_versions(self):\n if bz2 is None:\n raise ValueError(\n "bz2 module is not compiled on your python standard library."\n )\n\n def compressor_file(self, 
fileobj, compresslevel=None):\n """Returns an instance of a compressor file object."""\n self._check_versions()\n if compresslevel is None:\n return self.fileobj_factory(fileobj, "wb")\n else:\n return self.fileobj_factory(fileobj, "wb", compresslevel=compresslevel)\n\n def decompressor_file(self, fileobj):\n """Returns an instance of a decompressor file object."""\n self._check_versions()\n fileobj = self.fileobj_factory(fileobj, "rb")\n return fileobj\n\n\nclass LZMACompressorWrapper(CompressorWrapper):\n prefix = _LZMA_PREFIX\n extension = ".lzma"\n _lzma_format_name = "FORMAT_ALONE"\n\n def __init__(self):\n if lzma is not None:\n self.fileobj_factory = lzma.LZMAFile\n self._lzma_format = getattr(lzma, self._lzma_format_name)\n else:\n self.fileobj_factory = None\n\n def _check_versions(self):\n if lzma is None:\n raise ValueError(\n "lzma module is not compiled on your python standard library."\n )\n\n def compressor_file(self, fileobj, compresslevel=None):\n """Returns an instance of a compressor file object."""\n if compresslevel is None:\n return self.fileobj_factory(fileobj, "wb", format=self._lzma_format)\n else:\n return self.fileobj_factory(\n fileobj, "wb", format=self._lzma_format, preset=compresslevel\n )\n\n def decompressor_file(self, fileobj):\n """Returns an instance of a decompressor file object."""\n return lzma.LZMAFile(fileobj, "rb")\n\n\nclass XZCompressorWrapper(LZMACompressorWrapper):\n prefix = _XZ_PREFIX\n extension = ".xz"\n _lzma_format_name = "FORMAT_XZ"\n\n\nclass LZ4CompressorWrapper(CompressorWrapper):\n prefix = _LZ4_PREFIX\n extension = ".lz4"\n\n def __init__(self):\n if lz4 is not None:\n self.fileobj_factory = LZ4FrameFile\n else:\n self.fileobj_factory = None\n\n def _check_versions(self):\n if lz4 is None:\n raise ValueError(LZ4_NOT_INSTALLED_ERROR)\n lz4_version = lz4.__version__\n if lz4_version.startswith("v"):\n lz4_version = lz4_version[1:]\n if LooseVersion(lz4_version) < LooseVersion("0.19"):\n raise ValueError(LZ4_NOT_INSTALLED_ERROR)\n\n def compressor_file(self, fileobj, compresslevel=None):\n """Returns an instance of a compressor file object."""\n self._check_versions()\n if compresslevel is None:\n return self.fileobj_factory(fileobj, "wb")\n else:\n return self.fileobj_factory(fileobj, "wb", compression_level=compresslevel)\n\n def decompressor_file(self, fileobj):\n """Returns an instance of a decompressor file object."""\n self._check_versions()\n return self.fileobj_factory(fileobj, "rb")\n\n\n###############################################################################\n# base file compression/decompression object definition\n_MODE_CLOSED = 0\n_MODE_READ = 1\n_MODE_READ_EOF = 2\n_MODE_WRITE = 3\n_BUFFER_SIZE = 8192\n\n\nclass BinaryZlibFile(io.BufferedIOBase):\n """A file object providing transparent zlib (de)compression.\n\n TODO python2_drop: is it still needed since we dropped Python 2 support A\n BinaryZlibFile can act as a wrapper for an existing file object, or refer\n directly to a named file on disk.\n\n Note that BinaryZlibFile provides only a *binary* file interface: data read\n is returned as bytes, and data to be written should be given as bytes.\n\n This object is an adaptation of the BZ2File object and is compatible with\n versions of python >= 2.7.\n\n If filename is a str or bytes object, it gives the name\n of the file to be opened. 
Otherwise, it should be a file object,\n which will be used to read or write the compressed data.\n\n mode can be 'rb' for reading (default) or 'wb' for (over)writing\n\n If mode is 'wb', compresslevel can be a number between 1\n and 9 specifying the level of compression: 1 produces the least\n compression, and 9 produces the most compression. 3 is the default.\n """\n\n wbits = zlib.MAX_WBITS\n\n def __init__(self, filename, mode="rb", compresslevel=3):\n # This lock must be recursive, so that BufferedIOBase's\n # readline(), readlines() and writelines() don't deadlock.\n self._lock = RLock()\n self._fp = None\n self._closefp = False\n self._mode = _MODE_CLOSED\n self._pos = 0\n self._size = -1\n self.compresslevel = compresslevel\n\n if not isinstance(compresslevel, int) or not (1 <= compresslevel <= 9):\n raise ValueError(\n "'compresslevel' must be an integer "\n "between 1 and 9. You provided 'compresslevel={}'".format(compresslevel)\n )\n\n if mode == "rb":\n self._mode = _MODE_READ\n self._decompressor = zlib.decompressobj(self.wbits)\n self._buffer = b""\n self._buffer_offset = 0\n elif mode == "wb":\n self._mode = _MODE_WRITE\n self._compressor = zlib.compressobj(\n self.compresslevel, zlib.DEFLATED, self.wbits, zlib.DEF_MEM_LEVEL, 0\n )\n else:\n raise ValueError("Invalid mode: %r" % (mode,))\n\n if isinstance(filename, str):\n self._fp = io.open(filename, mode)\n self._closefp = True\n elif hasattr(filename, "read") or hasattr(filename, "write"):\n self._fp = filename\n else:\n raise TypeError("filename must be a str or bytes object, or a file")\n\n def close(self):\n """Flush and close the file.\n\n May be called more than once without error. Once the file is\n closed, any other operation on it will raise a ValueError.\n """\n with self._lock:\n if self._mode == _MODE_CLOSED:\n return\n try:\n if self._mode in (_MODE_READ, _MODE_READ_EOF):\n self._decompressor = None\n elif self._mode == _MODE_WRITE:\n self._fp.write(self._compressor.flush())\n self._compressor = None\n finally:\n try:\n if self._closefp:\n self._fp.close()\n finally:\n self._fp = None\n self._closefp = False\n self._mode = _MODE_CLOSED\n self._buffer = b""\n self._buffer_offset = 0\n\n @property\n def closed(self):\n """True if this file is closed."""\n return self._mode == _MODE_CLOSED\n\n def fileno(self):\n """Return the file descriptor for the underlying file."""\n self._check_not_closed()\n return self._fp.fileno()\n\n def seekable(self):\n """Return whether the file supports seeking."""\n return self.readable() and self._fp.seekable()\n\n def readable(self):\n """Return whether the file was opened for reading."""\n self._check_not_closed()\n return self._mode in (_MODE_READ, _MODE_READ_EOF)\n\n def writable(self):\n """Return whether the file was opened for writing."""\n self._check_not_closed()\n return self._mode == _MODE_WRITE\n\n # Mode-checking helper functions.\n\n def _check_not_closed(self):\n if self.closed:\n fname = getattr(self._fp, "name", None)\n msg = "I/O operation on closed file"\n if fname is not None:\n msg += " {}".format(fname)\n msg += "."\n raise ValueError(msg)\n\n def _check_can_read(self):\n if self._mode not in (_MODE_READ, _MODE_READ_EOF):\n self._check_not_closed()\n raise io.UnsupportedOperation("File not open for reading")\n\n def _check_can_write(self):\n if self._mode != _MODE_WRITE:\n self._check_not_closed()\n raise io.UnsupportedOperation("File not open for writing")\n\n def _check_can_seek(self):\n if self._mode not in (_MODE_READ, _MODE_READ_EOF):\n 
self._check_not_closed()\n raise io.UnsupportedOperation(\n "Seeking is only supported on files open for reading"\n )\n if not self._fp.seekable():\n raise io.UnsupportedOperation(\n "The underlying file object does not support seeking"\n )\n\n # Fill the readahead buffer if it is empty. Returns False on EOF.\n def _fill_buffer(self):\n if self._mode == _MODE_READ_EOF:\n return False\n # Depending on the input data, our call to the decompressor may not\n # return any data. In this case, try again after reading another block.\n while self._buffer_offset == len(self._buffer):\n try:\n rawblock = self._decompressor.unused_data or self._fp.read(_BUFFER_SIZE)\n if not rawblock:\n raise EOFError\n except EOFError:\n # End-of-stream marker and end of file. We're good.\n self._mode = _MODE_READ_EOF\n self._size = self._pos\n return False\n else:\n self._buffer = self._decompressor.decompress(rawblock)\n self._buffer_offset = 0\n return True\n\n # Read data until EOF.\n # If return_data is false, consume the data without returning it.\n def _read_all(self, return_data=True):\n # The loop assumes that _buffer_offset is 0. Ensure that this is true.\n self._buffer = self._buffer[self._buffer_offset :]\n self._buffer_offset = 0\n\n blocks = []\n while self._fill_buffer():\n if return_data:\n blocks.append(self._buffer)\n self._pos += len(self._buffer)\n self._buffer = b""\n if return_data:\n return b"".join(blocks)\n\n # Read a block of up to n bytes.\n # If return_data is false, consume the data without returning it.\n def _read_block(self, n_bytes, return_data=True):\n # If we have enough data buffered, return immediately.\n end = self._buffer_offset + n_bytes\n if end <= len(self._buffer):\n data = self._buffer[self._buffer_offset : end]\n self._buffer_offset = end\n self._pos += len(data)\n return data if return_data else None\n\n # The loop assumes that _buffer_offset is 0. Ensure that this is true.\n self._buffer = self._buffer[self._buffer_offset :]\n self._buffer_offset = 0\n\n blocks = []\n while n_bytes > 0 and self._fill_buffer():\n if n_bytes < len(self._buffer):\n data = self._buffer[:n_bytes]\n self._buffer_offset = n_bytes\n else:\n data = self._buffer\n self._buffer = b""\n if return_data:\n blocks.append(data)\n self._pos += len(data)\n n_bytes -= len(data)\n if return_data:\n return b"".join(blocks)\n\n def read(self, size=-1):\n """Read up to size uncompressed bytes from the file.\n\n If size is negative or omitted, read until EOF is reached.\n Returns b'' if the file is already at EOF.\n """\n with self._lock:\n self._check_can_read()\n if size == 0:\n return b""\n elif size < 0:\n return self._read_all()\n else:\n return self._read_block(size)\n\n def readinto(self, b):\n """Read up to len(b) bytes into b.\n\n Returns the number of bytes read (0 for EOF).\n """\n with self._lock:\n return io.BufferedIOBase.readinto(self, b)\n\n def write(self, data):\n """Write a byte string to the file.\n\n Returns the number of uncompressed bytes written, which is\n always len(data). 
Note that due to buffering, the file on disk\n may not reflect the data written until close() is called.\n """\n with self._lock:\n self._check_can_write()\n # Convert data type if called by io.BufferedWriter.\n if isinstance(data, memoryview):\n data = data.tobytes()\n\n compressed = self._compressor.compress(data)\n self._fp.write(compressed)\n self._pos += len(data)\n return len(data)\n\n # Rewind the file to the beginning of the data stream.\n def _rewind(self):\n self._fp.seek(0, 0)\n self._mode = _MODE_READ\n self._pos = 0\n self._decompressor = zlib.decompressobj(self.wbits)\n self._buffer = b""\n self._buffer_offset = 0\n\n def seek(self, offset, whence=0):\n """Change the file position.\n\n The new position is specified by offset, relative to the\n position indicated by whence. Values for whence are:\n\n 0: start of stream (default); offset must not be negative\n 1: current stream position\n 2: end of stream; offset must not be positive\n\n Returns the new file position.\n\n Note that seeking is emulated, so depending on the parameters,\n this operation may be extremely slow.\n """\n with self._lock:\n self._check_can_seek()\n\n # Recalculate offset as an absolute file position.\n if whence == 0:\n pass\n elif whence == 1:\n offset = self._pos + offset\n elif whence == 2:\n # Seeking relative to EOF - we need to know the file's size.\n if self._size < 0:\n self._read_all(return_data=False)\n offset = self._size + offset\n else:\n raise ValueError("Invalid value for whence: %s" % (whence,))\n\n # Make it so that offset is the number of bytes to skip forward.\n if offset < self._pos:\n self._rewind()\n else:\n offset -= self._pos\n\n # Read and discard data until we reach the desired position.\n self._read_block(offset, return_data=False)\n\n return self._pos\n\n def tell(self):\n """Return the current file position."""\n with self._lock:\n self._check_not_closed()\n return self._pos\n\n\nclass ZlibCompressorWrapper(CompressorWrapper):\n def __init__(self):\n CompressorWrapper.__init__(\n self, obj=BinaryZlibFile, prefix=_ZLIB_PREFIX, extension=".z"\n )\n\n\nclass BinaryGzipFile(BinaryZlibFile):\n """A file object providing transparent gzip (de)compression.\n\n If filename is a str or bytes object, it gives the name\n of the file to be opened. Otherwise, it should be a file object,\n which will be used to read or write the compressed data.\n\n mode can be 'rb' for reading (default) or 'wb' for (over)writing\n\n If mode is 'wb', compresslevel can be a number between 1\n and 9 specifying the level of compression: 1 produces the least\n compression, and 9 produces the most compression. 3 is the default.\n """\n\n wbits = 31 # zlib compressor/decompressor wbits value for gzip format.\n\n\nclass GzipCompressorWrapper(CompressorWrapper):\n def __init__(self):\n CompressorWrapper.__init__(\n self, obj=BinaryGzipFile, prefix=_GZIP_PREFIX, extension=".gz"\n )\n
.venv\Lib\site-packages\joblib\compressor.py
compressor.py
Python
19,281
0.95
0.20979
0.051282
node-utils
254
2024-09-26T02:25:07.265615
BSD-3-Clause
false
fb680679673d2ec632c4cedb49c93313
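The compressor.py row above defines BinaryZlibFile, a binary file object with transparent zlib (de)compression; a minimal round-trip sketch (assumes joblib is installed; writes to a temporary file):

```python
import os
import tempfile

from joblib.compressor import BinaryZlibFile

path = os.path.join(tempfile.mkdtemp(), "payload.z")

# Write zlib-compressed bytes, then read them back through the same wrapper.
with BinaryZlibFile(path, "wb", compresslevel=3) as f:
    f.write(b"hello joblib" * 100)

with BinaryZlibFile(path, "rb") as f:
    assert f.read() == b"hello joblib" * 100
```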
"""\nDisk management utilities.\n"""\n\n# Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Lars Buitinck\n# Copyright (c) 2010 Gael Varoquaux\n# License: BSD Style, 3 clauses.\n\nimport errno\nimport os\nimport shutil\nimport sys\nimport time\nfrom multiprocessing import util\n\ntry:\n WindowsError\nexcept NameError:\n WindowsError = OSError\n\n\ndef disk_used(path):\n """Return the disk usage in a directory."""\n size = 0\n for file in os.listdir(path) + ["."]:\n stat = os.stat(os.path.join(path, file))\n if hasattr(stat, "st_blocks"):\n size += stat.st_blocks * 512\n else:\n # on some platform st_blocks is not available (e.g., Windows)\n # approximate by rounding to next multiple of 512\n size += (stat.st_size // 512 + 1) * 512\n # We need to convert to int to avoid having longs on some systems (we\n # don't want longs to avoid problems we SQLite)\n return int(size / 1024.0)\n\n\ndef memstr_to_bytes(text):\n """Convert a memory text to its value in bytes."""\n kilo = 1024\n units = dict(K=kilo, M=kilo**2, G=kilo**3)\n try:\n size = int(units[text[-1]] * float(text[:-1]))\n except (KeyError, ValueError) as e:\n raise ValueError(\n "Invalid literal for size give: %s (type %s) should be "\n "alike '10G', '500M', '50K'." % (text, type(text))\n ) from e\n return size\n\n\ndef mkdirp(d):\n """Ensure directory d exists (like mkdir -p on Unix)\n No guarantee that the directory is writable.\n """\n try:\n os.makedirs(d)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\n# if a rmtree operation fails in rm_subdirs, wait for this much time (in secs),\n# then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the\n# exception. this mechanism ensures that the sub-process gc have the time to\n# collect and close the memmaps before we fail.\nRM_SUBDIRS_RETRY_TIME = 0.1\nRM_SUBDIRS_N_RETRY = 10\n\n\ndef rm_subdirs(path, onerror=None):\n """Remove all subdirectories in this path.\n\n The directory indicated by `path` is left in place, and its subdirectories\n are erased.\n\n If onerror is set, it is called to handle the error with arguments (func,\n path, exc_info) where func is os.listdir, os.remove, or os.rmdir;\n path is the argument to that function that caused it to fail; and\n exc_info is a tuple returned by sys.exc_info(). If onerror is None,\n an exception is raised.\n """\n\n # NOTE this code is adapted from the one in shutil.rmtree, and is\n # just as fast\n\n names = []\n try:\n names = os.listdir(path)\n except os.error:\n if onerror is not None:\n onerror(os.listdir, path, sys.exc_info())\n else:\n raise\n\n for name in names:\n fullname = os.path.join(path, name)\n delete_folder(fullname, onerror=onerror)\n\n\ndef delete_folder(folder_path, onerror=None, allow_non_empty=True):\n """Utility function to cleanup a temporary folder if it still exists."""\n if os.path.isdir(folder_path):\n if onerror is not None:\n shutil.rmtree(folder_path, False, onerror)\n else:\n # allow the rmtree to fail once, wait and re-try.\n # if the error is raised again, fail\n err_count = 0\n while True:\n files = os.listdir(folder_path)\n try:\n if len(files) == 0 or allow_non_empty:\n shutil.rmtree(folder_path, ignore_errors=False, onerror=None)\n util.debug("Successfully deleted {}".format(folder_path))\n break\n else:\n raise OSError(\n "Expected empty folder {} but got {} files.".format(\n folder_path, len(files)\n )\n )\n except (OSError, WindowsError):\n err_count += 1\n if err_count > RM_SUBDIRS_N_RETRY:\n # the folder cannot be deleted right now. 
It maybe\n # because some temporary files have not been deleted\n # yet.\n raise\n time.sleep(RM_SUBDIRS_RETRY_TIME)\n
.venv\Lib\site-packages\joblib\disk.py
disk.py
Python
4,332
0.95
0.21374
0.171171
node-utils
366
2023-07-26T15:40:06.222223
GPL-3.0
false
2283f4fced12fa64b97c9f23115426e9
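The disk.py row above includes memstr_to_bytes, which converts size strings such as '10G', '500M' or '50K' into bytes; a minimal sketch (assumes joblib is installed):

```python
from joblib.disk import memstr_to_bytes

# The K, M and G suffixes are interpreted as powers of 1024.
assert memstr_to_bytes("500K") == 500 * 1024
assert memstr_to_bytes("10M") == 10 * 1024 ** 2
print(memstr_to_bytes("2G"))  # 2147483648
```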
"""Utility function to construct a loky.ReusableExecutor with custom pickler.\n\nThis module provides efficient ways of working with data stored in\nshared memory with numpy.memmap arrays without inducing any memory\ncopy between the parent and child processes.\n"""\n# Author: Thomas Moreau <thomas.moreau.2010@gmail.com>\n# Copyright: 2017, Thomas Moreau\n# License: BSD 3 clause\n\nfrom ._memmapping_reducer import TemporaryResourcesManager, get_memmapping_reducers\nfrom .externals.loky.reusable_executor import _ReusablePoolExecutor\n\n_executor_args = None\n\n\ndef get_memmapping_executor(n_jobs, **kwargs):\n return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)\n\n\nclass MemmappingExecutor(_ReusablePoolExecutor):\n @classmethod\n def get_memmapping_executor(\n cls,\n n_jobs,\n timeout=300,\n initializer=None,\n initargs=(),\n env=None,\n temp_folder=None,\n context_id=None,\n **backend_args,\n ):\n """Factory for ReusableExecutor with automatic memmapping for large\n numpy arrays.\n """\n global _executor_args\n # Check if we can reuse the executor here instead of deferring the test\n # to loky as the reducers are objects that changes at each call.\n executor_args = backend_args.copy()\n executor_args.update(env if env else {})\n executor_args.update(\n dict(timeout=timeout, initializer=initializer, initargs=initargs)\n )\n reuse = _executor_args is None or _executor_args == executor_args\n _executor_args = executor_args\n\n manager = TemporaryResourcesManager(temp_folder)\n\n # reducers access the temporary folder in which to store temporary\n # pickles through a call to manager.resolve_temp_folder_name. resolving\n # the folder name dynamically is useful to use different folders across\n # calls of a same reusable executor\n job_reducers, result_reducers = get_memmapping_reducers(\n unlink_on_gc_collect=True,\n temp_folder_resolver=manager.resolve_temp_folder_name,\n **backend_args,\n )\n _executor, executor_is_reused = super().get_reusable_executor(\n n_jobs,\n job_reducers=job_reducers,\n result_reducers=result_reducers,\n reuse=reuse,\n timeout=timeout,\n initializer=initializer,\n initargs=initargs,\n env=env,\n )\n\n if not executor_is_reused:\n # Only set a _temp_folder_manager for new executors. Reused\n # executors already have a _temporary_folder_manager that must not\n # be re-assigned like that because it is referenced in various\n # places in the reducing machinery of the executor.\n _executor._temp_folder_manager = manager\n\n if context_id is not None:\n # Only register the specified context once we know which manager\n # the current executor is using, in order to not register an atexit\n # finalizer twice for the same folder.\n _executor._temp_folder_manager.register_new_context(context_id)\n\n return _executor\n\n def terminate(self, kill_workers=False):\n self.shutdown(kill_workers=kill_workers)\n\n # When workers are killed in a brutal manner, they cannot execute the\n # finalizer of their shared memmaps. The refcount of those memmaps may\n # be off by an unknown number, so instead of decref'ing them, we force\n # delete the whole temporary folder, and unregister them. There is no\n # risk of PermissionError at folder deletion because at this\n # point, all child processes are dead, so all references to temporary\n # memmaps are closed. 
Otherwise, just try to delete as much as possible\n # with allow_non_empty=True but if we can't, it will be clean up later\n # on by the resource_tracker.\n with self._submit_resize_lock:\n self._temp_folder_manager._clean_temporary_resources(\n force=kill_workers, allow_non_empty=True\n )\n\n @property\n def _temp_folder(self):\n # Legacy property in tests. could be removed if we refactored the\n # memmapping tests. SHOULD ONLY BE USED IN TESTS!\n # We cache this property because it is called late in the tests - at\n # this point, all context have been unregistered, and\n # resolve_temp_folder_name raises an error.\n if getattr(self, "_cached_temp_folder", None) is not None:\n return self._cached_temp_folder\n else:\n self._cached_temp_folder = (\n self._temp_folder_manager.resolve_temp_folder_name()\n ) # noqa\n return self._cached_temp_folder\n\n\nclass _TestingMemmappingExecutor(MemmappingExecutor):\n """Wrapper around ReusableExecutor to ease memmapping testing with Pool\n and Executor. This is only for testing purposes.\n\n """\n\n def apply_async(self, func, args):\n """Schedule a func to be run"""\n future = self.submit(func, *args)\n future.get = future.result\n return future\n\n def map(self, f, *args):\n return list(super().map(f, *args))\n
.venv\Lib\site-packages\joblib\executor.py
executor.py
Python
5,229
0.95
0.167939
0.288288
vue-tools
16
2025-01-14T16:46:21.981753
Apache-2.0
false
a8a129c44ceffddd00edcb01290349f5
"""\nMy own variation on function-specific inspect-like features.\n"""\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Copyright (c) 2009 Gael Varoquaux\n# License: BSD Style, 3 clauses.\n\nimport collections\nimport inspect\nimport os\nimport re\nimport warnings\nfrom itertools import islice\nfrom tokenize import open as open_py_source\n\nfrom .logger import pformat\n\nfull_argspec_fields = (\n "args varargs varkw defaults kwonlyargs kwonlydefaults annotations"\n)\nfull_argspec_type = collections.namedtuple("FullArgSpec", full_argspec_fields)\n\n\ndef get_func_code(func):\n """Attempts to retrieve a reliable function code hash.\n\n The reason we don't use inspect.getsource is that it caches the\n source, whereas we want this to be modified on the fly when the\n function is modified.\n\n Returns\n -------\n func_code: string\n The function code\n source_file: string\n The path to the file in which the function is defined.\n first_line: int\n The first line of the code in the source file.\n\n Notes\n ------\n This function does a bit more magic than inspect, and is thus\n more robust.\n """\n source_file = None\n try:\n code = func.__code__\n source_file = code.co_filename\n if not os.path.exists(source_file):\n # Use inspect for lambda functions and functions defined in an\n # interactive shell, or in doctests\n source_code = "".join(inspect.getsourcelines(func)[0])\n line_no = 1\n if source_file.startswith("<doctest "):\n source_file, line_no = re.match(\n r"\<doctest (.*\.rst)\[(.*)\]\>", source_file\n ).groups()\n line_no = int(line_no)\n source_file = "<doctest %s>" % source_file\n return source_code, source_file, line_no\n # Try to retrieve the source code.\n with open_py_source(source_file) as source_file_obj:\n first_line = code.co_firstlineno\n # All the lines after the function definition:\n source_lines = list(islice(source_file_obj, first_line - 1, None))\n return "".join(inspect.getblock(source_lines)), source_file, first_line\n except: # noqa: E722\n # If the source code fails, we use the hash. 
This is fragile and\n # might change from one session to another.\n if hasattr(func, "__code__"):\n # Python 3.X\n return str(func.__code__.__hash__()), source_file, -1\n else:\n # Weird objects like numpy ufunc don't have __code__\n # This is fragile, as quite often the id of the object is\n # in the repr, so it might not persist across sessions,\n # however it will work for ufuncs.\n return repr(func), source_file, -1\n\n\ndef _clean_win_chars(string):\n """Windows cannot encode some characters in filename."""\n import urllib\n\n if hasattr(urllib, "quote"):\n quote = urllib.quote\n else:\n # In Python 3, quote is elsewhere\n import urllib.parse\n\n quote = urllib.parse.quote\n for char in ("<", ">", "!", ":", "\\"):\n string = string.replace(char, quote(char))\n return string\n\n\ndef get_func_name(func, resolv_alias=True, win_characters=True):\n """Return the function import path (as a list of module names), and\n a name for the function.\n\n Parameters\n ----------\n func: callable\n The func to inspect\n resolv_alias: boolean, optional\n If true, possible local aliases are indicated.\n win_characters: boolean, optional\n If true, substitute special characters using urllib.quote\n This is useful in Windows, as it cannot encode some filenames\n """\n if hasattr(func, "__module__"):\n module = func.__module__\n else:\n try:\n module = inspect.getmodule(func)\n except TypeError:\n if hasattr(func, "__class__"):\n module = func.__class__.__module__\n else:\n module = "unknown"\n if module is None:\n # Happens in doctests, eg\n module = ""\n if module == "__main__":\n try:\n filename = os.path.abspath(inspect.getsourcefile(func))\n except: # noqa: E722\n filename = None\n if filename is not None:\n # mangling of full path to filename\n parts = filename.split(os.sep)\n if parts[-1].startswith("<ipython-input"):\n # We're in a IPython (or notebook) session. parts[-1] comes\n # from func.__code__.co_filename and is of the form\n # <ipython-input-N-XYZ>, where:\n # - N is the cell number where the function was defined\n # - XYZ is a hash representing the function's code (and name).\n # It will be consistent across sessions and kernel restarts,\n # and will change if the function's code/name changes\n # We remove N so that cache is properly hit if the cell where\n # the func is defined is re-exectuted.\n # The XYZ hash should avoid collisions between functions with\n # the same name, both within the same notebook but also across\n # notebooks\n split = parts[-1].split("-")\n parts[-1] = "-".join(split[:2] + split[3:])\n elif len(parts) > 2 and parts[-2].startswith("ipykernel_"):\n # In a notebook session (ipykernel). Filename seems to be 'xyz'\n # of above. 
parts[-2] has the structure ipykernel_XXXXXX where\n # XXXXXX is a six-digit number identifying the current run (?).\n # If we split it off, the function again has the same\n # identifier across runs.\n parts[-2] = "ipykernel"\n filename = "-".join(parts)\n if filename.endswith(".py"):\n filename = filename[:-3]\n module = module + "-" + filename\n module = module.split(".")\n if hasattr(func, "func_name"):\n name = func.func_name\n elif hasattr(func, "__name__"):\n name = func.__name__\n else:\n name = "unknown"\n # Hack to detect functions not defined at the module-level\n if resolv_alias:\n # TODO: Maybe add a warning here?\n if hasattr(func, "func_globals") and name in func.func_globals:\n if func.func_globals[name] is not func:\n name = "%s-alias" % name\n if hasattr(func, "__qualname__") and func.__qualname__ != name:\n # Extend the module name in case of nested functions to avoid\n # (module, name) collisions\n module.extend(func.__qualname__.split(".")[:-1])\n if inspect.ismethod(func):\n # We need to add the name of the class\n if hasattr(func, "im_class"):\n klass = func.im_class\n module.append(klass.__name__)\n if os.name == "nt" and win_characters:\n # Windows can't encode certain characters in filenames\n name = _clean_win_chars(name)\n module = [_clean_win_chars(s) for s in module]\n return module, name\n\n\ndef _signature_str(function_name, arg_sig):\n """Helper function to output a function signature"""\n return "{}{}".format(function_name, arg_sig)\n\n\ndef _function_called_str(function_name, args, kwargs):\n """Helper function to output a function call"""\n template_str = "{0}({1}, {2})"\n\n args_str = repr(args)[1:-1]\n kwargs_str = ", ".join("%s=%s" % (k, v) for k, v in kwargs.items())\n return template_str.format(function_name, args_str, kwargs_str)\n\n\ndef filter_args(func, ignore_lst, args=(), kwargs=dict()):\n """Filters the given args and kwargs using a list of arguments to\n ignore, and a function specification.\n\n Parameters\n ----------\n func: callable\n Function giving the argument specification\n ignore_lst: list of strings\n List of arguments to ignore (either a name of an argument\n in the function spec, or '*', or '**')\n *args: list\n Positional arguments passed to the function.\n **kwargs: dict\n Keyword arguments passed to the function\n\n Returns\n -------\n filtered_args: list\n List of filtered positional and keyword arguments.\n """\n args = list(args)\n if isinstance(ignore_lst, str):\n # Catch a common mistake\n raise ValueError(\n "ignore_lst must be a list of parameters to ignore "\n "%s (type %s) was given" % (ignore_lst, type(ignore_lst))\n )\n # Special case for functools.partial objects\n if not inspect.ismethod(func) and not inspect.isfunction(func):\n if ignore_lst:\n warnings.warn(\n "Cannot inspect object %s, ignore list will not work." 
% func,\n stacklevel=2,\n )\n return {"*": args, "**": kwargs}\n arg_sig = inspect.signature(func)\n arg_names = []\n arg_defaults = []\n arg_kwonlyargs = []\n arg_varargs = None\n arg_varkw = None\n for param in arg_sig.parameters.values():\n if param.kind is param.POSITIONAL_OR_KEYWORD:\n arg_names.append(param.name)\n elif param.kind is param.KEYWORD_ONLY:\n arg_names.append(param.name)\n arg_kwonlyargs.append(param.name)\n elif param.kind is param.VAR_POSITIONAL:\n arg_varargs = param.name\n elif param.kind is param.VAR_KEYWORD:\n arg_varkw = param.name\n if param.default is not param.empty:\n arg_defaults.append(param.default)\n if inspect.ismethod(func):\n # First argument is 'self', it has been removed by Python\n # we need to add it back:\n args = [\n func.__self__,\n ] + args\n # func is an instance method, inspect.signature(func) does not\n # include self, we need to fetch it from the class method, i.e\n # func.__func__\n class_method_sig = inspect.signature(func.__func__)\n self_name = next(iter(class_method_sig.parameters))\n arg_names = [self_name] + arg_names\n # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such\n # as on ndarrays.\n\n _, name = get_func_name(func, resolv_alias=False)\n arg_dict = dict()\n arg_position = -1\n for arg_position, arg_name in enumerate(arg_names):\n if arg_position < len(args):\n # Positional argument or keyword argument given as positional\n if arg_name not in arg_kwonlyargs:\n arg_dict[arg_name] = args[arg_position]\n else:\n raise ValueError(\n "Keyword-only parameter '%s' was passed as "\n "positional parameter for %s:\n"\n " %s was called."\n % (\n arg_name,\n _signature_str(name, arg_sig),\n _function_called_str(name, args, kwargs),\n )\n )\n\n else:\n position = arg_position - len(arg_names)\n if arg_name in kwargs:\n arg_dict[arg_name] = kwargs[arg_name]\n else:\n try:\n arg_dict[arg_name] = arg_defaults[position]\n except (IndexError, KeyError) as e:\n # Missing argument\n raise ValueError(\n "Wrong number of arguments for %s:\n"\n " %s was called."\n % (\n _signature_str(name, arg_sig),\n _function_called_str(name, args, kwargs),\n )\n ) from e\n\n varkwargs = dict()\n for arg_name, arg_value in sorted(kwargs.items()):\n if arg_name in arg_dict:\n arg_dict[arg_name] = arg_value\n elif arg_varkw is not None:\n varkwargs[arg_name] = arg_value\n else:\n raise TypeError(\n "Ignore list for %s() contains an unexpected "\n "keyword argument '%s'" % (name, arg_name)\n )\n\n if arg_varkw is not None:\n arg_dict["**"] = varkwargs\n if arg_varargs is not None:\n varargs = args[arg_position + 1 :]\n arg_dict["*"] = varargs\n\n # Now remove the arguments to be ignored\n for item in ignore_lst:\n if item in arg_dict:\n arg_dict.pop(item)\n else:\n raise ValueError(\n "Ignore list: argument '%s' is not defined for "\n "function %s" % (item, _signature_str(name, arg_sig))\n )\n # XXX: Return a sorted list of pairs?\n return arg_dict\n\n\ndef _format_arg(arg):\n formatted_arg = pformat(arg, indent=2)\n if len(formatted_arg) > 1500:\n formatted_arg = "%s..." 
% formatted_arg[:700]\n return formatted_arg\n\n\ndef format_signature(func, *args, **kwargs):\n # XXX: Should this use inspect.formatargvalues/formatargspec?\n module, name = get_func_name(func)\n module = [m for m in module if m]\n if module:\n module.append(name)\n module_path = ".".join(module)\n else:\n module_path = name\n arg_str = list()\n previous_length = 0\n for arg in args:\n formatted_arg = _format_arg(arg)\n if previous_length > 80:\n formatted_arg = "\n%s" % formatted_arg\n previous_length = len(formatted_arg)\n arg_str.append(formatted_arg)\n arg_str.extend(["%s=%s" % (v, _format_arg(i)) for v, i in kwargs.items()])\n arg_str = ", ".join(arg_str)\n\n signature = "%s(%s)" % (name, arg_str)\n return module_path, signature\n\n\ndef format_call(func, args, kwargs, object_name="Memory"):\n """Returns a nicely formatted statement displaying the function\n call with the given arguments.\n """\n path, signature = format_signature(func, *args, **kwargs)\n msg = "%s\n[%s] Calling %s...\n%s" % (80 * "_", object_name, path, signature)\n return msg\n # XXX: Not using logging framework\n # self.debug(msg)\n
.venv\Lib\site-packages\joblib\func_inspect.py
func_inspect.py
Python
14,017
0.95
0.248021
0.169591
vue-tools
707
2024-07-26T10:53:53.682582
GPL-3.0
false
5d116f483f8b3f7208e2e87b00116db0
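The func_inspect.py row above provides filter_args, which drops ignored arguments from a call's argument dict; a minimal sketch (assumes joblib is installed; `load` is a hypothetical function defined here for illustration):

```python
from joblib.func_inspect import filter_args

def load(path, verbose=0):
    return path, verbose

# Drop 'verbose' so it does not participate in the filtered argument dict.
print(filter_args(load, ["verbose"], ("data.csv",), {"verbose": 1}))
# -> {'path': 'data.csv'}
```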
"""\nFast cryptographic hash of Python objects, with a special case for fast\nhashing of numpy arrays.\n"""\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Copyright (c) 2009 Gael Varoquaux\n# License: BSD Style, 3 clauses.\n\nimport decimal\nimport hashlib\nimport io\nimport pickle\nimport struct\nimport sys\nimport types\n\nPickler = pickle._Pickler\n\n\nclass _ConsistentSet(object):\n """Class used to ensure the hash of Sets is preserved\n whatever the order of its items.\n """\n\n def __init__(self, set_sequence):\n # Forces order of elements in set to ensure consistent hash.\n try:\n # Trying first to order the set assuming the type of elements is\n # consistent and orderable.\n # This fails on python 3 when elements are unorderable\n # but we keep it in a try as it's faster.\n self._sequence = sorted(set_sequence)\n except (TypeError, decimal.InvalidOperation):\n # If elements are unorderable, sorting them using their hash.\n # This is slower but works in any case.\n self._sequence = sorted((hash(e) for e in set_sequence))\n\n\nclass _MyHash(object):\n """Class used to hash objects that won't normally pickle"""\n\n def __init__(self, *args):\n self.args = args\n\n\nclass Hasher(Pickler):\n """A subclass of pickler, to do cryptographic hashing, rather than\n pickling. This is used to produce a unique hash of the given\n Python object that is not necessarily cryptographically secure.\n """\n\n def __init__(self, hash_name="md5"):\n self.stream = io.BytesIO()\n # By default we want a pickle protocol that only changes with\n # the major python version and not the minor one\n protocol = 3\n Pickler.__init__(self, self.stream, protocol=protocol)\n # Initialise the hash obj\n self._hash = hashlib.new(hash_name, usedforsecurity=False)\n\n def hash(self, obj, return_digest=True):\n try:\n self.dump(obj)\n except pickle.PicklingError as e:\n e.args += ("PicklingError while hashing %r: %r" % (obj, e),)\n raise\n dumps = self.stream.getvalue()\n self._hash.update(dumps)\n if return_digest:\n return self._hash.hexdigest()\n\n def save(self, obj):\n if isinstance(obj, (types.MethodType, type({}.pop))):\n # the Pickler cannot pickle instance methods; here we decompose\n # them into components that make them uniquely identifiable\n if hasattr(obj, "__func__"):\n func_name = obj.__func__.__name__\n else:\n func_name = obj.__name__\n inst = obj.__self__\n if type(inst) is type(pickle):\n obj = _MyHash(func_name, inst.__name__)\n elif inst is None:\n # type(None) or type(module) do not pickle\n obj = _MyHash(func_name, inst)\n else:\n cls = obj.__self__.__class__\n obj = _MyHash(func_name, inst, cls)\n Pickler.save(self, obj)\n\n def memoize(self, obj):\n # We want hashing to be sensitive to value instead of reference.\n # For example we want ['aa', 'aa'] and ['aa', 'aaZ'[:2]]\n # to hash to the same value and that's why we disable memoization\n # for strings\n if isinstance(obj, (bytes, str)):\n return\n Pickler.memoize(self, obj)\n\n # The dispatch table of the pickler is not accessible in Python\n # 3, as these lines are only bugware for IPython, we skip them.\n def save_global(self, obj, name=None, pack=struct.pack):\n # We have to override this method in order to deal with objects\n # defined interactively in IPython that are not injected in\n # __main__\n kwargs = dict(name=name, pack=pack)\n del kwargs["pack"]\n try:\n Pickler.save_global(self, obj, **kwargs)\n except pickle.PicklingError:\n Pickler.save_global(self, obj, **kwargs)\n module = getattr(obj, "__module__", 
None)\n if module == "__main__":\n my_name = name\n if my_name is None:\n my_name = obj.__name__\n mod = sys.modules[module]\n if not hasattr(mod, my_name):\n # IPython doesn't inject the variables define\n # interactively in __main__\n setattr(mod, my_name, obj)\n\n dispatch = Pickler.dispatch.copy()\n # builtin\n dispatch[type(len)] = save_global\n # type\n dispatch[type(object)] = save_global\n # classobj\n dispatch[type(Pickler)] = save_global\n # function\n dispatch[type(pickle.dump)] = save_global\n\n # We use *args in _batch_setitems signature because _batch_setitems has an\n # additional 'obj' argument in Python 3.14\n def _batch_setitems(self, items, *args):\n # forces order of keys in dict to ensure consistent hash.\n try:\n # Trying first to compare dict assuming the type of keys is\n # consistent and orderable.\n # This fails on python 3 when keys are unorderable\n # but we keep it in a try as it's faster.\n Pickler._batch_setitems(self, iter(sorted(items)), *args)\n except TypeError:\n # If keys are unorderable, sorting them using their hash. This is\n # slower but works in any case.\n Pickler._batch_setitems(\n self, iter(sorted((hash(k), v) for k, v in items)), *args\n )\n\n def save_set(self, set_items):\n # forces order of items in Set to ensure consistent hash\n Pickler.save(self, _ConsistentSet(set_items))\n\n dispatch[type(set())] = save_set\n\n\nclass NumpyHasher(Hasher):\n """Special case the hasher for when numpy is loaded."""\n\n def __init__(self, hash_name="md5", coerce_mmap=False):\n """\n Parameters\n ----------\n hash_name: string\n The hash algorithm to be used\n coerce_mmap: boolean\n Make no difference between np.memmap and np.ndarray\n objects.\n """\n self.coerce_mmap = coerce_mmap\n Hasher.__init__(self, hash_name=hash_name)\n # delayed import of numpy, to avoid tight coupling\n import numpy as np\n\n self.np = np\n if hasattr(np, "getbuffer"):\n self._getbuffer = np.getbuffer\n else:\n self._getbuffer = memoryview\n\n def save(self, obj):\n """Subclass the save method, to hash ndarray subclass, rather\n than pickling them. Off course, this is a total abuse of\n the Pickler class.\n """\n if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:\n # Compute a hash of the object\n # The update function of the hash requires a c_contiguous buffer.\n if obj.shape == ():\n # 0d arrays need to be flattened because viewing them as bytes\n # raises a ValueError exception.\n obj_c_contiguous = obj.flatten()\n elif obj.flags.c_contiguous:\n obj_c_contiguous = obj\n elif obj.flags.f_contiguous:\n obj_c_contiguous = obj.T\n else:\n # Cater for non-single-segment arrays: this creates a\n # copy, and thus alleviates this issue.\n # XXX: There might be a more efficient way of doing this\n obj_c_contiguous = obj.flatten()\n\n # memoryview is not supported for some dtypes, e.g. datetime64, see\n # https://github.com/numpy/numpy/issues/4983. 
The\n # workaround is to view the array as bytes before\n # taking the memoryview.\n self._hash.update(self._getbuffer(obj_c_contiguous.view(self.np.uint8)))\n\n # We store the class, to be able to distinguish between\n # Objects with the same binary content, but different\n # classes.\n if self.coerce_mmap and isinstance(obj, self.np.memmap):\n # We don't make the difference between memmap and\n # normal ndarrays, to be able to reload previously\n # computed results with memmap.\n klass = self.np.ndarray\n else:\n klass = obj.__class__\n # We also return the dtype and the shape, to distinguish\n # different views on the same data with different dtypes.\n\n # The object will be pickled by the pickler hashed at the end.\n obj = (klass, ("HASHED", obj.dtype, obj.shape, obj.strides))\n elif isinstance(obj, self.np.dtype):\n # numpy.dtype consistent hashing is tricky to get right. This comes\n # from the fact that atomic np.dtype objects are interned:\n # ``np.dtype('f4') is np.dtype('f4')``. The situation is\n # complicated by the fact that this interning does not resist a\n # simple pickle.load/dump roundtrip:\n # ``pickle.loads(pickle.dumps(np.dtype('f4'))) is not\n # np.dtype('f4') Because pickle relies on memoization during\n # pickling, it is easy to\n # produce different hashes for seemingly identical objects, such as\n # ``[np.dtype('f4'), np.dtype('f4')]``\n # and ``[np.dtype('f4'), pickle.loads(pickle.dumps('f4'))]``.\n # To prevent memoization from interfering with hashing, we isolate\n # the serialization (and thus the pickle memoization) of each dtype\n # using each time a different ``pickle.dumps`` call unrelated to\n # the current Hasher instance.\n self._hash.update("_HASHED_DTYPE".encode("utf-8"))\n self._hash.update(pickle.dumps(obj))\n return\n Hasher.save(self, obj)\n\n\ndef hash(obj, hash_name="md5", coerce_mmap=False):\n """Quick calculation of a hash to identify uniquely Python objects\n containing numpy arrays.\n\n Parameters\n ----------\n hash_name: 'md5' or 'sha1'\n Hashing algorithm used. sha1 is supposedly safer, but md5 is\n faster.\n coerce_mmap: boolean\n Make no difference between np.memmap and np.ndarray\n """\n valid_hash_names = ("md5", "sha1")\n if hash_name not in valid_hash_names:\n raise ValueError(\n "Valid options for 'hash_name' are {}. Got hash_name={!r} instead.".format(\n valid_hash_names, hash_name\n )\n )\n if "numpy" in sys.modules:\n hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)\n else:\n hasher = Hasher(hash_name=hash_name)\n return hasher.hash(obj)\n
.venv\Lib\site-packages\joblib\hashing.py
hashing.py
Python
10,694
0.95
0.188889
0.322176
python-kit
542
2024-07-12T15:37:29.753371
Apache-2.0
false
ea9d6ccea9fa077bc82f851e6ee44e2c
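The hashing module above is easiest to grasp from its public entry point. The short sketch below (an illustration only, assuming joblib and numpy are importable; the printed digests depend on the environment) shows the value-based behaviour the Hasher/NumpyHasher pair is built for: equal arrays hash alike, while a change of dtype changes the digest because NumpyHasher folds (class, dtype, shape, strides) into the pickled payload.

import numpy as np
from joblib.hashing import hash as joblib_hash

a = np.arange(10)
b = np.arange(10)

# Value-based hashing: two distinct but equal arrays get the same digest.
print(joblib_hash(a) == joblib_hash(b))                      # True

# A different dtype gives a different digest, even though the values match.
print(joblib_hash(a) == joblib_hash(a.astype(np.float64)))   # False

# Plain Python containers go through Hasher; 'sha1' is the other valid name.
print(joblib_hash({"x": 1, "y": [2, 3]}, hash_name="sha1"))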
"""Numpy pickle compatibility functions."""\n\nimport inspect\nimport os\nimport pickle\nimport zlib\nfrom io import BytesIO\n\nfrom .numpy_pickle_utils import (\n _ZFILE_PREFIX,\n Unpickler,\n _ensure_native_byte_order,\n _reconstruct,\n)\n\n\ndef hex_str(an_int):\n """Convert an int to an hexadecimal string."""\n return "{:#x}".format(an_int)\n\n\ndef asbytes(s):\n if isinstance(s, bytes):\n return s\n return s.encode("latin1")\n\n\n_MAX_LEN = len(hex_str(2**64))\n_CHUNK_SIZE = 64 * 1024\n\n\ndef read_zfile(file_handle):\n """Read the z-file and return the content as a string.\n\n Z-files are raw data compressed with zlib used internally by joblib\n for persistence. Backward compatibility is not guaranteed. Do not\n use for external purposes.\n """\n file_handle.seek(0)\n header_length = len(_ZFILE_PREFIX) + _MAX_LEN\n length = file_handle.read(header_length)\n length = length[len(_ZFILE_PREFIX) :]\n length = int(length, 16)\n\n # With python2 and joblib version <= 0.8.4 compressed pickle header is one\n # character wider so we need to ignore an additional space if present.\n # Note: the first byte of the zlib data is guaranteed not to be a\n # space according to\n # https://tools.ietf.org/html/rfc6713#section-2.1\n next_byte = file_handle.read(1)\n if next_byte != b" ":\n # The zlib compressed data has started and we need to go back\n # one byte\n file_handle.seek(header_length)\n\n # We use the known length of the data to tell Zlib the size of the\n # buffer to allocate.\n data = zlib.decompress(file_handle.read(), 15, length)\n assert len(data) == length, (\n "Incorrect data length while decompressing %s."\n "The file could be corrupted." % file_handle\n )\n return data\n\n\ndef write_zfile(file_handle, data, compress=1):\n """Write the data in the given file as a Z-file.\n\n Z-files are raw data compressed with zlib used internally by joblib\n for persistence. Backward compatibility is not guaranteed. Do not\n use for external purposes.\n """\n file_handle.write(_ZFILE_PREFIX)\n length = hex_str(len(data))\n # Store the length of the data\n file_handle.write(asbytes(length.ljust(_MAX_LEN)))\n file_handle.write(zlib.compress(asbytes(data), compress))\n\n\n###############################################################################\n# Utility objects for persistence.\n\n\nclass NDArrayWrapper(object):\n """An object to be persisted instead of numpy arrays.\n\n The only thing this object does, is to carry the filename in which\n the array has been persisted, and the array subclass.\n """\n\n def __init__(self, filename, subclass, allow_mmap=True):\n """Constructor. Store the useful information for later."""\n self.filename = filename\n self.subclass = subclass\n self.allow_mmap = allow_mmap\n\n def read(self, unpickler):\n """Reconstruct the array."""\n filename = os.path.join(unpickler._dirname, self.filename)\n # Load the array from the disk\n # use getattr instead of self.allow_mmap to ensure backward compat\n # with NDArrayWrapper instances pickled with joblib < 0.9.0\n allow_mmap = getattr(self, "allow_mmap", True)\n kwargs = {}\n if allow_mmap:\n kwargs["mmap_mode"] = unpickler.mmap_mode\n if "allow_pickle" in inspect.signature(unpickler.np.load).parameters:\n # Required in numpy 1.16.3 and later to acknowledge the security\n # risk.\n kwargs["allow_pickle"] = True\n array = unpickler.np.load(filename, **kwargs)\n\n # Detect byte order mismatch and swap as needed.\n array = _ensure_native_byte_order(array)\n\n # Reconstruct subclasses. 
This does not work with old\n # versions of numpy\n if hasattr(array, "__array_prepare__") and self.subclass not in (\n unpickler.np.ndarray,\n unpickler.np.memmap,\n ):\n # We need to reconstruct another subclass\n new_array = _reconstruct(self.subclass, (0,), "b")\n return new_array.__array_prepare__(array)\n else:\n return array\n\n\nclass ZNDArrayWrapper(NDArrayWrapper):\n """An object to be persisted instead of numpy arrays.\n\n This object store the Zfile filename in which\n the data array has been persisted, and the meta information to\n retrieve it.\n The reason that we store the raw buffer data of the array and\n the meta information, rather than array representation routine\n (tobytes) is that it enables us to use completely the strided\n model to avoid memory copies (a and a.T store as fast). In\n addition saving the heavy information separately can avoid\n creating large temporary buffers when unpickling data with\n large arrays.\n """\n\n def __init__(self, filename, init_args, state):\n """Constructor. Store the useful information for later."""\n self.filename = filename\n self.state = state\n self.init_args = init_args\n\n def read(self, unpickler):\n """Reconstruct the array from the meta-information and the z-file."""\n # Here we a simply reproducing the unpickling mechanism for numpy\n # arrays\n filename = os.path.join(unpickler._dirname, self.filename)\n array = _reconstruct(*self.init_args)\n with open(filename, "rb") as f:\n data = read_zfile(f)\n state = self.state + (data,)\n array.__setstate__(state)\n return array\n\n\nclass ZipNumpyUnpickler(Unpickler):\n """A subclass of the Unpickler to unpickle our numpy pickles."""\n\n dispatch = Unpickler.dispatch.copy()\n\n def __init__(self, filename, file_handle, mmap_mode=None):\n """Constructor."""\n self._filename = os.path.basename(filename)\n self._dirname = os.path.dirname(filename)\n self.mmap_mode = mmap_mode\n self.file_handle = self._open_pickle(file_handle)\n Unpickler.__init__(self, self.file_handle)\n try:\n import numpy as np\n except ImportError:\n np = None\n self.np = np\n\n def _open_pickle(self, file_handle):\n return BytesIO(read_zfile(file_handle))\n\n def load_build(self):\n """Set the state of a newly created object.\n\n We capture it to replace our place-holder objects,\n NDArrayWrapper, by the array we are interested in. We\n replace them directly in the stack of pickler.\n """\n Unpickler.load_build(self)\n if isinstance(self.stack[-1], NDArrayWrapper):\n if self.np is None:\n raise ImportError(\n "Trying to unpickle an ndarray, but numpy didn't import correctly"\n )\n nd_array_wrapper = self.stack.pop()\n array = nd_array_wrapper.read(self)\n self.stack.append(array)\n\n dispatch[pickle.BUILD[0]] = load_build\n\n\ndef load_compatibility(filename):\n """Reconstruct a Python object from a file persisted with joblib.dump.\n\n This function ensures the compatibility with joblib old persistence format\n (<= 0.9.3).\n\n Parameters\n ----------\n filename: string\n The name of the file from which to load the object\n\n Returns\n -------\n result: any Python object\n The object stored in the file.\n\n See Also\n --------\n joblib.dump : function to save an object\n\n Notes\n -----\n\n This function can load numpy array files saved separately during the\n dump.\n """\n with open(filename, "rb") as file_handle:\n # We are careful to open the file handle early and keep it open to\n # avoid race-conditions on renames. 
That said, if data is stored in\n # companion files, moving the directory will create a race when\n # joblib tries to access the companion files.\n unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)\n try:\n obj = unpickler.load()\n except UnicodeDecodeError as exc:\n # More user-friendly error message\n new_exc = ValueError(\n "You may be trying to read with "\n "python 3 a joblib pickle generated with python 2. "\n "This feature is not supported by joblib."\n )\n new_exc.__cause__ = exc\n raise new_exc\n finally:\n if hasattr(unpickler, "file_handle"):\n unpickler.file_handle.close()\n return obj\n
.venv\Lib\site-packages\joblib\numpy_pickle_compat.py
numpy_pickle_compat.py
Python
8,451
0.95
0.156
0.137255
react-lib
806
2024-06-13T13:44:38.621127
MIT
false
d89f34e90e5efc44a83c896510f4842c
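The legacy z-file framing used by this compatibility module can be exercised directly with the two helpers defined above. This is only a roundtrip sketch on an in-memory buffer (the format is internal to joblib and, as the docstrings note, not meant for external use): write_zfile stores the magic prefix plus a padded hex-encoded payload length, and read_zfile uses that length both to decompress and to detect truncated files.

from io import BytesIO

from joblib.numpy_pickle_compat import read_zfile, write_zfile

payload = b"raw bytes as produced by the pickler" * 100

buffer = BytesIO()
write_zfile(buffer, payload, compress=3)   # prefix + padded hex length + zlib stream

restored = read_zfile(buffer)              # seeks to 0, checks the stored length
assert restored == payload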
"""Utilities for fast persistence of big data, with optional compression."""\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Copyright (c) 2009 Gael Varoquaux\n# License: BSD Style, 3 clauses.\n\nimport contextlib\nimport io\nimport pickle\nimport sys\nimport warnings\n\nfrom .compressor import _COMPRESSORS, _ZFILE_PREFIX\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\nUnpickler = pickle._Unpickler\nPickler = pickle._Pickler\nxrange = range\n\n\ntry:\n # The python standard library can be built without bz2 so we make bz2\n # usage optional.\n # see https://github.com/scikit-learn/scikit-learn/issues/7526 for more\n # details.\n import bz2\nexcept ImportError:\n bz2 = None\n\n# Buffer size used in io.BufferedReader and io.BufferedWriter\n_IO_BUFFER_SIZE = 1024**2\n\n\ndef _is_raw_file(fileobj):\n """Check if fileobj is a raw file object, e.g created with open."""\n fileobj = getattr(fileobj, "raw", fileobj)\n return isinstance(fileobj, io.FileIO)\n\n\ndef _get_prefixes_max_len():\n # Compute the max prefix len of registered compressors.\n prefixes = [len(compressor.prefix) for compressor in _COMPRESSORS.values()]\n prefixes += [len(_ZFILE_PREFIX)]\n return max(prefixes)\n\n\ndef _is_numpy_array_byte_order_mismatch(array):\n """Check if numpy array is having byte order mismatch"""\n return (\n sys.byteorder == "big"\n and (\n array.dtype.byteorder == "<"\n or (\n array.dtype.byteorder == "|"\n and array.dtype.fields\n and all(e[0].byteorder == "<" for e in array.dtype.fields.values())\n )\n )\n ) or (\n sys.byteorder == "little"\n and (\n array.dtype.byteorder == ">"\n or (\n array.dtype.byteorder == "|"\n and array.dtype.fields\n and all(e[0].byteorder == ">" for e in array.dtype.fields.values())\n )\n )\n )\n\n\ndef _ensure_native_byte_order(array):\n """Use the byte order of the host while preserving values\n\n Does nothing if array already uses the system byte order.\n """\n if _is_numpy_array_byte_order_mismatch(array):\n array = array.byteswap().view(array.dtype.newbyteorder("="))\n return array\n\n\n###############################################################################\n# Cache file utilities\ndef _detect_compressor(fileobj):\n """Return the compressor matching fileobj.\n\n Parameters\n ----------\n fileobj: file object\n\n Returns\n -------\n str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', 'not-compressed'}\n """\n # Read the magic number in the first bytes of the file.\n max_prefix_len = _get_prefixes_max_len()\n if hasattr(fileobj, "peek"):\n # Peek allows to read those bytes without moving the cursor in the\n # file which.\n first_bytes = fileobj.peek(max_prefix_len)\n else:\n # Fallback to seek if the fileobject is not peekable.\n first_bytes = fileobj.read(max_prefix_len)\n fileobj.seek(0)\n\n if first_bytes.startswith(_ZFILE_PREFIX):\n return "compat"\n else:\n for name, compressor in _COMPRESSORS.items():\n if first_bytes.startswith(compressor.prefix):\n return name\n\n return "not-compressed"\n\n\ndef _buffered_read_file(fobj):\n """Return a buffered version of a read file object."""\n return io.BufferedReader(fobj, buffer_size=_IO_BUFFER_SIZE)\n\n\ndef _buffered_write_file(fobj):\n """Return a buffered version of a write file object."""\n return io.BufferedWriter(fobj, buffer_size=_IO_BUFFER_SIZE)\n\n\n@contextlib.contextmanager\ndef _validate_fileobject_and_memmap(fileobj, filename, mmap_mode=None):\n """Utility function opening the right fileobject from a filename.\n\n The magic number is used to choose between the 
type of file object to open:\n * regular file object (default)\n * zlib file object\n * gzip file object\n * bz2 file object\n * lzma file object (for xz and lzma compressor)\n\n Parameters\n ----------\n fileobj: file object\n filename: str\n filename path corresponding to the fileobj parameter.\n mmap_mode: str\n memory map mode that should be used to open the pickle file. This\n parameter is useful to verify that the user is not trying to one with\n compression. Default: None.\n\n Returns\n -------\n a tuple with a file like object, and the validated mmap_mode.\n\n """\n # Detect if the fileobj contains compressed data.\n compressor = _detect_compressor(fileobj)\n validated_mmap_mode = mmap_mode\n\n if compressor == "compat":\n # Compatibility with old pickle mode: simply return the input\n # filename "as-is" and let the compatibility function be called by the\n # caller.\n warnings.warn(\n "The file '%s' has been generated with a joblib "\n "version less than 0.10. "\n "Please regenerate this pickle file." % filename,\n DeprecationWarning,\n stacklevel=2,\n )\n yield filename, validated_mmap_mode\n else:\n if compressor in _COMPRESSORS:\n # based on the compressor detected in the file, we open the\n # correct decompressor file object, wrapped in a buffer.\n compressor_wrapper = _COMPRESSORS[compressor]\n inst = compressor_wrapper.decompressor_file(fileobj)\n fileobj = _buffered_read_file(inst)\n\n # Checking if incompatible load parameters with the type of file:\n # mmap_mode cannot be used with compressed file or in memory buffers\n # such as io.BytesIO.\n if mmap_mode is not None:\n validated_mmap_mode = None\n if isinstance(fileobj, io.BytesIO):\n warnings.warn(\n "In memory persistence is not compatible with "\n 'mmap_mode "%(mmap_mode)s" flag passed. '\n "mmap_mode option will be ignored." % locals(),\n stacklevel=2,\n )\n elif compressor != "not-compressed":\n warnings.warn(\n 'mmap_mode "%(mmap_mode)s" is not compatible '\n "with compressed file %(filename)s. "\n '"%(mmap_mode)s" flag will be ignored.' % locals(),\n stacklevel=2,\n )\n elif not _is_raw_file(fileobj):\n warnings.warn(\n '"%(fileobj)r" is not a raw file, mmap_mode '\n '"%(mmap_mode)s" flag will be ignored.' % locals(),\n stacklevel=2,\n )\n else:\n validated_mmap_mode = mmap_mode\n\n yield fileobj, validated_mmap_mode\n\n\ndef _write_fileobject(filename, compress=("zlib", 3)):\n """Return the right compressor file object in write mode."""\n compressmethod = compress[0]\n compresslevel = compress[1]\n\n if compressmethod in _COMPRESSORS.keys():\n file_instance = _COMPRESSORS[compressmethod].compressor_file(\n filename, compresslevel=compresslevel\n )\n return _buffered_write_file(file_instance)\n else:\n file_instance = _COMPRESSORS["zlib"].compressor_file(\n filename, compresslevel=compresslevel\n )\n return _buffered_write_file(file_instance)\n\n\n# Utility functions/variables from numpy required for writing arrays.\n# We need at least the functions introduced in version 1.9 of numpy. Here,\n# we use the ones from numpy 1.10.2.\nBUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes\n\n\ndef _read_bytes(fp, size, error_template="ran out of data"):\n """Read from file-like object until size bytes are read.\n\n TODO python2_drop: is it still needed? The docstring mentions python 2.6\n and it looks like this can be at least simplified ...\n\n Raises ValueError if not EOF is encountered before size bytes are read.\n Non-blocking objects only supported if they derive from io objects.\n\n Required as e.g. 
ZipExtFile in python 2.6 can return less data than\n requested.\n\n This function was taken from numpy/lib/format.py in version 1.10.2.\n\n Parameters\n ----------\n fp: file-like object\n size: int\n error_template: str\n\n Returns\n -------\n a bytes object\n The data read in bytes.\n\n """\n data = bytes()\n while True:\n # io files (default in python3) return None or raise on\n # would-block, python2 file will truncate, probably nothing can be\n # done about that. note that regular files can't be non-blocking\n try:\n r = fp.read(size - len(data))\n data += r\n if len(r) == 0 or len(data) == size:\n break\n except io.BlockingIOError:\n pass\n if len(data) != size:\n msg = "EOF: reading %s, expected %d bytes got %d"\n raise ValueError(msg % (error_template, size, len(data)))\n else:\n return data\n\n\ndef _reconstruct(*args, **kwargs):\n # Wrapper for numpy._core.multiarray._reconstruct with backward compat\n # for numpy 1.X\n #\n # XXX: Remove this function when numpy 1.X is not supported anymore\n\n np_major_version = np.__version__[:2]\n if np_major_version == "1.":\n from numpy.core.multiarray import _reconstruct as np_reconstruct\n elif np_major_version == "2.":\n from numpy._core.multiarray import _reconstruct as np_reconstruct\n\n return np_reconstruct(*args, **kwargs)\n
.venv\Lib\site-packages\joblib\numpy_pickle_utils.py
numpy_pickle_utils.py
Python
9,497
0.95
0.175258
0.164557
node-utils
115
2025-02-03T17:46:26.683255
BSD-3-Clause
false
2ebb3d6cfb5a28ffa2346b5a3926461e
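The compressor detection described above works purely on magic numbers, so it can be sketched without touching the filesystem. The example below is illustrative and assumes the zlib compressor registered in joblib's compressor module (a raw zlib stream starts with the 0x78 byte that matches its prefix); anything matching neither a registered prefix nor the legacy _ZFILE_PREFIX is reported as "not-compressed".

import io
import zlib

from joblib.numpy_pickle_utils import _detect_compressor

# A raw zlib stream starts with 0x78, which matches the registered prefix.
compressed = io.BytesIO(zlib.compress(b"some pickled payload"))
print(_detect_compressor(compressed))   # 'zlib'

# Arbitrary bytes match no prefix: the cursor is rewound and the data is
# treated as a plain, uncompressed pickle.
plain = io.BytesIO(b"plain pickle bytes")
print(_detect_compressor(plain))        # 'not-compressed'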
"""Custom implementation of multiprocessing.Pool with custom pickler.\n\nThis module provides efficient ways of working with data stored in\nshared memory with numpy.memmap arrays without inducing any memory\ncopy between the parent and child processes.\n\nThis module should not be imported if multiprocessing is not\navailable as it implements subclasses of multiprocessing Pool\nthat uses a custom alternative to SimpleQueue.\n\n"""\n# Author: Olivier Grisel <olivier.grisel@ensta.org>\n# Copyright: 2012, Olivier Grisel\n# License: BSD 3 clause\n\nimport copyreg\nimport sys\nimport warnings\nfrom time import sleep\n\ntry:\n WindowsError\nexcept NameError:\n WindowsError = type(None)\n\nfrom io import BytesIO\n\n# We need the class definition to derive from it, not the multiprocessing.Pool\n# factory function\nfrom multiprocessing.pool import Pool\nfrom pickle import HIGHEST_PROTOCOL, Pickler\n\nfrom ._memmapping_reducer import TemporaryResourcesManager, get_memmapping_reducers\nfrom ._multiprocessing_helpers import assert_spawning, mp\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\n\n###############################################################################\n# Enable custom pickling in Pool queues\n\n\nclass CustomizablePickler(Pickler):\n """Pickler that accepts custom reducers.\n\n TODO python2_drop : can this be simplified ?\n\n HIGHEST_PROTOCOL is selected by default as this pickler is used\n to pickle ephemeral datastructures for interprocess communication\n hence no backward compatibility is required.\n\n `reducers` is expected to be a dictionary with key/values\n being `(type, callable)` pairs where `callable` is a function that\n give an instance of `type` will return a tuple `(constructor,\n tuple_of_objects)` to rebuild an instance out of the pickled\n `tuple_of_objects` as would return a `__reduce__` method. See the\n standard library documentation on pickling for more details.\n\n """\n\n # We override the pure Python pickler as its the only way to be able to\n # customize the dispatch table without side effects in Python 2.7\n # to 3.2. 
For Python 3.3+ leverage the new dispatch_table\n # feature from https://bugs.python.org/issue14166 that makes it possible\n # to use the C implementation of the Pickler which is faster.\n\n def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):\n Pickler.__init__(self, writer, protocol=protocol)\n if reducers is None:\n reducers = {}\n if hasattr(Pickler, "dispatch"):\n # Make the dispatch registry an instance level attribute instead of\n # a reference to the class dictionary under Python 2\n self.dispatch = Pickler.dispatch.copy()\n else:\n # Under Python 3 initialize the dispatch table with a copy of the\n # default registry\n self.dispatch_table = copyreg.dispatch_table.copy()\n for type, reduce_func in reducers.items():\n self.register(type, reduce_func)\n\n def register(self, type, reduce_func):\n """Attach a reducer function to a given type in the dispatch table."""\n if hasattr(Pickler, "dispatch"):\n # Python 2 pickler dispatching is not explicitly customizable.\n # Let us use a closure to workaround this limitation.\n def dispatcher(self, obj):\n reduced = reduce_func(obj)\n self.save_reduce(obj=obj, *reduced)\n\n self.dispatch[type] = dispatcher\n else:\n self.dispatch_table[type] = reduce_func\n\n\nclass CustomizablePicklingQueue(object):\n """Locked Pipe implementation that uses a customizable pickler.\n\n This class is an alternative to the multiprocessing implementation\n of SimpleQueue in order to make it possible to pass custom\n pickling reducers, for instance to avoid memory copy when passing\n memory mapped datastructures.\n\n `reducers` is expected to be a dict with key / values being\n `(type, callable)` pairs where `callable` is a function that, given an\n instance of `type`, will return a tuple `(constructor, tuple_of_objects)`\n to rebuild an instance out of the pickled `tuple_of_objects` as would\n return a `__reduce__` method.\n\n See the standard library documentation on pickling for more details.\n """\n\n def __init__(self, context, reducers=None):\n self._reducers = reducers\n self._reader, self._writer = context.Pipe(duplex=False)\n self._rlock = context.Lock()\n if sys.platform == "win32":\n self._wlock = None\n else:\n self._wlock = context.Lock()\n self._make_methods()\n\n def __getstate__(self):\n assert_spawning(self)\n return (self._reader, self._writer, self._rlock, self._wlock, self._reducers)\n\n def __setstate__(self, state):\n (self._reader, self._writer, self._rlock, self._wlock, self._reducers) = state\n self._make_methods()\n\n def empty(self):\n return not self._reader.poll()\n\n def _make_methods(self):\n self._recv = recv = self._reader.recv\n racquire, rrelease = self._rlock.acquire, self._rlock.release\n\n def get():\n racquire()\n try:\n return recv()\n finally:\n rrelease()\n\n self.get = get\n\n if self._reducers:\n\n def send(obj):\n buffer = BytesIO()\n CustomizablePickler(buffer, self._reducers).dump(obj)\n self._writer.send_bytes(buffer.getvalue())\n\n self._send = send\n else:\n self._send = send = self._writer.send\n if self._wlock is None:\n # writes to a message oriented win32 pipe are atomic\n self.put = send\n else:\n wlock_acquire, wlock_release = (self._wlock.acquire, self._wlock.release)\n\n def put(obj):\n wlock_acquire()\n try:\n return send(obj)\n finally:\n wlock_release()\n\n self.put = put\n\n\nclass PicklingPool(Pool):\n """Pool implementation with customizable pickling reducers.\n\n This is useful to control how data is shipped between processes\n and makes it possible to use shared memory without 
useless\n copies induces by the default pickling methods of the original\n objects passed as arguments to dispatch.\n\n `forward_reducers` and `backward_reducers` are expected to be\n dictionaries with key/values being `(type, callable)` pairs where\n `callable` is a function that, given an instance of `type`, will return a\n tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the\n pickled `tuple_of_objects` as would return a `__reduce__` method.\n See the standard library documentation about pickling for more details.\n\n """\n\n def __init__(\n self, processes=None, forward_reducers=None, backward_reducers=None, **kwargs\n ):\n if forward_reducers is None:\n forward_reducers = dict()\n if backward_reducers is None:\n backward_reducers = dict()\n self._forward_reducers = forward_reducers\n self._backward_reducers = backward_reducers\n poolargs = dict(processes=processes)\n poolargs.update(kwargs)\n super(PicklingPool, self).__init__(**poolargs)\n\n def _setup_queues(self):\n context = getattr(self, "_ctx", mp)\n self._inqueue = CustomizablePicklingQueue(context, self._forward_reducers)\n self._outqueue = CustomizablePicklingQueue(context, self._backward_reducers)\n self._quick_put = self._inqueue._send\n self._quick_get = self._outqueue._recv\n\n\nclass MemmappingPool(PicklingPool):\n """Process pool that shares large arrays to avoid memory copy.\n\n This drop-in replacement for `multiprocessing.pool.Pool` makes\n it possible to work efficiently with shared memory in a numpy\n context.\n\n Existing instances of numpy.memmap are preserved: the child\n suprocesses will have access to the same shared memory in the\n original mode except for the 'w+' mode that is automatically\n transformed as 'r+' to avoid zeroing the original data upon\n instantiation.\n\n Furthermore large arrays from the parent process are automatically\n dumped to a temporary folder on the filesystem such as child\n processes to access their content via memmapping (file system\n backed shared memory).\n\n Note: it is important to call the terminate method to collect\n the temporary folder used by the pool.\n\n Parameters\n ----------\n processes: int, optional\n Number of worker processes running concurrently in the pool.\n initializer: callable, optional\n Callable executed on worker process creation.\n initargs: tuple, optional\n Arguments passed to the initializer callable.\n temp_folder: (str, callable) optional\n If str:\n Folder to be used by the pool for memmapping large arrays\n for sharing memory with worker processes. 
If None, this will try in\n order:\n - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,\n - /dev/shm if the folder exists and is writable: this is a RAMdisk\n filesystem available by default on modern Linux distributions,\n - the default system temporary folder that can be overridden\n with TMP, TMPDIR or TEMP environment variables, typically /tmp\n under Unix operating systems.\n if callable:\n An callable in charge of dynamically resolving a temporary folder\n for memmapping large arrays.\n max_nbytes int or None, optional, 1e6 by default\n Threshold on the size of arrays passed to the workers that\n triggers automated memory mapping in temp_folder.\n Use None to disable memmapping of large arrays.\n mmap_mode: {'r+', 'r', 'w+', 'c'}\n Memmapping mode for numpy arrays passed to workers.\n See 'max_nbytes' parameter documentation for more details.\n forward_reducers: dictionary, optional\n Reducers used to pickle objects passed from main process to worker\n processes: see below.\n backward_reducers: dictionary, optional\n Reducers used to pickle return values from workers back to the\n main process.\n verbose: int, optional\n Make it possible to monitor how the communication of numpy arrays\n with the subprocess is handled (pickling or memmapping)\n prewarm: bool or str, optional, "auto" by default.\n If True, force a read on newly memmapped array to make sure that OS\n pre-cache it in memory. This can be useful to avoid concurrent disk\n access when the same data array is passed to different worker\n processes. If "auto" (by default), prewarm is set to True, unless the\n Linux shared memory partition /dev/shm is available and used as temp\n folder.\n\n `forward_reducers` and `backward_reducers` are expected to be\n dictionaries with key/values being `(type, callable)` pairs where\n `callable` is a function that give an instance of `type` will return\n a tuple `(constructor, tuple_of_objects)` to rebuild an instance out\n of the pickled `tuple_of_objects` as would return a `__reduce__`\n method. See the standard library documentation on pickling for more\n details.\n\n """\n\n def __init__(\n self,\n processes=None,\n temp_folder=None,\n max_nbytes=1e6,\n mmap_mode="r",\n forward_reducers=None,\n backward_reducers=None,\n verbose=0,\n prewarm=False,\n **kwargs,\n ):\n manager = TemporaryResourcesManager(temp_folder)\n self._temp_folder_manager = manager\n\n # The usage of a temp_folder_resolver over a simple temp_folder is\n # superfluous for multiprocessing pools, as they don't get reused, see\n # get_memmapping_executor for more details. 
We still use it for code\n # simplicity.\n forward_reducers, backward_reducers = get_memmapping_reducers(\n temp_folder_resolver=manager.resolve_temp_folder_name,\n max_nbytes=max_nbytes,\n mmap_mode=mmap_mode,\n forward_reducers=forward_reducers,\n backward_reducers=backward_reducers,\n verbose=verbose,\n unlink_on_gc_collect=False,\n prewarm=prewarm,\n )\n\n poolargs = dict(\n processes=processes,\n forward_reducers=forward_reducers,\n backward_reducers=backward_reducers,\n )\n poolargs.update(kwargs)\n super(MemmappingPool, self).__init__(**poolargs)\n\n def terminate(self):\n n_retries = 10\n for i in range(n_retries):\n try:\n super(MemmappingPool, self).terminate()\n break\n except OSError as e:\n if isinstance(e, WindowsError):\n # Workaround occasional "[Error 5] Access is denied" issue\n # when trying to terminate a process under windows.\n sleep(0.1)\n if i + 1 == n_retries:\n warnings.warn(\n "Failed to terminate worker processes in"\n " multiprocessing pool: %r" % e\n )\n\n # Clean up the temporary resources as the workers should now be off.\n self._temp_folder_manager._clean_temporary_resources()\n\n @property\n def _temp_folder(self):\n # Legacy property in tests. could be removed if we refactored the\n # memmapping tests. SHOULD ONLY BE USED IN TESTS!\n # We cache this property because it is called late in the tests - at\n # this point, all context have been unregistered, and\n # resolve_temp_folder_name raises an error.\n if getattr(self, "_cached_temp_folder", None) is not None:\n return self._cached_temp_folder\n else:\n self._cached_temp_folder = (\n self._temp_folder_manager.resolve_temp_folder_name()\n ) # noqa\n return self._cached_temp_folder\n
.venv\Lib\site-packages\joblib\pool.py
pool.py
Python
14,134
0.95
0.187845
0.10596
react-lib
289
2025-05-31T02:53:05.784530
BSD-3-Clause
false
7abde4b6950db04403570d91ffedb850
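As a usage sketch for the pool defined above (not part of the module itself, and environment-dependent since it spawns worker processes): an array handed to MemmappingPool workers is dumped to a temporary folder and re-opened in the children as a read-only np.memmap once it exceeds max_nbytes, instead of being pickled byte-for-byte into each process. The worker function (row_sums here is just an illustrative name) has to live at module level so the customizable pickler can ship it, and terminate() must be called to reclaim the temporary folder.

import numpy as np

from joblib.pool import MemmappingPool


def row_sums(block):
    # Inside a worker, `block` is typically an np.memmap view, not a copy.
    return block.sum(axis=1)


if __name__ == "__main__":
    data = np.random.rand(2000, 500)
    pool = MemmappingPool(processes=2, max_nbytes=1e5, mmap_mode="r")
    try:
        chunks = [data[i::4] for i in range(4)]
        results = pool.map(row_sums, chunks)
    finally:
        pool.terminate()   # also cleans up the memmap temp folder
    print(sum(r.sum() for r in results))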
"""\nSmall shim of loky's cloudpickle_wrapper to avoid failure when\nmultiprocessing is not available.\n"""\n\nfrom ._multiprocessing_helpers import mp\n\n\ndef _my_wrap_non_picklable_objects(obj, keep_wrapper=True):\n return obj\n\n\nif mp is not None:\n from .externals.loky import wrap_non_picklable_objects\nelse:\n wrap_non_picklable_objects = _my_wrap_non_picklable_objects\n\n__all__ = ["wrap_non_picklable_objects"]\n
.venv\Lib\site-packages\joblib\_cloudpickle_wrapper.py
_cloudpickle_wrapper.py
Python
416
0.85
0.111111
0
node-utils
254
2023-08-12T11:01:28.156846
Apache-2.0
false
8c7b21056f5835648072a9005bd0174d
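The shim above only chooses between loky's real wrap_non_picklable_objects and a pass-through stub, so the sketch below behaves the same either way (with multiprocessing disabled it simply returns the object unchanged). Its usual application, shown here with an illustrative make_adder helper, is decorating a callable defined inside another function, which the stock pickler cannot serialize, before handing it to worker processes.

from joblib import Parallel, delayed, wrap_non_picklable_objects


def make_adder(offset):
    @wrap_non_picklable_objects
    def add(x):          # a closure: not picklable by the standard pickler
        return x + offset

    return add


add_ten = make_adder(10)
print(add_ten(3))                                               # 13
print(Parallel(n_jobs=2)(delayed(add_ten)(i) for i in range(5)))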
"""Helper module to factorize the conditional multiprocessing import logic\n\nWe use a distinct module to simplify import statements and avoid introducing\ncircular dependencies (for instance for the assert_spawning name).\n"""\n\nimport os\nimport warnings\n\n# Obtain possible configuration from the environment, assuming 1 (on)\n# by default, upon 0 set to None. Should instructively fail if some non\n# 0/1 value is set.\nmp = int(os.environ.get("JOBLIB_MULTIPROCESSING", 1)) or None\nif mp:\n try:\n import _multiprocessing # noqa\n import multiprocessing as mp\n except ImportError:\n mp = None\n\n# 2nd stage: validate that locking is available on the system and\n# issue a warning if not\nif mp is not None:\n try:\n # try to create a named semaphore using SemLock to make sure they are\n # available on this platform. We use the low level object\n # _multiprocessing.SemLock to avoid spawning a resource tracker on\n # Unix system or changing the default backend.\n import tempfile\n from _multiprocessing import SemLock\n\n _rand = tempfile._RandomNameSequence()\n for i in range(100):\n try:\n name = "/joblib-{}-{}".format(os.getpid(), next(_rand))\n _sem = SemLock(0, 0, 1, name=name, unlink=True)\n del _sem # cleanup\n break\n except FileExistsError as e: # pragma: no cover\n if i >= 99:\n raise FileExistsError("cannot find name for semaphore") from e\n except (FileExistsError, AttributeError, ImportError, OSError) as e:\n mp = None\n warnings.warn("%s. joblib will operate in serial mode" % (e,))\n\n\n# 3rd stage: backward compat for the assert_spawning helper\nif mp is not None:\n from multiprocessing.context import assert_spawning\nelse:\n assert_spawning = None\n
.venv\Lib\site-packages\joblib\_multiprocessing_helpers.py
_multiprocessing_helpers.py
Python
1,878
0.95
0.294118
0.227273
react-lib
665
2023-08-14T12:07:58.101499
MIT
false
48b5aedd06fde68705bf48f974d0a40c
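A quick way to observe the import logic above is to flip the JOBLIB_MULTIPROCESSING environment variable in a fresh interpreter (it must be set before joblib is imported, hence the subprocess in this sketch): with the variable set to 0 the module exposes mp = None and joblib operates in serial mode.

import os
import subprocess
import sys

code = "import joblib._multiprocessing_helpers as h; print(h.mp is None)"
env = dict(os.environ, JOBLIB_MULTIPROCESSING="0")
result = subprocess.run([sys.executable, "-c", code],
                        env=env, capture_output=True, text=True)
print(result.stdout.strip())   # True: multiprocessing support was disabled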
"""\nBackends for embarrassingly parallel code.\n"""\n\nimport contextlib\nimport gc\nimport os\nimport threading\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nfrom ._multiprocessing_helpers import mp\nfrom ._utils import (\n _retrieve_traceback_capturing_wrapped_call,\n _TracebackCapturingWrapper,\n)\n\nif mp is not None:\n from multiprocessing.pool import ThreadPool\n\n from .executor import get_memmapping_executor\n\n # Import loky only if multiprocessing is present\n from .externals.loky import cpu_count, process_executor\n from .externals.loky.process_executor import ShutdownExecutorError\n from .pool import MemmappingPool\n\n\nclass ParallelBackendBase(metaclass=ABCMeta):\n """Helper abc which defines all methods a ParallelBackend must implement"""\n\n default_n_jobs = 1\n\n supports_inner_max_num_threads = False\n\n # This flag was introduced for backward compatibility reasons.\n # New backends should always set it to True and implement the\n # `retrieve_result_callback` method.\n supports_retrieve_callback = False\n\n @property\n def supports_return_generator(self):\n return self.supports_retrieve_callback\n\n @property\n def supports_timeout(self):\n return self.supports_retrieve_callback\n\n nesting_level = None\n\n def __init__(\n self, nesting_level=None, inner_max_num_threads=None, **backend_kwargs\n ):\n super().__init__()\n self.nesting_level = nesting_level\n self.inner_max_num_threads = inner_max_num_threads\n self.backend_kwargs = backend_kwargs\n\n MAX_NUM_THREADS_VARS = [\n "OMP_NUM_THREADS",\n "OPENBLAS_NUM_THREADS",\n "MKL_NUM_THREADS",\n "BLIS_NUM_THREADS",\n "VECLIB_MAXIMUM_THREADS",\n "NUMBA_NUM_THREADS",\n "NUMEXPR_NUM_THREADS",\n ]\n\n TBB_ENABLE_IPC_VAR = "ENABLE_IPC"\n\n @abstractmethod\n def effective_n_jobs(self, n_jobs):\n """Determine the number of jobs that can actually run in parallel\n\n n_jobs is the number of workers requested by the callers. Passing\n n_jobs=-1 means requesting all available workers for instance matching\n the number of CPU cores on the worker host(s).\n\n This method should return a guesstimate of the number of workers that\n can actually perform work concurrently. The primary use case is to make\n it possible for the caller to know in how many chunks to slice the\n work.\n\n In general working on larger data chunks is more efficient (less\n scheduling overhead and better use of CPU cache prefetching heuristics)\n as long as all the workers have enough work to do.\n """\n\n def apply_async(self, func, callback=None):\n """Deprecated: implement `submit` instead."""\n raise NotImplementedError("Implement `submit` instead.")\n\n def submit(self, func, callback=None):\n """Schedule a function to be run and return a future-like object.\n\n This method should return a future-like object that allow tracking\n the progress of the task.\n\n If ``supports_retrieve_callback`` is False, the return value of this\n method is passed to ``retrieve_result`` instead of calling\n ``retrieve_result_callback``.\n\n Parameters\n ----------\n func: callable\n The function to be run in parallel.\n\n callback: callable\n A callable that will be called when the task is completed. This callable\n is a wrapper around ``retrieve_result_callback``. 
This should be added\n to the future-like object returned by this method, so that the callback\n is called when the task is completed.\n\n For future-like backends, this can be achieved with something like\n ``future.add_done_callback(callback)``.\n\n Returns\n -------\n future: future-like\n A future-like object to track the execution of the submitted function.\n """\n warnings.warn(\n "`apply_async` is deprecated, implement and use `submit` instead.",\n DeprecationWarning,\n )\n return self.apply_async(func, callback)\n\n def retrieve_result_callback(self, out):\n """Called within the callback function passed to `submit`.\n\n This method can customise how the result of the function is retrieved\n from the future-like object.\n\n Parameters\n ----------\n future: future-like\n The future-like object returned by the `submit` method.\n\n Returns\n -------\n result: object\n The result of the function executed in parallel.\n """\n\n def retrieve_result(self, out, timeout=None):\n """Hook to retrieve the result when support_retrieve_callback=False.\n\n The argument `out` is the result of the `submit` call. This method\n should return the result of the computation or raise an exception if\n the computation failed.\n """\n if self.supports_timeout:\n return out.get(timeout=timeout)\n else:\n return out.get()\n\n def configure(\n self, n_jobs=1, parallel=None, prefer=None, require=None, **backend_kwargs\n ):\n """Reconfigure the backend and return the number of workers.\n\n This makes it possible to reuse an existing backend instance for\n successive independent calls to Parallel with different parameters.\n """\n self.parallel = parallel\n return self.effective_n_jobs(n_jobs)\n\n def start_call(self):\n """Call-back method called at the beginning of a Parallel call"""\n\n def stop_call(self):\n """Call-back method called at the end of a Parallel call"""\n\n def terminate(self):\n """Shutdown the workers and free the shared memory."""\n\n def compute_batch_size(self):\n """Determine the optimal batch size"""\n return 1\n\n def batch_completed(self, batch_size, duration):\n """Callback indicate how long it took to run a batch"""\n\n def abort_everything(self, ensure_ready=True):\n """Abort any running tasks\n\n This is called when an exception has been raised when executing a task\n and all the remaining tasks will be ignored and can therefore be\n aborted to spare computation resources.\n\n If ensure_ready is True, the backend should be left in an operating\n state as future tasks might be re-submitted via that same backend\n instance.\n\n If ensure_ready is False, the implementer of this method can decide\n to leave the backend in a closed / terminated state as no new task\n are expected to be submitted to this backend.\n\n Setting ensure_ready to False is an optimization that can be leveraged\n when aborting tasks via killing processes from a local process pool\n managed by the backend it-self: if we expect no new tasks, there is no\n point in re-creating new workers.\n """\n # Does nothing by default: to be overridden in subclasses when\n # canceling tasks is possible.\n pass\n\n def get_nested_backend(self):\n """Backend instance to be used by nested Parallel calls.\n\n By default a thread-based backend is used for the first level of\n nesting. 
Beyond, switch to sequential backend to avoid spawning too\n many threads on the host.\n """\n nesting_level = getattr(self, "nesting_level", 0) + 1\n if nesting_level > 1:\n return SequentialBackend(nesting_level=nesting_level), None\n else:\n return ThreadingBackend(nesting_level=nesting_level), None\n\n def _prepare_worker_env(self, n_jobs):\n """Return environment variables limiting threadpools in external libs.\n\n This function return a dict containing environment variables to pass\n when creating a pool of process. These environment variables limit the\n number of threads to `n_threads` for OpenMP, MKL, Accelerated and\n OpenBLAS libraries in the child processes.\n """\n explicit_n_threads = self.inner_max_num_threads\n default_n_threads = max(cpu_count() // n_jobs, 1)\n\n # Set the inner environment variables to self.inner_max_num_threads if\n # it is given. Else, default to cpu_count // n_jobs unless the variable\n # is already present in the parent process environment.\n env = {}\n for var in self.MAX_NUM_THREADS_VARS:\n if explicit_n_threads is None:\n var_value = os.environ.get(var, default_n_threads)\n else:\n var_value = explicit_n_threads\n\n env[var] = str(var_value)\n\n if self.TBB_ENABLE_IPC_VAR not in os.environ:\n # To avoid over-subscription when using TBB, let the TBB schedulers\n # use Inter Process Communication to coordinate:\n env[self.TBB_ENABLE_IPC_VAR] = "1"\n return env\n\n @contextlib.contextmanager\n def retrieval_context(self):\n """Context manager to manage an execution context.\n\n Calls to Parallel.retrieve will be made inside this context.\n\n By default, this does nothing. It may be useful for subclasses to\n handle nested parallelism. In particular, it may be required to avoid\n deadlocks if a backend manages a fixed number of workers, when those\n workers may be asked to do nested Parallel calls. Without\n 'retrieval_context' this could lead to deadlock, as all the workers\n managed by the backend may be "busy" waiting for the nested parallel\n calls to finish, but the backend has no free workers to execute those\n tasks.\n """\n yield\n\n @staticmethod\n def in_main_thread():\n return isinstance(threading.current_thread(), threading._MainThread)\n\n\nclass SequentialBackend(ParallelBackendBase):\n """A ParallelBackend which will execute all batches sequentially.\n\n Does not use/create any threading objects, and hence has minimal\n overhead. Used when n_jobs == 1.\n """\n\n uses_threads = True\n supports_timeout = False\n supports_retrieve_callback = False\n supports_sharedmem = True\n\n def effective_n_jobs(self, n_jobs):\n """Determine the number of jobs which are going to run in parallel"""\n if n_jobs == 0:\n raise ValueError("n_jobs == 0 in Parallel has no meaning")\n return 1\n\n def submit(self, func, callback=None):\n """Schedule a func to be run"""\n raise RuntimeError("Should never be called for SequentialBackend.")\n\n def retrieve_result_callback(self, out):\n raise RuntimeError("Should never be called for SequentialBackend.")\n\n def get_nested_backend(self):\n # import is not top level to avoid cyclic import errors.\n from .parallel import get_active_backend\n\n # SequentialBackend should neither change the nesting level, the\n # default backend or the number of jobs. 
Just return the current one.\n return get_active_backend()\n\n\nclass PoolManagerMixin(object):\n """A helper class for managing pool of workers."""\n\n _pool = None\n\n def effective_n_jobs(self, n_jobs):\n """Determine the number of jobs which are going to run in parallel"""\n if n_jobs == 0:\n raise ValueError("n_jobs == 0 in Parallel has no meaning")\n elif mp is None or n_jobs is None:\n # multiprocessing is not available or disabled, fallback\n # to sequential mode\n return 1\n elif n_jobs < 0:\n n_jobs = max(cpu_count() + 1 + n_jobs, 1)\n return n_jobs\n\n def terminate(self):\n """Shutdown the process or thread pool"""\n if self._pool is not None:\n self._pool.close()\n self._pool.terminate() # terminate does a join()\n self._pool = None\n\n def _get_pool(self):\n """Used by `submit` to make it possible to implement lazy init"""\n return self._pool\n\n def submit(self, func, callback=None):\n """Schedule a func to be run"""\n # Here, we need a wrapper to avoid crashes on KeyboardInterruptErrors.\n # We also call the callback on error, to make sure the pool does not\n # wait on crashed jobs.\n return self._get_pool().apply_async(\n _TracebackCapturingWrapper(func),\n (),\n callback=callback,\n error_callback=callback,\n )\n\n def retrieve_result_callback(self, result):\n """Mimic concurrent.futures results, raising an error if needed."""\n # In the multiprocessing Pool API, the callback are called with the\n # result value as an argument so `result`(`out`) is the output of\n # job.get(). It's either the result or the exception raised while\n # collecting the result.\n return _retrieve_traceback_capturing_wrapped_call(result)\n\n def abort_everything(self, ensure_ready=True):\n """Shutdown the pool and restart a new one with the same parameters"""\n self.terminate()\n if ensure_ready:\n self.configure(\n n_jobs=self.parallel.n_jobs,\n parallel=self.parallel,\n **self.parallel._backend_kwargs,\n )\n\n\nclass AutoBatchingMixin(object):\n """A helper class for automagically batching jobs."""\n\n # In seconds, should be big enough to hide multiprocessing dispatching\n # overhead.\n # This settings was found by running benchmarks/bench_auto_batching.py\n # with various parameters on various platforms.\n MIN_IDEAL_BATCH_DURATION = 0.2\n\n # Should not be too high to avoid stragglers: long jobs running alone\n # on a single worker while other workers have no work to process any more.\n MAX_IDEAL_BATCH_DURATION = 2\n\n # Batching counters default values\n _DEFAULT_EFFECTIVE_BATCH_SIZE = 1\n _DEFAULT_SMOOTHED_BATCH_DURATION = 0.0\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE\n self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION\n\n def compute_batch_size(self):\n """Determine the optimal batch size"""\n old_batch_size = self._effective_batch_size\n batch_duration = self._smoothed_batch_duration\n if batch_duration > 0 and batch_duration < self.MIN_IDEAL_BATCH_DURATION:\n # The current batch size is too small: the duration of the\n # processing of a batch of task is not large enough to hide\n # the scheduling overhead.\n ideal_batch_size = int(\n old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration\n )\n # Multiply by two to limit oscilations between min and max.\n ideal_batch_size *= 2\n\n # dont increase the batch size too fast to limit huge batch sizes\n # potentially leading to starving worker\n batch_size = min(2 * old_batch_size, ideal_batch_size)\n\n batch_size = 
max(batch_size, 1)\n\n self._effective_batch_size = batch_size\n if self.parallel.verbose >= 10:\n self.parallel._print(\n f"Batch computation too fast ({batch_duration}s.) "\n f"Setting batch_size={batch_size}."\n )\n elif batch_duration > self.MAX_IDEAL_BATCH_DURATION and old_batch_size >= 2:\n # The current batch size is too big. If we schedule overly long\n # running batches some CPUs might wait with nothing left to do\n # while a couple of CPUs a left processing a few long running\n # batches. Better reduce the batch size a bit to limit the\n # likelihood of scheduling such stragglers.\n\n # decrease the batch size quickly to limit potential starving\n ideal_batch_size = int(\n old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration\n )\n # Multiply by two to limit oscilations between min and max.\n batch_size = max(2 * ideal_batch_size, 1)\n self._effective_batch_size = batch_size\n if self.parallel.verbose >= 10:\n self.parallel._print(\n f"Batch computation too slow ({batch_duration}s.) "\n f"Setting batch_size={batch_size}."\n )\n else:\n # No batch size adjustment\n batch_size = old_batch_size\n\n if batch_size != old_batch_size:\n # Reset estimation of the smoothed mean batch duration: this\n # estimate is updated in the multiprocessing apply_async\n # CallBack as long as the batch_size is constant. Therefore\n # we need to reset the estimate whenever we re-tune the batch\n # size.\n self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION\n\n return batch_size\n\n def batch_completed(self, batch_size, duration):\n """Callback indicate how long it took to run a batch"""\n if batch_size == self._effective_batch_size:\n # Update the smoothed streaming estimate of the duration of a batch\n # from dispatch to completion\n old_duration = self._smoothed_batch_duration\n if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:\n # First record of duration for this batch size after the last\n # reset.\n new_duration = duration\n else:\n # Update the exponentially weighted average of the duration of\n # batch for the current effective size.\n new_duration = 0.8 * old_duration + 0.2 * duration\n self._smoothed_batch_duration = new_duration\n\n def reset_batch_stats(self):\n """Reset batch statistics to default values.\n\n This avoids interferences with future jobs.\n """\n self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE\n self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION\n\n\nclass ThreadingBackend(PoolManagerMixin, ParallelBackendBase):\n """A ParallelBackend which will use a thread pool to execute batches in.\n\n This is a low-overhead backend but it suffers from the Python Global\n Interpreter Lock if the called function relies a lot on Python objects.\n Mostly useful when the execution bottleneck is a compiled extension that\n explicitly releases the GIL (for instance a Cython loop wrapped in a "with\n nogil" block or an expensive call to a library such as NumPy).\n\n The actual thread pool is lazily initialized: the actual thread pool\n construction is delayed to the first call to apply_async.\n\n ThreadingBackend is used as the default backend for nested calls.\n """\n\n supports_retrieve_callback = True\n uses_threads = True\n supports_sharedmem = True\n\n def configure(self, n_jobs=1, parallel=None, **backend_kwargs):\n """Build a process or thread pool and return the number of workers"""\n n_jobs = self.effective_n_jobs(n_jobs)\n if n_jobs == 1:\n # Avoid unnecessary overhead and use sequential backend instead.\n raise 
FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))\n self.parallel = parallel\n self._n_jobs = n_jobs\n return n_jobs\n\n def _get_pool(self):\n """Lazily initialize the thread pool\n\n The actual pool of worker threads is only initialized at the first\n call to apply_async.\n """\n if self._pool is None:\n self._pool = ThreadPool(self._n_jobs)\n return self._pool\n\n\nclass MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin, ParallelBackendBase):\n """A ParallelBackend which will use a multiprocessing.Pool.\n\n Will introduce some communication and memory overhead when exchanging\n input and output data with the with the worker Python processes.\n However, does not suffer from the Python Global Interpreter Lock.\n """\n\n supports_retrieve_callback = True\n supports_return_generator = False\n\n def effective_n_jobs(self, n_jobs):\n """Determine the number of jobs which are going to run in parallel.\n\n This also checks if we are attempting to create a nested parallel\n loop.\n """\n if mp is None:\n return 1\n\n if mp.current_process().daemon:\n # Daemonic processes cannot have children\n if n_jobs != 1:\n if inside_dask_worker():\n msg = (\n "Inside a Dask worker with daemon=True, "\n "setting n_jobs=1.\nPossible work-arounds:\n"\n "- dask.config.set("\n "{'distributed.worker.daemon': False})"\n "- set the environment variable "\n "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"\n "before creating your Dask cluster."\n )\n else:\n msg = (\n "Multiprocessing-backed parallel loops "\n "cannot be nested, setting n_jobs=1"\n )\n warnings.warn(msg, stacklevel=3)\n return 1\n\n if process_executor._CURRENT_DEPTH > 0:\n # Mixing loky and multiprocessing in nested loop is not supported\n if n_jobs != 1:\n warnings.warn(\n "Multiprocessing-backed parallel loops cannot be nested,"\n " below loky, setting n_jobs=1",\n stacklevel=3,\n )\n return 1\n\n elif not (self.in_main_thread() or self.nesting_level == 0):\n # Prevent posix fork inside in non-main posix threads\n if n_jobs != 1:\n warnings.warn(\n "Multiprocessing-backed parallel loops cannot be nested"\n " below threads, setting n_jobs=1",\n stacklevel=3,\n )\n return 1\n\n return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs)\n\n def configure(\n self,\n n_jobs=1,\n parallel=None,\n prefer=None,\n require=None,\n **memmapping_pool_kwargs,\n ):\n """Build a process or thread pool and return the number of workers"""\n n_jobs = self.effective_n_jobs(n_jobs)\n if n_jobs == 1:\n raise FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))\n\n memmapping_pool_kwargs = {\n **self.backend_kwargs,\n **memmapping_pool_kwargs,\n }\n\n # Make sure to free as much memory as possible before forking\n gc.collect()\n self._pool = MemmappingPool(n_jobs, **memmapping_pool_kwargs)\n self.parallel = parallel\n return n_jobs\n\n def terminate(self):\n """Shutdown the process or thread pool"""\n super(MultiprocessingBackend, self).terminate()\n self.reset_batch_stats()\n\n\nclass LokyBackend(AutoBatchingMixin, ParallelBackendBase):\n """Managing pool of workers with loky instead of multiprocessing."""\n\n supports_retrieve_callback = True\n supports_inner_max_num_threads = True\n\n def configure(\n self,\n n_jobs=1,\n parallel=None,\n prefer=None,\n require=None,\n idle_worker_timeout=None,\n **memmapping_executor_kwargs,\n ):\n """Build a process executor and return the number of workers"""\n n_jobs = self.effective_n_jobs(n_jobs)\n if n_jobs == 1:\n raise 
FallbackToBackend(SequentialBackend(nesting_level=self.nesting_level))\n\n memmapping_executor_kwargs = {\n **self.backend_kwargs,\n **memmapping_executor_kwargs,\n }\n\n # Prohibit the use of 'timeout' in the LokyBackend, as 'idle_worker_timeout'\n # better describes the backend's behavior.\n if "timeout" in memmapping_executor_kwargs:\n raise ValueError(\n "The 'timeout' parameter is not supported by the LokyBackend. "\n "Please use the `idle_worker_timeout` parameter instead."\n )\n if idle_worker_timeout is None:\n idle_worker_timeout = self.backend_kwargs.get("idle_worker_timeout", 300)\n\n self._workers = get_memmapping_executor(\n n_jobs,\n timeout=idle_worker_timeout,\n env=self._prepare_worker_env(n_jobs=n_jobs),\n context_id=parallel._id,\n **memmapping_executor_kwargs,\n )\n self.parallel = parallel\n return n_jobs\n\n def effective_n_jobs(self, n_jobs):\n """Determine the number of jobs which are going to run in parallel"""\n if n_jobs == 0:\n raise ValueError("n_jobs == 0 in Parallel has no meaning")\n elif mp is None or n_jobs is None:\n # multiprocessing is not available or disabled, fallback\n # to sequential mode\n return 1\n elif mp.current_process().daemon:\n # Daemonic processes cannot have children\n if n_jobs != 1:\n if inside_dask_worker():\n msg = (\n "Inside a Dask worker with daemon=True, "\n "setting n_jobs=1.\nPossible work-arounds:\n"\n "- dask.config.set("\n "{'distributed.worker.daemon': False})\n"\n "- set the environment variable "\n "DASK_DISTRIBUTED__WORKER__DAEMON=False\n"\n "before creating your Dask cluster."\n )\n else:\n msg = (\n "Loky-backed parallel loops cannot be called in a"\n " multiprocessing, setting n_jobs=1"\n )\n warnings.warn(msg, stacklevel=3)\n\n return 1\n elif not (self.in_main_thread() or self.nesting_level == 0):\n # Prevent posix fork inside in non-main posix threads\n if n_jobs != 1:\n warnings.warn(\n "Loky-backed parallel loops cannot be nested below "\n "threads, setting n_jobs=1",\n stacklevel=3,\n )\n return 1\n elif n_jobs < 0:\n n_jobs = max(cpu_count() + 1 + n_jobs, 1)\n return n_jobs\n\n def submit(self, func, callback=None):\n """Schedule a func to be run"""\n future = self._workers.submit(func)\n if callback is not None:\n future.add_done_callback(callback)\n return future\n\n def retrieve_result_callback(self, future):\n """Retrieve the result, here out is the future given by submit"""\n try:\n return future.result()\n except ShutdownExecutorError:\n raise RuntimeError(\n "The executor underlying Parallel has been shutdown. "\n "This is likely due to the garbage collection of a previous "\n "generator from a call to Parallel with return_as='generator'."\n " Make sure the generator is not garbage collected when "\n "submitting a new job or that it is first properly exhausted."\n )\n\n def terminate(self):\n if self._workers is not None:\n # Don't terminate the workers as we want to reuse them in later\n # calls, but cleanup the temporary resources that the Parallel call\n # created. 
This 'hack' requires a private, low-level operation.\n self._workers._temp_folder_manager._clean_temporary_resources(\n context_id=self.parallel._id, force=False\n )\n self._workers = None\n\n self.reset_batch_stats()\n\n def abort_everything(self, ensure_ready=True):\n """Shutdown the workers and restart a new one with the same parameters"""\n self._workers.terminate(kill_workers=True)\n self._workers = None\n\n if ensure_ready:\n self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)\n\n\nclass FallbackToBackend(Exception):\n """Raised when configuration should fallback to another backend"""\n\n def __init__(self, backend):\n self.backend = backend\n\n\ndef inside_dask_worker():\n """Check whether the current function is executed inside a Dask worker."""\n # This function can not be in joblib._dask because there would be a\n # circular import:\n # _dask imports _parallel_backend that imports _dask ...\n try:\n from distributed import get_worker\n except ImportError:\n return False\n\n try:\n get_worker()\n return True\n except ValueError:\n return False\n
.venv\Lib\site-packages\joblib\_parallel_backends.py
_parallel_backends.py
Python
28,766
0.95
0.177955
0.12945
awesome-app
592
2025-06-28T21:09:10.748878
BSD-3-Clause
false
e366130da889a98a48f1f3558699843f
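The backends catalogued in the file above are normally selected through the public Parallel API rather than instantiated directly. A minimal sketch (not part of the packaged file; the toy workloads are illustrative only), assuming joblib is importable from this environment:

import time

from joblib import Parallel, delayed


def io_like(seconds):
    # time.sleep releases the GIL, so the low-overhead ThreadingBackend is enough.
    time.sleep(seconds)
    return seconds


def cpu_bound(n):
    # A pure-Python loop holds the GIL, so a process-based backend
    # (LokyBackend by default, or MultiprocessingBackend) is usually preferable.
    return sum(i * i for i in range(n))


if __name__ == "__main__":
    # prefer="threads" hints joblib to pick the thread-based backend.
    waited = Parallel(n_jobs=4, prefer="threads")(
        delayed(io_like)(0.1) for _ in range(8)
    )
    # The default loky backend runs CPU-bound Python work in separate processes.
    totals = Parallel(n_jobs=4)(delayed(cpu_bound)(50_000) for _ in range(8))
    print(len(waited), len(totals))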
"""Storage providers backends for Memory caching."""\n\nimport collections\nimport datetime\nimport json\nimport operator\nimport os\nimport os.path\nimport re\nimport shutil\nimport threading\nimport time\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom pickle import PicklingError\n\nfrom . import numpy_pickle\nfrom .backports import concurrency_safe_rename\nfrom .disk import memstr_to_bytes, mkdirp, rm_subdirs\nfrom .logger import format_time\n\nCacheItemInfo = collections.namedtuple("CacheItemInfo", "path size last_access")\n\n\nclass CacheWarning(Warning):\n """Warning to capture dump failures except for PicklingError."""\n\n pass\n\n\ndef concurrency_safe_write(object_to_write, filename, write_func):\n """Writes an object into a unique file in a concurrency-safe way."""\n thread_id = id(threading.current_thread())\n temporary_filename = "{}.thread-{}-pid-{}".format(filename, thread_id, os.getpid())\n write_func(object_to_write, temporary_filename)\n\n return temporary_filename\n\n\nclass StoreBackendBase(metaclass=ABCMeta):\n """Helper Abstract Base Class which defines all methods that\n a StorageBackend must implement."""\n\n location = None\n\n @abstractmethod\n def _open_item(self, f, mode):\n """Opens an item on the store and return a file-like object.\n\n This method is private and only used by the StoreBackendMixin object.\n\n Parameters\n ----------\n f: a file-like object\n The file-like object where an item is stored and retrieved\n mode: string, optional\n the mode in which the file-like object is opened allowed valued are\n 'rb', 'wb'\n\n Returns\n -------\n a file-like object\n """\n\n @abstractmethod\n def _item_exists(self, location):\n """Checks if an item location exists in the store.\n\n This method is private and only used by the StoreBackendMixin object.\n\n Parameters\n ----------\n location: string\n The location of an item. On a filesystem, this corresponds to the\n absolute path, including the filename, of a file.\n\n Returns\n -------\n True if the item exists, False otherwise\n """\n\n @abstractmethod\n def _move_item(self, src, dst):\n """Moves an item from src to dst in the store.\n\n This method is private and only used by the StoreBackendMixin object.\n\n Parameters\n ----------\n src: string\n The source location of an item\n dst: string\n The destination location of an item\n """\n\n @abstractmethod\n def create_location(self, location):\n """Creates a location on the store.\n\n Parameters\n ----------\n location: string\n The location in the store. On a filesystem, this corresponds to a\n directory.\n """\n\n @abstractmethod\n def clear_location(self, location):\n """Clears a location on the store.\n\n Parameters\n ----------\n location: string\n The location in the store. On a filesystem, this corresponds to a\n directory or a filename absolute path\n """\n\n @abstractmethod\n def get_items(self):\n """Returns the whole list of items available in the store.\n\n Returns\n -------\n The list of items identified by their ids (e.g filename in a\n filesystem).\n """\n\n @abstractmethod\n def configure(self, location, verbose=0, backend_options=dict()):\n """Configures the store.\n\n Parameters\n ----------\n location: string\n The base location used by the store. 
On a filesystem, this\n corresponds to a directory.\n verbose: int\n The level of verbosity of the store\n backend_options: dict\n Contains a dictionary of named parameters used to configure the\n store backend.\n """\n\n\nclass StoreBackendMixin(object):\n """Class providing all logic for managing the store in a generic way.\n\n The StoreBackend subclass has to implement 3 methods: create_location,\n clear_location and configure. The StoreBackend also has to provide\n a private _open_item, _item_exists and _move_item methods. The _open_item\n method has to have the same signature as the builtin open and return a\n file-like object.\n """\n\n def load_item(self, call_id, verbose=1, timestamp=None, metadata=None):\n """Load an item from the store given its id as a list of str."""\n full_path = os.path.join(self.location, *call_id)\n\n if verbose > 1:\n ts_string = (\n "{: <16}".format(format_time(time.time() - timestamp))\n if timestamp is not None\n else ""\n )\n signature = os.path.basename(call_id[0])\n if metadata is not None and "input_args" in metadata:\n kwargs = ", ".join(\n "{}={}".format(*item) for item in metadata["input_args"].items()\n )\n signature += "({})".format(kwargs)\n msg = "[Memory]{}: Loading {}".format(ts_string, signature)\n if verbose < 10:\n print("{0}...".format(msg))\n else:\n print("{0} from {1}".format(msg, full_path))\n\n mmap_mode = None if not hasattr(self, "mmap_mode") else self.mmap_mode\n\n filename = os.path.join(full_path, "output.pkl")\n if not self._item_exists(filename):\n raise KeyError(\n "Non-existing item (may have been "\n "cleared).\nFile %s does not exist" % filename\n )\n\n # file-like object cannot be used when mmap_mode is set\n if mmap_mode is None:\n with self._open_item(filename, "rb") as f:\n item = numpy_pickle.load(f)\n else:\n item = numpy_pickle.load(filename, mmap_mode=mmap_mode)\n return item\n\n def dump_item(self, call_id, item, verbose=1):\n """Dump an item in the store at the id given as a list of str."""\n try:\n item_path = os.path.join(self.location, *call_id)\n if not self._item_exists(item_path):\n self.create_location(item_path)\n filename = os.path.join(item_path, "output.pkl")\n if verbose > 10:\n print("Persisting in %s" % item_path)\n\n def write_func(to_write, dest_filename):\n with self._open_item(dest_filename, "wb") as f:\n try:\n numpy_pickle.dump(to_write, f, compress=self.compress)\n except PicklingError as e:\n # TODO(1.5) turn into error\n warnings.warn(\n "Unable to cache to disk: failed to pickle "\n "output. In version 1.5 this will raise an "\n f"exception. Exception: {e}.",\n FutureWarning,\n )\n\n self._concurrency_safe_write(item, filename, write_func)\n except Exception as e: # noqa: E722\n warnings.warn(\n "Unable to cache to disk. Possibly a race condition in the "\n f"creation of the directory. 
Exception: {e}.",\n CacheWarning,\n )\n\n def clear_item(self, call_id):\n """Clear the item at the id, given as a list of str."""\n item_path = os.path.join(self.location, *call_id)\n if self._item_exists(item_path):\n self.clear_location(item_path)\n\n def contains_item(self, call_id):\n """Check if there is an item at the id, given as a list of str."""\n item_path = os.path.join(self.location, *call_id)\n filename = os.path.join(item_path, "output.pkl")\n\n return self._item_exists(filename)\n\n def get_item_info(self, call_id):\n """Return information about item."""\n return {"location": os.path.join(self.location, *call_id)}\n\n def get_metadata(self, call_id):\n """Return actual metadata of an item."""\n try:\n item_path = os.path.join(self.location, *call_id)\n filename = os.path.join(item_path, "metadata.json")\n with self._open_item(filename, "rb") as f:\n return json.loads(f.read().decode("utf-8"))\n except: # noqa: E722\n return {}\n\n def store_metadata(self, call_id, metadata):\n """Store metadata of a computation."""\n try:\n item_path = os.path.join(self.location, *call_id)\n self.create_location(item_path)\n filename = os.path.join(item_path, "metadata.json")\n\n def write_func(to_write, dest_filename):\n with self._open_item(dest_filename, "wb") as f:\n f.write(json.dumps(to_write).encode("utf-8"))\n\n self._concurrency_safe_write(metadata, filename, write_func)\n except: # noqa: E722\n pass\n\n def contains_path(self, call_id):\n """Check cached function is available in store."""\n func_path = os.path.join(self.location, *call_id)\n return self.object_exists(func_path)\n\n def clear_path(self, call_id):\n """Clear all items with a common path in the store."""\n func_path = os.path.join(self.location, *call_id)\n if self._item_exists(func_path):\n self.clear_location(func_path)\n\n def store_cached_func_code(self, call_id, func_code=None):\n """Store the code of the cached function."""\n func_path = os.path.join(self.location, *call_id)\n if not self._item_exists(func_path):\n self.create_location(func_path)\n\n if func_code is not None:\n filename = os.path.join(func_path, "func_code.py")\n with self._open_item(filename, "wb") as f:\n f.write(func_code.encode("utf-8"))\n\n def get_cached_func_code(self, call_id):\n """Store the code of the cached function."""\n filename = os.path.join(self.location, *call_id, "func_code.py")\n try:\n with self._open_item(filename, "rb") as f:\n return f.read().decode("utf-8")\n except: # noqa: E722\n raise\n\n def get_cached_func_info(self, call_id):\n """Return information related to the cached function if it exists."""\n return {"location": os.path.join(self.location, *call_id)}\n\n def clear(self):\n """Clear the whole store content."""\n self.clear_location(self.location)\n\n def enforce_store_limits(self, bytes_limit, items_limit=None, age_limit=None):\n """\n Remove the store's oldest files to enforce item, byte, and age limits.\n """\n items_to_delete = self._get_items_to_delete(bytes_limit, items_limit, age_limit)\n\n for item in items_to_delete:\n if self.verbose > 10:\n print("Deleting item {0}".format(item))\n try:\n self.clear_location(item.path)\n except OSError:\n # Even with ignore_errors=True shutil.rmtree can raise OSError\n # with:\n # [Errno 116] Stale file handle if another process has deleted\n # the folder already.\n pass\n\n def _get_items_to_delete(self, bytes_limit, items_limit=None, age_limit=None):\n """\n Get items to delete to keep the store under size, file, & age limits.\n """\n if isinstance(bytes_limit, 
str):\n bytes_limit = memstr_to_bytes(bytes_limit)\n\n items = self.get_items()\n if not items:\n return []\n\n size = sum(item.size for item in items)\n\n if bytes_limit is not None:\n to_delete_size = size - bytes_limit\n else:\n to_delete_size = 0\n\n if items_limit is not None:\n to_delete_items = len(items) - items_limit\n else:\n to_delete_items = 0\n\n if age_limit is not None:\n older_item = min(item.last_access for item in items)\n if age_limit.total_seconds() < 0:\n raise ValueError("age_limit has to be a positive timedelta")\n deadline = datetime.datetime.now() - age_limit\n else:\n deadline = None\n\n if (\n to_delete_size <= 0\n and to_delete_items <= 0\n and (deadline is None or older_item > deadline)\n ):\n return []\n\n # We want to delete first the cache items that were accessed a\n # long time ago\n items.sort(key=operator.attrgetter("last_access"))\n\n items_to_delete = []\n size_so_far = 0\n items_so_far = 0\n\n for item in items:\n if (\n (size_so_far >= to_delete_size)\n and items_so_far >= to_delete_items\n and (deadline is None or deadline < item.last_access)\n ):\n break\n\n items_to_delete.append(item)\n size_so_far += item.size\n items_so_far += 1\n\n return items_to_delete\n\n def _concurrency_safe_write(self, to_write, filename, write_func):\n """Writes an object into a file in a concurrency-safe way."""\n temporary_filename = concurrency_safe_write(to_write, filename, write_func)\n self._move_item(temporary_filename, filename)\n\n def __repr__(self):\n """Printable representation of the store location."""\n return '{class_name}(location="{location}")'.format(\n class_name=self.__class__.__name__, location=self.location\n )\n\n\nclass FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin):\n """A StoreBackend used with local or network file systems."""\n\n _open_item = staticmethod(open)\n _item_exists = staticmethod(os.path.exists)\n _move_item = staticmethod(concurrency_safe_rename)\n\n def clear_location(self, location):\n """Delete location on store."""\n if location == self.location:\n rm_subdirs(location)\n else:\n shutil.rmtree(location, ignore_errors=True)\n\n def create_location(self, location):\n """Create object location on store"""\n mkdirp(location)\n\n def get_items(self):\n """Returns the whole list of items available in the store."""\n items = []\n\n for dirpath, _, filenames in os.walk(self.location):\n is_cache_hash_dir = re.match("[a-f0-9]{32}", os.path.basename(dirpath))\n\n if is_cache_hash_dir:\n output_filename = os.path.join(dirpath, "output.pkl")\n try:\n last_access = os.path.getatime(output_filename)\n except OSError:\n try:\n last_access = os.path.getatime(dirpath)\n except OSError:\n # The directory has already been deleted\n continue\n\n last_access = datetime.datetime.fromtimestamp(last_access)\n try:\n full_filenames = [os.path.join(dirpath, fn) for fn in filenames]\n dirsize = sum(os.path.getsize(fn) for fn in full_filenames)\n except OSError:\n # Either output_filename or one of the files in\n # dirpath does not exist any more. 
We assume this\n # directory is being cleaned by another process already\n continue\n\n items.append(CacheItemInfo(dirpath, dirsize, last_access))\n\n return items\n\n def configure(self, location, verbose=1, backend_options=None):\n """Configure the store backend.\n\n For this backend, valid store options are 'compress' and 'mmap_mode'\n """\n if backend_options is None:\n backend_options = {}\n\n # setup location directory\n self.location = location\n if not os.path.exists(self.location):\n mkdirp(self.location)\n\n # Automatically add `.gitignore` file to the cache folder.\n # XXX: the condition is necessary because in `Memory.__init__`, the user\n # passed `location` param is modified to be either `{location}` or\n # `{location}/joblib` depending on input type (`pathlib.Path` vs `str`).\n # The proper resolution of this inconsistency is tracked in:\n # https://github.com/joblib/joblib/issues/1684\n cache_directory = (\n os.path.dirname(location)\n if os.path.dirname(location) and os.path.basename(location) == "joblib"\n else location\n )\n with open(os.path.join(cache_directory, ".gitignore"), "w") as file:\n file.write("# Created by joblib automatically.\n")\n file.write("*\n")\n\n # item can be stored compressed for faster I/O\n self.compress = backend_options.get("compress", False)\n\n # FileSystemStoreBackend can be used with mmap_mode options under\n # certain conditions.\n mmap_mode = backend_options.get("mmap_mode")\n if self.compress and mmap_mode is not None:\n warnings.warn(\n "Compressed items cannot be memmapped in a "\n "filesystem store. Option will be ignored.",\n stacklevel=2,\n )\n\n self.mmap_mode = mmap_mode\n self.verbose = verbose\n
.venv\Lib\site-packages\joblib\_store_backends.py
_store_backends.py
Python
17,343
0.95
0.190574
0.055416
node-utils
862
2024-03-23T15:33:53.589623
MIT
false
5edd2a30bf458cdbcafbb4693b1a4e62
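The store backends above are meant to be subclassed and registered. A minimal sketch of wiring a custom filesystem backend into Memory; the "verbose_fs" name, cache directory, and limits are example values, and Memory.reduce_size is assumed to be available (joblib >= 1.3):

import datetime

from joblib import Memory, register_store_backend
from joblib._store_backends import FileSystemStoreBackend


class VerboseFileSystemBackend(FileSystemStoreBackend):
    """Filesystem backend that logs every cache write."""

    def dump_item(self, call_id, item, verbose=1):
        print("caching:", "/".join(call_id))
        super().dump_item(call_id, item, verbose=verbose)


register_store_backend("verbose_fs", VerboseFileSystemBackend)
memory = Memory("./cache_dir", backend="verbose_fs", verbose=0)


@memory.cache
def square(x):
    return x * x


square(3)  # computed and written through the custom backend
square(3)  # served from the on-disk cache

# Pruning built on _get_items_to_delete/enforce_store_limits is exposed
# through Memory.reduce_size.
memory.reduce_size(bytes_limit="10M", age_limit=datetime.timedelta(days=7))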
"""Joblib is a set of tools to provide **lightweight pipelining in\nPython**. In particular:\n\n1. transparent disk-caching of functions and lazy re-evaluation\n (memoize pattern)\n\n2. easy simple parallel computing\n\nJoblib is optimized to be **fast** and **robust** on large\ndata in particular and has specific optimizations for `numpy` arrays. It is\n**BSD-licensed**.\n\n\n ==================== ===============================================\n **Documentation:** https://joblib.readthedocs.io\n\n **Download:** https://pypi.python.org/pypi/joblib#downloads\n\n **Source code:** https://github.com/joblib/joblib\n\n **Report issues:** https://github.com/joblib/joblib/issues\n ==================== ===============================================\n\n\nVision\n--------\n\nThe vision is to provide tools to easily achieve better performance and\nreproducibility when working with long running jobs.\n\n * **Avoid computing the same thing twice**: code is often rerun again and\n again, for instance when prototyping computational-heavy jobs (as in\n scientific development), but hand-crafted solutions to alleviate this\n issue are error-prone and often lead to unreproducible results.\n\n * **Persist to disk transparently**: efficiently persisting\n arbitrary objects containing large data is hard. Using\n joblib's caching mechanism avoids hand-written persistence and\n implicitly links the file on disk to the execution context of\n the original Python object. As a result, joblib's persistence is\n good for resuming an application status or computational job, eg\n after a crash.\n\nJoblib addresses these problems while **leaving your code and your flow\ncontrol as unmodified as possible** (no framework, no new paradigms).\n\nMain features\n------------------\n\n1) **Transparent and fast disk-caching of output value:** a memoize or\n make-like functionality for Python functions that works well for\n arbitrary Python objects, including very large numpy arrays. Separate\n persistence and flow-execution logic from domain logic or algorithmic\n code by writing the operations as a set of steps with well-defined\n inputs and outputs: Python functions. 
Joblib can save their\n computation to disk and rerun it only if necessary::\n\n >>> from joblib import Memory\n >>> location = 'your_cache_dir_goes_here'\n >>> mem = Memory(location, verbose=1)\n >>> import numpy as np\n >>> a = np.vander(np.arange(3)).astype(float)\n >>> square = mem.cache(np.square)\n >>> b = square(a) # doctest: +ELLIPSIS\n ______________________________________________________________________...\n [Memory] Calling ...square...\n square(array([[0., 0., 1.],\n [1., 1., 1.],\n [4., 2., 1.]]))\n _________________________________________________...square - ...s, 0.0min\n\n >>> c = square(a)\n >>> # The above call did not trigger an evaluation\n\n2) **Embarrassingly parallel helper:** to make it easy to write readable\n parallel code and debug it quickly::\n\n >>> from joblib import Parallel, delayed\n >>> from math import sqrt\n >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))\n [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]\n\n\n3) **Fast compressed Persistence**: a replacement for pickle to work\n efficiently on Python objects containing large data (\n *joblib.dump* & *joblib.load* ).\n\n..\n >>> import shutil ; shutil.rmtree(location)\n\n"""\n\n# PEP0440 compatible formatted version, see:\n# https://www.python.org/dev/peps/pep-0440/\n#\n# Generic release markers:\n# X.Y\n# X.Y.Z # For bugfix releases\n#\n# Admissible pre-release markers:\n# X.YaN # Alpha release\n# X.YbN # Beta release\n# X.YrcN # Release Candidate\n# X.Y # Final release\n#\n# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.\n# 'X.Y.dev0' is the canonical version of 'X.Y.dev'\n#\n__version__ = "1.5.1"\n\n\nimport os\n\nfrom ._cloudpickle_wrapper import wrap_non_picklable_objects\nfrom ._parallel_backends import ParallelBackendBase\nfrom ._store_backends import StoreBackendBase\nfrom .compressor import register_compressor\nfrom .hashing import hash\nfrom .logger import Logger, PrintTime\nfrom .memory import MemorizedResult, Memory, expires_after, register_store_backend\nfrom .numpy_pickle import dump, load\nfrom .parallel import (\n Parallel,\n cpu_count,\n delayed,\n effective_n_jobs,\n parallel_backend,\n parallel_config,\n register_parallel_backend,\n)\n\n__all__ = [\n # On-disk result caching\n "Memory",\n "MemorizedResult",\n "expires_after",\n # Parallel code execution\n "Parallel",\n "delayed",\n "cpu_count",\n "effective_n_jobs",\n "wrap_non_picklable_objects",\n # Context to change the backend globally\n "parallel_config",\n "parallel_backend",\n # Helpers to define and register store/parallel backends\n "ParallelBackendBase",\n "StoreBackendBase",\n "register_compressor",\n "register_parallel_backend",\n "register_store_backend",\n # Helpers kept for backward compatibility\n "PrintTime",\n "Logger",\n "hash",\n "dump",\n "load",\n]\n\n\n# Workaround issue discovered in intel-openmp 2019.5:\n# https://github.com/ContinuumIO/anaconda-issues/issues/11294\nos.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")\n
.venv\Lib\site-packages\joblib\__init__.py
__init__.py
Python
5,337
0.95
0.06135
0.234848
react-lib
688
2024-06-18T15:48:37.447327
Apache-2.0
false
45cc8f0fcc346cd69fbe2cba57eb6c12
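A short sketch combining the two main features described in the package docstring above, disk caching plus parallel execution; the cache directory and the toy function are illustrative only:

from math import sqrt

from joblib import Memory, Parallel, delayed

memory = Memory("./joblib_cache", verbose=0)


@memory.cache
def slow_sqrt(x):
    return sqrt(x)


if __name__ == "__main__":
    # The first call computes in parallel and fills the cache; repeating the
    # same call is then served from disk without recomputation.
    results = Parallel(n_jobs=2)(delayed(slow_sqrt)(i) for i in range(10))
    print(results[:3])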
"""Compatibility module.\n\nIt can be necessary to load files generated by previous versions of cloudpickle\nthat rely on symbols being defined under the `cloudpickle.cloudpickle_fast`\nnamespace.\n\nSee: tests/test_backward_compat.py\n"""\n\nfrom . import cloudpickle\n\n\ndef __getattr__(name):\n return getattr(cloudpickle, name)\n
.venv\Lib\site-packages\joblib\externals\cloudpickle\cloudpickle_fast.py
cloudpickle_fast.py
Python
323
0.85
0.071429
0
node-utils
623
2024-08-10T19:19:42.288777
BSD-3-Clause
false
513a7e6b02c83ea8f5d27bb7c4b3e263
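The shim above relies on the module-level __getattr__ hook (PEP 562). A minimal sketch of the same forwarding pattern; for demonstration it delegates to the standard math module, whereas the real shim delegates to the current cloudpickle module:

# legacy_shim.py
import math as _target


def __getattr__(name):
    # Attributes not defined in this module are resolved against the target
    # module, so imports (and old pickles) using the legacy name keep working.
    return getattr(_target, name)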
from . import cloudpickle\nfrom .cloudpickle import * # noqa\n\n__doc__ = cloudpickle.__doc__\n\n__version__ = "3.1.1"\n\n__all__ = [ # noqa\n "__version__",\n "Pickler",\n "CloudPickler",\n "dumps",\n "loads",\n "dump",\n "load",\n "register_pickle_by_value",\n "unregister_pickle_by_value",\n]\n
.venv\Lib\site-packages\joblib\externals\cloudpickle\__init__.py
__init__.py
Python
308
0.95
0
0
python-kit
415
2025-04-03T12:20:22.195398
GPL-3.0
false
fc14a198724d469afa786ea051a78d98
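A small sketch of the dumps/loads round trip re-exported above; unlike the standard pickle module, cloudpickle can serialize lambdas and closures:

from joblib.externals.cloudpickle import dumps, loads

factor = 3
scale = lambda x: factor * x  # a closure that plain pickle would reject

payload = dumps(scale)
restored = loads(payload)
print(restored(14))  # 42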
\n\n
.venv\Lib\site-packages\joblib\externals\cloudpickle\__pycache__\cloudpickle.cpython-313.pyc
cloudpickle.cpython-313.pyc
Other
55,183
0.75
0.121597
0.016
react-lib
871
2025-05-31T19:05:36.894881
MIT
false
765861ab6a65cb04e15bf115bbec48c7
\n\n
.venv\Lib\site-packages\joblib\externals\cloudpickle\__pycache__\cloudpickle_fast.cpython-313.pyc
cloudpickle_fast.cpython-313.pyc
Other
659
0.7
0
0
awesome-app
544
2024-02-08T07:00:32.634723
GPL-3.0
false
24cd132b48f7c3331e93e213791c7bc1
\n\n
.venv\Lib\site-packages\joblib\externals\cloudpickle\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
467
0.7
0
0
node-utils
685
2023-07-15T10:46:44.163219
BSD-3-Clause
false
f21ed0c21760a91c99f3bb450ccae563
###############################################################################\n# Reusable ProcessPoolExecutor\n#\n# author: Thomas Moreau and Olivier Grisel\n#\nimport time\nimport warnings\nimport threading\nimport multiprocessing as mp\n\nfrom .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS\nfrom .backend.context import cpu_count\nfrom .backend import get_context\n\n__all__ = ["get_reusable_executor"]\n\n# Singleton executor and id management\n_executor_lock = threading.RLock()\n_next_executor_id = 0\n_executor = None\n_executor_kwargs = None\n\n\ndef _get_next_executor_id():\n """Ensure that each successive executor instance has a unique, monotonic id.\n\n The purpose of this monotonic id is to help debug and test automated\n instance creation.\n """\n global _next_executor_id\n with _executor_lock:\n executor_id = _next_executor_id\n _next_executor_id += 1\n return executor_id\n\n\ndef get_reusable_executor(\n max_workers=None,\n context=None,\n timeout=10,\n kill_workers=False,\n reuse="auto",\n job_reducers=None,\n result_reducers=None,\n initializer=None,\n initargs=(),\n env=None,\n):\n """Return the current ReusableExectutor instance.\n\n Start a new instance if it has not been started already or if the previous\n instance was left in a broken state.\n\n If the previous instance does not have the requested number of workers, the\n executor is dynamically resized to adjust the number of workers prior to\n returning.\n\n Reusing a singleton instance spares the overhead of starting new worker\n processes and importing common python packages each time.\n\n ``max_workers`` controls the maximum number of tasks that can be running in\n parallel in worker processes. By default this is set to the number of\n CPUs on the host.\n\n Setting ``timeout`` (in seconds) makes idle workers automatically shutdown\n so as to release system resources. New workers are respawn upon submission\n of new tasks so that ``max_workers`` are available to accept the newly\n submitted tasks. Setting ``timeout`` to around 100 times the time required\n to spawn new processes and import packages in them (on the order of 100ms)\n ensures that the overhead of spawning workers is negligible.\n\n Setting ``kill_workers=True`` makes it possible to forcibly interrupt\n previously spawned jobs to get a new instance of the reusable executor\n with new constructor argument values.\n\n The ``job_reducers`` and ``result_reducers`` are used to customize the\n pickling of tasks and results send to the executor.\n\n When provided, the ``initializer`` is run first in newly spawned\n processes with argument ``initargs``.\n\n The environment variable in the child process are a copy of the values in\n the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and\n ``VAL`` are string literals to overwrite the environment variable ``ENV``\n in the child processes to value ``VAL``. The environment variables are set\n in the children before any module is loaded. 
This only works with the\n ``loky`` context.\n """\n _executor, _ = _ReusablePoolExecutor.get_reusable_executor(\n max_workers=max_workers,\n context=context,\n timeout=timeout,\n kill_workers=kill_workers,\n reuse=reuse,\n job_reducers=job_reducers,\n result_reducers=result_reducers,\n initializer=initializer,\n initargs=initargs,\n env=env,\n )\n return _executor\n\n\nclass _ReusablePoolExecutor(ProcessPoolExecutor):\n def __init__(\n self,\n submit_resize_lock,\n max_workers=None,\n context=None,\n timeout=None,\n executor_id=0,\n job_reducers=None,\n result_reducers=None,\n initializer=None,\n initargs=(),\n env=None,\n ):\n super().__init__(\n max_workers=max_workers,\n context=context,\n timeout=timeout,\n job_reducers=job_reducers,\n result_reducers=result_reducers,\n initializer=initializer,\n initargs=initargs,\n env=env,\n )\n self.executor_id = executor_id\n self._submit_resize_lock = submit_resize_lock\n\n @classmethod\n def get_reusable_executor(\n cls,\n max_workers=None,\n context=None,\n timeout=10,\n kill_workers=False,\n reuse="auto",\n job_reducers=None,\n result_reducers=None,\n initializer=None,\n initargs=(),\n env=None,\n ):\n with _executor_lock:\n global _executor, _executor_kwargs\n executor = _executor\n\n if max_workers is None:\n if reuse is True and executor is not None:\n max_workers = executor._max_workers\n else:\n max_workers = cpu_count()\n elif max_workers <= 0:\n raise ValueError(\n f"max_workers must be greater than 0, got {max_workers}."\n )\n\n if isinstance(context, str):\n context = get_context(context)\n if context is not None and context.get_start_method() == "fork":\n raise ValueError(\n "Cannot use reusable executor with the 'fork' context"\n )\n\n kwargs = dict(\n context=context,\n timeout=timeout,\n job_reducers=job_reducers,\n result_reducers=result_reducers,\n initializer=initializer,\n initargs=initargs,\n env=env,\n )\n if executor is None:\n is_reused = False\n mp.util.debug(\n f"Create a executor with max_workers={max_workers}."\n )\n executor_id = _get_next_executor_id()\n _executor_kwargs = kwargs\n _executor = executor = cls(\n _executor_lock,\n max_workers=max_workers,\n executor_id=executor_id,\n **kwargs,\n )\n else:\n if reuse == "auto":\n reuse = kwargs == _executor_kwargs\n if (\n executor._flags.broken\n or executor._flags.shutdown\n or not reuse\n or executor.queue_size < max_workers\n ):\n if executor._flags.broken:\n reason = "broken"\n elif executor._flags.shutdown:\n reason = "shutdown"\n elif executor.queue_size < max_workers:\n # Do not reuse the executor if the queue size is too\n # small as this would lead to limited parallelism.\n reason = "queue size is too small"\n else:\n reason = "arguments have changed"\n mp.util.debug(\n "Creating a new executor with max_workers="\n f"{max_workers} as the previous instance cannot be "\n f"reused ({reason})."\n )\n executor.shutdown(wait=True, kill_workers=kill_workers)\n _executor = executor = _executor_kwargs = None\n # Recursive call to build a new instance\n return cls.get_reusable_executor(\n max_workers=max_workers, **kwargs\n )\n else:\n mp.util.debug(\n "Reusing existing executor with "\n f"max_workers={executor._max_workers}."\n )\n is_reused = True\n executor._resize(max_workers)\n\n return executor, is_reused\n\n def submit(self, fn, *args, **kwargs):\n with self._submit_resize_lock:\n return super().submit(fn, *args, **kwargs)\n\n def _resize(self, max_workers):\n with self._submit_resize_lock:\n if max_workers is None:\n raise ValueError("Trying to resize with 
max_workers=None")\n elif max_workers == self._max_workers:\n return\n\n if self._executor_manager_thread is None:\n # If the executor_manager_thread has not been started\n # then no processes have been spawned and we can just\n # update _max_workers and return\n self._max_workers = max_workers\n return\n\n self._wait_job_completion()\n\n # Some process might have returned due to timeout so check how many\n # children are still alive. Use the _process_management_lock to\n # ensure that no process are spawned or timeout during the resize.\n with self._processes_management_lock:\n processes = list(self._processes.values())\n nb_children_alive = sum(p.is_alive() for p in processes)\n self._max_workers = max_workers\n for _ in range(max_workers, nb_children_alive):\n self._call_queue.put(None)\n while (\n len(self._processes) > max_workers and not self._flags.broken\n ):\n time.sleep(1e-3)\n\n self._adjust_process_count()\n processes = list(self._processes.values())\n while not all(p.is_alive() for p in processes):\n time.sleep(1e-3)\n\n def _wait_job_completion(self):\n """Wait for the cache to be empty before resizing the pool."""\n # Issue a warning to the user about the bad effect of this usage.\n if self._pending_work_items:\n warnings.warn(\n "Trying to resize an executor with running jobs: "\n "waiting for jobs completion before resizing.",\n UserWarning,\n )\n mp.util.debug(\n f"Executor {self.executor_id} waiting for jobs completion "\n "before resizing"\n )\n # Wait for the completion of the jobs\n while self._pending_work_items:\n time.sleep(1e-3)\n\n def _setup_queues(self, job_reducers, result_reducers):\n # As this executor can be resized, use a large queue size to avoid\n # underestimating capacity and introducing overhead\n # Also handle the case where the user set max_workers to a value larger\n # than cpu_count(), to avoid limiting the number of parallel jobs.\n\n min_queue_size = max(cpu_count(), self._max_workers)\n self.queue_size = 2 * min_queue_size + EXTRA_QUEUED_CALLS\n super()._setup_queues(\n job_reducers, result_reducers, queue_size=self.queue_size\n )\n
.venv\Lib\site-packages\joblib\externals\loky\reusable_executor.py
reusable_executor.py
Python
10,863
0.95
0.112245
0.084291
awesome-app
223
2024-05-11T18:43:08.324990
BSD-3-Clause
false
466daf54838f29c4a1fbdac646257543
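A minimal sketch of the singleton behaviour documented above; calling get_reusable_executor twice with the same arguments is expected to return the same worker pool instead of spawning a new one:

from joblib.externals.loky import get_reusable_executor


def work(x):
    return x * x


if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=2, timeout=10)
    print(list(executor.map(work, range(5))))

    # Same constructor arguments: the existing workers are reused.
    again = get_reusable_executor(max_workers=2, timeout=10)
    print(again is executor)  # expected: True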
###############################################################################\n# Modification of concurrent.futures.Future\n#\n# author: Thomas Moreau and Olivier Grisel\n#\n# adapted from concurrent/futures/_base.py (17/02/2017)\n# * Do not use yield from\n# * Use old super syntax\n#\n# Copyright 2009 Brian Quinlan. All Rights Reserved.\n# Licensed to PSF under a Contributor Agreement.\n\nfrom concurrent.futures import Future as _BaseFuture\nfrom concurrent.futures._base import LOGGER\n\n\n# To make loky._base.Future instances awaitable by concurrent.futures.wait,\n# derive our custom Future class from _BaseFuture. _invoke_callback is the only\n# modification made to this class in loky.\n# TODO investigate why using `concurrent.futures.Future` directly does not\n# always work in our test suite.\nclass Future(_BaseFuture):\n def _invoke_callbacks(self):\n for callback in self._done_callbacks:\n try:\n callback(self)\n except BaseException:\n LOGGER.exception(f"exception calling callback for {self!r}")\n
.venv\Lib\site-packages\joblib\externals\loky\_base.py
_base.py
Python
1,057
0.95
0.25
0.64
python-kit
861
2023-09-07T01:49:49.679766
MIT
false
83d8b91fedca820e45236ab4cf75ce38
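A small sketch of the behaviour this Future subclass is concerned with: an exception raised inside a done-callback is logged through the concurrent.futures logger instead of propagating to the submitting thread; the failing callback is deliberately contrived:

from joblib.externals.loky import get_reusable_executor


def noisy_callback(future):
    # Caught by Future._invoke_callbacks (or by add_done_callback itself) and
    # logged, so the submitting thread is never interrupted.
    raise RuntimeError(f"callback failed for {future!r}")


if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=1)
    future = executor.submit(pow, 2, 10)
    future.add_done_callback(noisy_callback)
    print(future.result())  # still 1024 despite the failing callback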
r"""The :mod:`loky` module manages a pool of worker that can be re-used across time.\nIt provides a robust and dynamic implementation os the\n:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which\nhide the pool management under the hood.\n"""\n\nfrom concurrent.futures import (\n ALL_COMPLETED,\n FIRST_COMPLETED,\n FIRST_EXCEPTION,\n CancelledError,\n Executor,\n TimeoutError,\n as_completed,\n wait,\n)\n\nfrom ._base import Future\nfrom .backend.context import cpu_count\nfrom .backend.reduction import set_loky_pickler\nfrom .reusable_executor import get_reusable_executor\nfrom .cloudpickle_wrapper import wrap_non_picklable_objects\nfrom .process_executor import BrokenProcessPool, ProcessPoolExecutor\n\n\n__all__ = [\n "get_reusable_executor",\n "cpu_count",\n "wait",\n "as_completed",\n "Future",\n "Executor",\n "ProcessPoolExecutor",\n "BrokenProcessPool",\n "CancelledError",\n "TimeoutError",\n "FIRST_COMPLETED",\n "FIRST_EXCEPTION",\n "ALL_COMPLETED",\n "wrap_non_picklable_objects",\n "set_loky_pickler",\n]\n\n\n__version__ = "3.5.5"\n
.venv\Lib\site-packages\joblib\externals\loky\__init__.py
__init__.py
Python
1,105
0.85
0.044444
0
python-kit
892
2024-12-01T23:44:49.079767
BSD-3-Clause
false
7b4ee9b956bba36289b0848a9e4751b6
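Among the names exported above, wrap_non_picklable_objects is the least obvious one. A sketch of using it to ship a nested function to the workers, under the assumption that the wrapper remains directly callable (which is how loky documents it):

from joblib.externals.loky import get_reusable_executor, wrap_non_picklable_objects


def make_adder(offset):
    # A nested function is not picklable by the standard pickle module;
    # the wrapper forces cloudpickle-based serialization instead.
    @wrap_non_picklable_objects
    def add(x):
        return x + offset

    return add


if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=2)
    add_ten = make_adder(10)
    print(list(executor.map(add_ten, range(3))))  # [10, 11, 12]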
###############################################################################\n# Customizable Pickler with some basic reducers\n#\n# author: Thomas Moreau\n#\n# adapted from multiprocessing/reduction.py (17/02/2017)\n# * Replace the ForkingPickler with a similar _LokyPickler,\n# * Add CustomizableLokyPickler to allow customizing pickling process\n# on the fly.\n#\nimport copyreg\nimport io\nimport functools\nimport types\nimport sys\nimport os\n\nfrom multiprocessing import util\nfrom pickle import loads, HIGHEST_PROTOCOL\n\n###############################################################################\n# Enable custom pickling in Loky.\n\n_dispatch_table = {}\n\n\ndef register(type_, reduce_function):\n _dispatch_table[type_] = reduce_function\n\n\n###############################################################################\n# Registers extra pickling routines to improve picklization for loky\n\n\n# make methods picklable\ndef _reduce_method(m):\n if m.__self__ is None:\n return getattr, (m.__class__, m.__func__.__name__)\n else:\n return getattr, (m.__self__, m.__func__.__name__)\n\n\nclass _C:\n def f(self):\n pass\n\n @classmethod\n def h(cls):\n pass\n\n\nregister(type(_C().f), _reduce_method)\nregister(type(_C.h), _reduce_method)\n\n\ndef _reduce_method_descriptor(m):\n return getattr, (m.__objclass__, m.__name__)\n\n\nregister(type(list.append), _reduce_method_descriptor)\nregister(type(int.__add__), _reduce_method_descriptor)\n\n\n# Make partial func pickable\ndef _reduce_partial(p):\n return _rebuild_partial, (p.func, p.args, p.keywords or {})\n\n\ndef _rebuild_partial(func, args, keywords):\n return functools.partial(func, *args, **keywords)\n\n\nregister(functools.partial, _reduce_partial)\n\nif sys.platform != "win32":\n from ._posix_reduction import _mk_inheritable # noqa: F401\nelse:\n from . 
import _win_reduction # noqa: F401\n\n# global variable to change the pickler behavior\ntry:\n from joblib.externals import cloudpickle # noqa: F401\n\n DEFAULT_ENV = "cloudpickle"\nexcept ImportError:\n # If cloudpickle is not present, fallback to pickle\n DEFAULT_ENV = "pickle"\n\nENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)\n_LokyPickler = None\n_loky_pickler_name = None\n\n\ndef set_loky_pickler(loky_pickler=None):\n global _LokyPickler, _loky_pickler_name\n\n if loky_pickler is None:\n loky_pickler = ENV_LOKY_PICKLER\n\n loky_pickler_cls = None\n\n # The default loky_pickler is cloudpickle\n if loky_pickler in ["", None]:\n loky_pickler = "cloudpickle"\n\n if loky_pickler == _loky_pickler_name:\n return\n\n if loky_pickler == "cloudpickle":\n from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls\n else:\n try:\n from importlib import import_module\n\n module_pickle = import_module(loky_pickler)\n loky_pickler_cls = module_pickle.Pickler\n except (ImportError, AttributeError) as e:\n extra_info = (\n "\nThis error occurred while setting loky_pickler to"\n f" '{loky_pickler}', as required by the env variable "\n "LOKY_PICKLER or the function set_loky_pickler."\n )\n e.args = (e.args[0] + extra_info,) + e.args[1:]\n e.msg = e.args[0]\n raise e\n\n util.debug(\n f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for "\n "serialization."\n )\n\n class CustomizablePickler(loky_pickler_cls):\n _loky_pickler_cls = loky_pickler_cls\n\n def _set_dispatch_table(self, dispatch_table):\n for ancestor_class in self._loky_pickler_cls.mro():\n dt_attribute = getattr(ancestor_class, "dispatch_table", None)\n if isinstance(dt_attribute, types.MemberDescriptorType):\n # Ancestor class (typically _pickle.Pickler) has a\n # member_descriptor for its "dispatch_table" attribute. 
Use\n # it to set the dispatch_table as a member instead of a\n # dynamic attribute in the __dict__ of the instance,\n # otherwise it will not be taken into account by the C\n # implementation of the dump method if a subclass defines a\n # class-level dispatch_table attribute as was done in\n # cloudpickle 1.6.0:\n # https://github.com/joblib/loky/pull/260\n dt_attribute.__set__(self, dispatch_table)\n break\n\n # On top of member descriptor set, also use setattr such that code\n # that directly access self.dispatch_table gets a consistent view\n # of the same table.\n self.dispatch_table = dispatch_table\n\n def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):\n loky_pickler_cls.__init__(self, writer, protocol=protocol)\n if reducers is None:\n reducers = {}\n\n if hasattr(self, "dispatch_table"):\n # Force a copy that we will update without mutating the\n # any class level defined dispatch_table.\n loky_dt = dict(self.dispatch_table)\n else:\n # Use standard reducers as bases\n loky_dt = copyreg.dispatch_table.copy()\n\n # Register loky specific reducers\n loky_dt.update(_dispatch_table)\n\n # Set the new dispatch table, taking care of the fact that we\n # need to use the member_descriptor when we inherit from a\n # subclass of the C implementation of the Pickler base class\n # with an class level dispatch_table attribute.\n self._set_dispatch_table(loky_dt)\n\n # Register the reducers\n for type, reduce_func in reducers.items():\n self.register(type, reduce_func)\n\n def register(self, type, reduce_func):\n """Attach a reducer function to a given type in the dispatch table."""\n self.dispatch_table[type] = reduce_func\n\n _LokyPickler = CustomizablePickler\n _loky_pickler_name = loky_pickler\n\n\ndef get_loky_pickler_name():\n global _loky_pickler_name\n return _loky_pickler_name\n\n\ndef get_loky_pickler():\n global _LokyPickler\n return _LokyPickler\n\n\n# Set it to its default value\nset_loky_pickler()\n\n\ndef dump(obj, file, reducers=None, protocol=None):\n """Replacement for pickle.dump() using _LokyPickler."""\n global _LokyPickler\n _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)\n\n\ndef dumps(obj, reducers=None, protocol=None):\n global _LokyPickler\n\n buf = io.BytesIO()\n dump(obj, buf, reducers=reducers, protocol=protocol)\n return buf.getbuffer()\n\n\n__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]\n\nif sys.platform == "win32":\n from multiprocessing.reduction import duplicate\n\n __all__ += ["duplicate"]\n
.venv\Lib\site-packages\joblib\externals\loky\backend\reduction.py
reduction.py
Python
6,926
0.95
0.201794
0.254658
node-utils
981
2023-08-21T18:22:43.299446
GPL-3.0
false
ddc0e6536f01c804408524bd1bd44266
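A minimal sketch of the pickler switch described above; set_loky_pickler accepts the name of any importable module exposing a Pickler class, and calling it with no argument falls back to the default (cloudpickle, or the LOKY_PICKLER environment variable):

from joblib.externals.loky import get_reusable_executor, set_loky_pickler

# Use the standard-library pickler when every task and argument is plainly
# picklable (module-level functions, builtin types, ...), trading flexibility
# for lower serialization overhead.
set_loky_pickler("pickle")

if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=2)
    print(list(executor.map(abs, [-1, -2, -3])))

# Restore the default behaviour.
set_loky_pickler()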
###############################################################################\n# Prepares and processes the data to setup the new process environment\n#\n# author: Thomas Moreau and Olivier Grisel\n#\n# adapted from multiprocessing/spawn.py (17/02/2017)\n# * Improve logging data\n#\nimport os\nimport sys\nimport runpy\nimport textwrap\nimport types\nfrom multiprocessing import process, util\n\n\nif sys.platform != "win32":\n WINEXE = False\n WINSERVICE = False\nelse:\n import msvcrt\n from multiprocessing.reduction import duplicate\n\n WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False)\n WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")\n\nif WINSERVICE:\n _python_exe = os.path.join(sys.exec_prefix, "python.exe")\nelse:\n _python_exe = sys.executable\n\n\ndef get_executable():\n return _python_exe\n\n\ndef _check_not_importing_main():\n if getattr(process.current_process(), "_inheriting", False):\n raise RuntimeError(\n textwrap.dedent(\n """\\n An attempt has been made to start a new process before the\n current process has finished its bootstrapping phase.\n\n This probably means that you are not using fork to start your\n child processes and you have forgotten to use the proper idiom\n in the main module:\n\n if __name__ == '__main__':\n freeze_support()\n ...\n\n The "freeze_support()" line can be omitted if the program\n is not going to be frozen to produce an executable."""\n )\n )\n\n\ndef get_preparation_data(name, init_main_module=True):\n """Return info about parent needed by child to unpickle process object."""\n _check_not_importing_main()\n d = dict(\n log_to_stderr=util._log_to_stderr,\n authkey=bytes(process.current_process().authkey),\n name=name,\n sys_argv=sys.argv,\n orig_dir=process.ORIGINAL_DIR,\n dir=os.getcwd(),\n )\n\n # Send sys_path and make sure the current directory will not be changed\n d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path]\n\n # Make sure to pass the information if the multiprocessing logger is active\n if util._logger is not None:\n d["log_level"] = util._logger.getEffectiveLevel()\n if util._logger.handlers:\n h = util._logger.handlers[0]\n d["log_fmt"] = h.formatter._fmt\n\n # Tell the child how to communicate with the resource_tracker\n from .resource_tracker import _resource_tracker\n\n _resource_tracker.ensure_running()\n if sys.platform == "win32":\n d["tracker_fd"] = msvcrt.get_osfhandle(_resource_tracker._fd)\n else:\n d["tracker_fd"] = _resource_tracker._fd\n\n if os.name == "posix":\n # joblib/loky#242: allow loky processes to retrieve the resource\n # tracker of their parent in case the child processes depickles\n # shared_memory objects, that are still tracked by multiprocessing's\n # resource_tracker by default.\n # XXX: this is a workaround that may be error prone: in the future, it\n # would be better to have loky subclass multiprocessing's shared_memory\n # to force registration of shared_memory segments via loky's\n # resource_tracker.\n from multiprocessing.resource_tracker import (\n _resource_tracker as mp_resource_tracker,\n )\n\n # multiprocessing's resource_tracker must be running before loky\n # process is created (othewise the child won't be able to use it if it\n # is created later on)\n mp_resource_tracker.ensure_running()\n d["mp_tracker_fd"] = mp_resource_tracker._fd\n\n # Figure out whether to initialise main in the subprocess as a module\n # or through direct execution (or to leave it alone entirely)\n if init_main_module:\n main_module = 
sys.modules["__main__"]\n try:\n main_mod_name = getattr(main_module.__spec__, "name", None)\n except BaseException:\n main_mod_name = None\n if main_mod_name is not None:\n d["init_main_from_name"] = main_mod_name\n elif sys.platform != "win32" or (not WINEXE and not WINSERVICE):\n main_path = getattr(main_module, "__file__", None)\n if main_path is not None:\n if (\n not os.path.isabs(main_path)\n and process.ORIGINAL_DIR is not None\n ):\n main_path = os.path.join(process.ORIGINAL_DIR, main_path)\n d["init_main_from_path"] = os.path.normpath(main_path)\n\n return d\n\n\n#\n# Prepare current process\n#\nold_main_modules = []\n\n\ndef prepare(data, parent_sentinel=None):\n """Try to get current process ready to unpickle process object."""\n if "name" in data:\n process.current_process().name = data["name"]\n\n if "authkey" in data:\n process.current_process().authkey = data["authkey"]\n\n if "log_to_stderr" in data and data["log_to_stderr"]:\n util.log_to_stderr()\n\n if "log_level" in data:\n util.get_logger().setLevel(data["log_level"])\n\n if "log_fmt" in data:\n import logging\n\n util.get_logger().handlers[0].setFormatter(\n logging.Formatter(data["log_fmt"])\n )\n\n if "sys_path" in data:\n sys.path = data["sys_path"]\n\n if "sys_argv" in data:\n sys.argv = data["sys_argv"]\n\n if "dir" in data:\n os.chdir(data["dir"])\n\n if "orig_dir" in data:\n process.ORIGINAL_DIR = data["orig_dir"]\n\n if "mp_tracker_fd" in data:\n from multiprocessing.resource_tracker import (\n _resource_tracker as mp_resource_tracker,\n )\n\n mp_resource_tracker._fd = data["mp_tracker_fd"]\n if "tracker_fd" in data:\n from .resource_tracker import _resource_tracker\n\n if sys.platform == "win32":\n handle = data["tracker_fd"]\n handle = duplicate(handle, source_process=parent_sentinel)\n _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)\n else:\n _resource_tracker._fd = data["tracker_fd"]\n\n if "init_main_from_name" in data:\n _fixup_main_from_name(data["init_main_from_name"])\n elif "init_main_from_path" in data:\n _fixup_main_from_path(data["init_main_from_path"])\n\n\n# Multiprocessing module helpers to fix up the main module in\n# spawned subprocesses\ndef _fixup_main_from_name(mod_name):\n # __main__.py files for packages, directories, zip archives, etc, run\n # their "main only" code unconditionally, so we don't even try to\n # populate anything in __main__, nor do we make any changes to\n # __main__ attributes\n current_main = sys.modules["__main__"]\n if mod_name == "__main__" or mod_name.endswith(".__main__"):\n return\n\n # If this process was forked, __main__ may already be populated\n if getattr(current_main.__spec__, "name", None) == mod_name:\n return\n\n # Otherwise, __main__ may contain some non-main code where we need to\n # support unpickling it properly. 
We rerun it as __mp_main__ and make\n # the normal __main__ an alias to that\n old_main_modules.append(current_main)\n main_module = types.ModuleType("__mp_main__")\n main_content = runpy.run_module(\n mod_name, run_name="__mp_main__", alter_sys=True\n )\n main_module.__dict__.update(main_content)\n sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module\n\n\ndef _fixup_main_from_path(main_path):\n # If this process was forked, __main__ may already be populated\n current_main = sys.modules["__main__"]\n\n # Unfortunately, the main ipython launch script historically had no\n # "if __name__ == '__main__'" guard, so we work around that\n # by treating it like a __main__.py file\n # See https://github.com/ipython/ipython/issues/4698\n main_name = os.path.splitext(os.path.basename(main_path))[0]\n if main_name == "ipython":\n return\n\n # Otherwise, if __file__ already has the setting we expect,\n # there's nothing more to do\n if getattr(current_main, "__file__", None) == main_path:\n return\n\n # If the parent process has sent a path through rather than a module\n # name we assume it is an executable script that may contain\n # non-main code that needs to be executed\n old_main_modules.append(current_main)\n main_module = types.ModuleType("__mp_main__")\n main_content = runpy.run_path(main_path, run_name="__mp_main__")\n main_module.__dict__.update(main_content)\n sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module\n
.venv\Lib\site-packages\joblib\externals\loky\backend\spawn.py
spawn.py
Python
8,626
0.95
0.184426
0.238579
python-kit
588
2024-01-23T05:20:03.072646
Apache-2.0
false
bf72b471757f5c86037ac9edbf9af452
import os\nimport sys\nimport time\nimport errno\nimport signal\nimport warnings\nimport subprocess\nimport traceback\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\n\ndef kill_process_tree(process, use_psutil=True):\n """Terminate process and its descendants with SIGKILL"""\n if use_psutil and psutil is not None:\n _kill_process_tree_with_psutil(process)\n else:\n _kill_process_tree_without_psutil(process)\n\n\ndef recursive_terminate(process, use_psutil=True):\n warnings.warn(\n "recursive_terminate is deprecated in loky 3.2, use kill_process_tree"\n "instead",\n DeprecationWarning,\n )\n kill_process_tree(process, use_psutil=use_psutil)\n\n\ndef _kill_process_tree_with_psutil(process):\n try:\n descendants = psutil.Process(process.pid).children(recursive=True)\n except psutil.NoSuchProcess:\n return\n\n # Kill the descendants in reverse order to avoid killing the parents before\n # the descendant in cases where there are more processes nested.\n for descendant in descendants[::-1]:\n try:\n descendant.kill()\n except psutil.NoSuchProcess:\n pass\n\n try:\n psutil.Process(process.pid).kill()\n except psutil.NoSuchProcess:\n pass\n process.join()\n\n\ndef _kill_process_tree_without_psutil(process):\n """Terminate a process and its descendants."""\n try:\n if sys.platform == "win32":\n _windows_taskkill_process_tree(process.pid)\n else:\n _posix_recursive_kill(process.pid)\n except Exception: # pragma: no cover\n details = traceback.format_exc()\n warnings.warn(\n "Failed to kill subprocesses on this platform. Please install"\n "psutil: https://github.com/giampaolo/psutil\n"\n f"Details:\n{details}"\n )\n # In case we cannot introspect or kill the descendants, we fall back to\n # only killing the main process.\n #\n # Note: on Windows, process.kill() is an alias for process.terminate()\n # which in turns calls the Win32 API function TerminateProcess().\n process.kill()\n process.join()\n\n\ndef _windows_taskkill_process_tree(pid):\n # On windows, the taskkill function with option `/T` terminate a given\n # process pid and its children.\n try:\n subprocess.check_output(\n ["taskkill", "/F", "/T", "/PID", str(pid)], stderr=None\n )\n except subprocess.CalledProcessError as e:\n # In Windows, taskkill returns 128, 255 for no process found.\n if e.returncode not in [128, 255]:\n # Let's raise to let the caller log the error details in a\n # warning and only kill the root process.\n raise # pragma: no cover\n\n\ndef _kill(pid):\n # Not all systems (e.g. Windows) have a SIGKILL, but the C specification\n # mandates a SIGTERM signal. 
While Windows is handled specifically above,\n # let's try to be safe for other hypothetic platforms that only have\n # SIGTERM without SIGKILL.\n kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)\n try:\n os.kill(pid, kill_signal)\n except OSError as e:\n # if OSError is raised with [Errno 3] no such process, the process\n # is already terminated, else, raise the error and let the top\n # level function raise a warning and retry to kill the process.\n if e.errno != errno.ESRCH:\n raise # pragma: no cover\n\n\ndef _posix_recursive_kill(pid):\n """Recursively kill the descendants of a process before killing it."""\n try:\n children_pids = subprocess.check_output(\n ["pgrep", "-P", str(pid)], stderr=None, text=True\n )\n except subprocess.CalledProcessError as e:\n # `ps` returns 1 when no child process has been found\n if e.returncode == 1:\n children_pids = ""\n else:\n raise # pragma: no cover\n\n # Decode the result, split the cpid and remove the trailing line\n for cpid in children_pids.splitlines():\n cpid = int(cpid)\n _posix_recursive_kill(cpid)\n\n _kill(pid)\n\n\ndef get_exitcodes_terminated_worker(processes):\n """Return a formatted string with the exitcodes of terminated workers.\n\n If necessary, wait (up to .25s) for the system to correctly set the\n exitcode of one terminated worker.\n """\n patience = 5\n\n # Catch the exitcode of the terminated workers. There should at least be\n # one. If not, wait a bit for the system to correctly set the exitcode of\n # the terminated worker.\n exitcodes = [\n p.exitcode for p in list(processes.values()) if p.exitcode is not None\n ]\n while not exitcodes and patience > 0:\n patience -= 1\n exitcodes = [\n p.exitcode\n for p in list(processes.values())\n if p.exitcode is not None\n ]\n time.sleep(0.05)\n\n return _format_exitcodes(exitcodes)\n\n\ndef _format_exitcodes(exitcodes):\n """Format a list of exit code with names of the signals if possible"""\n str_exitcodes = [\n f"{_get_exitcode_name(e)}({e})" for e in exitcodes if e is not None\n ]\n return "{" + ", ".join(str_exitcodes) + "}"\n\n\ndef _get_exitcode_name(exitcode):\n if sys.platform == "win32":\n # The exitcode are unreliable on windows (see bpo-31863).\n # For this case, return UNKNOWN\n return "UNKNOWN"\n\n if exitcode < 0:\n try:\n import signal\n\n return signal.Signals(-exitcode).name\n except ValueError:\n return "UNKNOWN"\n elif exitcode != 255:\n # The exitcode are unreliable on forkserver were 255 is always returned\n # (see bpo-30589). For this case, return UNKNOWN\n return "EXIT"\n\n return "UNKNOWN"\n
.venv\Lib\site-packages\joblib\externals\loky\backend\utils.py
utils.py
Python
5,757
0.95
0.254144
0.186667
react-lib
685
2024-03-13T03:25:37.233468
BSD-3-Clause
false
2c81fdc6540d126cfcd296152424a0cb
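A minimal usage sketch for the tree-kill helper defined in utils.py above. It assumes the vendored import path shown in this record and uses a plain multiprocessing.Process as the target, which is enough because kill_process_tree only needs .pid and .join(); the sleeping worker is illustrative.

import time
from multiprocessing import Process

from joblib.externals.loky.backend.utils import kill_process_tree


def _sleep_forever():
    # Stand-in worker that would otherwise outlive its parent.
    time.sleep(3600)


if __name__ == "__main__":
    p = Process(target=_sleep_forever)
    p.start()
    # Force-kill the worker and anything it spawned; falls back to
    # taskkill (Windows) or pgrep (POSIX) when psutil is not installed.
    kill_process_tree(p)
    assert p.exitcode is not None  # the process has been joined and terminated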
###############################################################################\n# Extra reducers for Windows system and connections objects\n#\n# author: Thomas Moreau and Olivier Grisel\n#\n# adapted from multiprocessing/reduction.py (17/02/2017)\n# * Add adapted reduction for LokyProcesses and socket/PipeConnection\n#\nimport socket\nfrom multiprocessing import connection\nfrom multiprocessing.reduction import _reduce_socket\n\nfrom .reduction import register\n\n# register reduction for win32 communication objects\nregister(socket.socket, _reduce_socket)\nregister(connection.Connection, connection.reduce_connection)\nregister(connection.PipeConnection, connection.reduce_pipe_connection)\n
.venv\Lib\site-packages\joblib\externals\loky\backend\_win_reduction.py
_win_reduction.py
Python
683
0.95
0.166667
0.5625
node-utils
880
2023-11-05T11:00:07.316603
BSD-3-Clause
false
3cf710bd5e32d1ffe0a27eb9d1b37a9f
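The module above registers reducers for Windows socket and pipe objects with loky's pickler. Below is a hedged sketch of the same pattern using the standard library's multiprocessing.reduction.ForkingPickler instead of loky's register(); the Token class and its rebuild helper are made up for illustration.

from multiprocessing.reduction import ForkingPickler


class Token:
    # Tiny example type whose inter-process pickling we want to customize.
    def __init__(self, value):
        self.value = value


def _rebuild_token(value):
    return Token(value)


def _reduce_token(token):
    # A reducer returns (callable, args), like the reducers registered above.
    return _rebuild_token, (token.value,)


# Map the type to its reduce function, mirroring register(socket.socket, ...).
ForkingPickler.register(Token, _reduce_token)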
import os\nfrom multiprocessing import synchronize\n\nfrom .context import get_context\n\n\ndef _make_name():\n return f"/loky-{os.getpid()}-{next(synchronize.SemLock._rand)}"\n\n\n# monkey patch the name creation for multiprocessing\nsynchronize.SemLock._make_name = staticmethod(_make_name)\n\n__all__ = ["get_context"]\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__init__.py
__init__.py
Python
312
0.95
0.142857
0.125
awesome-app
368
2024-02-21T06:30:00.989810
MIT
false
ba53d800ab928c984b2bcde6b343ef5a
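For illustration only, a standalone sketch of the semaphore-name scheme installed by the monkey patch above; the itertools counter is a simple stand-in for synchronize.SemLock._rand, which the real patch draws from.

import itertools
import os

_counter = itertools.count()  # stand-in for synchronize.SemLock._rand


def make_semlock_name():
    # The stand-in yields names such as "/loky-1234-0", unique per process.
    return f"/loky-{os.getpid()}-{next(_counter)}"


print(make_semlock_name())
print(make_semlock_name())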
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\context.cpython-313.pyc
context.cpython-313.pyc
Other
14,508
0.95
0.044586
0.056338
vue-tools
403
2023-10-17T03:23:56.853959
MIT
false
ec4d09da6776395e43ecc6b4a8007055
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\fork_exec.cpython-313.pyc
fork_exec.cpython-313.pyc
Other
1,956
0.8
0
0.030303
react-lib
430
2024-08-24T07:37:33.299639
MIT
false
2f3a3481095f7a0988e59960196e5f48
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\popen_loky_posix.cpython-313.pyc
popen_loky_posix.cpython-313.pyc
Other
8,999
0.95
0.02
0
python-kit
865
2025-06-25T12:47:13.713575
Apache-2.0
false
8cd44a827732603b40e7354ac7526509
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\popen_loky_win32.cpython-313.pyc
popen_loky_win32.cpython-313.pyc
Other
7,049
0.95
0.011494
0
node-utils
502
2024-06-01T09:16:55.800970
GPL-3.0
false
1dfa83a109801bfb550bd96c2725acaa
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\process.cpython-313.pyc
process.cpython-313.pyc
Other
2,704
0.8
0.035714
0
awesome-app
537
2023-11-21T12:55:55.877022
MIT
false
8a30d4c4026fb2ba26b2a13707aaef8b
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\queues.cpython-313.pyc
queues.cpython-313.pyc
Other
8,190
0.8
0
0
vue-tools
918
2024-01-08T14:00:24.764268
GPL-3.0
false
ea7450b105853aae5c3cbf3271d48c9c
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\reduction.cpython-313.pyc
reduction.cpython-313.pyc
Other
7,274
0.95
0.076923
0.015873
react-lib
614
2024-12-16T19:07:54.287218
GPL-3.0
false
470d6832f72c428e881c95b943b3c889
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\resource_tracker.cpython-313.pyc
resource_tracker.cpython-313.pyc
Other
12,857
0.95
0.037313
0
awesome-app
110
2024-07-16T09:31:04.070878
GPL-3.0
false
d8622ba0399ad7e798158d0f16cb82ac
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\spawn.cpython-313.pyc
spawn.cpython-313.pyc
Other
8,736
0.95
0.020833
0
vue-tools
139
2024-12-07T14:51:04.606207
GPL-3.0
false
8aae691dbc5911f31aa36c5bb0c63663
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\synchronize.cpython-313.pyc
synchronize.cpython-313.pyc
Other
18,877
0.95
0.012422
0
awesome-app
612
2025-05-26T23:34:19.695566
BSD-3-Clause
false
4a46d0302aea9aadbfba7d160f15bee3
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
6,509
0.8
0.02381
0.026667
react-lib
951
2024-06-07T09:58:40.749671
GPL-3.0
false
9006daad25d65c725cd9f40fbd6eb9a5
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\_posix_reduction.cpython-313.pyc
_posix_reduction.cpython-313.pyc
Other
2,684
0.8
0.033333
0
node-utils
71
2024-05-14T14:18:25.143010
GPL-3.0
false
bf9601a8813b5fd62cb43cc7d1c4e5fd
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\_win_reduction.cpython-313.pyc
_win_reduction.cpython-313.pyc
Other
682
0.7
0
0
vue-tools
71
2024-09-25T01:55:54.542984
GPL-3.0
false
86b0ec0e8ce1ff79aed831b0585479a1
\n\n
.venv\Lib\site-packages\joblib\externals\loky\backend\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
747
0.7
0
0
react-lib
926
2024-07-31T11:14:45.921946
Apache-2.0
false
91fc9997e751ef405540e5e83eb286f3
\n\n
.venv\Lib\site-packages\joblib\externals\loky\__pycache__\cloudpickle_wrapper.cpython-313.pyc
cloudpickle_wrapper.cpython-313.pyc
Other
4,882
0.95
0.041667
0
awesome-app
223
2024-12-01T16:56:48.397265
Apache-2.0
false
7a5a4cc3b4afaedf25e08b90955bc8b1
\n\n
.venv\Lib\site-packages\joblib\externals\loky\__pycache__\initializers.cpython-313.pyc
initializers.cpython-313.pyc
Other
3,178
0.8
0
0
node-utils
33
2024-01-15T14:42:44.801242
Apache-2.0
false
48a824169296144f78c597eee63402e9
\n\n
.venv\Lib\site-packages\joblib\externals\loky\__pycache__\process_executor.cpython-313.pyc
process_executor.cpython-313.pyc
Other
52,350
0.95
0.037453
0.010331
node-utils
939
2024-09-08T19:44:11.222094
BSD-3-Clause
false
4fb16c63c1760aee50de219092e286de
\n\n
.venv\Lib\site-packages\joblib\externals\loky\__pycache__\reusable_executor.cpython-313.pyc
reusable_executor.cpython-313.pyc
Other
10,863
0.95
0.034965
0.007813
react-lib
142
2023-07-19T22:28:24.514258
MIT
false
198295447e8d40fdd8609739ce592939
\n\n
.venv\Lib\site-packages\joblib\externals\loky\__pycache__\_base.cpython-313.pyc
_base.cpython-313.pyc
Other
991
0.8
0.111111
0
python-kit
305
2024-10-16T23:37:23.294034
GPL-3.0
false
c18812f22ca6932a0488eb39e0444a94
\n\n
.venv\Lib\site-packages\joblib\externals\loky\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,213
0.85
0.125
0
node-utils
730
2024-12-22T10:54:53.168603
MIT
false
de748d023def0f96b63962ea4b4ea079
\n\n
.venv\Lib\site-packages\joblib\externals\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
191
0.7
0
0
react-lib
297
2024-08-09T21:01:31.489685
Apache-2.0
false
1b73eb5866daa338da6560edb0f7eb4c
"""\nSmall utilities for testing.\n"""\n\nimport gc\nimport os\nimport sys\nimport sysconfig\n\nfrom joblib._multiprocessing_helpers import mp\nfrom joblib.testing import SkipTest, skipif\n\ntry:\n import lz4\nexcept ImportError:\n lz4 = None\n\n# TODO straight removal since in joblib.test.common?\nIS_PYPY = hasattr(sys, "pypy_version_info")\nIS_GIL_DISABLED = (\n sysconfig.get_config_var("Py_GIL_DISABLED") and not sys._is_gil_enabled()\n)\n\n# A decorator to run tests only when numpy is available\ntry:\n import numpy as np\n\n def with_numpy(func):\n """A decorator to skip tests requiring numpy."""\n return func\n\nexcept ImportError:\n\n def with_numpy(func):\n """A decorator to skip tests requiring numpy."""\n\n def my_func():\n raise SkipTest("Test requires numpy")\n\n return my_func\n\n np = None\n\n# TODO: Turn this back on after refactoring yield based tests in test_hashing\n# with_numpy = skipif(not np, reason='Test requires numpy.')\n\n# we use memory_profiler library for memory consumption checks\ntry:\n from memory_profiler import memory_usage\n\n def with_memory_profiler(func):\n """A decorator to skip tests requiring memory_profiler."""\n return func\n\n def memory_used(func, *args, **kwargs):\n """Compute memory usage when executing func."""\n gc.collect()\n mem_use = memory_usage((func, args, kwargs), interval=0.001)\n return max(mem_use) - min(mem_use)\n\nexcept ImportError:\n\n def with_memory_profiler(func):\n """A decorator to skip tests requiring memory_profiler."""\n\n def dummy_func():\n raise SkipTest("Test requires memory_profiler.")\n\n return dummy_func\n\n memory_usage = memory_used = None\n\n\nwith_multiprocessing = skipif(mp is None, reason="Needs multiprocessing to run.")\n\n\nwith_dev_shm = skipif(\n not os.path.exists("/dev/shm"),\n reason="This test requires a large /dev/shm shared memory fs.",\n)\n\nwith_lz4 = skipif(lz4 is None, reason="Needs lz4 compression to run")\n\nwithout_lz4 = skipif(lz4 is not None, reason="Needs lz4 not being installed to run")\n
.venv\Lib\site-packages\joblib\test\common.py
common.py
Python
2,102
0.95
0.142857
0.086207
react-lib
162
2024-10-21T11:41:06.191571
Apache-2.0
true
63f138c811cf7771ca4f191fd77715a3
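The decorators in common.py above skip tests when optional dependencies are missing. A sketch of the same pattern written with plain pytest so it runs outside joblib's test suite; the fancylib module name is hypothetical.

import pytest

try:
    import fancylib  # hypothetical optional dependency
except ImportError:
    fancylib = None

# Counterpart of with_lz4 / with_numpy: skip at collection time when missing.
with_fancylib = pytest.mark.skipif(fancylib is None, reason="Needs fancylib to run")


@with_fancylib
def test_uses_fancylib():
    assert fancylib is not None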
def return_slice_of_data(arr, start_idx, end_idx):\n return arr[start_idx:end_idx]\n\n\ndef print_filename_and_raise(arr):\n from joblib._memmapping_reducer import _get_backing_memmap\n\n print(_get_backing_memmap(arr).filename)\n raise ValueError\n
.venv\Lib\site-packages\joblib\test\testutils.py
testutils.py
Python
252
0.85
0.222222
0
python-kit
20
2025-03-19T18:17:54.192879
MIT
true
4d50586dab684789e340511f9e5a082a
import mmap\n\nfrom joblib import Parallel, delayed\nfrom joblib.backports import concurrency_safe_rename, make_memmap\nfrom joblib.test.common import with_numpy\nfrom joblib.testing import parametrize\n\n\n@with_numpy\ndef test_memmap(tmpdir):\n fname = tmpdir.join("test.mmap").strpath\n size = 5 * mmap.ALLOCATIONGRANULARITY\n offset = mmap.ALLOCATIONGRANULARITY + 1\n memmap_obj = make_memmap(fname, shape=size, mode="w+", offset=offset)\n assert memmap_obj.offset == offset\n\n\n@parametrize("dst_content", [None, "dst content"])\n@parametrize("backend", [None, "threading"])\ndef test_concurrency_safe_rename(tmpdir, dst_content, backend):\n src_paths = [tmpdir.join("src_%d" % i) for i in range(4)]\n for src_path in src_paths:\n src_path.write("src content")\n dst_path = tmpdir.join("dst")\n if dst_content is not None:\n dst_path.write(dst_content)\n\n Parallel(n_jobs=4, backend=backend)(\n delayed(concurrency_safe_rename)(src_path.strpath, dst_path.strpath)\n for src_path in src_paths\n )\n assert dst_path.exists()\n assert dst_path.read() == "src content"\n for src_path in src_paths:\n assert not src_path.exists()\n
.venv\Lib\site-packages\joblib\test\test_backports.py
test_backports.py
Python
1,175
0.85
0.2
0
node-utils
62
2025-01-04T01:55:33.586863
BSD-3-Clause
true
8d8fe060c318ab1d1905fb80ab2e8b2b
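A single-threaded usage sketch of the rename helper exercised by test_concurrency_safe_rename above; only the temporary file names are illustrative.

import os
import tempfile

from joblib.backports import concurrency_safe_rename

with tempfile.TemporaryDirectory() as d:
    src = os.path.join(d, "src")
    dst = os.path.join(d, "dst")
    with open(src, "w") as f:
        f.write("src content")
    with open(dst, "w") as f:
        f.write("dst content")
    # Overwrites dst even though it already exists (cf. the dst_content cases above).
    concurrency_safe_rename(src, dst)
    with open(dst) as f:
        assert f.read() == "src content"
    assert not os.path.exists(src)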
import os\n\nfrom joblib._parallel_backends import (\n LokyBackend,\n MultiprocessingBackend,\n ThreadingBackend,\n)\nfrom joblib.parallel import (\n BACKENDS,\n DEFAULT_BACKEND,\n EXTERNAL_BACKENDS,\n Parallel,\n delayed,\n parallel_backend,\n parallel_config,\n)\nfrom joblib.test.common import np, with_multiprocessing, with_numpy\nfrom joblib.test.test_parallel import check_memmap\nfrom joblib.testing import parametrize, raises\n\n\n@parametrize("context", [parallel_config, parallel_backend])\ndef test_global_parallel_backend(context):\n default = Parallel()._backend\n\n pb = context("threading")\n try:\n assert isinstance(Parallel()._backend, ThreadingBackend)\n finally:\n pb.unregister()\n assert type(Parallel()._backend) is type(default)\n\n\n@parametrize("context", [parallel_config, parallel_backend])\ndef test_external_backends(context):\n def register_foo():\n BACKENDS["foo"] = ThreadingBackend\n\n EXTERNAL_BACKENDS["foo"] = register_foo\n try:\n with context("foo"):\n assert isinstance(Parallel()._backend, ThreadingBackend)\n finally:\n del EXTERNAL_BACKENDS["foo"]\n\n\n@with_numpy\n@with_multiprocessing\ndef test_parallel_config_no_backend(tmpdir):\n # Check that parallel_config allows to change the config\n # even if no backend is set.\n with parallel_config(n_jobs=2, max_nbytes=1, temp_folder=tmpdir):\n with Parallel(prefer="processes") as p:\n assert isinstance(p._backend, LokyBackend)\n assert p.n_jobs == 2\n\n # Checks that memmapping is enabled\n p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)\n assert len(os.listdir(tmpdir)) > 0\n\n\n@with_numpy\n@with_multiprocessing\ndef test_parallel_config_params_explicit_set(tmpdir):\n with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir):\n with Parallel(n_jobs=2, prefer="processes", max_nbytes="1M") as p:\n assert isinstance(p._backend, LokyBackend)\n assert p.n_jobs == 2\n\n # Checks that memmapping is disabled\n with raises(TypeError, match="Expected np.memmap instance"):\n p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)\n\n\n@parametrize("param", ["prefer", "require"])\ndef test_parallel_config_bad_params(param):\n # Check that an error is raised when setting a wrong backend\n # hint or constraint\n with raises(ValueError, match=f"{param}=wrong is not a valid"):\n with parallel_config(**{param: "wrong"}):\n Parallel()\n\n\ndef test_parallel_config_constructor_params():\n # Check that an error is raised when backend is None\n # but backend constructor params are given\n with raises(ValueError, match="only supported when backend is not None"):\n with parallel_config(inner_max_num_threads=1):\n pass\n\n with raises(ValueError, match="only supported when backend is not None"):\n with parallel_config(backend_param=1):\n pass\n\n with raises(ValueError, match="only supported when backend is a string"):\n with parallel_config(backend=BACKENDS[DEFAULT_BACKEND], backend_param=1):\n pass\n\n\ndef test_parallel_config_nested():\n # Check that nested configuration retrieves the info from the\n # parent config and do not reset them.\n\n with parallel_config(n_jobs=2):\n p = Parallel()\n assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND])\n assert p.n_jobs == 2\n\n with parallel_config(backend="threading"):\n with parallel_config(n_jobs=2):\n p = Parallel()\n assert isinstance(p._backend, ThreadingBackend)\n assert p.n_jobs == 2\n\n with parallel_config(verbose=100):\n with parallel_config(n_jobs=2):\n p = Parallel()\n assert p.verbose == 100\n assert p.n_jobs == 
2\n\n\n@with_numpy\n@with_multiprocessing\n@parametrize(\n "backend",\n ["multiprocessing", "threading", MultiprocessingBackend(), ThreadingBackend()],\n)\n@parametrize("context", [parallel_config, parallel_backend])\ndef test_threadpool_limitation_in_child_context_error(context, backend):\n with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"):\n context(backend, inner_max_num_threads=1)\n\n\n@parametrize("context", [parallel_config, parallel_backend])\ndef test_parallel_n_jobs_none(context):\n # Check that n_jobs=None is interpreted as "unset" in Parallel\n # non regression test for #1473\n with context(backend="threading", n_jobs=2):\n with Parallel(n_jobs=None) as p:\n assert p.n_jobs == 2\n\n with context(backend="threading"):\n default_n_jobs = Parallel().n_jobs\n with Parallel(n_jobs=None) as p:\n assert p.n_jobs == default_n_jobs\n\n\n@parametrize("context", [parallel_config, parallel_backend])\ndef test_parallel_config_n_jobs_none(context):\n # Check that n_jobs=None is interpreted as "explicitly set" in\n # parallel_(config/backend)\n # non regression test for #1473\n with context(backend="threading", n_jobs=2):\n with context(backend="threading", n_jobs=None):\n # n_jobs=None resets n_jobs to backend's default\n with Parallel() as p:\n assert p.n_jobs == 1\n
.venv\Lib\site-packages\joblib\test\test_config.py
test_config.py
Python
5,255
0.95
0.11465
0.126984
node-utils
858
2025-02-28T16:10:00.918748
GPL-3.0
true
fb899e38a6294bd5709137dd231fdc28
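The behaviour checked above, shown outside the test harness: parameters set in parallel_config become the defaults picked up by Parallel(). A small runnable sketch; the squared-numbers workload is arbitrary.

from joblib import Parallel, delayed, parallel_config

with parallel_config(backend="threading", n_jobs=2):
    # Parallel() inherits the backend and n_jobs from the enclosing config.
    results = Parallel()(delayed(pow)(i, 2) for i in range(4))

assert results == [0, 1, 4, 9]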
"""\nUnit tests for the disk utilities.\n"""\n\n# Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Lars Buitinck\n# Copyright (c) 2010 Gael Varoquaux\n# License: BSD Style, 3 clauses.\n\nfrom __future__ import with_statement\n\nimport array\nimport os\n\nfrom joblib.disk import disk_used, memstr_to_bytes, mkdirp, rm_subdirs\nfrom joblib.testing import parametrize, raises\n\n###############################################################################\n\n\ndef test_disk_used(tmpdir):\n cachedir = tmpdir.strpath\n # Not write a file that is 1M big in this directory, and check the\n # size. The reason we use such a big file is that it makes us robust\n # to errors due to block allocation.\n a = array.array("i")\n sizeof_i = a.itemsize\n target_size = 1024\n n = int(target_size * 1024 / sizeof_i)\n a = array.array("i", n * (1,))\n with open(os.path.join(cachedir, "test"), "wb") as output:\n a.tofile(output)\n assert disk_used(cachedir) >= target_size\n assert disk_used(cachedir) < target_size + 12\n\n\n@parametrize(\n "text,value",\n [\n ("80G", 80 * 1024**3),\n ("1.4M", int(1.4 * 1024**2)),\n ("120M", 120 * 1024**2),\n ("53K", 53 * 1024),\n ],\n)\ndef test_memstr_to_bytes(text, value):\n assert memstr_to_bytes(text) == value\n\n\n@parametrize(\n "text,exception,regex",\n [\n ("fooG", ValueError, r"Invalid literal for size.*fooG.*"),\n ("1.4N", ValueError, r"Invalid literal for size.*1.4N.*"),\n ],\n)\ndef test_memstr_to_bytes_exception(text, exception, regex):\n with raises(exception) as excinfo:\n memstr_to_bytes(text)\n assert excinfo.match(regex)\n\n\ndef test_mkdirp(tmpdir):\n mkdirp(os.path.join(tmpdir.strpath, "ham"))\n mkdirp(os.path.join(tmpdir.strpath, "ham"))\n mkdirp(os.path.join(tmpdir.strpath, "spam", "spam"))\n\n # Not all OSErrors are ignored\n with raises(OSError):\n mkdirp("")\n\n\ndef test_rm_subdirs(tmpdir):\n sub_path = os.path.join(tmpdir.strpath, "subdir_one", "subdir_two")\n full_path = os.path.join(sub_path, "subdir_three")\n mkdirp(os.path.join(full_path))\n\n rm_subdirs(sub_path)\n assert os.path.exists(sub_path)\n assert not os.path.exists(full_path)\n
.venv\Lib\site-packages\joblib\test\test_disk.py
test_disk.py
Python
2,223
0.95
0.1
0.142857
awesome-app
428
2024-03-31T04:43:46.531617
Apache-2.0
true
ad638fee4d95cde20a556ddc6f264ee6
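A direct transcription of the conversion rule that test_memstr_to_bytes checks above, runnable as-is:

from joblib.disk import memstr_to_bytes

# Suffixes are powers of 1024; invalid suffixes raise ValueError.
assert memstr_to_bytes("53K") == 53 * 1024
assert memstr_to_bytes("120M") == 120 * 1024 ** 2
assert memstr_to_bytes("80G") == 80 * 1024 ** 3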
"""\nTest the func_inspect module.\n"""\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Copyright (c) 2009 Gael Varoquaux\n# License: BSD Style, 3 clauses.\n\nimport functools\n\nfrom joblib.func_inspect import (\n _clean_win_chars,\n filter_args,\n format_signature,\n get_func_code,\n get_func_name,\n)\nfrom joblib.memory import Memory\nfrom joblib.test.common import with_numpy\nfrom joblib.testing import fixture, parametrize, raises\n\n\n###############################################################################\n# Module-level functions and fixture, for tests\ndef f(x, y=0):\n pass\n\n\ndef g(x):\n pass\n\n\ndef h(x, y=0, *args, **kwargs):\n pass\n\n\ndef i(x=1):\n pass\n\n\ndef j(x, y, **kwargs):\n pass\n\n\ndef k(*args, **kwargs):\n pass\n\n\ndef m1(x, *, y):\n pass\n\n\ndef m2(x, *, y, z=3):\n pass\n\n\n@fixture(scope="module")\ndef cached_func(tmpdir_factory):\n # Create a Memory object to test decorated functions.\n # We should be careful not to call the decorated functions, so that\n # cache directories are not created in the temp dir.\n cachedir = tmpdir_factory.mktemp("joblib_test_func_inspect")\n mem = Memory(cachedir.strpath)\n\n @mem.cache\n def cached_func_inner(x):\n return x\n\n return cached_func_inner\n\n\nclass Klass(object):\n def f(self, x):\n return x\n\n\n###############################################################################\n# Tests\n\n\n@parametrize(\n "func,args,filtered_args",\n [\n (f, [[], (1,)], {"x": 1, "y": 0}),\n (f, [["x"], (1,)], {"y": 0}),\n (f, [["y"], (0,)], {"x": 0}),\n (f, [["y"], (0,), {"y": 1}], {"x": 0}),\n (f, [["x", "y"], (0,)], {}),\n (f, [[], (0,), {"y": 1}], {"x": 0, "y": 1}),\n (f, [["y"], (), {"x": 2, "y": 1}], {"x": 2}),\n (g, [[], (), {"x": 1}], {"x": 1}),\n (i, [[], (2,)], {"x": 2}),\n ],\n)\ndef test_filter_args(func, args, filtered_args):\n assert filter_args(func, *args) == filtered_args\n\n\ndef test_filter_args_method():\n obj = Klass()\n assert filter_args(obj.f, [], (1,)) == {"x": 1, "self": obj}\n\n\n@parametrize(\n "func,args,filtered_args",\n [\n (h, [[], (1,)], {"x": 1, "y": 0, "*": [], "**": {}}),\n (h, [[], (1, 2, 3, 4)], {"x": 1, "y": 2, "*": [3, 4], "**": {}}),\n (h, [[], (1, 25), {"ee": 2}], {"x": 1, "y": 25, "*": [], "**": {"ee": 2}}),\n (h, [["*"], (1, 2, 25), {"ee": 2}], {"x": 1, "y": 2, "**": {"ee": 2}}),\n ],\n)\ndef test_filter_varargs(func, args, filtered_args):\n assert filter_args(func, *args) == filtered_args\n\n\ntest_filter_kwargs_extra_params = [\n (m1, [[], (1,), {"y": 2}], {"x": 1, "y": 2}),\n (m2, [[], (1,), {"y": 2}], {"x": 1, "y": 2, "z": 3}),\n]\n\n\n@parametrize(\n "func,args,filtered_args",\n [\n (k, [[], (1, 2), {"ee": 2}], {"*": [1, 2], "**": {"ee": 2}}),\n (k, [[], (3, 4)], {"*": [3, 4], "**": {}}),\n ]\n + test_filter_kwargs_extra_params,\n)\ndef test_filter_kwargs(func, args, filtered_args):\n assert filter_args(func, *args) == filtered_args\n\n\ndef test_filter_args_2():\n assert filter_args(j, [], (1, 2), {"ee": 2}) == {"x": 1, "y": 2, "**": {"ee": 2}}\n\n ff = functools.partial(f, 1)\n # filter_args has to special-case partial\n assert filter_args(ff, [], (1,)) == {"*": [1], "**": {}}\n assert filter_args(ff, ["y"], (1,)) == {"*": [1], "**": {}}\n\n\n@parametrize("func,funcname", [(f, "f"), (g, "g"), (cached_func, "cached_func")])\ndef test_func_name(func, funcname):\n # Check that we are not confused by decoration\n # here testcase 'cached_func' is the function itself\n assert get_func_name(func)[1] == funcname\n\n\ndef 
test_func_name_on_inner_func(cached_func):\n # Check that we are not confused by decoration\n # here testcase 'cached_func' is the 'cached_func_inner' function\n # returned by 'cached_func' fixture\n assert get_func_name(cached_func)[1] == "cached_func_inner"\n\n\ndef test_func_name_collision_on_inner_func():\n # Check that two functions defining and caching an inner function\n # with the same do not cause (module, name) collision\n def f():\n def inner_func():\n return # pragma: no cover\n\n return get_func_name(inner_func)\n\n def g():\n def inner_func():\n return # pragma: no cover\n\n return get_func_name(inner_func)\n\n module, name = f()\n other_module, other_name = g()\n\n assert name == other_name\n assert module != other_module\n\n\ndef test_func_inspect_errors():\n # Check that func_inspect is robust and will work on weird objects\n assert get_func_name("a".lower)[-1] == "lower"\n assert get_func_code("a".lower)[1:] == (None, -1)\n ff = lambda x: x # noqa: E731\n assert get_func_name(ff, win_characters=False)[-1] == "<lambda>"\n assert get_func_code(ff)[1] == __file__.replace(".pyc", ".py")\n # Simulate a function defined in __main__\n ff.__module__ = "__main__"\n assert get_func_name(ff, win_characters=False)[-1] == "<lambda>"\n assert get_func_code(ff)[1] == __file__.replace(".pyc", ".py")\n\n\ndef func_with_kwonly_args(a, b, *, kw1="kw1", kw2="kw2"):\n pass\n\n\ndef func_with_signature(a: int, b: int) -> None:\n pass\n\n\ndef test_filter_args_edge_cases():\n assert filter_args(func_with_kwonly_args, [], (1, 2), {"kw1": 3, "kw2": 4}) == {\n "a": 1,\n "b": 2,\n "kw1": 3,\n "kw2": 4,\n }\n\n # filter_args doesn't care about keyword-only arguments so you\n # can pass 'kw1' into *args without any problem\n with raises(ValueError) as excinfo:\n filter_args(func_with_kwonly_args, [], (1, 2, 3), {"kw2": 2})\n excinfo.match("Keyword-only parameter 'kw1' was passed as positional parameter")\n\n assert filter_args(\n func_with_kwonly_args, ["b", "kw2"], (1, 2), {"kw1": 3, "kw2": 4}\n ) == {"a": 1, "kw1": 3}\n\n assert filter_args(func_with_signature, ["b"], (1, 2)) == {"a": 1}\n\n\ndef test_bound_methods():\n """Make sure that calling the same method on two different instances\n of the same class does resolv to different signatures.\n """\n a = Klass()\n b = Klass()\n assert filter_args(a.f, [], (1,)) != filter_args(b.f, [], (1,))\n\n\n@parametrize(\n "exception,regex,func,args",\n [\n (\n ValueError,\n "ignore_lst must be a list of parameters to ignore",\n f,\n ["bar", (None,)],\n ),\n (\n ValueError,\n r"Ignore list: argument \'(.*)\' is not defined",\n g,\n [["bar"], (None,)],\n ),\n (ValueError, "Wrong number of arguments", h, [[]]),\n ],\n)\ndef test_filter_args_error_msg(exception, regex, func, args):\n """Make sure that filter_args returns decent error messages, for the\n sake of the user.\n """\n with raises(exception) as excinfo:\n filter_args(func, *args)\n excinfo.match(regex)\n\n\ndef test_filter_args_no_kwargs_mutation():\n """None-regression test against 0.12.0 changes.\n\n https://github.com/joblib/joblib/pull/75\n\n Make sure filter args doesn't mutate the kwargs dict that gets passed in.\n """\n kwargs = {"x": 0}\n filter_args(g, [], [], kwargs)\n assert kwargs == {"x": 0}\n\n\ndef test_clean_win_chars():\n string = r"C:\foo\bar\main.py"\n mangled_string = _clean_win_chars(string)\n for char in ("\\", ":", "<", ">", "!"):\n assert char not in mangled_string\n\n\n@parametrize(\n "func,args,kwargs,sgn_expected",\n [\n (g, [list(range(5))], {}, "g([0, 1, 2, 3, 4])"),\n (k, 
[1, 2, (3, 4)], {"y": True}, "k(1, 2, (3, 4), y=True)"),\n ],\n)\ndef test_format_signature(func, args, kwargs, sgn_expected):\n # Test signature formatting.\n path, sgn_result = format_signature(func, *args, **kwargs)\n assert sgn_result == sgn_expected\n\n\ndef test_format_signature_long_arguments():\n shortening_threshold = 1500\n # shortening gets it down to 700 characters but there is the name\n # of the function in the signature and a few additional things\n # like dots for the ellipsis\n shortening_target = 700 + 10\n\n arg = "a" * shortening_threshold\n _, signature = format_signature(h, arg)\n assert len(signature) < shortening_target\n\n nb_args = 5\n args = [arg for _ in range(nb_args)]\n _, signature = format_signature(h, *args)\n assert len(signature) < shortening_target * nb_args\n\n kwargs = {str(i): arg for i, arg in enumerate(args)}\n _, signature = format_signature(h, **kwargs)\n assert len(signature) < shortening_target * nb_args\n\n _, signature = format_signature(h, *args, **kwargs)\n assert len(signature) < shortening_target * 2 * nb_args\n\n\n@with_numpy\ndef test_format_signature_numpy():\n """Test the format signature formatting with numpy."""\n\n\ndef test_special_source_encoding():\n from joblib.test.test_func_inspect_special_encoding import big5_f\n\n func_code, source_file, first_line = get_func_code(big5_f)\n assert first_line == 5\n assert "def big5_f():" in func_code\n assert "test_func_inspect_special_encoding" in source_file\n\n\ndef _get_code():\n from joblib.test.test_func_inspect_special_encoding import big5_f\n\n return get_func_code(big5_f)[0]\n\n\ndef test_func_code_consistency():\n from joblib.parallel import Parallel, delayed\n\n codes = Parallel(n_jobs=2)(delayed(_get_code)() for _ in range(5))\n assert len(set(codes)) == 1\n
.venv\Lib\site-packages\joblib\test\test_func_inspect.py
test_func_inspect.py
Python
9,314
0.95
0.153846
0.105263
awesome-app
472
2024-06-03T23:24:23.140965
GPL-3.0
true
d39747d877b90ae4cef54c49f23491f9
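A minimal runnable excerpt of the filter_args behaviour that the parametrized cases above encode: the ignore list drops parameters from the resolved argument dictionary.

from joblib.func_inspect import filter_args


def f(x, y=0):
    pass


# No ignore list: both parameters are resolved, defaults included.
assert filter_args(f, [], (1,)) == {"x": 1, "y": 0}
# Ignoring "y" removes it from the result.
assert filter_args(f, ["y"], (1,)) == {"x": 1}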
# -*- coding: big5 -*-\n\n\n# Some Traditional Chinese characters: @Ǥr\ndef big5_f():\n """Ωժ\n """\n # \n return 0\n
.venv\Lib\site-packages\joblib\test\test_func_inspect_special_encoding.py
test_func_inspect_special_encoding.py
Python
145
0.95
0.111111
0.428571
react-lib
217
2023-11-03T15:56:19.846825
Apache-2.0
true
f0488b4a6d7289fa7e834dbdc27e535b
"""\nTest the hashing module.\n"""\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Copyright (c) 2009 Gael Varoquaux\n# License: BSD Style, 3 clauses.\n\nimport collections\nimport gc\nimport hashlib\nimport io\nimport itertools\nimport pickle\nimport random\nimport sys\nimport time\nfrom concurrent.futures import ProcessPoolExecutor\nfrom decimal import Decimal\n\nfrom joblib.func_inspect import filter_args\nfrom joblib.hashing import hash\nfrom joblib.memory import Memory\nfrom joblib.test.common import np, with_numpy\nfrom joblib.testing import fixture, parametrize, raises, skipif\n\n\ndef unicode(s):\n return s\n\n\n###############################################################################\n# Helper functions for the tests\ndef time_func(func, *args):\n """Time function func on *args."""\n times = list()\n for _ in range(3):\n t1 = time.time()\n func(*args)\n times.append(time.time() - t1)\n return min(times)\n\n\ndef relative_time(func1, func2, *args):\n """Return the relative time between func1 and func2 applied on\n *args.\n """\n time_func1 = time_func(func1, *args)\n time_func2 = time_func(func2, *args)\n relative_diff = 0.5 * (abs(time_func1 - time_func2) / (time_func1 + time_func2))\n return relative_diff\n\n\nclass Klass(object):\n def f(self, x):\n return x\n\n\nclass KlassWithCachedMethod(object):\n def __init__(self, cachedir):\n mem = Memory(location=cachedir)\n self.f = mem.cache(self.f)\n\n def f(self, x):\n return x\n\n\n###############################################################################\n# Tests\n\ninput_list = [\n 1,\n 2,\n 1.0,\n 2.0,\n 1 + 1j,\n 2.0 + 1j,\n "a",\n "b",\n (1,),\n (\n 1,\n 1,\n ),\n [\n 1,\n ],\n [\n 1,\n 1,\n ],\n {1: 1},\n {1: 2},\n {2: 1},\n None,\n gc.collect,\n [\n 1,\n ].append,\n # Next 2 sets have unorderable elements in python 3.\n set(("a", 1)),\n set(("a", 1, ("a", 1))),\n # Next 2 dicts have unorderable type of keys in python 3.\n {"a": 1, 1: 2},\n {"a": 1, 1: 2, "d": {"a": 1}},\n]\n\n\n@parametrize("obj1", input_list)\n@parametrize("obj2", input_list)\ndef test_trivial_hash(obj1, obj2):\n """Smoke test hash on various types."""\n # Check that 2 objects have the same hash only if they are the same.\n are_hashes_equal = hash(obj1) == hash(obj2)\n are_objs_identical = obj1 is obj2\n assert are_hashes_equal == are_objs_identical\n\n\ndef test_hash_methods():\n # Check that hashing instance methods works\n a = io.StringIO(unicode("a"))\n assert hash(a.flush) == hash(a.flush)\n a1 = collections.deque(range(10))\n a2 = collections.deque(range(9))\n assert hash(a1.extend) != hash(a2.extend)\n\n\n@fixture(scope="function")\n@with_numpy\ndef three_np_arrays():\n rnd = np.random.RandomState(0)\n arr1 = rnd.random_sample((10, 10))\n arr2 = arr1.copy()\n arr3 = arr2.copy()\n arr3[0] += 1\n return arr1, arr2, arr3\n\n\ndef test_hash_numpy_arrays(three_np_arrays):\n arr1, arr2, arr3 = three_np_arrays\n\n for obj1, obj2 in itertools.product(three_np_arrays, repeat=2):\n are_hashes_equal = hash(obj1) == hash(obj2)\n are_arrays_equal = np.all(obj1 == obj2)\n assert are_hashes_equal == are_arrays_equal\n\n assert hash(arr1) != hash(arr1.T)\n\n\ndef test_hash_numpy_dict_of_arrays(three_np_arrays):\n arr1, arr2, arr3 = three_np_arrays\n\n d1 = {1: arr1, 2: arr2}\n d2 = {1: arr2, 2: arr1}\n d3 = {1: arr2, 2: arr3}\n\n assert hash(d1) == hash(d2)\n assert hash(d1) != hash(d3)\n\n\n@with_numpy\n@parametrize("dtype", ["datetime64[s]", "timedelta64[D]"])\ndef test_numpy_datetime_array(dtype):\n # memoryview is not 
supported for some dtypes e.g. datetime64\n # see https://github.com/joblib/joblib/issues/188 for more details\n a_hash = hash(np.arange(10))\n array = np.arange(0, 10, dtype=dtype)\n assert hash(array) != a_hash\n\n\n@with_numpy\ndef test_hash_numpy_noncontiguous():\n a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), order="F")[:, :1, :]\n b = np.ascontiguousarray(a)\n assert hash(a) != hash(b)\n\n c = np.asfortranarray(a)\n assert hash(a) != hash(c)\n\n\n@with_numpy\n@parametrize("coerce_mmap", [True, False])\ndef test_hash_memmap(tmpdir, coerce_mmap):\n """Check that memmap and arrays hash identically if coerce_mmap is True."""\n filename = tmpdir.join("memmap_temp").strpath\n try:\n m = np.memmap(filename, shape=(10, 10), mode="w+")\n a = np.asarray(m)\n are_hashes_equal = hash(a, coerce_mmap=coerce_mmap) == hash(\n m, coerce_mmap=coerce_mmap\n )\n assert are_hashes_equal == coerce_mmap\n finally:\n if "m" in locals():\n del m\n # Force a garbage-collection cycle, to be certain that the\n # object is delete, and we don't run in a problem under\n # Windows with a file handle still open.\n gc.collect()\n\n\n@with_numpy\n@skipif(\n sys.platform == "win32",\n reason="This test is not stable under windows for some reason",\n)\ndef test_hash_numpy_performance():\n """Check the performance of hashing numpy arrays:\n\n In [22]: a = np.random.random(1000000)\n\n In [23]: %timeit hashlib.md5(a).hexdigest()\n 100 loops, best of 3: 20.7 ms per loop\n\n In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest()\n 1 loops, best of 3: 73.1 ms per loop\n\n In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest()\n 10 loops, best of 3: 53.9 ms per loop\n\n In [26]: %timeit hash(a)\n 100 loops, best of 3: 20.8 ms per loop\n """\n rnd = np.random.RandomState(0)\n a = rnd.random_sample(1000000)\n\n def md5_hash(x):\n return hashlib.md5(memoryview(x)).hexdigest()\n\n relative_diff = relative_time(md5_hash, hash, a)\n assert relative_diff < 0.3\n\n # Check that hashing an tuple of 3 arrays takes approximately\n # 3 times as much as hashing one array\n time_hashlib = 3 * time_func(md5_hash, a)\n time_hash = time_func(hash, (a, a, a))\n relative_diff = 0.5 * (abs(time_hash - time_hashlib) / (time_hash + time_hashlib))\n assert relative_diff < 0.3\n\n\ndef test_bound_methods_hash():\n """Make sure that calling the same method on two different instances\n of the same class does resolve to the same hashes.\n """\n a = Klass()\n b = Klass()\n assert hash(filter_args(a.f, [], (1,))) == hash(filter_args(b.f, [], (1,)))\n\n\ndef test_bound_cached_methods_hash(tmpdir):\n """Make sure that calling the same _cached_ method on two different\n instances of the same class does resolve to the same hashes.\n """\n a = KlassWithCachedMethod(tmpdir.strpath)\n b = KlassWithCachedMethod(tmpdir.strpath)\n assert hash(filter_args(a.f.func, [], (1,))) == hash(\n filter_args(b.f.func, [], (1,))\n )\n\n\n@with_numpy\ndef test_hash_object_dtype():\n """Make sure that ndarrays with dtype `object' hash correctly."""\n\n a = np.array([np.arange(i) for i in range(6)], dtype=object)\n b = np.array([np.arange(i) for i in range(6)], dtype=object)\n\n assert hash(a) == hash(b)\n\n\n@with_numpy\ndef test_numpy_scalar():\n # Numpy scalars are built from compiled functions, and lead to\n # strange pickling paths explored, that can give hash collisions\n a = np.float64(2.0)\n b = np.float64(3.0)\n assert hash(a) != hash(b)\n\n\ndef test_dict_hash(tmpdir):\n # Check that dictionaries hash consistently, even though the 
ordering\n # of the keys is not guaranteed\n k = KlassWithCachedMethod(tmpdir.strpath)\n\n d = {\n "#s12069__c_maps.nii.gz": [33],\n "#s12158__c_maps.nii.gz": [33],\n "#s12258__c_maps.nii.gz": [33],\n "#s12277__c_maps.nii.gz": [33],\n "#s12300__c_maps.nii.gz": [33],\n "#s12401__c_maps.nii.gz": [33],\n "#s12430__c_maps.nii.gz": [33],\n "#s13817__c_maps.nii.gz": [33],\n "#s13903__c_maps.nii.gz": [33],\n "#s13916__c_maps.nii.gz": [33],\n "#s13981__c_maps.nii.gz": [33],\n "#s13982__c_maps.nii.gz": [33],\n "#s13983__c_maps.nii.gz": [33],\n }\n\n a = k.f(d)\n b = k.f(a)\n\n assert hash(a) == hash(b)\n\n\ndef test_set_hash(tmpdir):\n # Check that sets hash consistently, even though their ordering\n # is not guaranteed\n k = KlassWithCachedMethod(tmpdir.strpath)\n\n s = set(\n [\n "#s12069__c_maps.nii.gz",\n "#s12158__c_maps.nii.gz",\n "#s12258__c_maps.nii.gz",\n "#s12277__c_maps.nii.gz",\n "#s12300__c_maps.nii.gz",\n "#s12401__c_maps.nii.gz",\n "#s12430__c_maps.nii.gz",\n "#s13817__c_maps.nii.gz",\n "#s13903__c_maps.nii.gz",\n "#s13916__c_maps.nii.gz",\n "#s13981__c_maps.nii.gz",\n "#s13982__c_maps.nii.gz",\n "#s13983__c_maps.nii.gz",\n ]\n )\n\n a = k.f(s)\n b = k.f(a)\n\n assert hash(a) == hash(b)\n\n\ndef test_set_decimal_hash():\n # Check that sets containing decimals hash consistently, even though\n # ordering is not guaranteed\n assert hash(set([Decimal(0), Decimal("NaN")])) == hash(\n set([Decimal("NaN"), Decimal(0)])\n )\n\n\ndef test_string():\n # Test that we obtain the same hash for object owning several strings,\n # whatever the past of these strings (which are immutable in Python)\n string = "foo"\n a = {string: "bar"}\n b = {string: "bar"}\n c = pickle.loads(pickle.dumps(b))\n assert hash([a, b]) == hash([a, c])\n\n\n@with_numpy\ndef test_numpy_dtype_pickling():\n # numpy dtype hashing is tricky to get right: see #231, #239, #251 #1080,\n # #1082, and explanatory comments inside\n # ``joblib.hashing.NumpyHasher.save``.\n\n # In this test, we make sure that the pickling of numpy dtypes is robust to\n # object identity and object copy.\n\n dt1 = np.dtype("f4")\n dt2 = np.dtype("f4")\n\n # simple dtypes objects are interned\n assert dt1 is dt2\n assert hash(dt1) == hash(dt2)\n\n dt1_roundtripped = pickle.loads(pickle.dumps(dt1))\n assert dt1 is not dt1_roundtripped\n assert hash(dt1) == hash(dt1_roundtripped)\n\n assert hash([dt1, dt1]) == hash([dt1_roundtripped, dt1_roundtripped])\n assert hash([dt1, dt1]) == hash([dt1, dt1_roundtripped])\n\n complex_dt1 = np.dtype([("name", np.str_, 16), ("grades", np.float64, (2,))])\n complex_dt2 = np.dtype([("name", np.str_, 16), ("grades", np.float64, (2,))])\n\n # complex dtypes objects are not interned\n assert hash(complex_dt1) == hash(complex_dt2)\n\n complex_dt1_roundtripped = pickle.loads(pickle.dumps(complex_dt1))\n assert complex_dt1_roundtripped is not complex_dt1\n assert hash(complex_dt1) == hash(complex_dt1_roundtripped)\n\n assert hash([complex_dt1, complex_dt1]) == hash(\n [complex_dt1_roundtripped, complex_dt1_roundtripped]\n )\n assert hash([complex_dt1, complex_dt1]) == hash(\n [complex_dt1_roundtripped, complex_dt1]\n )\n\n\n@parametrize(\n "to_hash,expected",\n [\n ("This is a string to hash", "71b3f47df22cb19431d85d92d0b230b2"),\n ("C'est l\xe9t\xe9", "2d8d189e9b2b0b2e384d93c868c0e576"),\n ((123456, 54321, -98765), "e205227dd82250871fa25aa0ec690aa3"),\n (\n [random.Random(42).random() for _ in range(5)],\n "a11ffad81f9682a7d901e6edc3d16c84",\n ),\n ({"abcde": 123, "sadfas": [-9999, 2, 3]}, 
"aeda150553d4bb5c69f0e69d51b0e2ef"),\n ],\n)\ndef test_hashes_stay_the_same(to_hash, expected):\n # We want to make sure that hashes don't change with joblib\n # version. For end users, that would mean that they have to\n # regenerate their cache from scratch, which potentially means\n # lengthy recomputations.\n # Expected results have been generated with joblib 0.9.2\n assert hash(to_hash) == expected\n\n\n@with_numpy\ndef test_hashes_are_different_between_c_and_fortran_contiguous_arrays():\n # We want to be sure that the c-contiguous and f-contiguous versions of the\n # same array produce 2 different hashes.\n rng = np.random.RandomState(0)\n arr_c = rng.random_sample((10, 10))\n arr_f = np.asfortranarray(arr_c)\n assert hash(arr_c) != hash(arr_f)\n\n\n@with_numpy\ndef test_0d_array():\n hash(np.array(0))\n\n\n@with_numpy\ndef test_0d_and_1d_array_hashing_is_different():\n assert hash(np.array(0)) != hash(np.array([0]))\n\n\n@with_numpy\ndef test_hashes_stay_the_same_with_numpy_objects():\n # Note: joblib used to test numpy objects hashing by comparing the produced\n # hash of an object with some hard-coded target value to guarantee that\n # hashing remains the same across joblib versions. However, since numpy\n # 1.20 and joblib 1.0, joblib relies on potentially unstable implementation\n # details of numpy to hash np.dtype objects, which makes the stability of\n # hash values across different environments hard to guarantee and to test.\n # As a result, hashing stability across joblib versions becomes best-effort\n # only, and we only test the consistency within a single environment by\n # making sure:\n # - the hash of two copies of the same objects is the same\n # - hashing some object in two different python processes produces the same\n # value. This should be viewed as a proxy for testing hash consistency\n # through time between Python sessions (provided no change in the\n # environment was done between sessions).\n\n def create_objects_to_hash():\n rng = np.random.RandomState(42)\n # Being explicit about dtypes in order to avoid\n # architecture-related differences. Also using 'f4' rather than\n # 'f8' for float arrays because 'f8' arrays generated by\n # rng.random.randn don't seem to be bit-identical on 32bit and\n # 64bit machines.\n to_hash_list = [\n rng.randint(-1000, high=1000, size=50).astype("<i8"),\n tuple(rng.randn(3).astype("<f4") for _ in range(5)),\n [rng.randn(3).astype("<f4") for _ in range(5)],\n {\n -3333: rng.randn(3, 5).astype("<f4"),\n 0: [\n rng.randint(10, size=20).astype("<i8"),\n rng.randn(10).astype("<f4"),\n ],\n },\n # Non regression cases for\n # https://github.com/joblib/joblib/issues/308\n np.arange(100, dtype="<i8").reshape((10, 10)),\n # Fortran contiguous array\n np.asfortranarray(np.arange(100, dtype="<i8").reshape((10, 10))),\n # Non contiguous array\n np.arange(100, dtype="<i8").reshape((10, 10))[:, :2],\n ]\n return to_hash_list\n\n # Create two lists containing copies of the same objects. 
joblib.hash\n # should return the same hash for to_hash_list_one[i] and\n # to_hash_list_two[i]\n to_hash_list_one = create_objects_to_hash()\n to_hash_list_two = create_objects_to_hash()\n\n e1 = ProcessPoolExecutor(max_workers=1)\n e2 = ProcessPoolExecutor(max_workers=1)\n\n try:\n for obj_1, obj_2 in zip(to_hash_list_one, to_hash_list_two):\n # testing consistency of hashes across python processes\n hash_1 = e1.submit(hash, obj_1).result()\n hash_2 = e2.submit(hash, obj_1).result()\n assert hash_1 == hash_2\n\n # testing consistency when hashing two copies of the same objects.\n hash_3 = e1.submit(hash, obj_2).result()\n assert hash_1 == hash_3\n\n finally:\n e1.shutdown()\n e2.shutdown()\n\n\ndef test_hashing_pickling_error():\n def non_picklable():\n return 42\n\n with raises(pickle.PicklingError) as excinfo:\n hash(non_picklable)\n excinfo.match("PicklingError while hashing")\n\n\ndef test_wrong_hash_name():\n msg = "Valid options for 'hash_name' are"\n with raises(ValueError, match=msg):\n data = {"foo": "bar"}\n hash(data, hash_name="invalid")\n
.venv\Lib\site-packages\joblib\test\test_hashing.py
test_hashing.py
Python
15,820
0.95
0.123077
0.170673
react-lib
428
2024-12-08T10:11:19.270911
Apache-2.0
true
4637061ad504eb0c7298991da9aa1fc0
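A short standalone sketch of the content-based hashing that test_dict_hash and test_string above rely on, assuming the order-insensitive dictionary handling those tests describe:

import joblib

d1 = {"a": [1, 2, 3], "b": "spam"}
d2 = {"b": "spam", "a": [1, 2, 3]}

# Equal-by-value containers hash identically, regardless of insertion order.
assert joblib.hash(d1) == joblib.hash(d2)
# Any change in content changes the hash.
assert joblib.hash(d1) != joblib.hash({"a": [1, 2, 4], "b": "spam"})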
# Basic test case to test functioning of module's top-level\n\ntry:\n from joblib import * # noqa\n\n _top_import_error = None\nexcept Exception as ex: # pragma: no cover\n _top_import_error = ex\n\n\ndef test_import_joblib():\n # Test either above import has failed for some reason\n # "import *" only allowed at module level, hence we\n # rely on setting up the variable above\n assert _top_import_error is None\n
.venv\Lib\site-packages\joblib\test\test_init.py
test_init.py
Python
423
0.95
0.2
0.363636
node-utils
172
2024-08-07T13:08:54.548784
BSD-3-Clause
true
7038f6132a59d640727a142b3a902aca
"""\nTest the logger module.\n"""\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Copyright (c) 2009 Gael Varoquaux\n# License: BSD Style, 3 clauses.\nimport re\n\nfrom joblib.logger import PrintTime\n\n\ndef test_print_time(tmpdir, capsys):\n # A simple smoke test for PrintTime.\n logfile = tmpdir.join("test.log").strpath\n print_time = PrintTime(logfile=logfile)\n print_time("Foo")\n # Create a second time, to smoke test log rotation.\n print_time = PrintTime(logfile=logfile)\n print_time("Foo")\n # And a third time\n print_time = PrintTime(logfile=logfile)\n print_time("Foo")\n\n out_printed_text, err_printed_text = capsys.readouterr()\n # Use regexps to be robust to time variations\n match = r"Foo: 0\..s, 0\..min\nFoo: 0\..s, 0..min\nFoo: " + r".\..s, 0..min\n"\n if not re.match(match, err_printed_text):\n raise AssertionError("Excepted %s, got %s" % (match, err_printed_text))\n
.venv\Lib\site-packages\joblib\test\test_logger.py
test_logger.py
Python
941
0.95
0.103448
0.291667
awesome-app
165
2023-09-18T18:22:23.683367
Apache-2.0
true
5d51db211321060b67d01d9839705736
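A standalone variant of the PrintTime smoke test above; the temporary log location and the message strings are illustrative.

import os
import tempfile

from joblib.logger import PrintTime

logfile = os.path.join(tempfile.mkdtemp(), "demo.log")
print_time = PrintTime(logfile=logfile)
# Each call appends a "<msg>: <elapsed>" entry to the log and echoes it to stderr.
print_time("Loading data")
print_time("Fitting model")
with open(logfile) as f:
    print(f.read())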
"""\nPyodide and other single-threaded Python builds will be missing the\n_multiprocessing module. Test that joblib still works in this environment.\n"""\n\nimport os\nimport subprocess\nimport sys\n\n\ndef test_missing_multiprocessing(tmp_path):\n """\n Test that import joblib works even if _multiprocessing is missing.\n\n pytest has already imported everything from joblib. The most reasonable way\n to test importing joblib with modified environment is to invoke a separate\n Python process. This also ensures that we don't break other tests by\n importing a bad `_multiprocessing` module.\n """\n (tmp_path / "_multiprocessing.py").write_text(\n 'raise ImportError("No _multiprocessing module!")'\n )\n env = dict(os.environ)\n # For subprocess, use current sys.path with our custom version of\n # multiprocessing inserted.\n env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path)\n subprocess.check_call(\n [\n sys.executable,\n "-c",\n "import joblib, math; "\n "joblib.Parallel(n_jobs=1)("\n "joblib.delayed(math.sqrt)(i**2) for i in range(10))",\n ],\n env=env,\n )\n
.venv\Lib\site-packages\joblib\test\test_missing_multiprocessing.py
test_missing_multiprocessing.py
Python
1,171
0.95
0.083333
0.0625
react-lib
361
2024-07-26T19:17:50.632316
Apache-2.0
true
911605c02087cad526e6d1bb5cbe7170
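The generic form of the trick used above: run a snippet in a fresh interpreter so the import under test happens with a controlled PYTHONPATH. The snippet here just prints the joblib version; the original test instead shadows _multiprocessing.

import os
import subprocess
import sys

env = dict(os.environ)
# Propagate the current sys.path; a directory prepended here would shadow
# stdlib modules for the child process, exactly as the test above does.
env["PYTHONPATH"] = os.pathsep.join(p for p in sys.path if p)
subprocess.check_call(
    [sys.executable, "-c", "import joblib; print(joblib.__version__)"],
    env=env,
)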
import sys\n\nimport joblib\nfrom joblib.test.common import with_multiprocessing\nfrom joblib.testing import check_subprocess_call\n\n\ndef test_version():\n assert hasattr(joblib, "__version__"), (\n "There are no __version__ argument on the joblib module"\n )\n\n\n@with_multiprocessing\ndef test_no_start_method_side_effect_on_import():\n # check that importing joblib does not implicitly set the global\n # start_method for multiprocessing.\n code = """if True:\n import joblib\n import multiprocessing as mp\n # The following line would raise RuntimeError if the\n # start_method is already set.\n mp.set_start_method("loky")\n """\n check_subprocess_call([sys.executable, "-c", code])\n\n\n@with_multiprocessing\ndef test_no_semaphore_tracker_on_import():\n # check that importing joblib does not implicitly spawn a resource tracker\n # or a semaphore tracker\n code = """if True:\n import joblib\n from multiprocessing import semaphore_tracker\n # The following line would raise RuntimeError if the\n # start_method is already set.\n msg = "multiprocessing.semaphore_tracker has been spawned on import"\n assert semaphore_tracker._semaphore_tracker._fd is None, msg"""\n if sys.version_info >= (3, 8):\n # semaphore_tracker was renamed in Python 3.8:\n code = code.replace("semaphore_tracker", "resource_tracker")\n check_subprocess_call([sys.executable, "-c", code])\n\n\n@with_multiprocessing\ndef test_no_resource_tracker_on_import():\n code = """if True:\n import joblib\n from joblib.externals.loky.backend import resource_tracker\n # The following line would raise RuntimeError if the\n # start_method is already set.\n msg = "loky.resource_tracker has been spawned on import"\n assert resource_tracker._resource_tracker._fd is None, msg\n """\n check_subprocess_call([sys.executable, "-c", code])\n
.venv\Lib\site-packages\joblib\test\test_module.py
test_module.py
Python
1,942
0.95
0.218182
0.23913
python-kit
582
2024-02-23T02:04:58.565213
Apache-2.0
true
55396ce1fe3c626db74d15e4d0979e09
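A variant of test_no_start_method_side_effect_on_import above, expressed without the joblib test helpers; it uses the always-available "spawn" method rather than "loky", so it only checks that importing joblib leaves the global start method unset.

import subprocess
import sys

code = "import joblib, multiprocessing as mp; mp.set_start_method('spawn')"
# Exits non-zero (and check_call raises) if the import pre-set a start method.
subprocess.check_call([sys.executable, "-c", code])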
"""Test the numpy pickler as a replacement of the standard pickler."""\n\nimport bz2\nimport copy\nimport gzip\nimport io\nimport mmap\nimport os\nimport pickle\nimport random\nimport re\nimport socket\nimport sys\nimport warnings\nimport zlib\nfrom contextlib import closing\nfrom pathlib import Path\n\ntry:\n import lzma\nexcept ImportError:\n lzma = None\n\nimport pytest\n\n# numpy_pickle is not a drop-in replacement of pickle, as it takes\n# filenames instead of open files as arguments.\nfrom joblib import numpy_pickle, register_compressor\nfrom joblib.compressor import (\n _COMPRESSORS,\n _LZ4_PREFIX,\n LZ4_NOT_INSTALLED_ERROR,\n BinaryZlibFile,\n CompressorWrapper,\n)\nfrom joblib.numpy_pickle_utils import (\n _IO_BUFFER_SIZE,\n _detect_compressor,\n _ensure_native_byte_order,\n _is_numpy_array_byte_order_mismatch,\n)\nfrom joblib.test import data\nfrom joblib.test.common import (\n memory_used,\n np,\n with_lz4,\n with_memory_profiler,\n with_numpy,\n without_lz4,\n)\nfrom joblib.testing import parametrize, raises, warns\n\n###############################################################################\n# Define a list of standard types.\n# Borrowed from dill, initial author: Micheal McKerns:\n# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py\n\ntypelist = []\n\n# testing types\n_none = None\ntypelist.append(_none)\n_type = type\ntypelist.append(_type)\n_bool = bool(1)\ntypelist.append(_bool)\n_int = int(1)\ntypelist.append(_int)\n_float = float(1)\ntypelist.append(_float)\n_complex = complex(1)\ntypelist.append(_complex)\n_string = str(1)\ntypelist.append(_string)\n_tuple = ()\ntypelist.append(_tuple)\n_list = []\ntypelist.append(_list)\n_dict = {}\ntypelist.append(_dict)\n_builtin = len\ntypelist.append(_builtin)\n\n\ndef _function(x):\n yield x\n\n\nclass _class:\n def _method(self):\n pass\n\n\nclass _newclass(object):\n def _method(self):\n pass\n\n\ntypelist.append(_function)\ntypelist.append(_class)\ntypelist.append(_newclass) # <type 'type'>\n_instance = _class()\ntypelist.append(_instance)\n_object = _newclass()\ntypelist.append(_object) # <type 'class'>\n\n\n###############################################################################\n# Tests\n\n\n@parametrize("compress", [0, 1])\n@parametrize("member", typelist)\ndef test_standard_types(tmpdir, compress, member):\n # Test pickling and saving with standard types.\n filename = tmpdir.join("test.pkl").strpath\n numpy_pickle.dump(member, filename, compress=compress)\n _member = numpy_pickle.load(filename)\n # We compare the pickled instance to the reloaded one only if it\n # can be compared to a copied one\n if member == copy.deepcopy(member):\n assert member == _member\n\n\ndef test_value_error():\n # Test inverting the input arguments to dump\n with raises(ValueError):\n numpy_pickle.dump("foo", dict())\n\n\n@parametrize("wrong_compress", [-1, 10, dict()])\ndef test_compress_level_error(wrong_compress):\n # Verify that passing an invalid compress argument raises an error.\n exception_msg = 'Non valid compress level given: "{0}"'.format(wrong_compress)\n with raises(ValueError) as excinfo:\n numpy_pickle.dump("dummy", "foo", compress=wrong_compress)\n excinfo.match(exception_msg)\n\n\n@with_numpy\n@parametrize("compress", [False, True, 0, 3, "zlib"])\ndef test_numpy_persistence(tmpdir, compress):\n filename = tmpdir.join("test.pkl").strpath\n rnd = np.random.RandomState(0)\n a = rnd.random_sample((10, 2))\n # We use 'a.T' to have a non C-contiguous array.\n for index, obj in enumerate(((a,), (a.T,), (a, a), [a, 
a, a])):\n filenames = numpy_pickle.dump(obj, filename, compress=compress)\n\n # All is cached in one file\n assert len(filenames) == 1\n # Check that only one file was created\n assert filenames[0] == filename\n # Check that this file does exist\n assert os.path.exists(filenames[0])\n\n # Unpickle the object\n obj_ = numpy_pickle.load(filename)\n # Check that the items are indeed arrays\n for item in obj_:\n assert isinstance(item, np.ndarray)\n # And finally, check that all the values are equal.\n np.testing.assert_array_equal(np.array(obj), np.array(obj_))\n\n # Now test with an array subclass\n obj = np.memmap(filename + "mmap", mode="w+", shape=4, dtype=np.float64)\n filenames = numpy_pickle.dump(obj, filename, compress=compress)\n # All is cached in one file\n assert len(filenames) == 1\n\n obj_ = numpy_pickle.load(filename)\n if type(obj) is not np.memmap and hasattr(obj, "__array_prepare__"):\n # We don't reconstruct memmaps\n assert isinstance(obj_, type(obj))\n\n np.testing.assert_array_equal(obj_, obj)\n\n # Test with an object containing multiple numpy arrays\n obj = ComplexTestObject()\n filenames = numpy_pickle.dump(obj, filename, compress=compress)\n # All is cached in one file\n assert len(filenames) == 1\n\n obj_loaded = numpy_pickle.load(filename)\n assert isinstance(obj_loaded, type(obj))\n np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)\n np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)\n np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)\n\n\n@with_numpy\ndef test_numpy_persistence_bufferred_array_compression(tmpdir):\n big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8)\n filename = tmpdir.join("test.pkl").strpath\n numpy_pickle.dump(big_array, filename, compress=True)\n arr_reloaded = numpy_pickle.load(filename)\n\n np.testing.assert_array_equal(big_array, arr_reloaded)\n\n\n@with_numpy\ndef test_memmap_persistence(tmpdir):\n rnd = np.random.RandomState(0)\n a = rnd.random_sample(10)\n filename = tmpdir.join("test1.pkl").strpath\n numpy_pickle.dump(a, filename)\n b = numpy_pickle.load(filename, mmap_mode="r")\n\n assert isinstance(b, np.memmap)\n\n # Test with an object containing multiple numpy arrays\n filename = tmpdir.join("test2.pkl").strpath\n obj = ComplexTestObject()\n numpy_pickle.dump(obj, filename)\n obj_loaded = numpy_pickle.load(filename, mmap_mode="r")\n assert isinstance(obj_loaded, type(obj))\n assert isinstance(obj_loaded.array_float, np.memmap)\n assert not obj_loaded.array_float.flags.writeable\n assert isinstance(obj_loaded.array_int, np.memmap)\n assert not obj_loaded.array_int.flags.writeable\n # Memory map not allowed for numpy object arrays\n assert not isinstance(obj_loaded.array_obj, np.memmap)\n np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)\n np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)\n np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)\n\n # Test we can write in memmapped arrays\n obj_loaded = numpy_pickle.load(filename, mmap_mode="r+")\n assert obj_loaded.array_float.flags.writeable\n obj_loaded.array_float[0:10] = 10.0\n assert obj_loaded.array_int.flags.writeable\n obj_loaded.array_int[0:10] = 10\n\n obj_reloaded = numpy_pickle.load(filename, mmap_mode="r")\n np.testing.assert_array_equal(obj_reloaded.array_float, obj_loaded.array_float)\n np.testing.assert_array_equal(obj_reloaded.array_int, obj_loaded.array_int)\n\n # Test w+ mode is caught and the mode has switched to r+\n numpy_pickle.load(filename, 
mmap_mode="w+")\n assert obj_loaded.array_int.flags.writeable\n assert obj_loaded.array_int.mode == "r+"\n assert obj_loaded.array_float.flags.writeable\n assert obj_loaded.array_float.mode == "r+"\n\n\n@with_numpy\ndef test_memmap_persistence_mixed_dtypes(tmpdir):\n # loading datastructures that have sub-arrays with dtype=object\n # should not prevent memmapping on fixed size dtype sub-arrays.\n rnd = np.random.RandomState(0)\n a = rnd.random_sample(10)\n b = np.array([1, "b"], dtype=object)\n construct = (a, b)\n filename = tmpdir.join("test.pkl").strpath\n numpy_pickle.dump(construct, filename)\n a_clone, b_clone = numpy_pickle.load(filename, mmap_mode="r")\n\n # the floating point array has been memory mapped\n assert isinstance(a_clone, np.memmap)\n\n # the object-dtype array has been loaded in memory\n assert not isinstance(b_clone, np.memmap)\n\n\n@with_numpy\ndef test_masked_array_persistence(tmpdir):\n # The special-case picker fails, because saving masked_array\n # not implemented, but it just delegates to the standard pickler.\n rnd = np.random.RandomState(0)\n a = rnd.random_sample(10)\n a = np.ma.masked_greater(a, 0.5)\n filename = tmpdir.join("test.pkl").strpath\n numpy_pickle.dump(a, filename)\n b = numpy_pickle.load(filename, mmap_mode="r")\n assert isinstance(b, np.ma.masked_array)\n\n\n@with_numpy\ndef test_compress_mmap_mode_warning(tmpdir):\n # Test the warning in case of compress + mmap_mode\n rnd = np.random.RandomState(0)\n obj = rnd.random_sample(10)\n this_filename = tmpdir.join("test.pkl").strpath\n numpy_pickle.dump(obj, this_filename, compress=1)\n with warns(UserWarning) as warninfo:\n reloaded_obj = numpy_pickle.load(this_filename, mmap_mode="r+")\n debug_msg = "\n".join([str(w) for w in warninfo])\n warninfo = [w.message for w in warninfo]\n assert not isinstance(reloaded_obj, np.memmap)\n np.testing.assert_array_equal(obj, reloaded_obj)\n assert len(warninfo) == 1, debug_msg\n assert (\n str(warninfo[0]) == 'mmap_mode "r+" is not compatible with compressed '\n f'file {this_filename}. 
"r+" flag will be ignored.'\n )\n\n\n@with_numpy\n@with_memory_profiler\n@parametrize("compress", [True, False])\ndef test_memory_usage(tmpdir, compress):\n # Verify memory stays within expected bounds.\n filename = tmpdir.join("test.pkl").strpath\n small_array = np.ones((10, 10))\n big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8)\n\n for obj in (small_array, big_array):\n size = obj.nbytes / 1e6\n obj_filename = filename + str(np.random.randint(0, 1000))\n mem_used = memory_used(numpy_pickle.dump, obj, obj_filename, compress=compress)\n\n # The memory used to dump the object shouldn't exceed the buffer\n # size used to write array chunks (16MB).\n write_buf_size = _IO_BUFFER_SIZE + 16 * 1024**2 / 1e6\n assert mem_used <= write_buf_size\n\n mem_used = memory_used(numpy_pickle.load, obj_filename)\n # memory used should be less than array size + buffer size used to\n # read the array chunk by chunk.\n read_buf_size = 32 + _IO_BUFFER_SIZE # MiB\n assert mem_used < size + read_buf_size\n\n\n@with_numpy\ndef test_compressed_pickle_dump_and_load(tmpdir):\n expected_list = [\n np.arange(5, dtype=np.dtype("<i8")),\n np.arange(5, dtype=np.dtype(">i8")),\n np.arange(5, dtype=np.dtype("<f8")),\n np.arange(5, dtype=np.dtype(">f8")),\n np.array([1, "abc", {"a": 1, "b": 2}], dtype="O"),\n np.arange(256, dtype=np.uint8).tobytes(),\n "C'est l'\xe9t\xe9 !",\n ]\n\n fname = tmpdir.join("temp.pkl.gz").strpath\n\n dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1)\n assert len(dumped_filenames) == 1\n result_list = numpy_pickle.load(fname)\n for result, expected in zip(result_list, expected_list):\n if isinstance(expected, np.ndarray):\n expected = _ensure_native_byte_order(expected)\n assert result.dtype == expected.dtype\n np.testing.assert_equal(result, expected)\n else:\n assert result == expected\n\n\n@with_numpy\ndef test_memmap_load(tmpdir):\n little_endian_dtype = np.dtype("<i8")\n big_endian_dtype = np.dtype(">i8")\n all_dtypes = (little_endian_dtype, big_endian_dtype)\n\n le_array = np.arange(5, dtype=little_endian_dtype)\n be_array = np.arange(5, dtype=big_endian_dtype)\n\n fname = tmpdir.join("temp.pkl").strpath\n\n numpy_pickle.dump([le_array, be_array], fname)\n\n le_array_native_load, be_array_native_load = numpy_pickle.load(\n fname, ensure_native_byte_order=True\n )\n\n assert le_array_native_load.dtype == be_array_native_load.dtype\n assert le_array_native_load.dtype in all_dtypes\n\n le_array_nonnative_load, be_array_nonnative_load = numpy_pickle.load(\n fname, ensure_native_byte_order=False\n )\n\n assert le_array_nonnative_load.dtype == le_array.dtype\n assert be_array_nonnative_load.dtype == be_array.dtype\n\n\ndef test_invalid_parameters_raise():\n expected_msg = (\n "Native byte ordering can only be enforced if 'mmap_mode' parameter "\n "is set to None, but got 'mmap_mode=r+' instead."\n )\n\n with raises(ValueError, match=re.escape(expected_msg)):\n numpy_pickle.load(\n "/path/to/some/dump.pkl", ensure_native_byte_order=True, mmap_mode="r+"\n )\n\n\ndef _check_pickle(filename, expected_list, mmap_mode=None):\n """Helper function to test joblib pickle content.\n\n Note: currently only pickles containing an iterable are supported\n by this function.\n """\n version_match = re.match(r".+py(\d)(\d).+", filename)\n py_version_used_for_writing = int(version_match.group(1))\n\n py_version_to_default_pickle_protocol = {2: 2, 3: 3}\n pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4)\n pickle_writing_protocol = 
py_version_to_default_pickle_protocol.get(\n py_version_used_for_writing, 4\n )\n if pickle_reading_protocol >= pickle_writing_protocol:\n try:\n with warnings.catch_warnings(record=True) as warninfo:\n warnings.simplefilter("always")\n result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode)\n filename_base = os.path.basename(filename)\n expected_nb_deprecation_warnings = (\n 1 if ("_0.9" in filename_base or "_0.8.4" in filename_base) else 0\n )\n\n expected_nb_user_warnings = (\n 3\n if (re.search("_0.1.+.pkl$", filename_base) and mmap_mode is not None)\n else 0\n )\n expected_nb_warnings = (\n expected_nb_deprecation_warnings + expected_nb_user_warnings\n )\n assert len(warninfo) == expected_nb_warnings, (\n "Did not get the expected number of warnings. Expected "\n f"{expected_nb_warnings} but got warnings: "\n f"{[w.message for w in warninfo]}"\n )\n\n deprecation_warnings = [\n w for w in warninfo if issubclass(w.category, DeprecationWarning)\n ]\n user_warnings = [w for w in warninfo if issubclass(w.category, UserWarning)]\n for w in deprecation_warnings:\n assert (\n str(w.message)\n == "The file '{0}' has been generated with a joblib "\n "version less than 0.10. Please regenerate this "\n "pickle file.".format(filename)\n )\n\n for w in user_warnings:\n escaped_filename = re.escape(filename)\n assert re.search(\n f"memmapped.+{escaped_filename}.+segmentation fault", str(w.message)\n )\n\n for result, expected in zip(result_list, expected_list):\n if isinstance(expected, np.ndarray):\n expected = _ensure_native_byte_order(expected)\n assert result.dtype == expected.dtype\n np.testing.assert_equal(result, expected)\n else:\n assert result == expected\n except Exception as exc:\n # When trying to read with python 3 a pickle generated\n # with python 2 we expect a user-friendly error\n if py_version_used_for_writing == 2:\n assert isinstance(exc, ValueError)\n message = (\n "You may be trying to read with "\n "python 3 a joblib pickle generated with python 2."\n )\n assert message in str(exc)\n elif filename.endswith(".lz4") and with_lz4.args[0]:\n assert isinstance(exc, ValueError)\n assert LZ4_NOT_INSTALLED_ERROR in str(exc)\n else:\n raise\n else:\n # Pickle protocol used for writing is too high. We expect a\n # "unsupported pickle protocol" error message\n try:\n numpy_pickle.load(filename)\n raise AssertionError(\n "Numpy pickle loading should have raised a ValueError exception"\n )\n except ValueError as e:\n message = "unsupported pickle protocol: {0}".format(pickle_writing_protocol)\n assert message in str(e.args)\n\n\n@with_numpy\ndef test_joblib_pickle_across_python_versions():\n # We need to be specific about dtypes in particular endianness\n # because the pickles can be generated on one architecture and\n # the tests run on another one. See\n # https://github.com/joblib/joblib/issues/279.\n expected_list = [\n np.arange(5, dtype=np.dtype("<i8")),\n np.arange(5, dtype=np.dtype("<f8")),\n np.array([1, "abc", {"a": 1, "b": 2}], dtype="O"),\n np.arange(256, dtype=np.uint8).tobytes(),\n # np.matrix is a subclass of np.ndarray, here we want\n # to verify this type of object is correctly unpickled\n # among versions.\n np.matrix([0, 1, 2], dtype=np.dtype("<i8")),\n "C'est l'\xe9t\xe9 !",\n ]\n\n # Testing all the compressed and non compressed\n # pickles in joblib/test/data. 
These pickles were generated by\n # the joblib/test/data/create_numpy_pickle.py script for the\n # relevant python, joblib and numpy versions.\n test_data_dir = os.path.dirname(os.path.abspath(data.__file__))\n\n pickle_extensions = (".pkl", ".gz", ".gzip", ".bz2", "lz4")\n if lzma is not None:\n pickle_extensions += (".xz", ".lzma")\n pickle_filenames = [\n os.path.join(test_data_dir, fn)\n for fn in os.listdir(test_data_dir)\n if any(fn.endswith(ext) for ext in pickle_extensions)\n ]\n\n for fname in pickle_filenames:\n _check_pickle(fname, expected_list)\n\n\n@with_numpy\ndef test_joblib_pickle_across_python_versions_with_mmap():\n expected_list = [\n np.arange(5, dtype=np.dtype("<i8")),\n np.arange(5, dtype=np.dtype("<f8")),\n np.array([1, "abc", {"a": 1, "b": 2}], dtype="O"),\n np.arange(256, dtype=np.uint8).tobytes(),\n # np.matrix is a subclass of np.ndarray, here we want\n # to verify this type of object is correctly unpickled\n # among versions.\n np.matrix([0, 1, 2], dtype=np.dtype("<i8")),\n "C'est l'\xe9t\xe9 !",\n ]\n\n test_data_dir = os.path.dirname(os.path.abspath(data.__file__))\n\n pickle_filenames = [\n os.path.join(test_data_dir, fn)\n for fn in os.listdir(test_data_dir)\n if fn.endswith(".pkl")\n ]\n for fname in pickle_filenames:\n _check_pickle(fname, expected_list, mmap_mode="r")\n\n\n@with_numpy\ndef test_numpy_array_byte_order_mismatch_detection():\n # List of numpy arrays with big endian byteorder.\n be_arrays = [\n np.array([(1, 2.0), (3, 4.0)], dtype=[("", ">i8"), ("", ">f8")]),\n np.arange(3, dtype=np.dtype(">i8")),\n np.arange(3, dtype=np.dtype(">f8")),\n ]\n\n # Verify the byteorder mismatch is correctly detected.\n for array in be_arrays:\n if sys.byteorder == "big":\n assert not _is_numpy_array_byte_order_mismatch(array)\n else:\n assert _is_numpy_array_byte_order_mismatch(array)\n converted = _ensure_native_byte_order(array)\n if converted.dtype.fields:\n for f in converted.dtype.fields.values():\n assert f[0].byteorder == "="\n else:\n assert converted.dtype.byteorder == "="\n\n # List of numpy arrays with little endian byteorder.\n le_arrays = [\n np.array([(1, 2.0), (3, 4.0)], dtype=[("", "<i8"), ("", "<f8")]),\n np.arange(3, dtype=np.dtype("<i8")),\n np.arange(3, dtype=np.dtype("<f8")),\n ]\n\n # Verify the byteorder mismatch is correctly detected.\n for array in le_arrays:\n if sys.byteorder == "little":\n assert not _is_numpy_array_byte_order_mismatch(array)\n else:\n assert _is_numpy_array_byte_order_mismatch(array)\n converted = _ensure_native_byte_order(array)\n if converted.dtype.fields:\n for f in converted.dtype.fields.values():\n assert f[0].byteorder == "="\n else:\n assert converted.dtype.byteorder == "="\n\n\n@parametrize("compress_tuple", [("zlib", 3), ("gzip", 3)])\ndef test_compress_tuple_argument(tmpdir, compress_tuple):\n # Verify the tuple is correctly taken into account.\n filename = tmpdir.join("test.pkl").strpath\n numpy_pickle.dump("dummy", filename, compress=compress_tuple)\n # Verify the file contains the right magic number\n with open(filename, "rb") as f:\n assert _detect_compressor(f) == compress_tuple[0]\n\n\n@parametrize(\n "compress_tuple,message",\n [\n (\n ("zlib", 3, "extra"), # wrong compress tuple\n "Compress argument tuple should contain exactly 2 elements",\n ),\n (\n ("wrong", 3), # wrong compress method\n 'Non valid compression method given: "{}"'.format("wrong"),\n ),\n (\n ("zlib", "wrong"), # wrong compress level\n 'Non valid compress level given: "{}"'.format("wrong"),\n ),\n ],\n)\ndef 
test_compress_tuple_argument_exception(tmpdir, compress_tuple, message):\n filename = tmpdir.join("test.pkl").strpath\n # Verify setting a wrong compress tuple raises a ValueError.\n with raises(ValueError) as excinfo:\n numpy_pickle.dump("dummy", filename, compress=compress_tuple)\n excinfo.match(message)\n\n\n@parametrize("compress_string", ["zlib", "gzip"])\ndef test_compress_string_argument(tmpdir, compress_string):\n # Verify the string is correctly taken into account.\n filename = tmpdir.join("test.pkl").strpath\n numpy_pickle.dump("dummy", filename, compress=compress_string)\n # Verify the file contains the right magic number\n with open(filename, "rb") as f:\n assert _detect_compressor(f) == compress_string\n\n\n@with_numpy\n@parametrize("compress", [1, 3, 6])\n@parametrize("cmethod", _COMPRESSORS)\ndef test_joblib_compression_formats(tmpdir, compress, cmethod):\n filename = tmpdir.join("test.pkl").strpath\n objects = (\n np.ones(shape=(100, 100), dtype="f8"),\n range(10),\n {"a": 1, 2: "b"},\n [],\n (),\n {},\n 0,\n 1.0,\n )\n\n if cmethod in ("lzma", "xz") and lzma is None:\n pytest.skip("lzma support is not available")\n\n elif cmethod == "lz4" and with_lz4.args[0]:\n # Skip the test if lz4 is not installed. Here we use the with_lz4\n # skipif fixture, whose argument is True when lz4 is not installed.\n pytest.skip("lz4 is not installed.")\n\n dump_filename = filename + "." + cmethod\n for obj in objects:\n numpy_pickle.dump(obj, dump_filename, compress=(cmethod, compress))\n # Verify the file contains the right magic number\n with open(dump_filename, "rb") as f:\n assert _detect_compressor(f) == cmethod\n # Verify the reloaded object is correct\n obj_reloaded = numpy_pickle.load(dump_filename)\n assert isinstance(obj_reloaded, type(obj))\n if isinstance(obj, np.ndarray):\n np.testing.assert_array_equal(obj_reloaded, obj)\n else:\n assert obj_reloaded == obj\n\n\ndef _gzip_file_decompress(source_filename, target_filename):\n """Decompress a gzip file."""\n with closing(gzip.GzipFile(source_filename, "rb")) as fo:\n buf = fo.read()\n\n with open(target_filename, "wb") as fo:\n fo.write(buf)\n\n\ndef _zlib_file_decompress(source_filename, target_filename):\n """Decompress a zlib file."""\n with open(source_filename, "rb") as fo:\n buf = zlib.decompress(fo.read())\n\n with open(target_filename, "wb") as fo:\n fo.write(buf)\n\n\n@parametrize(\n "extension,decompress",\n [(".z", _zlib_file_decompress), (".gz", _gzip_file_decompress)],\n)\ndef test_load_externally_decompressed_files(tmpdir, extension, decompress):\n # Test that BinaryZlibFile generates valid gzip and zlib compressed files.\n obj = "a string to persist"\n filename_raw = tmpdir.join("test.pkl").strpath\n\n filename_compressed = filename_raw + extension\n # Use automatic extension detection to compress with the right method.\n numpy_pickle.dump(obj, filename_compressed)\n\n # Decompress with the corresponding method\n decompress(filename_compressed, filename_raw)\n\n # Test that the uncompressed pickle can be loaded and\n # that the result is correct.\n obj_reloaded = numpy_pickle.load(filename_raw)\n assert obj == obj_reloaded\n\n\n@parametrize(\n "extension,cmethod",\n # valid compressor extensions\n [\n (".z", "zlib"),\n (".gz", "gzip"),\n (".bz2", "bz2"),\n (".lzma", "lzma"),\n (".xz", "xz"),\n # invalid compressor extensions\n (".pkl", "not-compressed"),\n ("", "not-compressed"),\n ],\n)\ndef test_compression_using_file_extension(tmpdir, extension, cmethod):\n if cmethod in ("lzma", "xz") and lzma is None:\n 
pytest.skip("lzma is missing")\n # test that compression method corresponds to the given filename extension.\n filename = tmpdir.join("test.pkl").strpath\n obj = "object to dump"\n\n dump_fname = filename + extension\n numpy_pickle.dump(obj, dump_fname)\n # Verify the file contains the right magic number\n with open(dump_fname, "rb") as f:\n assert _detect_compressor(f) == cmethod\n # Verify the reloaded object is correct\n obj_reloaded = numpy_pickle.load(dump_fname)\n assert isinstance(obj_reloaded, type(obj))\n assert obj_reloaded == obj\n\n\n@with_numpy\ndef test_file_handle_persistence(tmpdir):\n objs = [np.random.random((10, 10)), "some data"]\n fobjs = [bz2.BZ2File, gzip.GzipFile]\n if lzma is not None:\n fobjs += [lzma.LZMAFile]\n filename = tmpdir.join("test.pkl").strpath\n\n for obj in objs:\n for fobj in fobjs:\n with fobj(filename, "wb") as f:\n numpy_pickle.dump(obj, f)\n\n # using the same decompressor prevents from internally\n # decompress again.\n with fobj(filename, "rb") as f:\n obj_reloaded = numpy_pickle.load(f)\n\n # when needed, the correct decompressor should be used when\n # passing a raw file handle.\n with open(filename, "rb") as f:\n obj_reloaded_2 = numpy_pickle.load(f)\n\n if isinstance(obj, np.ndarray):\n np.testing.assert_array_equal(obj_reloaded, obj)\n np.testing.assert_array_equal(obj_reloaded_2, obj)\n else:\n assert obj_reloaded == obj\n assert obj_reloaded_2 == obj\n\n\n@with_numpy\ndef test_in_memory_persistence():\n objs = [np.random.random((10, 10)), "some data"]\n for obj in objs:\n f = io.BytesIO()\n numpy_pickle.dump(obj, f)\n obj_reloaded = numpy_pickle.load(f)\n if isinstance(obj, np.ndarray):\n np.testing.assert_array_equal(obj_reloaded, obj)\n else:\n assert obj_reloaded == obj\n\n\n@with_numpy\ndef test_file_handle_persistence_mmap(tmpdir):\n obj = np.random.random((10, 10))\n filename = tmpdir.join("test.pkl").strpath\n\n with open(filename, "wb") as f:\n numpy_pickle.dump(obj, f)\n\n with open(filename, "rb") as f:\n obj_reloaded = numpy_pickle.load(f, mmap_mode="r+")\n\n np.testing.assert_array_equal(obj_reloaded, obj)\n\n\n@with_numpy\ndef test_file_handle_persistence_compressed_mmap(tmpdir):\n obj = np.random.random((10, 10))\n filename = tmpdir.join("test.pkl").strpath\n\n with open(filename, "wb") as f:\n numpy_pickle.dump(obj, f, compress=("gzip", 3))\n\n with closing(gzip.GzipFile(filename, "rb")) as f:\n with warns(UserWarning) as warninfo:\n numpy_pickle.load(f, mmap_mode="r+")\n assert len(warninfo) == 1\n assert (\n str(warninfo[0].message)\n == '"%(fileobj)r" is not a raw file, mmap_mode "%(mmap_mode)s" '\n "flag will be ignored." % {"fileobj": f, "mmap_mode": "r+"}\n )\n\n\n@with_numpy\ndef test_file_handle_persistence_in_memory_mmap():\n obj = np.random.random((10, 10))\n buf = io.BytesIO()\n\n numpy_pickle.dump(obj, buf)\n\n with warns(UserWarning) as warninfo:\n numpy_pickle.load(buf, mmap_mode="r+")\n assert len(warninfo) == 1\n assert (\n str(warninfo[0].message)\n == "In memory persistence is not compatible with mmap_mode "\n '"%(mmap_mode)s" flag passed. mmap_mode option will be '\n "ignored." 
% {"mmap_mode": "r+"}\n )\n\n\n@parametrize(\n "data",\n [\n b"a little data as bytes.",\n # More bytes\n 10000 * "{}".format(random.randint(0, 1000) * 1000).encode("latin-1"),\n ],\n ids=["a little data as bytes.", "a large data as bytes."],\n)\n@parametrize("compress_level", [1, 3, 9])\ndef test_binary_zlibfile(tmpdir, data, compress_level):\n filename = tmpdir.join("test.pkl").strpath\n # Regular cases\n with open(filename, "wb") as f:\n with BinaryZlibFile(f, "wb", compresslevel=compress_level) as fz:\n assert fz.writable()\n fz.write(data)\n assert fz.fileno() == f.fileno()\n with raises(io.UnsupportedOperation):\n fz._check_can_read()\n\n with raises(io.UnsupportedOperation):\n fz._check_can_seek()\n assert fz.closed\n with raises(ValueError):\n fz._check_not_closed()\n\n with open(filename, "rb") as f:\n with BinaryZlibFile(f) as fz:\n assert fz.readable()\n assert fz.seekable()\n assert fz.fileno() == f.fileno()\n assert fz.read() == data\n with raises(io.UnsupportedOperation):\n fz._check_can_write()\n assert fz.seekable()\n fz.seek(0)\n assert fz.tell() == 0\n assert fz.closed\n\n # Test with a filename as input\n with BinaryZlibFile(filename, "wb", compresslevel=compress_level) as fz:\n assert fz.writable()\n fz.write(data)\n\n with BinaryZlibFile(filename, "rb") as fz:\n assert fz.read() == data\n assert fz.seekable()\n\n # Test without context manager\n fz = BinaryZlibFile(filename, "wb", compresslevel=compress_level)\n assert fz.writable()\n fz.write(data)\n fz.close()\n\n fz = BinaryZlibFile(filename, "rb")\n assert fz.read() == data\n fz.close()\n\n\n@parametrize("bad_value", [-1, 10, 15, "a", (), {}])\ndef test_binary_zlibfile_bad_compression_levels(tmpdir, bad_value):\n filename = tmpdir.join("test.pkl").strpath\n with raises(ValueError) as excinfo:\n BinaryZlibFile(filename, "wb", compresslevel=bad_value)\n pattern = re.escape(\n "'compresslevel' must be an integer between 1 and 9. 
"\n "You provided 'compresslevel={}'".format(bad_value)\n )\n excinfo.match(pattern)\n\n\n@parametrize("bad_mode", ["a", "x", "r", "w", 1, 2])\ndef test_binary_zlibfile_invalid_modes(tmpdir, bad_mode):\n filename = tmpdir.join("test.pkl").strpath\n with raises(ValueError) as excinfo:\n BinaryZlibFile(filename, bad_mode)\n excinfo.match("Invalid mode")\n\n\n@parametrize("bad_file", [1, (), {}])\ndef test_binary_zlibfile_invalid_filename_type(bad_file):\n with raises(TypeError) as excinfo:\n BinaryZlibFile(bad_file, "rb")\n excinfo.match("filename must be a str or bytes object, or a file")\n\n\n###############################################################################\n# Test dumping array subclasses\nif np is not None:\n\n class SubArray(np.ndarray):\n def __reduce__(self):\n return _load_sub_array, (np.asarray(self),)\n\n def _load_sub_array(arr):\n d = SubArray(arr.shape)\n d[:] = arr\n return d\n\n class ComplexTestObject:\n """A complex object containing numpy arrays as attributes."""\n\n def __init__(self):\n self.array_float = np.arange(100, dtype="float64")\n self.array_int = np.ones(100, dtype="int32")\n self.array_obj = np.array(["a", 10, 20.0], dtype="object")\n\n\n@with_numpy\ndef test_numpy_subclass(tmpdir):\n filename = tmpdir.join("test.pkl").strpath\n a = SubArray((10,))\n numpy_pickle.dump(a, filename)\n c = numpy_pickle.load(filename)\n assert isinstance(c, SubArray)\n np.testing.assert_array_equal(c, a)\n\n\ndef test_pathlib(tmpdir):\n filename = tmpdir.join("test.pkl").strpath\n value = 123\n numpy_pickle.dump(value, Path(filename))\n assert numpy_pickle.load(filename) == value\n numpy_pickle.dump(value, filename)\n assert numpy_pickle.load(Path(filename)) == value\n\n\n@with_numpy\ndef test_non_contiguous_array_pickling(tmpdir):\n filename = tmpdir.join("test.pkl").strpath\n\n for array in [ # Array that triggers a contiguousness issue with nditer,\n # see https://github.com/joblib/joblib/pull/352 and see\n # https://github.com/joblib/joblib/pull/353\n np.asfortranarray([[1, 2], [3, 4]])[1:],\n # Non contiguous array with works fine with nditer\n np.ones((10, 50, 20), order="F")[:, :1, :],\n ]:\n assert not array.flags.c_contiguous\n assert not array.flags.f_contiguous\n numpy_pickle.dump(array, filename)\n array_reloaded = numpy_pickle.load(filename)\n np.testing.assert_array_equal(array_reloaded, array)\n\n\n@with_numpy\ndef test_pickle_highest_protocol(tmpdir):\n # ensure persistence of a numpy array is valid even when using\n # the pickle HIGHEST_PROTOCOL.\n # see https://github.com/joblib/joblib/issues/362\n\n filename = tmpdir.join("test.pkl").strpath\n test_array = np.zeros(10)\n\n numpy_pickle.dump(test_array, filename, protocol=pickle.HIGHEST_PROTOCOL)\n array_reloaded = numpy_pickle.load(filename)\n\n np.testing.assert_array_equal(array_reloaded, test_array)\n\n\n@with_numpy\ndef test_pickle_in_socket():\n # test that joblib can pickle in sockets\n test_array = np.arange(10)\n _ADDR = ("localhost", 12345)\n listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listener.bind(_ADDR)\n listener.listen(1)\n\n with socket.create_connection(_ADDR) as client:\n server, client_addr = listener.accept()\n\n with server.makefile("wb") as sf:\n numpy_pickle.dump(test_array, sf)\n\n with client.makefile("rb") as cf:\n array_reloaded = numpy_pickle.load(cf)\n\n np.testing.assert_array_equal(array_reloaded, test_array)\n\n # Check that a byte-aligned numpy array written in a file can be send\n # over a socket and then read on the other side\n bytes_to_send = 
io.BytesIO()\n numpy_pickle.dump(test_array, bytes_to_send)\n server.send(bytes_to_send.getvalue())\n\n with client.makefile("rb") as cf:\n array_reloaded = numpy_pickle.load(cf)\n\n np.testing.assert_array_equal(array_reloaded, test_array)\n\n\n@with_numpy\ndef test_load_memmap_with_big_offset(tmpdir):\n # Test that numpy memmap offset is set correctly if greater than\n # mmap.ALLOCATIONGRANULARITY, see\n # https://github.com/joblib/joblib/issues/451 and\n # https://github.com/numpy/numpy/pull/8443 for more details.\n fname = tmpdir.join("test.mmap").strpath\n size = mmap.ALLOCATIONGRANULARITY\n obj = [np.zeros(size, dtype="uint8"), np.ones(size, dtype="uint8")]\n numpy_pickle.dump(obj, fname)\n memmaps = numpy_pickle.load(fname, mmap_mode="r")\n assert isinstance(memmaps[1], np.memmap)\n assert memmaps[1].offset > size\n np.testing.assert_array_equal(obj, memmaps)\n\n\ndef test_register_compressor(tmpdir):\n # Check that registering compressor file works.\n compressor_name = "test-name"\n compressor_prefix = "test-prefix"\n\n class BinaryCompressorTestFile(io.BufferedIOBase):\n pass\n\n class BinaryCompressorTestWrapper(CompressorWrapper):\n def __init__(self):\n CompressorWrapper.__init__(\n self, obj=BinaryCompressorTestFile, prefix=compressor_prefix\n )\n\n register_compressor(compressor_name, BinaryCompressorTestWrapper())\n\n assert _COMPRESSORS[compressor_name].fileobj_factory == BinaryCompressorTestFile\n assert _COMPRESSORS[compressor_name].prefix == compressor_prefix\n\n # Remove this dummy compressor file from extra compressors because other\n # tests might fail because of this\n _COMPRESSORS.pop(compressor_name)\n\n\n@parametrize("invalid_name", [1, (), {}])\ndef test_register_compressor_invalid_name(invalid_name):\n # Test that registering an invalid compressor name is not allowed.\n with raises(ValueError) as excinfo:\n register_compressor(invalid_name, None)\n excinfo.match("Compressor name should be a string")\n\n\ndef test_register_compressor_invalid_fileobj():\n # Test that registering an invalid file object is not allowed.\n\n class InvalidFileObject:\n pass\n\n class InvalidFileObjectWrapper(CompressorWrapper):\n def __init__(self):\n CompressorWrapper.__init__(self, obj=InvalidFileObject, prefix=b"prefix")\n\n with raises(ValueError) as excinfo:\n register_compressor("invalid", InvalidFileObjectWrapper())\n\n excinfo.match(\n "Compressor 'fileobj_factory' attribute should implement "\n "the file object interface"\n )\n\n\nclass AnotherZlibCompressorWrapper(CompressorWrapper):\n def __init__(self):\n CompressorWrapper.__init__(self, obj=BinaryZlibFile, prefix=b"prefix")\n\n\nclass StandardLibGzipCompressorWrapper(CompressorWrapper):\n def __init__(self):\n CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b"prefix")\n\n\ndef test_register_compressor_already_registered():\n # Test registration of existing compressor files.\n compressor_name = "test-name"\n\n # register a test compressor\n register_compressor(compressor_name, AnotherZlibCompressorWrapper())\n\n with raises(ValueError) as excinfo:\n register_compressor(compressor_name, StandardLibGzipCompressorWrapper())\n excinfo.match("Compressor '{}' already registered.".format(compressor_name))\n\n register_compressor(compressor_name, StandardLibGzipCompressorWrapper(), force=True)\n\n assert compressor_name in _COMPRESSORS\n assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile\n\n # Remove this dummy compressor file from extra compressors because other\n # tests might fail because of this\n 
_COMPRESSORS.pop(compressor_name)\n\n\n@with_lz4\ndef test_lz4_compression(tmpdir):\n # Check that lz4 can be used when dependency is available.\n import lz4.frame\n\n compressor = "lz4"\n assert compressor in _COMPRESSORS\n assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile\n\n fname = tmpdir.join("test.pkl").strpath\n data = "test data"\n numpy_pickle.dump(data, fname, compress=compressor)\n\n with open(fname, "rb") as f:\n assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX\n assert numpy_pickle.load(fname) == data\n\n # Test that LZ4 is applied based on file extension\n numpy_pickle.dump(data, fname + ".lz4")\n with open(fname, "rb") as f:\n assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX\n assert numpy_pickle.load(fname) == data\n\n\n@without_lz4\ndef test_lz4_compression_without_lz4(tmpdir):\n # Check that lz4 cannot be used when dependency is not available.\n fname = tmpdir.join("test.nolz4").strpath\n data = "test data"\n msg = LZ4_NOT_INSTALLED_ERROR\n with raises(ValueError) as excinfo:\n numpy_pickle.dump(data, fname, compress="lz4")\n excinfo.match(msg)\n\n with raises(ValueError) as excinfo:\n numpy_pickle.dump(data, fname + ".lz4")\n excinfo.match(msg)\n\n\nprotocols = [pickle.DEFAULT_PROTOCOL]\nif pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL:\n protocols.append(pickle.HIGHEST_PROTOCOL)\n\n\n@with_numpy\n@parametrize("protocol", protocols)\ndef test_memmap_alignment_padding(tmpdir, protocol):\n # Test that memmaped arrays returned by numpy.load are correctly aligned\n fname = tmpdir.join("test.mmap").strpath\n\n a = np.random.randn(2)\n numpy_pickle.dump(a, fname, protocol=protocol)\n memmap = numpy_pickle.load(fname, mmap_mode="r")\n assert isinstance(memmap, np.memmap)\n np.testing.assert_array_equal(a, memmap)\n assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0\n assert memmap.flags.aligned\n\n array_list = [\n np.random.randn(2),\n np.random.randn(2),\n np.random.randn(2),\n np.random.randn(2),\n ]\n\n # On Windows OSError 22 if reusing the same path for memmap ...\n fname = tmpdir.join("test1.mmap").strpath\n numpy_pickle.dump(array_list, fname, protocol=protocol)\n l_reloaded = numpy_pickle.load(fname, mmap_mode="r")\n\n for idx, memmap in enumerate(l_reloaded):\n assert isinstance(memmap, np.memmap)\n np.testing.assert_array_equal(array_list[idx], memmap)\n assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0\n assert memmap.flags.aligned\n\n array_dict = {\n "a0": np.arange(2, dtype=np.uint8),\n "a1": np.arange(3, dtype=np.uint8),\n "a2": np.arange(5, dtype=np.uint8),\n "a3": np.arange(7, dtype=np.uint8),\n "a4": np.arange(11, dtype=np.uint8),\n "a5": np.arange(13, dtype=np.uint8),\n "a6": np.arange(17, dtype=np.uint8),\n "a7": np.arange(19, dtype=np.uint8),\n "a8": np.arange(23, dtype=np.uint8),\n }\n\n # On Windows OSError 22 if reusing the same path for memmap ...\n fname = tmpdir.join("test2.mmap").strpath\n numpy_pickle.dump(array_dict, fname, protocol=protocol)\n d_reloaded = numpy_pickle.load(fname, mmap_mode="r")\n\n for key, memmap in d_reloaded.items():\n assert isinstance(memmap, np.memmap)\n np.testing.assert_array_equal(array_dict[key], memmap)\n assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0\n assert memmap.flags.aligned\n
.venv\Lib\site-packages\joblib\test\test_numpy_pickle.py
test_numpy_pickle.py
Python
42,130
0.95
0.112653
0.121364
python-kit
389
2025-02-12T16:29:07.775525
MIT
true
56d8f4d09a4a2c84199ea1693041b648
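The test file above exercises joblib's numpy-aware persistence: dump/load round-trips, compression selected by file extension or by an explicit (method, level) tuple, and memory-mapped loading. As a quick orientation, here is a minimal sketch of that public API; the paths and array contents are illustrative placeholders, not values taken from the tests.

import numpy as np
import joblib

data = {"weights": np.arange(10, dtype="float64"), "name": "demo"}

# Plain dump/load round-trip (the path is a placeholder).
joblib.dump(data, "/tmp/demo.pkl")
restored = joblib.load("/tmp/demo.pkl")
assert np.array_equal(restored["weights"], data["weights"])

# Compressed dump: the extension picks the compressor (".gz" -> gzip),
# or an explicit compress=(method, level) tuple can be passed.
joblib.dump(data, "/tmp/demo.pkl.gz", compress=("gzip", 3))

# Memory-mapped load: fixed-size numpy arrays come back as np.memmap views,
# while object-dtype data is loaded into memory as usual.
mapped = joblib.load("/tmp/demo.pkl", mmap_mode="r")
assert isinstance(mapped["weights"], np.memmap)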
"""Test the old numpy pickler, compatibility version."""\n\n# numpy_pickle is not a drop-in replacement of pickle, as it takes\n# filenames instead of open files as arguments.\nfrom joblib import numpy_pickle_compat\n\n\ndef test_z_file(tmpdir):\n # Test saving and loading data with Zfiles.\n filename = tmpdir.join("test.pkl").strpath\n data = numpy_pickle_compat.asbytes("Foo, \n Bar, baz, \n\nfoobar")\n with open(filename, "wb") as f:\n numpy_pickle_compat.write_zfile(f, data)\n with open(filename, "rb") as f:\n data_read = numpy_pickle_compat.read_zfile(f)\n assert data == data_read\n
.venv\Lib\site-packages\joblib\test\test_numpy_pickle_compat.py
test_numpy_pickle_compat.py
Python
609
0.95
0.0625
0.230769
react-lib
832
2024-11-30T22:03:31.748526
MIT
true
9f42d544e33d95e5eee3897c01cdb025
from joblib.compressor import BinaryZlibFile\nfrom joblib.testing import parametrize\n\n\n@parametrize("filename", ["test", "test"]) # testing str and unicode names\ndef test_binary_zlib_file(tmpdir, filename):\n """Testing creation of files depending on the type of the filenames."""\n binary_file = BinaryZlibFile(tmpdir.join(filename).strpath, mode="wb")\n binary_file.close()\n
.venv\Lib\site-packages\joblib\test\test_numpy_pickle_utils.py
test_numpy_pickle_utils.py
Python
382
0.95
0.111111
0
python-kit
861
2024-08-07T20:04:53.495373
Apache-2.0
true
a4e3a6476cd1f8b49a5616528af87dd3
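BinaryZlibFile, instantiated above and exercised more thoroughly in test_binary_zlibfile earlier in this section, is a file-like wrapper around a raw zlib stream. A small sketch of a write/read round-trip, with an illustrative path:

from joblib.compressor import BinaryZlibFile

payload = b"some bytes worth compressing" * 100
path = "/tmp/demo.pkl.z"  # placeholder path

# Write a zlib-compressed stream, then read it back.
with BinaryZlibFile(path, "wb", compresslevel=3) as fz:
    fz.write(payload)

with BinaryZlibFile(path, "rb") as fz:
    assert fz.read() == payload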
try:\n # Python 2.7: use the C pickle to speed up\n # test_concurrency_safe_write which pickles big python objects\n import cPickle as cpickle\nexcept ImportError:\n import pickle as cpickle\nimport functools\nimport time\nfrom pickle import PicklingError\n\nimport pytest\n\nfrom joblib import Parallel, delayed\nfrom joblib._store_backends import (\n CacheWarning,\n FileSystemStoreBackend,\n concurrency_safe_write,\n)\nfrom joblib.backports import concurrency_safe_rename\nfrom joblib.test.common import with_multiprocessing\nfrom joblib.testing import parametrize, timeout\n\n\ndef write_func(output, filename):\n with open(filename, "wb") as f:\n cpickle.dump(output, f)\n\n\ndef load_func(expected, filename):\n for i in range(10):\n try:\n with open(filename, "rb") as f:\n reloaded = cpickle.load(f)\n break\n except (OSError, IOError):\n # On Windows you can have WindowsError ([Error 5] Access\n # is denied or [Error 13] Permission denied) when reading the file,\n # probably because a writer process has a lock on the file\n time.sleep(0.1)\n else:\n raise\n assert expected == reloaded\n\n\ndef concurrency_safe_write_rename(to_write, filename, write_func):\n temporary_filename = concurrency_safe_write(to_write, filename, write_func)\n concurrency_safe_rename(temporary_filename, filename)\n\n\n@timeout(0) # No timeout as this test can be long\n@with_multiprocessing\n@parametrize("backend", ["multiprocessing", "loky", "threading"])\ndef test_concurrency_safe_write(tmpdir, backend):\n # Add one item to cache\n filename = tmpdir.join("test.pkl").strpath\n\n obj = {str(i): i for i in range(int(1e5))}\n funcs = [\n functools.partial(concurrency_safe_write_rename, write_func=write_func)\n if i % 3 != 2\n else load_func\n for i in range(12)\n ]\n Parallel(n_jobs=2, backend=backend)(delayed(func)(obj, filename) for func in funcs)\n\n\ndef test_warning_on_dump_failure(tmpdir):\n # Check that a warning is raised when the dump fails for any reason but\n # a PicklingError.\n class UnpicklableObject(object):\n def __reduce__(self):\n raise RuntimeError("some exception")\n\n backend = FileSystemStoreBackend()\n backend.location = tmpdir.join("test_warning_on_pickling_error").strpath\n backend.compress = None\n\n with pytest.warns(CacheWarning, match="some exception"):\n backend.dump_item("testpath", UnpicklableObject())\n\n\ndef test_warning_on_pickling_error(tmpdir):\n # This is separate from test_warning_on_dump_failure because in the\n # future we will turn this into an exception.\n class UnpicklableObject(object):\n def __reduce__(self):\n raise PicklingError("not picklable")\n\n backend = FileSystemStoreBackend()\n backend.location = tmpdir.join("test_warning_on_pickling_error").strpath\n backend.compress = None\n\n with pytest.warns(FutureWarning, match="not picklable"):\n backend.dump_item("testpath", UnpicklableObject())\n
.venv\Lib\site-packages\joblib\test\test_store_backends.py
test_store_backends.py
Python
3,057
0.95
0.191489
0.133333
vue-tools
802
2023-07-14T12:02:41.758554
Apache-2.0
true
5280f57f39c10b5c7c526da2a25d7658
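test_concurrency_safe_write above relies on a write-to-a-temporary-file-then-rename pattern, so concurrent readers only ever observe a complete pickle. A sketch of that pattern using the same helpers the test imports; the payload and target path are illustrative:

import pickle

from joblib._store_backends import concurrency_safe_write
from joblib.backports import concurrency_safe_rename


def write_func(obj, path):
    # Serialize the object to the (temporary) path handed to us.
    with open(path, "wb") as f:
        pickle.dump(obj, f)


obj = {"answer": 42}
target = "/tmp/item.pkl"  # placeholder path

# Dump to a uniquely named temporary file, then move it into place; the
# rename means readers see either the old or the new file, never a partial write.
temporary_name = concurrency_safe_write(obj, target, write_func)
concurrency_safe_rename(temporary_name, target)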
import re\nimport sys\n\nfrom joblib.testing import check_subprocess_call, raises\n\n\ndef test_check_subprocess_call():\n code = "\n".join(\n ["result = 1 + 2 * 3", "print(result)", "my_list = [1, 2, 3]", "print(my_list)"]\n )\n\n check_subprocess_call([sys.executable, "-c", code])\n\n # Now checking stdout with a regex\n check_subprocess_call(\n [sys.executable, "-c", code],\n # Regex needed for platform-specific line endings\n stdout_regex=r"7\s{1,2}\[1, 2, 3\]",\n )\n\n\ndef test_check_subprocess_call_non_matching_regex():\n code = "42"\n non_matching_pattern = "_no_way_this_matches_anything_"\n\n with raises(ValueError) as excinfo:\n check_subprocess_call(\n [sys.executable, "-c", code], stdout_regex=non_matching_pattern\n )\n excinfo.match("Unexpected stdout.+{}".format(non_matching_pattern))\n\n\ndef test_check_subprocess_call_wrong_command():\n wrong_command = "_a_command_that_does_not_exist_"\n with raises(OSError):\n check_subprocess_call([wrong_command])\n\n\ndef test_check_subprocess_call_non_zero_return_code():\n code_with_non_zero_exit = "\n".join(\n [\n "import sys",\n 'print("writing on stdout")',\n 'sys.stderr.write("writing on stderr")',\n "sys.exit(123)",\n ]\n )\n\n pattern = re.compile(\n "Non-zero return code: 123.+"\n "Stdout:\nwriting on stdout.+"\n "Stderr:\nwriting on stderr",\n re.DOTALL,\n )\n\n with raises(ValueError) as excinfo:\n check_subprocess_call([sys.executable, "-c", code_with_non_zero_exit])\n excinfo.match(pattern)\n\n\ndef test_check_subprocess_call_timeout():\n code_timing_out = "\n".join(\n [\n "import time",\n "import sys",\n 'print("before sleep on stdout")',\n "sys.stdout.flush()",\n 'sys.stderr.write("before sleep on stderr")',\n "sys.stderr.flush()",\n # We need to sleep for at least 2 * timeout seconds in case the SIGKILL\n # is triggered.\n "time.sleep(10)",\n 'print("process should have be killed before")',\n "sys.stdout.flush()",\n ]\n )\n\n pattern = re.compile(\n "Non-zero return code:.+"\n "Stdout:\nbefore sleep on stdout\\s+"\n "Stderr:\nbefore sleep on stderr",\n re.DOTALL,\n )\n\n with raises(ValueError) as excinfo:\n check_subprocess_call([sys.executable, "-c", code_timing_out], timeout=1)\n excinfo.match(pattern)\n
.venv\Lib\site-packages\joblib\test\test_testing.py
test_testing.py
Python
2,520
0.95
0.08046
0.057971
awesome-app
910
2024-08-06T05:29:02.349761
Apache-2.0
true
35f23e734228561b13e6dac8cd1a0f1d
"""\nThis script is used to generate test data for joblib/test/test_numpy_pickle.py\n"""\n\nimport re\nimport sys\n\n# pytest needs to be able to import this module even when numpy is\n# not installed\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\nimport joblib\n\n\ndef get_joblib_version(joblib_version=joblib.__version__):\n """Normalize joblib version by removing suffix.\n\n >>> get_joblib_version('0.8.4')\n '0.8.4'\n >>> get_joblib_version('0.8.4b1')\n '0.8.4'\n >>> get_joblib_version('0.9.dev0')\n '0.9'\n """\n matches = [re.match(r"(\d+).*", each) for each in joblib_version.split(".")]\n return ".".join([m.group(1) for m in matches if m is not None])\n\n\ndef write_test_pickle(to_pickle, args):\n kwargs = {}\n compress = args.compress\n method = args.method\n joblib_version = get_joblib_version()\n py_version = "{0[0]}{0[1]}".format(sys.version_info)\n numpy_version = "".join(np.__version__.split(".")[:2])\n\n # The game here is to generate the right filename according to the options.\n body = "_compressed" if (compress and method == "zlib") else ""\n if compress:\n if method == "zlib":\n kwargs["compress"] = True\n extension = ".gz"\n else:\n kwargs["compress"] = (method, 3)\n extension = ".pkl.{}".format(method)\n if args.cache_size:\n kwargs["cache_size"] = 0\n body += "_cache_size"\n else:\n extension = ".pkl"\n\n pickle_filename = "joblib_{}{}_pickle_py{}_np{}{}".format(\n joblib_version, body, py_version, numpy_version, extension\n )\n\n try:\n joblib.dump(to_pickle, pickle_filename, **kwargs)\n except Exception as e:\n # With old python version (=< 3.3.), we can arrive there when\n # dumping compressed pickle with LzmaFile.\n print(\n "Error: cannot generate file '{}' with arguments '{}'. "\n "Error was: {}".format(pickle_filename, kwargs, e)\n )\n else:\n print("File '{}' generated successfully.".format(pickle_filename))\n\n\nif __name__ == "__main__":\n import argparse\n\n parser = argparse.ArgumentParser(description="Joblib pickle data generator.")\n parser.add_argument(\n "--cache_size",\n action="store_true",\n help="Force creation of companion numpy files for pickled arrays.",\n )\n parser.add_argument(\n "--compress", action="store_true", help="Generate compress pickles."\n )\n parser.add_argument(\n "--method",\n type=str,\n default="zlib",\n choices=["zlib", "gzip", "bz2", "xz", "lzma", "lz4"],\n help="Set compression method.",\n )\n # We need to be specific about dtypes in particular endianness\n # because the pickles can be generated on one architecture and\n # the tests run on another one. See\n # https://github.com/joblib/joblib/issues/279.\n to_pickle = [\n np.arange(5, dtype=np.dtype("<i8")),\n np.arange(5, dtype=np.dtype("<f8")),\n np.array([1, "abc", {"a": 1, "b": 2}], dtype="O"),\n # all possible bytes as a byte string\n np.arange(256, dtype=np.uint8).tobytes(),\n np.matrix([0, 1, 2], dtype=np.dtype("<i8")),\n # unicode string with non-ascii chars\n "C'est l'\xe9t\xe9 !",\n ]\n\n write_test_pickle(to_pickle, parser.parse_args())\n
.venv\Lib\site-packages\joblib\test\data\create_numpy_pickle.py
create_numpy_pickle.py
Python
3,334
0.95
0.132075
0.120879
awesome-app
41
2024-01-04T18:53:18.905097
Apache-2.0
true
9169fed6fab46af5a6b207c674c20180
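create_numpy_pickle.py above encodes the joblib, Python, and numpy versions into each fixture's file name, which is how _check_pickle in the test file recovers the writing Python version with a regex. A sketch of the naming scheme with example version numbers (the values are illustrative; the resulting name matches the first data file listed below):

# Illustration of the fixture-naming scheme used by write_test_pickle above.
joblib_version = "0.10.0"
body = "_compressed"   # only present for zlib-compressed dumps
py_version = "27"      # "{0[0]}{0[1]}".format(sys.version_info)
numpy_version = "16"   # first two components of np.__version__
extension = ".gz"

pickle_filename = "joblib_{}{}_pickle_py{}_np{}{}".format(
    joblib_version, body, py_version, numpy_version, extension
)
assert pickle_filename == "joblib_0.10.0_compressed_pickle_py27_np16.gz"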

.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_compressed_pickle_py27_np16.gz
joblib_0.10.0_compressed_pickle_py27_np16.gz
Other
769
0.7
0
0.076923
react-lib
405
2023-07-29T13:24:10.186492
GPL-3.0
true
d5461d3f1500d45f2dd3861b364d0f55
x^k`-dHOL+-/LIq+Ë \nR\n5 j 5BYSJ* R YZ<BPLB6ooBB\noPFBN???@
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_compressed_pickle_py27_np17.gz
joblib_0.10.0_compressed_pickle_py27_np17.gz
Other
757
0.7
0
0.076923
awesome-app
855
2025-04-16T14:56:48.391884
MIT
true
29d9c40badc827b9ab048c71cc4dcb43
x^eSexPn]&a+Vn0<PXQ4B*7M!3}wwww|cp{=$u(tO)p'Zv`fqQ<v3<RA*\nf@CK0iP#{ \n\\nq5D!l67B\n0q>VtCpÒ({MPB'nx)#=3:mt S<{)RbPQGV,+rY\n+dȍ+Ci$0/Yu#NF\nqi>"md҂(ǴP8iAZ YI* v +.Gུ_"Tvڃ1Ngh
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_compressed_pickle_py33_np18.gz
joblib_0.10.0_compressed_pickle_py33_np18.gz
Other
792
0.8
0
0
react-lib
478
2023-10-06T05:21:57.874532
GPL-3.0
true
baa1e0ec4d0086286ff4485fe6b79196
x^eSexPn]&a+Vn0<PXQ4B*7M\n}$Cv佗s&@a{(N^;ӽ83z<O&vI1PJFBP(lB\n-I+ \nc
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_compressed_pickle_py34_np19.gz
joblib_0.10.0_compressed_pickle_py34_np19.gz
Other
794
0.8
0
0
react-lib
698
2024-12-24T08:25:52.981739
BSD-3-Clause
true
e22873e20bdbbdc125863a9fd8f85587
x^uSe|P]I;؆+C76tEd$\nʥ)00dw)2>K/w/E2=A0V`wۜfPQgY<9ŝNE@^z (<c*Lq'>*T kXPcr~; !҈CSGh@P*OQc4 ? C#a6m( ].y\`@\n!`\nfoYy<WObBh5KkhehX ϒ/?Ak3hTe#VoŊ<PP
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_compressed_pickle_py35_np19.gz
joblib_0.10.0_compressed_pickle_py35_np19.gz
Other
790
0.8
0
0
awesome-app
109
2024-06-03T01:05:20.176986
GPL-3.0
true
3b2f88f7c67109732e821e7ea0cc3bcb
]q
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_pickle_py27_np17.pkl
joblib_0.10.0_pickle_py27_np17.pkl
Other
986
0.8
0
0
vue-tools
938
2024-10-30T22:04:42.358999
Apache-2.0
true
ae9f75a3f2b794d2ddd5eeef342d1505
BZh91AY&SYrG(
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_pickle_py27_np17.pkl.bz2
joblib_0.10.0_pickle_py27_np17.pkl.bz2
Other
997
0.8
0
0
react-lib
506
2024-11-19T21:22:41.544703
MIT
true
d5b74cc90e3d917f3775f6299d635e8c
zV
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_pickle_py27_np17.pkl.gzip
joblib_0.10.0_pickle_py27_np17.pkl.gzip
Other
798
0.7
0
0.076923
react-lib
639
2024-10-26T14:59:43.464818
GPL-3.0
true
8173a616d6168cfc7426c07ed618ffdc
]
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_pickle_py27_np17.pkl.lzma
joblib_0.10.0_pickle_py27_np17.pkl.lzma
Other
660
0.8
0
0
react-lib
494
2024-12-29T15:21:41.248229
BSD-3-Clause
true
c8e26aa9684533c41fb1b158c0ac0a94
7zXZ
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_pickle_py27_np17.pkl.xz
joblib_0.10.0_pickle_py27_np17.pkl.xz
Other
712
0.8
0
0
python-kit
115
2024-08-06T05:05:46.984902
BSD-3-Clause
true
24d9af538cf4ddad09aaaf4dcfdecf77
]q
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_pickle_py33_np18.pkl
joblib_0.10.0_pickle_py33_np18.pkl
Other
1,068
0.8
0
0
node-utils
406
2024-10-27T06:32:34.150426
MIT
true
e8a97d5e47bdb9647fe660751f386549
BZh91AY&SY
.venv\Lib\site-packages\joblib\test\data\joblib_0.10.0_pickle_py33_np18.pkl.bz2
joblib_0.10.0_pickle_py33_np18.pkl.bz2
Other
1,000
0.7
0
0
awesome-app
861
2023-11-25T01:56:25.782762
Apache-2.0
true
be5f6401d0fb74a856b04bad90044421