Soptq committed
Commit 13cd31f (verified) · Parent: c04ef3b

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50)
  1. .gitattributes +1 -0
  2. material/dataset/Datasets/Electron Microscopy Image Masks/TiO2_Masks_Manual_4connected_4Classes/1908284_cm.tif +3 -0
  3. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/attr/__pycache__/_cmp.cpython-310.pyc +0 -0
  4. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/attr/__pycache__/_make.cpython-310.pyc +0 -0
  5. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/__init__.py +70 -0
  6. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/access.py +562 -0
  7. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/getattr_static.py +121 -0
  8. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/mixed.py +309 -0
  9. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/subprocess/functions.py +257 -0
  10. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/value.py +626 -0
  11. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/gradual/__pycache__/base.cpython-310.pyc +0 -0
  12. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/gradual/__pycache__/generics.cpython-310.pyc +0 -0
  13. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/gradual/__pycache__/stub_value.cpython-310.pyc +0 -0
  14. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/gradual/__pycache__/typeshed.cpython-310.pyc +0 -0
  15. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc +0 -0
  16. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc +0 -0
  17. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc +0 -0
  18. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc +0 -0
  19. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc +0 -0
  20. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc +0 -0
  21. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc +0 -0
  22. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc +0 -0
  23. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc +0 -0
  24. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc +0 -0
  25. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc +0 -0
  26. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc +0 -0
  27. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc +0 -0
  28. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc +0 -0
  29. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc +0 -0
  30. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc +0 -0
  31. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc +0 -0
  32. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip +0 -0
  33. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma +0 -0
  34. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z +0 -0
  35. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_backports.py +35 -0
  36. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_config.py +151 -0
  37. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_dask.py +499 -0
  38. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_hashing.py +495 -0
  39. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_logger.py +31 -0
  40. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_memmapping.py +1191 -0
  41. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_memory.py +1526 -0
  42. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_memory_async.py +170 -0
  43. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py +32 -0
  44. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_module.py +53 -0
  45. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py +1159 -0
  46. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py +16 -0
  47. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_parallel.py +2056 -0
  48. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_store_backends.py +94 -0
  49. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/testutils.py +8 -0
  50. material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jupyter_console-6.6.3.dist-info/INSTALLER +1 -0
.gitattributes CHANGED
@@ -504,3 +504,4 @@ material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/wrapt/_wrapp
 material/dataset/Datasets/Electron[[:space:]]Microscopy[[:space:]]Image[[:space:]]Masks/TiO2_Masks_Manual_4connected_4Classes/1908250_cm.tif filter=lfs diff=lfs merge=lfs -text
 material/dataset/Datasets/Electron[[:space:]]Microscopy[[:space:]]Image[[:space:]]Masks/TiO2_Masks_TSEM/Image_Registration/Output/1908272/1908272.tif filter=lfs diff=lfs merge=lfs -text
 material/dataset/Datasets/Electron[[:space:]]Microscopy[[:space:]]Image[[:space:]]Masks/TiO2_Masks_Manual_4connected/1908308_m.tif filter=lfs diff=lfs merge=lfs -text
+material/dataset/Datasets/Electron[[:space:]]Microscopy[[:space:]]Image[[:space:]]Masks/TiO2_Masks_Manual_4connected_4Classes/1908284_cm.tif filter=lfs diff=lfs merge=lfs -text
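Note: lines of this shape are what `git lfs track "<path>"` appends to .gitattributes; the `[[:space:]]` escapes encode the spaces in the path so the pattern stays a single token. An illustrative invocation for the file added in this commit:

$ git lfs track "material/dataset/Datasets/Electron Microscopy Image Masks/TiO2_Masks_Manual_4connected_4Classes/1908284_cm.tif"

which records the `filter=lfs diff=lfs merge=lfs -text` attributes shown above.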
material/dataset/Datasets/Electron Microscopy Image Masks/TiO2_Masks_Manual_4connected_4Classes/1908284_cm.tif ADDED

Git LFS Details

  • SHA256: e44302554a21bb6b9e2bff41555e272fff11f458d6c551318ac7b4f17c8e4c99
  • Pointer size: 132 Bytes
  • Size of remote file: 2.16 MB
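Note: with Git LFS, only a small text pointer is committed; the 2.16 MB TIFF itself lives in LFS storage. The 132-byte pointer file has this shape (the `size` field holds the exact byte count, which this view rounds to 2.16 MB, so the value below is illustrative):

version https://git-lfs.github.com/spec/v1
oid sha256:e44302554a21bb6b9e2bff41555e272fff11f458d6c551318ac7b4f17c8e4c99
size 2264924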
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/attr/__pycache__/_cmp.cpython-310.pyc ADDED
Binary file (4.08 kB).
 
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/attr/__pycache__/_make.cpython-310.pyc ADDED
Binary file (69.6 kB).
 
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/__init__.py ADDED
@@ -0,0 +1,70 @@
+# This file also re-exports symbols for wider use. We configure mypy and flake8
+# to be aware that this file does this.
+
+from jedi.inference.compiled.value import CompiledValue, CompiledName, \
+    CompiledValueFilter, CompiledValueName, create_from_access_path
+from jedi.inference.base_value import LazyValueWrapper
+
+
+def builtin_from_name(inference_state, string):
+    typing_builtins_module = inference_state.builtins_module
+    if string in ('None', 'True', 'False'):
+        builtins, = typing_builtins_module.non_stub_value_set
+        filter_ = next(builtins.get_filters())
+    else:
+        filter_ = next(typing_builtins_module.get_filters())
+    name, = filter_.get(string)
+    value, = name.infer()
+    return value
+
+
+class ExactValue(LazyValueWrapper):
+    """
+    This class represents exact values, that makes operations like additions
+    and exact boolean values possible, while still being a "normal" stub.
+    """
+    def __init__(self, compiled_value):
+        self.inference_state = compiled_value.inference_state
+        self._compiled_value = compiled_value
+
+    def __getattribute__(self, name):
+        if name in ('get_safe_value', 'execute_operation', 'access_handle',
+                    'negate', 'py__bool__', 'is_compiled'):
+            return getattr(self._compiled_value, name)
+        return super().__getattribute__(name)
+
+    def _get_wrapped_value(self):
+        instance, = builtin_from_name(
+            self.inference_state, self._compiled_value.name.string_name).execute_with_values()
+        return instance
+
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self._compiled_value)
+
+
+def create_simple_object(inference_state, obj):
+    """
+    Only allows creations of objects that are easily picklable across Python
+    versions.
+    """
+    assert type(obj) in (int, float, str, bytes, slice, complex, bool), repr(obj)
+    compiled_value = create_from_access_path(
+        inference_state,
+        inference_state.compiled_subprocess.create_simple_object(obj)
+    )
+    return ExactValue(compiled_value)
+
+
+def get_string_value_set(inference_state):
+    return builtin_from_name(inference_state, 'str').execute_with_values()
+
+
+def load_module(inference_state, dotted_name, **kwargs):
+    # Temporary, some tensorflow builtins cannot be loaded, so it's tried again
+    # and again and it's really slow.
+    if dotted_name.startswith('tensorflow.'):
+        return None
+    access_path = inference_state.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
+    if access_path is None:
+        return None
+    return create_from_access_path(inference_state, access_path)
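Note: `ExactValue.__getattribute__` above forwards a fixed whitelist of names to the wrapped compiled value and falls back to normal lookup for everything else. A minimal, self-contained sketch of that delegation pattern (hypothetical `Wrapper`/`Payload` names, not jedi API):

class Wrapper:
    # Names read through the wrapped object instead of the wrapper itself.
    _FORWARDED = ('payload', 'describe')

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __getattribute__(self, name):
        if name in Wrapper._FORWARDED:
            # Bypass our own lookup and delegate to the wrapped object.
            wrapped = super().__getattribute__('_wrapped')
            return getattr(wrapped, name)
        return super().__getattribute__(name)


class Payload:
    payload = 42

    def describe(self):
        return 'payload=%d' % self.payload


w = Wrapper(Payload())
assert w.payload == 42                  # forwarded to the wrapped object
assert w.describe() == 'payload=42'     # bound method of the wrapped object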
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/access.py ADDED
@@ -0,0 +1,562 @@
+import inspect
+import types
+import traceback
+import sys
+import operator as op
+from collections import namedtuple
+import warnings
+import re
+import builtins
+import typing
+from pathlib import Path
+from typing import Optional, Tuple
+
+from jedi.inference.compiled.getattr_static import getattr_static
+
+ALLOWED_GETITEM_TYPES = (str, list, tuple, bytes, bytearray, dict)
+
+MethodDescriptorType = type(str.replace)
+# These are not considered classes and access is granted even though they have
+# a __class__ attribute.
+NOT_CLASS_TYPES = (
+    types.BuiltinFunctionType,
+    types.CodeType,
+    types.FrameType,
+    types.FunctionType,
+    types.GeneratorType,
+    types.GetSetDescriptorType,
+    types.LambdaType,
+    types.MemberDescriptorType,
+    types.MethodType,
+    types.ModuleType,
+    types.TracebackType,
+    MethodDescriptorType,
+    types.MappingProxyType,
+    types.SimpleNamespace,
+    types.DynamicClassAttribute,
+)
+
+# Those types don't exist in typing.
+MethodDescriptorType = type(str.replace)
+WrapperDescriptorType = type(set.__iter__)
+# `object.__subclasshook__` is an already executed descriptor.
+object_class_dict = type.__dict__["__dict__"].__get__(object)  # type: ignore[index]
+ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])
+
+_sentinel = object()
+
+# Maps Python syntax to the operator module.
+COMPARISON_OPERATORS = {
+    '==': op.eq,
+    '!=': op.ne,
+    'is': op.is_,
+    'is not': op.is_not,
+    '<': op.lt,
+    '<=': op.le,
+    '>': op.gt,
+    '>=': op.ge,
+}
+
+_OPERATORS = {
+    '+': op.add,
+    '-': op.sub,
+}
+_OPERATORS.update(COMPARISON_OPERATORS)
+
+ALLOWED_DESCRIPTOR_ACCESS = (
+    types.FunctionType,
+    types.GetSetDescriptorType,
+    types.MemberDescriptorType,
+    MethodDescriptorType,
+    WrapperDescriptorType,
+    ClassMethodDescriptorType,
+    staticmethod,
+    classmethod,
+)
+
+
+def safe_getattr(obj, name, default=_sentinel):
+    try:
+        attr, is_get_descriptor = getattr_static(obj, name)
+    except AttributeError:
+        if default is _sentinel:
+            raise
+        return default
+    else:
+        if isinstance(attr, ALLOWED_DESCRIPTOR_ACCESS):
+            # In case of descriptors that have get methods we cannot return
+            # its value, because that would mean code execution.
+            # Since it's an isinstance call, code execution is still possible,
+            # but this is not really a security feature, but much more of a
+            # safety feature. Code execution is basically always possible when
+            # a module is imported. This is here so people don't shoot
+            # themselves in the foot.
+            return getattr(obj, name)
+        return attr
+
+
+SignatureParam = namedtuple(
+    'SignatureParam',
+    'name has_default default default_string has_annotation annotation annotation_string kind_name'
+)
+
+
+def shorten_repr(func):
+    def wrapper(self):
+        r = func(self)
+        if len(r) > 50:
+            r = r[:50] + '..'
+        return r
+    return wrapper
+
+
+def create_access(inference_state, obj):
+    return inference_state.compiled_subprocess.get_or_create_access_handle(obj)
+
+
+def load_module(inference_state, dotted_name, sys_path):
+    temp, sys.path = sys.path, sys_path
+    try:
+        __import__(dotted_name)
+    except ImportError:
+        # If a module is "corrupt" or not really a Python module or whatever.
+        warnings.warn(
+            "Module %s not importable in path %s." % (dotted_name, sys_path),
+            UserWarning,
+            stacklevel=2,
+        )
+        return None
+    except Exception:
+        # Since __import__ pretty much makes code execution possible, just
+        # catch any error here and print it.
+        warnings.warn(
+            "Cannot import:\n%s" % traceback.format_exc(), UserWarning, stacklevel=2
+        )
+        return None
+    finally:
+        sys.path = temp
+
+    # Just access the cache after import, because of #59 as well as the very
+    # complicated import structure of Python.
+    module = sys.modules[dotted_name]
+    return create_access_path(inference_state, module)
+
+
+class AccessPath:
+    def __init__(self, accesses):
+        self.accesses = accesses
+
+
+def create_access_path(inference_state, obj) -> AccessPath:
+    access = create_access(inference_state, obj)
+    return AccessPath(access.get_access_path_tuples())
+
+
+def get_api_type(obj):
+    if inspect.isclass(obj):
+        return 'class'
+    elif inspect.ismodule(obj):
+        return 'module'
+    elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
+            or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
+        return 'function'
+    # Everything else...
+    return 'instance'
+
+
+class DirectObjectAccess:
+    def __init__(self, inference_state, obj):
+        self._inference_state = inference_state
+        self._obj = obj
+
+    def __repr__(self):
+        return '%s(%s)' % (self.__class__.__name__, self.get_repr())
+
+    def _create_access(self, obj):
+        return create_access(self._inference_state, obj)
+
+    def _create_access_path(self, obj) -> AccessPath:
+        return create_access_path(self._inference_state, obj)
+
+    def py__bool__(self):
+        return bool(self._obj)
+
+    def py__file__(self) -> Optional[Path]:
+        try:
+            return Path(self._obj.__file__)
+        except AttributeError:
+            return None
+
+    def py__doc__(self):
+        return inspect.getdoc(self._obj) or ''
+
+    def py__name__(self):
+        if not _is_class_instance(self._obj) or \
+                inspect.ismethoddescriptor(self._obj):  # slots
+            cls = self._obj
+        else:
+            try:
+                cls = self._obj.__class__
+            except AttributeError:
+                # happens with numpy.core.umath._UFUNC_API (you get it
+                # automatically by doing `import numpy`).
+                return None
+
+        try:
+            return cls.__name__
+        except AttributeError:
+            return None
+
+    def py__mro__accesses(self):
+        return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:])
+
+    def py__getitem__all_values(self):
+        if isinstance(self._obj, dict):
+            return [self._create_access_path(v) for v in self._obj.values()]
+        if isinstance(self._obj, (list, tuple)):
+            return [self._create_access_path(v) for v in self._obj]
+
+        if self.is_instance():
+            cls = DirectObjectAccess(self._inference_state, self._obj.__class__)
+            return cls.py__getitem__all_values()
+
+        try:
+            getitem = self._obj.__getitem__
+        except AttributeError:
+            pass
+        else:
+            annotation = DirectObjectAccess(self._inference_state, getitem).get_return_annotation()
+            if annotation is not None:
+                return [annotation]
+        return None
+
+    def py__simple_getitem__(self, index, *, safe=True):
+        if safe and type(self._obj) not in ALLOWED_GETITEM_TYPES:
+            # Get rid of side effects, we won't call custom `__getitem__`s.
+            return None
+
+        return self._create_access_path(self._obj[index])
+
+    def py__iter__list(self):
+        try:
+            iter_method = self._obj.__iter__
+        except AttributeError:
+            return None
+        else:
+            p = DirectObjectAccess(self._inference_state, iter_method).get_return_annotation()
+            if p is not None:
+                return [p]
+
+        if type(self._obj) not in ALLOWED_GETITEM_TYPES:
+            # Get rid of side effects, we won't call custom `__getitem__`s.
+            return []
+
+        lst = []
+        for i, part in enumerate(self._obj):
+            if i > 20:
+                # Should not go crazy with large iterators
+                break
+            lst.append(self._create_access_path(part))
+        return lst
+
+    def py__class__(self):
+        return self._create_access_path(self._obj.__class__)
+
+    def py__bases__(self):
+        return [self._create_access_path(base) for base in self._obj.__bases__]
+
+    def py__path__(self):
+        paths = getattr(self._obj, '__path__', None)
+        # Avoid some weird hacks that would just fail, because they cannot be
+        # used by pickle.
+        if not isinstance(paths, list) \
+                or not all(isinstance(p, str) for p in paths):
+            return None
+        return paths
+
+    @shorten_repr
+    def get_repr(self):
+        if inspect.ismodule(self._obj):
+            return repr(self._obj)
+        # Try to avoid execution of the property.
+        if safe_getattr(self._obj, '__module__', default='') == 'builtins':
+            return repr(self._obj)
+
+        type_ = type(self._obj)
+        if type_ == type:
+            return type.__repr__(self._obj)
+
+        if safe_getattr(type_, '__module__', default='') == 'builtins':
+            # Allow direct execution of repr for builtins.
+            return repr(self._obj)
+        return object.__repr__(self._obj)
+
+    def is_class(self):
+        return inspect.isclass(self._obj)
+
+    def is_function(self):
+        return inspect.isfunction(self._obj) or inspect.ismethod(self._obj)
+
+    def is_module(self):
+        return inspect.ismodule(self._obj)
+
+    def is_instance(self):
+        return _is_class_instance(self._obj)
+
+    def ismethoddescriptor(self):
+        return inspect.ismethoddescriptor(self._obj)
+
+    def get_qualified_names(self):
+        def try_to_get_name(obj):
+            return getattr(obj, '__qualname__', getattr(obj, '__name__', None))
+
+        if self.is_module():
+            return ()
+        name = try_to_get_name(self._obj)
+        if name is None:
+            name = try_to_get_name(type(self._obj))
+            if name is None:
+                return ()
+        return tuple(name.split('.'))
+
+    def dir(self):
+        return dir(self._obj)
+
+    def has_iter(self):
+        try:
+            iter(self._obj)
+            return True
+        except TypeError:
+            return False
+
+    def is_allowed_getattr(self, name, safe=True) -> Tuple[bool, bool, Optional[AccessPath]]:
+        # TODO this API is ugly.
+        try:
+            attr, is_get_descriptor = getattr_static(self._obj, name)
+        except AttributeError:
+            if not safe:
+                # Unsafe is mostly used to check for __getattr__/__getattribute__.
+                # getattr_static works for properties, but the underscore methods
+                # are just ignored (because it's safer and avoids more code
+                # execution). See also GH #1378.
+
+                # Avoid warnings, see comment in the next function.
+                with warnings.catch_warnings(record=True):
+                    warnings.simplefilter("always")
+                    try:
+                        return hasattr(self._obj, name), False, None
+                    except Exception:
+                        # Obviously has an attribute (probably a property) that
+                        # gets executed, so just avoid all exceptions here.
+                        pass
+            return False, False, None
+        else:
+            if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS:
+                if isinstance(attr, property):
+                    if hasattr(attr.fget, '__annotations__'):
+                        a = DirectObjectAccess(self._inference_state, attr.fget)
+                        return True, True, a.get_return_annotation()
+                # In case of descriptors that have get methods we cannot return
+                # its value, because that would mean code execution.
+                return True, True, None
+        return True, False, None
+
+    def getattr_paths(self, name, default=_sentinel):
+        try:
+            # Make sure no warnings are printed here, this is autocompletion,
+            # warnings should not be shown. See also GH #1383.
+            with warnings.catch_warnings(record=True):
+                warnings.simplefilter("always")
+                return_obj = getattr(self._obj, name)
+        except Exception as e:
+            if default is _sentinel:
+                if isinstance(e, AttributeError):
+                    # Happens e.g. in properties of
+                    # PyQt4.QtGui.QStyleOptionComboBox.currentText
+                    # -> just set it to None
+                    raise
+                # Just in case anything happens, return an AttributeError. It
+                # should not crash.
+                raise AttributeError
+            return_obj = default
+        access = self._create_access(return_obj)
+        if inspect.ismodule(return_obj):
+            return [access]
+
+        try:
+            module = return_obj.__module__
+        except AttributeError:
+            pass
+        else:
+            if module is not None and isinstance(module, str):
+                try:
+                    __import__(module)
+                    # For some modules like _sqlite3, the __module__ for classes is
+                    # different, in this case it's sqlite3. So we have to try to
+                    # load that "original" module, because it's not loaded yet. If
+                    # we don't do that, we don't really have a "parent" module and
+                    # we would fall back to builtins.
+                except ImportError:
+                    pass
+
+        module = inspect.getmodule(return_obj)
+        if module is None:
+            module = inspect.getmodule(type(return_obj))
+            if module is None:
+                module = builtins
+        return [self._create_access(module), access]
+
+    def get_safe_value(self):
+        if type(self._obj) in (bool, bytes, float, int, str, slice) or self._obj is None:
+            return self._obj
+        raise ValueError("Object is type %s and not simple" % type(self._obj))
+
+    def get_api_type(self):
+        return get_api_type(self._obj)
+
+    def get_array_type(self):
+        if isinstance(self._obj, dict):
+            return 'dict'
+        return None
+
+    def get_key_paths(self):
+        def iter_partial_keys():
+            # We could use list(keys()), but that might take a lot more memory.
+            for (i, k) in enumerate(self._obj.keys()):
+                # Limit key listing at some point. This is artificial, but this
+                # way we don't get stalled because of slow completions
+                if i > 50:
+                    break
+                yield k
+
+        return [self._create_access_path(k) for k in iter_partial_keys()]
+
+    def get_access_path_tuples(self):
+        accesses = [create_access(self._inference_state, o) for o in self._get_objects_path()]
+        return [(access.py__name__(), access) for access in accesses]
+
+    def _get_objects_path(self):
+        def get():
+            obj = self._obj
+            yield obj
+            try:
+                obj = obj.__objclass__
+            except AttributeError:
+                pass
+            else:
+                yield obj
+
+            try:
+                # Returns a dotted string path.
+                imp_plz = obj.__module__
+            except AttributeError:
+                # Unfortunately in some cases like `int` there's no __module__
+                if not inspect.ismodule(obj):
+                    yield builtins
+            else:
+                if imp_plz is None:
+                    # Happens for example in `(_ for _ in []).send.__module__`.
+                    yield builtins
+                else:
+                    try:
+                        yield sys.modules[imp_plz]
+                    except KeyError:
+                        # __module__ can be something arbitrary that doesn't exist.
+                        yield builtins
+
+        return list(reversed(list(get())))
+
+    def execute_operation(self, other_access_handle, operator):
+        other_access = other_access_handle.access
+        op = _OPERATORS[operator]
+        return self._create_access_path(op(self._obj, other_access._obj))
+
+    def get_annotation_name_and_args(self):
+        """
+        Returns Tuple[Optional[str], Tuple[AccessPath, ...]]
+        """
+        name = None
+        args = ()
+        if safe_getattr(self._obj, '__module__', default='') == 'typing':
+            m = re.match(r'typing.(\w+)\[', repr(self._obj))
+            if m is not None:
+                name = m.group(1)
+
+                import typing
+                if sys.version_info >= (3, 8):
+                    args = typing.get_args(self._obj)
+                else:
+                    args = safe_getattr(self._obj, '__args__', default=None)
+        return name, tuple(self._create_access_path(arg) for arg in args)
+
+    def needs_type_completions(self):
+        return inspect.isclass(self._obj) and self._obj != type
+
+    def _annotation_to_str(self, annotation):
+        return inspect.formatannotation(annotation)
+
+    def get_signature_params(self):
+        return [
+            SignatureParam(
+                name=p.name,
+                has_default=p.default is not p.empty,
+                default=self._create_access_path(p.default),
+                default_string=repr(p.default),
+                has_annotation=p.annotation is not p.empty,
+                annotation=self._create_access_path(p.annotation),
+                annotation_string=self._annotation_to_str(p.annotation),
+                kind_name=str(p.kind)
+            ) for p in self._get_signature().parameters.values()
+        ]
+
+    def _get_signature(self):
+        obj = self._obj
+        try:
+            return inspect.signature(obj)
+        except (RuntimeError, TypeError):
+            # Reading the code of the function in Python 3.6 implies there are
+            # at least these errors that might occur if something is wrong with
+            # the signature. In that case we just want a simple escape for now.
+            raise ValueError
+
+    def get_return_annotation(self) -> Optional[AccessPath]:
+        try:
+            o = self._obj.__annotations__.get('return')
+        except AttributeError:
+            return None
+
+        if o is None:
+            return None
+
+        try:
+            o = typing.get_type_hints(self._obj).get('return')
+        except Exception:
+            pass
+
+        return self._create_access_path(o)
+
+    def negate(self):
+        return self._create_access_path(-self._obj)
+
+    def get_dir_infos(self):
+        """
+        Used to return a couple of infos that are needed when accessing the sub
+        objects of an object
+        """
+        tuples = dict(
+            (name, self.is_allowed_getattr(name))
+            for name in self.dir()
+        )
+        return self.needs_type_completions(), tuples
+
+
+def _is_class_instance(obj):
+    """Like inspect.* methods."""
+    try:
+        cls = obj.__class__
+    except AttributeError:
+        return False
+    else:
+        # The isinstance check for cls is just there so issubclass doesn't
+        # raise an exception.
+        return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)
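Note: `execute_operation` above is table-driven: the operator string coming from Python syntax is looked up in `_OPERATORS` (which includes `COMPARISON_OPERATORS`) and the resulting `operator`-module function is applied to the two raw objects. A standalone sketch of the same dispatch (illustrative, outside jedi):

import operator as op

OPERATORS = {
    '==': op.eq, '!=': op.ne, '<': op.lt, '<=': op.le,
    '>': op.gt, '>=': op.ge, '+': op.add, '-': op.sub,
}

def execute_operation(left, right, operator_string):
    # An unsupported operator raises KeyError, mirroring the lookup above.
    return OPERATORS[operator_string](left, right)

assert execute_operation(1, 2, '+') == 3
assert execute_operation('a', 'b', '<') is True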
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/getattr_static.py ADDED
@@ -0,0 +1,121 @@
+"""
+A static version of getattr.
+This is a backport of the Python 3 code with a little bit of additional
+information returned to enable Jedi to make decisions.
+"""
+
+import types
+
+from jedi import debug
+
+_sentinel = object()
+
+
+def _check_instance(obj, attr):
+    instance_dict = {}
+    try:
+        instance_dict = object.__getattribute__(obj, "__dict__")
+    except AttributeError:
+        pass
+    return dict.get(instance_dict, attr, _sentinel)
+
+
+def _check_class(klass, attr):
+    for entry in _static_getmro(klass):
+        if _shadowed_dict(type(entry)) is _sentinel:
+            try:
+                return entry.__dict__[attr]
+            except KeyError:
+                pass
+    return _sentinel
+
+
+def _is_type(obj):
+    try:
+        _static_getmro(obj)
+    except TypeError:
+        return False
+    return True
+
+
+def _shadowed_dict(klass):
+    dict_attr = type.__dict__["__dict__"]
+    for entry in _static_getmro(klass):
+        try:
+            class_dict = dict_attr.__get__(entry)["__dict__"]
+        except KeyError:
+            pass
+        else:
+            if not (type(class_dict) is types.GetSetDescriptorType
+                    and class_dict.__name__ == "__dict__"
+                    and class_dict.__objclass__ is entry):
+                return class_dict
+    return _sentinel
+
+
+def _static_getmro(klass):
+    mro = type.__dict__['__mro__'].__get__(klass)
+    if not isinstance(mro, (tuple, list)):
+        # There are unfortunately no tests for this, I was not able to
+        # reproduce this in pure Python. However should still solve the issue
+        # raised in GH #1517.
+        debug.warning('mro of %s returned %s, should be a tuple' % (klass, mro))
+        return ()
+    return mro
+
+
+def _safe_hasattr(obj, name):
+    return _check_class(type(obj), name) is not _sentinel
+
+
+def _safe_is_data_descriptor(obj):
+    return _safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')
+
+
+def getattr_static(obj, attr, default=_sentinel):
+    """Retrieve attributes without triggering dynamic lookup via the
+       descriptor protocol, __getattr__ or __getattribute__.
+
+       Note: this function may not be able to retrieve all attributes
+       that getattr can fetch (like dynamically created attributes)
+       and may find attributes that getattr can't (like descriptors
+       that raise AttributeError). It can also return descriptor objects
+       instead of instance members in some cases. See the
+       documentation for details.
+
+       Returns a tuple `(attr, is_get_descriptor)`. is_get_descriptor means that
+       the attribute is a descriptor that has a `__get__` attribute.
+    """
+    instance_result = _sentinel
+    if not _is_type(obj):
+        klass = type(obj)
+        dict_attr = _shadowed_dict(klass)
+        if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType):
+            instance_result = _check_instance(obj, attr)
+    else:
+        klass = obj
+
+    klass_result = _check_class(klass, attr)
+
+    if instance_result is not _sentinel and klass_result is not _sentinel:
+        if _safe_hasattr(klass_result, '__get__') \
+                and _safe_is_data_descriptor(klass_result):
+            # A get/set descriptor has priority over everything.
+            return klass_result, True
+
+    if instance_result is not _sentinel:
+        return instance_result, False
+    if klass_result is not _sentinel:
+        return klass_result, _safe_hasattr(klass_result, '__get__')
+
+    if obj is klass:
+        # for types we check the metaclass too
+        for entry in _static_getmro(type(klass)):
+            if _shadowed_dict(type(entry)) is _sentinel:
+                try:
+                    return entry.__dict__[attr], False
+                except KeyError:
+                    pass
+    if default is not _sentinel:
+        return default, False
+    raise AttributeError(attr)
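Note: the point of `getattr_static` is that completion must never execute user code such as property getters. A short demonstration using the stdlib `inspect.getattr_static`, which this module backports (jedi's variant additionally returns the `is_get_descriptor` flag):

import inspect

class Config:
    @property
    def expensive(self):
        raise RuntimeError('side effect! must not run during completion')

c = Config()

try:
    getattr(c, 'expensive')        # dynamic lookup runs fget and raises
except RuntimeError:
    pass

attr = inspect.getattr_static(c, 'expensive')
assert isinstance(attr, property)  # the descriptor itself, fget never ran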
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/mixed.py ADDED
@@ -0,0 +1,309 @@
+"""
+Used only for REPL Completion.
+"""
+
+import inspect
+from pathlib import Path
+
+from jedi.parser_utils import get_cached_code_lines
+
+from jedi import settings
+from jedi.cache import memoize_method
+from jedi.inference import compiled
+from jedi.file_io import FileIO
+from jedi.inference.names import NameWrapper
+from jedi.inference.base_value import ValueSet, ValueWrapper, NO_VALUES
+from jedi.inference.value import ModuleValue
+from jedi.inference.cache import inference_state_function_cache, \
+    inference_state_method_cache
+from jedi.inference.compiled.access import ALLOWED_GETITEM_TYPES, get_api_type
+from jedi.inference.gradual.conversion import to_stub
+from jedi.inference.context import CompiledContext, CompiledModuleContext, \
+    TreeContextMixin
+
+_sentinel = object()
+
+
+class MixedObject(ValueWrapper):
+    """
+    A ``MixedObject`` is used in two ways:
+
+    1. It uses the default logic of ``parser.python.tree`` objects,
+    2. except for getattr calls and signatures. The names dicts are generated
+       in a fashion like ``CompiledValue``.
+
+    This combined logic makes it possible to provide more powerful REPL
+    completion. It allows side effects that are not noticeable with the default
+    parser structure to still be completable.
+
+    The biggest difference between CompiledValue and MixedObject is that we are
+    generally dealing with Python code and not with C code. This generates
+    fewer special cases, because in Python you don't have the same freedom
+    to modify the runtime.
+    """
+    def __init__(self, compiled_value, tree_value):
+        super().__init__(tree_value)
+        self.compiled_value = compiled_value
+        self.access_handle = compiled_value.access_handle
+
+    def get_filters(self, *args, **kwargs):
+        yield MixedObjectFilter(
+            self.inference_state, self.compiled_value, self._wrapped_value)
+
+    def get_signatures(self):
+        # Prefer `inspect.signature` over somehow analyzing Python code. It
+        # should be very precise, especially for stuff like `partial`.
+        return self.compiled_value.get_signatures()
+
+    @inference_state_method_cache(default=NO_VALUES)
+    def py__call__(self, arguments):
+        # Fallback to the wrapped value if to_stub returns no values.
+        values = to_stub(self._wrapped_value)
+        if not values:
+            values = self._wrapped_value
+        return values.py__call__(arguments)
+
+    def get_safe_value(self, default=_sentinel):
+        if default is _sentinel:
+            return self.compiled_value.get_safe_value()
+        else:
+            return self.compiled_value.get_safe_value(default)
+
+    @property
+    def array_type(self):
+        return self.compiled_value.array_type
+
+    def get_key_values(self):
+        return self.compiled_value.get_key_values()
+
+    def py__simple_getitem__(self, index):
+        python_object = self.compiled_value.access_handle.access._obj
+        if type(python_object) in ALLOWED_GETITEM_TYPES:
+            return self.compiled_value.py__simple_getitem__(index)
+        return self._wrapped_value.py__simple_getitem__(index)
+
+    def negate(self):
+        return self.compiled_value.negate()
+
+    def _as_context(self):
+        if self.parent_context is None:
+            return MixedModuleContext(self)
+        return MixedContext(self)
+
+    def __repr__(self):
+        return '<%s: %s; %s>' % (
+            type(self).__name__,
+            self.access_handle.get_repr(),
+            self._wrapped_value,
+        )
+
+
+class MixedContext(CompiledContext, TreeContextMixin):
+    @property
+    def compiled_value(self):
+        return self._value.compiled_value
+
+
+class MixedModuleContext(CompiledModuleContext, MixedContext):
+    pass
+
+
+class MixedName(NameWrapper):
+    """
+    The ``CompiledName._compiled_value`` is our MixedObject.
+    """
+    def __init__(self, wrapped_name, parent_tree_value):
+        super().__init__(wrapped_name)
+        self._parent_tree_value = parent_tree_value
+
+    @property
+    def start_pos(self):
+        values = list(self.infer())
+        if not values:
+            # This means a start_pos that doesn't exist (compiled objects).
+            return 0, 0
+        return values[0].name.start_pos
+
+    @memoize_method
+    def infer(self):
+        compiled_value = self._wrapped_name.infer_compiled_value()
+        tree_value = self._parent_tree_value
+        if tree_value.is_instance() or tree_value.is_class():
+            tree_values = tree_value.py__getattribute__(self.string_name)
+            if compiled_value.is_function():
+                return ValueSet({MixedObject(compiled_value, v) for v in tree_values})
+
+        module_context = tree_value.get_root_context()
+        return _create(self._inference_state, compiled_value, module_context)
+
+
+class MixedObjectFilter(compiled.CompiledValueFilter):
+    def __init__(self, inference_state, compiled_value, tree_value):
+        super().__init__(inference_state, compiled_value)
+        self._tree_value = tree_value
+
+    def _create_name(self, *args, **kwargs):
+        return MixedName(
+            super()._create_name(*args, **kwargs),
+            self._tree_value,
+        )
+
+
+@inference_state_function_cache()
+def _load_module(inference_state, path):
+    return inference_state.parse(
+        path=path,
+        cache=True,
+        diff_cache=settings.fast_parser,
+        cache_path=settings.cache_directory
+    ).get_root_node()
+
+
+def _get_object_to_check(python_object):
+    """Check if inspect.getfile has a chance to find the source."""
+    try:
+        python_object = inspect.unwrap(python_object)
+    except ValueError:
+        # Can return a ValueError when it wraps around
+        pass
+
+    if (inspect.ismodule(python_object)
+            or inspect.isclass(python_object)
+            or inspect.ismethod(python_object)
+            or inspect.isfunction(python_object)
+            or inspect.istraceback(python_object)
+            or inspect.isframe(python_object)
+            or inspect.iscode(python_object)):
+        return python_object
+
+    try:
+        return python_object.__class__
+    except AttributeError:
+        raise TypeError  # Prevents computation of `repr` within inspect.
+
+
+def _find_syntax_node_name(inference_state, python_object):
+    original_object = python_object
+    try:
+        python_object = _get_object_to_check(python_object)
+        path = inspect.getsourcefile(python_object)
+    except (OSError, TypeError):
+        # The type might not be known (e.g. class_with_dict.__weakref__)
+        return None
+    path = None if path is None else Path(path)
+    try:
+        if path is None or not path.exists():
+            # The path might not exist or be e.g. <stdin>.
+            return None
+    except OSError:
+        # Might raise an OSError on Windows:
+        #
+        #     [WinError 123] The filename, directory name, or volume label
+        #     syntax is incorrect: '<string>'
+        return None
+
+    file_io = FileIO(path)
+    module_node = _load_module(inference_state, path)
+
+    if inspect.ismodule(python_object):
+        # We don't need to check names for modules, because there's not really
+        # a way to write a module in a module in Python (and also __name__ can
+        # be something like ``email.utils``).
+        code_lines = get_cached_code_lines(inference_state.grammar, path)
+        return module_node, module_node, file_io, code_lines
+
+    try:
+        name_str = python_object.__name__
+    except AttributeError:
+        # Stuff like python_function.__code__.
+        return None
+
+    if name_str == '<lambda>':
+        return None  # It's too hard to find lambdas.
+
+    # Doesn't always work (e.g. os.stat_result)
+    names = module_node.get_used_names().get(name_str, [])
+    # Only functions and classes are relevant. If a name e.g. points to an
+    # import, it's probably a builtin (like collections.deque) and needs to be
+    # ignored.
+    names = [
+        n for n in names
+        if n.parent.type in ('funcdef', 'classdef') and n.parent.name == n
+    ]
+    if not names:
+        return None
+
+    try:
+        code = python_object.__code__
+        # By using the line number of a code object we make the lookup in a
+        # file pretty easy. There's still a possibility of people defining
+        # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people
+        # do so we just don't care.
+        line_nr = code.co_firstlineno
+    except AttributeError:
+        pass
+    else:
+        line_names = [name for name in names if name.start_pos[0] == line_nr]
+        # There's a chance that the object is not available anymore, because
+        # the code has changed in the background.
+        if line_names:
+            names = line_names
+
+    code_lines = get_cached_code_lines(inference_state.grammar, path)
+    # It's really hard to actually get the right definition, here as a last
+    # resort we just return the last one. This chance might lead to odd
+    # completions at some points but will lead to mostly correct type
+    # inference, because people tend to define a public name in a module only
+    # once.
+    tree_node = names[-1].parent
+    if tree_node.type == 'funcdef' and get_api_type(original_object) == 'instance':
+        # If an instance is given and we're landing on a function (e.g.
+        # partial in 3.5), something is completely wrong and we should not
+        # return that.
+        return None
+    return module_node, tree_node, file_io, code_lines
+
+
+@inference_state_function_cache()
+def _create(inference_state, compiled_value, module_context):
+    # TODO accessing this is bad, but it probably doesn't matter that much,
+    # because we're working with interpreters only here.
+    python_object = compiled_value.access_handle.access._obj
+    result = _find_syntax_node_name(inference_state, python_object)
+    if result is None:
+        # TODO Care about generics from stuff like `[1]` and don't return like this.
+        if type(python_object) in (dict, list, tuple):
+            return ValueSet({compiled_value})
+
+        tree_values = to_stub(compiled_value)
+        if not tree_values:
+            return ValueSet({compiled_value})
+    else:
+        module_node, tree_node, file_io, code_lines = result
+
+        if module_context is None or module_context.tree_node != module_node:
+            root_compiled_value = compiled_value.get_root_context().get_value()
+            # TODO this __name__ might be wrong.
+            name = root_compiled_value.py__name__()
+            string_names = tuple(name.split('.'))
+            module_value = ModuleValue(
+                inference_state, module_node,
+                file_io=file_io,
+                string_names=string_names,
+                code_lines=code_lines,
+                is_package=root_compiled_value.is_package(),
+            )
+            if name is not None:
+                inference_state.module_cache.add(string_names, ValueSet([module_value]))
+            module_context = module_value.as_context()
+
+        tree_values = ValueSet({module_context.create_value(tree_node)})
+        if tree_node.type == 'classdef':
+            if not compiled_value.is_class():
+                # Is an instance, not a class.
+                tree_values = tree_values.execute_with_values()
+
+    return ValueSet(
+        MixedObject(compiled_value, tree_value=tree_value)
+        for tree_value in tree_values
+    )
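Note: `_get_object_to_check` and `_find_syntax_node_name` above locate the source of a live object by unwrapping decorator layers, asking `inspect` for the defining file, and then using the code object's first line to pick the right definition. A self-contained sketch of that idea (illustrative names, not jedi API):

import functools
import inspect

def traced(func):
    @functools.wraps(func)              # sets __wrapped__ for inspect.unwrap
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@traced
def greet(name):
    return 'hello ' + name

obj = inspect.unwrap(greet)             # peel the decorator layers
path = inspect.getsourcefile(obj)       # may be None for builtins/<stdin>
line = obj.__code__.co_firstlineno      # same trick _find_syntax_node_name uses
print(path, line)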
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/subprocess/functions.py ADDED
@@ -0,0 +1,257 @@
+import sys
+import os
+import inspect
+import importlib
+from pathlib import Path
+from zipfile import ZipFile
+from zipimport import zipimporter, ZipImportError
+from importlib.machinery import all_suffixes
+
+from jedi.inference.compiled import access
+from jedi import debug
+from jedi import parser_utils
+from jedi.file_io import KnownContentFileIO, ZipFileIO
+
+
+def get_sys_path():
+    return sys.path
+
+
+def load_module(inference_state, **kwargs):
+    return access.load_module(inference_state, **kwargs)
+
+
+def get_compiled_method_return(inference_state, id, attribute, *args, **kwargs):
+    handle = inference_state.compiled_subprocess.get_access_handle(id)
+    return getattr(handle.access, attribute)(*args, **kwargs)
+
+
+def create_simple_object(inference_state, obj):
+    return access.create_access_path(inference_state, obj)
+
+
+def get_module_info(inference_state, sys_path=None, full_name=None, **kwargs):
+    """
+    Returns Tuple[Union[NamespaceInfo, FileIO, None], Optional[bool]]
+    """
+    if sys_path is not None:
+        sys.path, temp = sys_path, sys.path
+    try:
+        return _find_module(full_name=full_name, **kwargs)
+    except ImportError:
+        return None, None
+    finally:
+        if sys_path is not None:
+            sys.path = temp
+
+
+def get_builtin_module_names(inference_state):
+    return sys.builtin_module_names
+
+
+def _test_raise_error(inference_state, exception_type):
+    """
+    Raise an error to simulate certain problems for unit tests.
+    """
+    raise exception_type
+
+
+def _test_print(inference_state, stderr=None, stdout=None):
+    """
+    Force some prints in the subprocesses. This exists for unit tests.
+    """
+    if stderr is not None:
+        print(stderr, file=sys.stderr)
+        sys.stderr.flush()
+    if stdout is not None:
+        print(stdout)
+        sys.stdout.flush()
+
+
+def _get_init_path(directory_path):
+    """
+    The __init__ file can be searched in a directory. If found return it, else
+    None.
+    """
+    for suffix in all_suffixes():
+        path = os.path.join(directory_path, '__init__' + suffix)
+        if os.path.exists(path):
+            return path
+    return None
+
+
+def safe_literal_eval(inference_state, value):
+    return parser_utils.safe_literal_eval(value)
+
+
+def iter_module_names(*args, **kwargs):
+    return list(_iter_module_names(*args, **kwargs))
+
+
+def _iter_module_names(inference_state, paths):
+    # Python modules/packages
+    for path in paths:
+        try:
+            dir_entries = ((entry.name, entry.is_dir()) for entry in os.scandir(path))
+        except OSError:
+            try:
+                zip_import_info = zipimporter(path)
+                # Unfortunately, there is no public way to access zipimporter's
+                # private _files member. We therefore have to use a
+                # custom function to iterate over the files.
+                dir_entries = _zip_list_subdirectory(
+                    zip_import_info.archive, zip_import_info.prefix)
+            except ZipImportError:
+                # The file might not exist or reading it might lead to an error.
+                debug.warning("Not possible to list directory: %s", path)
+                continue
+        for name, is_dir in dir_entries:
+            # First Namespaces then modules/stubs
+            if is_dir:
+                # pycache is obviously not an interesting namespace. Also the
+                # name must be a valid identifier.
+                if name != '__pycache__' and name.isidentifier():
+                    yield name
+            else:
+                if name.endswith('.pyi'):  # Stub files
+                    modname = name[:-4]
+                else:
+                    modname = inspect.getmodulename(name)
+
+                if modname and '.' not in modname:
+                    if modname != '__init__':
+                        yield modname
+
+
+def _find_module(string, path=None, full_name=None, is_global_search=True):
+    """
+    Provides information about a module.
+
+    This function isolates the differences in importing libraries introduced with
+    python 3.3 on; it gets a module name and optionally a path. It will return a
+    tuple containing an open file for the module (if not builtin), the filename
+    or the name of the module if it is a builtin one and a boolean indicating
+    if the module is contained in a package.
+    """
+    spec = None
+    loader = None
+
+    for finder in sys.meta_path:
+        if is_global_search and finder != importlib.machinery.PathFinder:
+            p = None
+        else:
+            p = path
+        try:
+            find_spec = finder.find_spec
+        except AttributeError:
+            # These are old-school classes that still have a different API, just
+            # ignore those.
+            continue
+
+        spec = find_spec(string, p)
+        if spec is not None:
+            if spec.origin == "frozen":
+                continue
+
+            loader = spec.loader
+
+            if loader is None and not spec.has_location:
+                # This is a namespace package.
+                full_name = string if not path else full_name
+                implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
+                return implicit_ns_info, True
+            break
+
+    return _find_module_py33(string, path, loader)
+
+
+def _find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
+    if not loader:
+        spec = importlib.machinery.PathFinder.find_spec(string, path)
+        if spec is not None:
+            loader = spec.loader
+
+    if loader is None and path is None:  # Fallback to find builtins
+        try:
+            spec = importlib.util.find_spec(string)
+            if spec is not None:
+                loader = spec.loader
+        except ValueError as e:
+            # See #491. Importlib might raise a ValueError, to avoid this, we
+            # just raise an ImportError to fix the issue.
+            raise ImportError("Originally " + repr(e))
+
+    if loader is None:
+        raise ImportError("Couldn't find a loader for {}".format(string))
+
+    return _from_loader(loader, string)
+
+
+def _from_loader(loader, string):
+    try:
+        is_package_method = loader.is_package
+    except AttributeError:
+        is_package = False
+    else:
+        is_package = is_package_method(string)
+    try:
+        get_filename = loader.get_filename
+    except AttributeError:
+        return None, is_package
+    else:
+        module_path = get_filename(string)
+
+    # To avoid unicode and read bytes, "overwrite" loader.get_source if
+    # possible.
+    try:
+        f = type(loader).get_source
+    except AttributeError:
+        raise ImportError("get_source was not defined on loader")
+
+    if f is not importlib.machinery.SourceFileLoader.get_source:
+        # Unfortunately we are reading unicode here, not bytes.
+        # It seems hard to get bytes, because the zip importer
+        # logic just unpacks the zip file and returns a file descriptor
+        # that we cannot as easily access. Therefore we just read it as
+        # a string in the cases where get_source was overwritten.
+        code = loader.get_source(string)
+    else:
+        code = _get_source(loader, string)
+
+    if code is None:
+        return None, is_package
+    if isinstance(loader, zipimporter):
+        return ZipFileIO(module_path, code, Path(loader.archive)), is_package
+
+    return KnownContentFileIO(module_path, code), is_package
+
+
+def _get_source(loader, fullname):
+    """
+    This method is here as a replacement for SourceLoader.get_source. That
+    method returns unicode, but we prefer bytes.
+    """
+    path = loader.get_filename(fullname)
+    try:
+        return loader.get_data(path)
+    except OSError:
+        raise ImportError('source not available through get_data()',
+                          name=fullname)
+
+
+def _zip_list_subdirectory(zip_path, zip_subdir_path):
+    zip_file = ZipFile(zip_path)
+    zip_subdir_path = Path(zip_subdir_path)
+    zip_content_file_paths = zip_file.namelist()
+    for raw_file_name in zip_content_file_paths:
+        file_path = Path(raw_file_name)
+        if file_path.parent == zip_subdir_path:
+            file_path = file_path.relative_to(zip_subdir_path)
+            yield file_path.name, raw_file_name.endswith("/")
+
+
+class ImplicitNSInfo:
+    """Stores information returned from an implicit namespace spec"""
+    def __init__(self, name, paths):
+        self.name = name
+        self.paths = paths
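Note: `_iter_module_names` above treats directories with identifier names as packages, `.pyi` files as stubs, and everything else via `inspect.getmodulename`. A standalone sketch of the same scan (simplified, without the zipimport fallback):

import inspect
import os

def iter_module_names(path):
    for entry in os.scandir(path):
        name = entry.name
        if entry.is_dir():
            # Packages/namespaces: valid identifiers, minus __pycache__.
            if name != '__pycache__' and name.isidentifier():
                yield name
        elif name.endswith('.pyi'):      # stub files keep their stem
            yield name[:-4]
        else:
            modname = inspect.getmodulename(name)
            if modname and '.' not in modname and modname != '__init__':
                yield modname

# Example: a few module names from the standard library directory.
import sysconfig
print(sorted(set(iter_module_names(sysconfig.get_paths()['stdlib'])))[:5])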
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/compiled/value.py ADDED
@@ -0,0 +1,626 @@
1
+ """
2
+ Imitate the parser representation.
3
+ """
4
+ import re
5
+ from functools import partial
6
+ from inspect import Parameter
7
+ from pathlib import Path
8
+ from typing import Optional
9
+
10
+ from jedi import debug
11
+ from jedi.inference.utils import to_list
12
+ from jedi.cache import memoize_method
13
+ from jedi.inference.filters import AbstractFilter
14
+ from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \
15
+ ParamNameInterface
16
+ from jedi.inference.base_value import Value, ValueSet, NO_VALUES
17
+ from jedi.inference.lazy_value import LazyKnownValue
18
+ from jedi.inference.compiled.access import _sentinel
19
+ from jedi.inference.cache import inference_state_function_cache
20
+ from jedi.inference.helpers import reraise_getitem_errors
21
+ from jedi.inference.signature import BuiltinSignature
22
+ from jedi.inference.context import CompiledContext, CompiledModuleContext
23
+
24
+
25
+ class CheckAttribute:
26
+ """Raises :exc:`AttributeError` if the attribute X is not available."""
27
+ def __init__(self, check_name=None):
28
+ # Remove the py in front of e.g. py__call__.
29
+ self.check_name = check_name
30
+
31
+ def __call__(self, func):
32
+ self.func = func
33
+ if self.check_name is None:
34
+ self.check_name = func.__name__[2:]
35
+ return self
36
+
37
+ def __get__(self, instance, owner):
38
+ if instance is None:
39
+ return self
40
+
41
+ # This might raise an AttributeError. That's wanted.
42
+ instance.access_handle.getattr_paths(self.check_name)
43
+ return partial(self.func, instance)
44
+
45
+
46
+ class CompiledValue(Value):
47
+ def __init__(self, inference_state, access_handle, parent_context=None):
48
+ super().__init__(inference_state, parent_context)
49
+ self.access_handle = access_handle
50
+
51
+ def py__call__(self, arguments):
52
+ return_annotation = self.access_handle.get_return_annotation()
53
+ if return_annotation is not None:
54
+ return create_from_access_path(
55
+ self.inference_state,
56
+ return_annotation
57
+ ).execute_annotation()
58
+
59
+ try:
60
+ self.access_handle.getattr_paths('__call__')
61
+ except AttributeError:
62
+ return super().py__call__(arguments)
63
+ else:
64
+ if self.access_handle.is_class():
65
+ from jedi.inference.value import CompiledInstance
66
+ return ValueSet([
67
+ CompiledInstance(self.inference_state, self.parent_context, self, arguments)
68
+ ])
69
+ else:
70
+ return ValueSet(self._execute_function(arguments))
71
+
72
+ @CheckAttribute()
73
+ def py__class__(self):
74
+ return create_from_access_path(self.inference_state, self.access_handle.py__class__())
75
+
76
+ @CheckAttribute()
77
+ def py__mro__(self):
78
+ return (self,) + tuple(
79
+ create_from_access_path(self.inference_state, access)
80
+ for access in self.access_handle.py__mro__accesses()
81
+ )
82
+
83
+ @CheckAttribute()
84
+ def py__bases__(self):
85
+ return tuple(
86
+ create_from_access_path(self.inference_state, access)
87
+ for access in self.access_handle.py__bases__()
88
+ )
89
+
90
+ def get_qualified_names(self):
91
+ return self.access_handle.get_qualified_names()
92
+
93
+ def py__bool__(self):
94
+ return self.access_handle.py__bool__()
95
+
96
+ def is_class(self):
97
+ return self.access_handle.is_class()
98
+
99
+ def is_function(self):
100
+ return self.access_handle.is_function()
101
+
102
+ def is_module(self):
103
+ return self.access_handle.is_module()
104
+
105
+ def is_compiled(self):
106
+ return True
107
+
108
+ def is_stub(self):
109
+ return False
110
+
111
+ def is_instance(self):
112
+ return self.access_handle.is_instance()
113
+
114
+ def py__doc__(self):
115
+ return self.access_handle.py__doc__()
116
+
117
+ @to_list
118
+ def get_param_names(self):
119
+ try:
120
+ signature_params = self.access_handle.get_signature_params()
121
+ except ValueError: # Has no signature
122
+ params_str, ret = self._parse_function_doc()
123
+ if not params_str:
124
+ tokens = []
125
+ else:
126
+ tokens = params_str.split(',')
127
+ if self.access_handle.ismethoddescriptor():
128
+ tokens.insert(0, 'self')
129
+ for p in tokens:
130
+ name, _, default = p.strip().partition('=')
131
+ yield UnresolvableParamName(self, name, default)
132
+ else:
133
+ for signature_param in signature_params:
134
+ yield SignatureParamName(self, signature_param)
135
+
136
+ def get_signatures(self):
137
+ _, return_string = self._parse_function_doc()
138
+ return [BuiltinSignature(self, return_string)]
139
+
140
+ def __repr__(self):
141
+ return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr())
142
+
143
+ @memoize_method
144
+ def _parse_function_doc(self):
145
+ doc = self.py__doc__()
146
+ if doc is None:
147
+ return '', ''
148
+
149
+ return _parse_function_doc(doc)
150
+
151
+ @property
152
+ def api_type(self):
153
+ return self.access_handle.get_api_type()
154
+
155
+ def get_filters(self, is_instance=False, origin_scope=None):
156
+ yield self._ensure_one_filter(is_instance)
157
+
158
+ @memoize_method
159
+ def _ensure_one_filter(self, is_instance):
160
+ return CompiledValueFilter(self.inference_state, self, is_instance)
161
+
162
+ def py__simple_getitem__(self, index):
163
+ with reraise_getitem_errors(IndexError, KeyError, TypeError):
164
+ try:
165
+ access = self.access_handle.py__simple_getitem__(
166
+ index,
167
+ safe=not self.inference_state.allow_unsafe_executions
168
+ )
169
+ except AttributeError:
170
+ return super().py__simple_getitem__(index)
171
+ if access is None:
172
+ return super().py__simple_getitem__(index)
173
+
174
+ return ValueSet([create_from_access_path(self.inference_state, access)])
175
+
176
+ def py__getitem__(self, index_value_set, contextualized_node):
177
+ all_access_paths = self.access_handle.py__getitem__all_values()
178
+ if all_access_paths is None:
179
+ # This means basically that no __getitem__ has been defined on this
180
+ # object.
181
+ return super().py__getitem__(index_value_set, contextualized_node)
182
+ return ValueSet(
183
+ create_from_access_path(self.inference_state, access)
184
+ for access in all_access_paths
185
+ )
186
+
187
+ def py__iter__(self, contextualized_node=None):
188
+ if not self.access_handle.has_iter():
189
+ yield from super().py__iter__(contextualized_node)
190
+
191
+ access_path_list = self.access_handle.py__iter__list()
192
+ if access_path_list is None:
193
+ # There is no __iter__ method on this object.
194
+ return
195
+
196
+ for access in access_path_list:
197
+ yield LazyKnownValue(create_from_access_path(self.inference_state, access))
198
+
199
+ def py__name__(self):
200
+ return self.access_handle.py__name__()
201
+
202
+ @property
203
+ def name(self):
204
+ name = self.py__name__()
205
+ if name is None:
206
+ name = self.access_handle.get_repr()
207
+ return CompiledValueName(self, name)
208
+
209
+ def _execute_function(self, params):
210
+ from jedi.inference import docstrings
211
+ from jedi.inference.compiled import builtin_from_name
212
+ if self.api_type != 'function':
213
+ return
214
+
215
+ for name in self._parse_function_doc()[1].split():
216
+ try:
217
+ # TODO wtf is this? this is exactly the same as the thing
218
+ # below. It uses getattr as well.
219
+ self.inference_state.builtins_module.access_handle.getattr_paths(name)
220
+ except AttributeError:
221
+ continue
222
+ else:
223
+ bltn_obj = builtin_from_name(self.inference_state, name)
224
+ yield from self.inference_state.execute(bltn_obj, params)
225
+ yield from docstrings.infer_return_types(self)
226
+
227
+ def get_safe_value(self, default=_sentinel):
228
+ try:
229
+ return self.access_handle.get_safe_value()
230
+ except ValueError:
231
+ if default == _sentinel:
232
+ raise
233
+ return default
234
+
235
+ def execute_operation(self, other, operator):
236
+ try:
237
+ return ValueSet([create_from_access_path(
238
+ self.inference_state,
239
+ self.access_handle.execute_operation(other.access_handle, operator)
240
+ )])
241
+ except TypeError:
242
+ return NO_VALUES
243
+
244
+ def execute_annotation(self):
245
+ if self.access_handle.get_repr() == 'None':
246
+ # None as an annotation doesn't need to be executed.
247
+ return ValueSet([self])
248
+
249
+ name, args = self.access_handle.get_annotation_name_and_args()
250
+ arguments = [
251
+ ValueSet([create_from_access_path(self.inference_state, path)])
252
+ for path in args
253
+ ]
254
+ if name == 'Union':
255
+ return ValueSet.from_sets(arg.execute_annotation() for arg in arguments)
256
+ elif name:
257
+ # While with_generics only exists on very specific objects, we
258
+ # should probably be fine, because we control all the typing
259
+ # objects.
260
+ return ValueSet([
261
+ v.with_generics(arguments)
262
+ for v in self.inference_state.typing_module.py__getattribute__(name)
263
+ ]).execute_annotation()
264
+ return super().execute_annotation()
265
+
266
+ def negate(self):
267
+ return create_from_access_path(self.inference_state, self.access_handle.negate())
268
+
269
+ def get_metaclasses(self):
270
+ return NO_VALUES
271
+
272
+ def _as_context(self):
273
+ return CompiledContext(self)
274
+
275
+ @property
276
+ def array_type(self):
277
+ return self.access_handle.get_array_type()
278
+
279
+ def get_key_values(self):
280
+ return [
281
+ create_from_access_path(self.inference_state, k)
282
+ for k in self.access_handle.get_key_paths()
283
+ ]
284
+
285
+ def get_type_hint(self, add_class_info=True):
286
+ if self.access_handle.get_repr() in ('None', "<class 'NoneType'>"):
287
+ return 'None'
288
+ return None
289
+
290
+
291
+ class CompiledModule(CompiledValue):
292
+ file_io = None # For modules
293
+
294
+ def _as_context(self):
295
+ return CompiledModuleContext(self)
296
+
297
+ def py__path__(self):
298
+ return self.access_handle.py__path__()
299
+
300
+ def is_package(self):
301
+ return self.py__path__() is not None
302
+
303
+ @property
304
+ def string_names(self):
305
+ # For modules
306
+ name = self.py__name__()
307
+ if name is None:
308
+ return ()
309
+ return tuple(name.split('.'))
310
+
311
+ def py__file__(self) -> Optional[Path]:
312
+ return self.access_handle.py__file__() # type: ignore[no-any-return]
313
+
314
+
315
+ class CompiledName(AbstractNameDefinition):
316
+ def __init__(self, inference_state, parent_value, name, is_descriptor):
317
+ self._inference_state = inference_state
318
+ self.parent_context = parent_value.as_context()
319
+ self._parent_value = parent_value
320
+ self.string_name = name
321
+ self.is_descriptor = is_descriptor
322
+
323
+ def py__doc__(self):
324
+ return self.infer_compiled_value().py__doc__()
325
+
326
+ def _get_qualified_names(self):
327
+ parent_qualified_names = self.parent_context.get_qualified_names()
328
+ if parent_qualified_names is None:
329
+ return None
330
+ return parent_qualified_names + (self.string_name,)
331
+
332
+ def get_defining_qualified_value(self):
333
+ context = self.parent_context
334
+ if context.is_module() or context.is_class():
335
+ return self.parent_context.get_value() # Might be None
336
+
337
+ return None
338
+
339
+ def __repr__(self):
340
+ try:
341
+ name = self.parent_context.name # __name__ is not defined all the time
342
+ except AttributeError:
343
+ name = None
344
+ return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name)
345
+
346
+ @property
347
+ def api_type(self):
348
+ if self.is_descriptor:
349
+ # In case of properties we want to avoid executions as much as
350
+ # possible. Since the api_type can be wrong for other reasons
351
+ # anyway, we just return instance here.
352
+ return "instance"
353
+ return self.infer_compiled_value().api_type
354
+
355
+ def infer(self):
356
+ return ValueSet([self.infer_compiled_value()])
357
+
358
+ @memoize_method
359
+ def infer_compiled_value(self):
360
+ return create_from_name(self._inference_state, self._parent_value, self.string_name)
361
+
362
+
363
+ class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
364
+ def __init__(self, compiled_value, signature_param):
365
+ self.parent_context = compiled_value.parent_context
366
+ self._signature_param = signature_param
367
+
368
+ @property
369
+ def string_name(self):
370
+ return self._signature_param.name
371
+
372
+ def to_string(self):
373
+ s = self._kind_string() + self.string_name
374
+ if self._signature_param.has_annotation:
375
+ s += ': ' + self._signature_param.annotation_string
376
+ if self._signature_param.has_default:
377
+ s += '=' + self._signature_param.default_string
378
+ return s
379
+
380
+ def get_kind(self):
381
+ return getattr(Parameter, self._signature_param.kind_name)
382
+
383
+ def infer(self):
384
+ p = self._signature_param
385
+ inference_state = self.parent_context.inference_state
386
+ values = NO_VALUES
387
+ if p.has_default:
388
+ values = ValueSet([create_from_access_path(inference_state, p.default)])
389
+ if p.has_annotation:
390
+ annotation = create_from_access_path(inference_state, p.annotation)
391
+ values |= annotation.execute_with_values()
392
+ return values
393
+
394
+
395
+ class UnresolvableParamName(ParamNameInterface, AbstractNameDefinition):
396
+ def __init__(self, compiled_value, name, default):
397
+ self.parent_context = compiled_value.parent_context
398
+ self.string_name = name
399
+ self._default = default
400
+
401
+ def get_kind(self):
402
+ return Parameter.POSITIONAL_ONLY
403
+
404
+ def to_string(self):
405
+ string = self.string_name
406
+ if self._default:
407
+ string += '=' + self._default
408
+ return string
409
+
410
+ def infer(self):
411
+ return NO_VALUES
412
+
413
+
414
+ class CompiledValueName(ValueNameMixin, AbstractNameDefinition):
415
+ def __init__(self, value, name):
416
+ self.string_name = name
417
+ self._value = value
418
+ self.parent_context = value.parent_context
419
+
420
+
421
+ class EmptyCompiledName(AbstractNameDefinition):
422
+ """
423
+ Accessing some names will raise an exception. To avoid not having any
424
+ completions, just give Jedi the option to return this object. It infers to
425
+ nothing.
426
+ """
427
+ def __init__(self, inference_state, name):
428
+ self.parent_context = inference_state.builtins_module
429
+ self.string_name = name
430
+
431
+ def infer(self):
432
+ return NO_VALUES
433
+
434
+
435
+ class CompiledValueFilter(AbstractFilter):
436
+ def __init__(self, inference_state, compiled_value, is_instance=False):
437
+ self._inference_state = inference_state
438
+ self.compiled_value = compiled_value
439
+ self.is_instance = is_instance
440
+
441
+ def get(self, name):
442
+ access_handle = self.compiled_value.access_handle
443
+ safe = not self._inference_state.allow_unsafe_executions
444
+ return self._get(
445
+ name,
446
+ lambda name: access_handle.is_allowed_getattr(name, safe=safe),
447
+ lambda name: name in access_handle.dir(),
448
+ check_has_attribute=True
449
+ )
450
+
451
+ def _get(self, name, allowed_getattr_callback, in_dir_callback, check_has_attribute=False):
452
+ """
453
+ To remove quite a few access calls we introduced the callback here.
454
+ """
455
+ has_attribute, is_descriptor, property_return_annotation = allowed_getattr_callback(
456
+ name,
457
+ )
458
+ if property_return_annotation is not None:
459
+ values = create_from_access_path(
460
+ self._inference_state,
461
+ property_return_annotation
462
+ ).execute_annotation()
463
+ if values:
464
+ return [CompiledValueName(v, name) for v in values]
465
+
466
+ if check_has_attribute and not has_attribute:
467
+ return []
468
+
469
+ if (is_descriptor or not has_attribute) \
470
+ and not self._inference_state.allow_unsafe_executions:
471
+ return [self._get_cached_name(name, is_empty=True)]
472
+
473
+ if self.is_instance and not in_dir_callback(name):
474
+ return []
475
+ return [self._get_cached_name(name, is_descriptor=is_descriptor)]
476
+
477
+ @memoize_method
478
+ def _get_cached_name(self, name, is_empty=False, *, is_descriptor=False):
479
+ if is_empty:
480
+ return EmptyCompiledName(self._inference_state, name)
481
+ else:
482
+ return self._create_name(name, is_descriptor=is_descriptor)
483
+
484
+ def values(self):
485
+ from jedi.inference.compiled import builtin_from_name
486
+ names = []
487
+ needs_type_completions, dir_infos = self.compiled_value.access_handle.get_dir_infos()
488
+ # We could use `safe=False` here as well, especially as a parameter to
489
+ # get_dir_infos. But this would lead to a lot of property executions
490
+ # that are probably not wanted. The drawback for this is that we
491
+ # have a different name for `get` and `values`. For `get` we always
492
+ # execute.
493
+ for name in dir_infos:
494
+ names += self._get(
495
+ name,
496
+ lambda name: dir_infos[name],
497
+ lambda name: name in dir_infos,
498
+ )
499
+
500
+ # ``dir`` doesn't include the type names.
501
+ if not self.is_instance and needs_type_completions:
502
+ for filter in builtin_from_name(self._inference_state, 'type').get_filters():
503
+ names += filter.values()
504
+ return names
505
+
506
+ def _create_name(self, name, is_descriptor):
507
+ return CompiledName(
508
+ self._inference_state,
509
+ self.compiled_value,
510
+ name,
511
+ is_descriptor,
512
+ )
513
+
514
+ def __repr__(self):
515
+ return "<%s: %s>" % (self.__class__.__name__, self.compiled_value)
516
+
517
+
518
+ docstr_defaults = {
519
+ 'floating point number': 'float',
520
+ 'character': 'str',
521
+ 'integer': 'int',
522
+ 'dictionary': 'dict',
523
+ 'string': 'str',
524
+ }
525
+
526
+
527
+ def _parse_function_doc(doc):
528
+ """
529
+ Takes a function and returns the params and return value as a tuple.
530
+ This is nothing more than a docstring parser.
531
+
532
+ TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
533
+ TODO docstrings like 'tuple of integers'
534
+ """
535
+ # parse round parentheses: def func(a, (b,c))
536
+ try:
537
+ count = 0
538
+ start = doc.index('(')
539
+ for i, s in enumerate(doc[start:]):
540
+ if s == '(':
541
+ count += 1
542
+ elif s == ')':
543
+ count -= 1
544
+ if count == 0:
545
+ end = start + i
546
+ break
547
+ param_str = doc[start + 1:end]
548
+ except (ValueError, UnboundLocalError):
549
+ # ValueError for doc.index
550
+ # UnboundLocalError for undefined end in last line
551
+ debug.dbg('no brackets found - no param')
552
+ end = 0
553
+ param_str = ''
554
+ else:
555
+ # remove square brackets, that show an optional param ( = None)
556
+ def change_options(m):
557
+ args = m.group(1).split(',')
558
+ for i, a in enumerate(args):
559
+ if a and '=' not in a:
560
+ args[i] += '=None'
561
+ return ','.join(args)
562
+
563
+ while True:
564
+ param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
565
+ change_options, param_str)
566
+ if changes == 0:
567
+ break
568
+ param_str = param_str.replace('-', '_') # see: isinstance.__doc__
569
+
570
+ # parse return value
571
+ r = re.search('-[>-]* ', doc[end:end + 7])
572
+ if r is None:
573
+ ret = ''
574
+ else:
575
+ index = end + r.end()
576
+ # get result type, which can contain newlines
577
+ pattern = re.compile(r'(,\n|[^\n-])+')
578
+ ret_str = pattern.match(doc, index).group(0).strip()
579
+ # New object -> object()
580
+ ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
581
+
582
+ ret = docstr_defaults.get(ret_str, ret_str)
583
+
584
+ return param_str, ret
585
+
586
+
587
+ def create_from_name(inference_state, compiled_value, name):
588
+ access_paths = compiled_value.access_handle.getattr_paths(name, default=None)
589
+
590
+ value = None
591
+ for access_path in access_paths:
592
+ value = create_cached_compiled_value(
593
+ inference_state,
594
+ access_path,
595
+ parent_context=None if value is None else value.as_context(),
596
+ )
597
+ return value
598
+
599
+
600
+ def _normalize_create_args(func):
601
+ """The cache doesn't care about keyword vs. normal args."""
602
+ def wrapper(inference_state, obj, parent_context=None):
603
+ return func(inference_state, obj, parent_context)
604
+ return wrapper
605
+
606
+
607
+ def create_from_access_path(inference_state, access_path):
608
+ value = None
609
+ for name, access in access_path.accesses:
610
+ value = create_cached_compiled_value(
611
+ inference_state,
612
+ access,
613
+ parent_context=None if value is None else value.as_context()
614
+ )
615
+ return value
616
+
617
+
618
+ @_normalize_create_args
619
+ @inference_state_function_cache()
620
+ def create_cached_compiled_value(inference_state, access_handle, parent_context):
621
+ assert not isinstance(parent_context, CompiledValue)
622
+ if parent_context is None:
623
+ cls = CompiledModule
624
+ else:
625
+ cls = CompiledValue
626
+ return cls(inference_state, access_handle, parent_context)
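Note: `_parse_function_doc` above recovers a parameter string and a return string from C-function docstrings by matching parentheses rather than parsing Python. Below is a minimal standalone illustration of that bracket-matching idea (not jedi's API; the sample docstring is made up):

    doc = "pow(x, y, z=None) -> number"

    start = doc.index('(')
    depth = 0
    for i, ch in enumerate(doc[start:], start):
        depth += ch == '('   # booleans count as 0/1
        depth -= ch == ')'
        if depth == 0:
            end = i
            break
    params = doc[start + 1:end]        # 'x, y, z=None'
    ret = doc[end + 1:].lstrip(' ->')  # 'number'
    print(params, '|', ret)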
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/gradual/__pycache__/base.cpython-310.pyc ADDED
Binary file (15.9 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/gradual/__pycache__/generics.cpython-310.pyc ADDED
Binary file (5.26 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/gradual/__pycache__/stub_value.cpython-310.pyc ADDED
Binary file (4.31 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jedi/inference/gradual/__pycache__/typeshed.cpython-310.pyc ADDED
Binary file (7.3 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (234 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc ADDED
Binary file (5.53 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc ADDED
Binary file (21.4 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc ADDED
Binary file (2.18 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc ADDED
Binary file (391 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc ADDED
Binary file (13.2 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc ADDED
Binary file (481 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc ADDED
Binary file (824 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc ADDED
Binary file (31 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc ADDED
Binary file (4.6 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc ADDED
Binary file (2.09 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc ADDED
Binary file (766 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc ADDED
Binary file (659 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc ADDED
Binary file (68.1 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc ADDED
Binary file (3.73 kB).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc ADDED
Binary file (940 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc ADDED
Binary file (618 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.gzip ADDED
Binary file (831 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py34_np19.pkl.lzma ADDED
Binary file (697 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.4.dev0_compressed_cache_size_pickle_py35_np19.gz_03.npy.z ADDED
Binary file (37 Bytes).
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_backports.py ADDED
@@ -0,0 +1,35 @@
+ import mmap
+
+ from joblib.backports import make_memmap, concurrency_safe_rename
+ from joblib.test.common import with_numpy
+ from joblib.testing import parametrize
+ from joblib import Parallel, delayed
+
+
+ @with_numpy
+ def test_memmap(tmpdir):
+     fname = tmpdir.join('test.mmap').strpath
+     size = 5 * mmap.ALLOCATIONGRANULARITY
+     offset = mmap.ALLOCATIONGRANULARITY + 1
+     memmap_obj = make_memmap(fname, shape=size, mode='w+', offset=offset)
+     assert memmap_obj.offset == offset
+
+
+ @parametrize('dst_content', [None, 'dst content'])
+ @parametrize('backend', [None, 'threading'])
+ def test_concurrency_safe_rename(tmpdir, dst_content, backend):
+     src_paths = [tmpdir.join('src_%d' % i) for i in range(4)]
+     for src_path in src_paths:
+         src_path.write('src content')
+     dst_path = tmpdir.join('dst')
+     if dst_content is not None:
+         dst_path.write(dst_content)
+
+     Parallel(n_jobs=4, backend=backend)(
+         delayed(concurrency_safe_rename)(src_path.strpath, dst_path.strpath)
+         for src_path in src_paths
+     )
+     assert dst_path.exists()
+     assert dst_path.read() == 'src content'
+     for src_path in src_paths:
+         assert not src_path.exists()
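Note: a hedged usage sketch of what test_memmap above asserts; `make_memmap` is joblib's wrapper around `numpy.memmap`, and per the test it preserves a non-page-aligned `offset`. The temporary-directory handling below is illustrative only.

    import mmap, os, tempfile
    from joblib.backports import make_memmap

    fname = os.path.join(tempfile.mkdtemp(), "demo.mmap")
    m = make_memmap(fname, shape=2 * mmap.ALLOCATIONGRANULARITY,
                    mode="w+", offset=mmap.ALLOCATIONGRANULARITY + 1)
    print(m.offset)  # the requested offset survives, as the test asserts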
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_config.py ADDED
@@ -0,0 +1,151 @@
+ import os
+
+ from joblib.parallel import parallel_config
+ from joblib.parallel import parallel_backend
+ from joblib.parallel import Parallel, delayed
+
+ from joblib.parallel import BACKENDS
+ from joblib.parallel import DEFAULT_BACKEND
+ from joblib.parallel import EXTERNAL_BACKENDS
+
+ from joblib._parallel_backends import LokyBackend
+ from joblib._parallel_backends import ThreadingBackend
+ from joblib._parallel_backends import MultiprocessingBackend
+
+ from joblib.testing import parametrize, raises
+ from joblib.test.common import np, with_numpy
+ from joblib.test.common import with_multiprocessing
+ from joblib.test.test_parallel import check_memmap
+
+
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_global_parallel_backend(context):
+     default = Parallel()._backend
+
+     pb = context('threading')
+     try:
+         assert isinstance(Parallel()._backend, ThreadingBackend)
+     finally:
+         pb.unregister()
+     assert type(Parallel()._backend) is type(default)
+
+
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_external_backends(context):
+     def register_foo():
+         BACKENDS['foo'] = ThreadingBackend
+
+     EXTERNAL_BACKENDS['foo'] = register_foo
+     try:
+         with context('foo'):
+             assert isinstance(Parallel()._backend, ThreadingBackend)
+     finally:
+         del EXTERNAL_BACKENDS['foo']
+
+
+ @with_numpy
+ @with_multiprocessing
+ def test_parallel_config_no_backend(tmpdir):
+     # Check that parallel_config allows changing the config
+     # even if no backend is set.
+     with parallel_config(n_jobs=2, max_nbytes=1, temp_folder=tmpdir):
+         with Parallel(prefer="processes") as p:
+             assert isinstance(p._backend, LokyBackend)
+             assert p.n_jobs == 2
+
+             # Checks that memmapping is enabled
+             p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+             assert len(os.listdir(tmpdir)) > 0
+
+
+ @with_numpy
+ @with_multiprocessing
+ def test_parallel_config_params_explicit_set(tmpdir):
+     with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir):
+         with Parallel(n_jobs=2, prefer="processes", max_nbytes='1M') as p:
+             assert isinstance(p._backend, LokyBackend)
+             assert p.n_jobs == 2
+
+             # Checks that memmapping is disabled
+             with raises(TypeError, match="Expected np.memmap instance"):
+                 p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+
+
+ @parametrize("param", ["prefer", "require"])
+ def test_parallel_config_bad_params(param):
+     # Check that an error is raised when setting a wrong backend
+     # hint or constraint
+     with raises(ValueError, match=f"{param}=wrong is not a valid"):
+         with parallel_config(**{param: "wrong"}):
+             Parallel()
+
+
+ def test_parallel_config_constructor_params():
+     # Check that an error is raised when backend is None
+     # but backend constructor params are given
+     with raises(ValueError, match="only supported when backend is not None"):
+         with parallel_config(inner_max_num_threads=1):
+             pass
+
+     with raises(ValueError, match="only supported when backend is not None"):
+         with parallel_config(backend_param=1):
+             pass
+
+
+ def test_parallel_config_nested():
+     # Check that a nested configuration retrieves the info from the
+     # parent config and does not reset it.
+     with parallel_config(n_jobs=2):
+         p = Parallel()
+         assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND])
+         assert p.n_jobs == 2
+
+     with parallel_config(backend='threading'):
+         with parallel_config(n_jobs=2):
+             p = Parallel()
+             assert isinstance(p._backend, ThreadingBackend)
+             assert p.n_jobs == 2
+
+     with parallel_config(verbose=100):
+         with parallel_config(n_jobs=2):
+             p = Parallel()
+             assert p.verbose == 100
+             assert p.n_jobs == 2
+
+
+ @with_numpy
+ @with_multiprocessing
+ @parametrize('backend', ['multiprocessing', 'threading',
+                          MultiprocessingBackend(), ThreadingBackend()])
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_threadpool_limitation_in_child_context_error(context, backend):
+
+     with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"):
+         context(backend, inner_max_num_threads=1)
+
+
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_parallel_n_jobs_none(context):
+     # Check that n_jobs=None is interpreted as "unset" in Parallel
+     # non regression test for #1473
+     with context(backend="threading", n_jobs=2):
+         with Parallel(n_jobs=None) as p:
+             assert p.n_jobs == 2
+
+     with context(backend="threading"):
+         default_n_jobs = Parallel().n_jobs
+         with Parallel(n_jobs=None) as p:
+             assert p.n_jobs == default_n_jobs
+
+
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_parallel_config_n_jobs_none(context):
+     # Check that n_jobs=None is interpreted as "explicitly set" in
+     # parallel_(config/backend)
+     # non regression test for #1473
+     with context(backend="threading", n_jobs=2):
+         with context(backend="threading", n_jobs=None):
+             # n_jobs=None resets n_jobs to backend's default
+             with Parallel() as p:
+                 assert p.n_jobs == 1
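Note: the behaviour these tests pin down is the inheritance of settings from an enclosing `parallel_config` block. A minimal usage sketch (assuming joblib >= 1.3, where `parallel_config` exists):

    from joblib import Parallel, delayed, parallel_config

    with parallel_config(backend="threading", n_jobs=2):
        # Parallel() picks up both the backend and n_jobs from the context;
        # lambdas are fine here because the threading backend does not pickle.
        squares = Parallel()(delayed(lambda x: x * x)(i) for i in range(4))
    print(squares)  # [0, 1, 4, 9]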
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_dask.py ADDED
@@ -0,0 +1,499 @@
+ from __future__ import print_function, division, absolute_import
+ import os
+ import warnings
+
+ import pytest
+ from random import random
+ from uuid import uuid4
+ from time import sleep
+
+ from .. import Parallel, delayed, parallel_config
+ from ..parallel import ThreadingBackend, AutoBatchingMixin
+ from .._dask import DaskDistributedBackend
+
+ distributed = pytest.importorskip('distributed')
+ dask = pytest.importorskip('dask')
+
+ # These imports need to be after the pytest.importorskip hence the noqa: E402
+ from distributed import Client, LocalCluster, get_client  # noqa: E402
+ from distributed.metrics import time  # noqa: E402
+ # Note: pytest requires manually importing all fixtures used in the tests
+ # and their dependencies.
+ from distributed.utils_test import cluster, inc, cleanup  # noqa: E402, F401
+
+
+ def noop(*args, **kwargs):
+     pass
+
+
+ def slow_raise_value_error(condition, duration=0.05):
+     sleep(duration)
+     if condition:
+         raise ValueError("condition evaluated to True")
+
+
+ def count_events(event_name, client):
+     worker_events = client.run(lambda dask_worker: dask_worker.log)
+     event_counts = {}
+     for w, events in worker_events.items():
+         event_counts[w] = len([event for event in list(events)
+                                if event[1] == event_name])
+     return event_counts
+
+
+ def test_simple(loop):
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as client:  # noqa: F841
+             with parallel_config(backend='dask'):
+                 seq = Parallel()(delayed(inc)(i) for i in range(10))
+                 assert seq == [inc(i) for i in range(10)]
+
+                 with pytest.raises(ValueError):
+                     Parallel()(delayed(slow_raise_value_error)(i == 3)
+                                for i in range(10))
+
+                 seq = Parallel()(delayed(inc)(i) for i in range(10))
+                 assert seq == [inc(i) for i in range(10)]
+
+
+ def test_dask_backend_uses_autobatching(loop):
+     assert (DaskDistributedBackend.compute_batch_size
+             is AutoBatchingMixin.compute_batch_size)
+
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as client:  # noqa: F841
+             with parallel_config(backend='dask'):
+                 with Parallel() as parallel:
+                     # The backend should be initialized with a default
+                     # batch size of 1:
+                     backend = parallel._backend
+                     assert isinstance(backend, DaskDistributedBackend)
+                     assert backend.parallel is parallel
+                     assert backend._effective_batch_size == 1
+
+                     # Launch many short tasks that should trigger
+                     # auto-batching:
+                     parallel(
+                         delayed(lambda: None)()
+                         for _ in range(int(1e4))
+                     )
+                     assert backend._effective_batch_size > 10
+
+
+ def random2():
+     return random()
+
+
+ def test_dont_assume_function_purity(loop):
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as client:  # noqa: F841
+             with parallel_config(backend='dask'):
+                 x, y = Parallel()(delayed(random2)() for i in range(2))
+                 assert x != y
+
+
+ @pytest.mark.parametrize("mixed", [True, False])
+ def test_dask_funcname(loop, mixed):
+     from joblib._dask import Batch
+     if not mixed:
+         tasks = [delayed(inc)(i) for i in range(4)]
+         batch_repr = 'batch_of_inc_4_calls'
+     else:
+         tasks = [
+             delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4)
+         ]
+         batch_repr = 'mixed_batch_of_inc_4_calls'
+
+     assert repr(Batch(tasks)) == batch_repr
+
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as client:
+             with parallel_config(backend='dask'):
+                 _ = Parallel(batch_size=2, pre_dispatch='all')(tasks)
+
+             def f(dask_scheduler):
+                 return list(dask_scheduler.transition_log)
+             batch_repr = batch_repr.replace('4', '2')
+             log = client.run_on_scheduler(f)
+             assert all('batch_of_inc' in tup[0] for tup in log)
+
+
+ def test_no_undesired_distributed_cache_hit():
+     # Dask has a pickle cache for callables that are called many times. Because
+     # the dask backends used to wrap both the functions and the arguments
+     # under instances of the Batch callable class this caching mechanism could
+     # lead to bugs as described in: https://github.com/joblib/joblib/pull/1055
+     # The joblib-dask backend has been refactored to avoid bundling the
+     # arguments as an attribute of the Batch instance to avoid this problem.
+     # This test serves as a non-regression check.
+
+     # Use a large number of input arguments to give the AutoBatchingMixin
+     # enough tasks to kick in.
+     lists = [[] for _ in range(100)]
+     np = pytest.importorskip('numpy')
+     X = np.arange(int(1e6))
+
+     def isolated_operation(list_, data=None):
+         if data is not None:
+             np.testing.assert_array_equal(data, X)
+         list_.append(uuid4().hex)
+         return list_
+
+     cluster = LocalCluster(n_workers=1, threads_per_worker=2)
+     client = Client(cluster)
+     try:
+         with parallel_config(backend='dask'):
+             # dispatches joblib.parallel.BatchedCalls
+             res = Parallel()(
+                 delayed(isolated_operation)(list_) for list_ in lists
+             )
+
+         # The original arguments should not have been mutated as the mutation
+         # happens in the dask worker process.
+         assert lists == [[] for _ in range(100)]
+
+         # Here we did not pass any large numpy array as argument to
+         # isolated_operation so no scattering event should happen under the
+         # hood.
+         counts = count_events('receive-from-scatter', client)
+         assert sum(counts.values()) == 0
+         assert all([len(r) == 1 for r in res])
+
+         with parallel_config(backend='dask'):
+             # Append a large array which will be scattered by dask, and
+             # dispatch joblib._dask.Batch
+             res = Parallel()(
+                 delayed(isolated_operation)(list_, data=X) for list_ in lists
+             )
+
+         # This time, auto-scattering should have kicked in.
+         counts = count_events('receive-from-scatter', client)
+         assert sum(counts.values()) > 0
+         assert all([len(r) == 1 for r in res])
+     finally:
+         client.close(timeout=30)
+         cluster.close(timeout=30)
+
+
+ class CountSerialized(object):
+     def __init__(self, x):
+         self.x = x
+         self.count = 0
+
+     def __add__(self, other):
+         return self.x + getattr(other, 'x', other)
+
+     __radd__ = __add__
+
+     def __reduce__(self):
+         self.count += 1
+         return (CountSerialized, (self.x,))
+
+
+ def add5(a, b, c, d=0, e=0):
+     return a + b + c + d + e
+
+
+ def test_manual_scatter(loop):
+     x = CountSerialized(1)
+     y = CountSerialized(2)
+     z = CountSerialized(3)
+
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as client:  # noqa: F841
+             with parallel_config(backend='dask', scatter=[x, y]):
+                 f = delayed(add5)
+                 tasks = [f(x, y, z, d=4, e=5),
+                          f(x, z, y, d=5, e=4),
+                          f(y, x, z, d=x, e=5),
+                          f(z, z, x, d=z, e=y)]
+                 expected = [func(*args, **kwargs)
+                             for func, args, kwargs in tasks]
+                 results = Parallel()(tasks)
+
+             # Scatter must take a list/tuple
+             with pytest.raises(TypeError):
+                 with parallel_config(backend='dask', loop=loop, scatter=1):
+                     pass
+
+     assert results == expected
+
+     # Scattered variables only serialized once
+     assert x.count == 1
+     assert y.count == 1
+     # Depending on the version of distributed, the unscattered z variable
+     # is either pickled 4 or 6 times, possibly because of the memoization
+     # of objects that appear several times in the arguments of a delayed
+     # task.
+     assert z.count in (4, 6)
+
+
+ # When the same IOLoop is used for multiple clients in a row, use
+ # loop_in_thread instead of loop to prevent the Client from closing it. See
+ # dask/distributed #4112
+ def test_auto_scatter(loop_in_thread):
+     np = pytest.importorskip('numpy')
+     data1 = np.ones(int(1e4), dtype=np.uint8)
+     data2 = np.ones(int(1e4), dtype=np.uint8)
+     data_to_process = ([data1] * 3) + ([data2] * 3)
+
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop_in_thread) as client:
+             with parallel_config(backend='dask'):
+                 # Passing the same data as arg and kwarg triggers a single
+                 # scatter operation whose result is reused.
+                 Parallel()(delayed(noop)(data, data, i, opt=data)
+                            for i, data in enumerate(data_to_process))
+             # By default large arrays are automatically scattered with
+             # broadcast=1 which means that one worker must directly receive
+             # the data from the scatter operation once.
+             counts = count_events('receive-from-scatter', client)
+             assert counts[a['address']] + counts[b['address']] == 2
+
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop_in_thread) as client:
+             with parallel_config(backend='dask'):
+                 Parallel()(delayed(noop)(data1[:3], i) for i in range(5))
+             # Small arrays are passed within the task definition without going
+             # through a scatter operation.
+             counts = count_events('receive-from-scatter', client)
+             assert counts[a['address']] == 0
+             assert counts[b['address']] == 0
+
+
+ @pytest.mark.parametrize("retry_no", list(range(2)))
+ def test_nested_scatter(loop, retry_no):
+
+     np = pytest.importorskip('numpy')
+
+     NUM_INNER_TASKS = 10
+     NUM_OUTER_TASKS = 10
+
+     def my_sum(x, i, j):
+         return np.sum(x)
+
+     def outer_function_joblib(array, i):
+         client = get_client()  # noqa
+         with parallel_config(backend="dask"):
+             results = Parallel()(
+                 delayed(my_sum)(array[j:], i, j) for j in range(
+                     NUM_INNER_TASKS)
+             )
+         return sum(results)
+
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as _:
+             with parallel_config(backend="dask"):
+                 my_array = np.ones(10000)
+                 _ = Parallel()(
+                     delayed(outer_function_joblib)(
+                         my_array[i:], i) for i in range(NUM_OUTER_TASKS)
+                 )
+
+
+ def test_nested_backend_context_manager(loop_in_thread):
+     def get_nested_pids():
+         pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2)))
+         pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2)))
+         return pids
+
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop_in_thread) as client:
+             with parallel_config(backend='dask'):
+                 pid_groups = Parallel(n_jobs=2)(
+                     delayed(get_nested_pids)()
+                     for _ in range(10)
+                 )
+                 for pid_group in pid_groups:
+                     assert len(set(pid_group)) <= 2
+
+         # No deadlocks
+         with Client(s['address'], loop=loop_in_thread) as client:  # noqa: F841
+             with parallel_config(backend='dask'):
+                 pid_groups = Parallel(n_jobs=2)(
+                     delayed(get_nested_pids)()
+                     for _ in range(10)
+                 )
+                 for pid_group in pid_groups:
+                     assert len(set(pid_group)) <= 2
+
+
+ def test_nested_backend_context_manager_implicit_n_jobs(loop):
+     # Check that Parallel with no explicit n_jobs value automatically selects
+     # all the dask workers, including in nested calls.
+
+     def _backend_type(p):
+         return p._backend.__class__.__name__
+
+     def get_nested_implicit_n_jobs():
+         with Parallel() as p:
+             return _backend_type(p), p.n_jobs
+
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as client:  # noqa: F841
+             with parallel_config(backend='dask'):
+                 with Parallel() as p:
+                     assert _backend_type(p) == "DaskDistributedBackend"
+                     assert p.n_jobs == -1
+                     all_nested_n_jobs = p(
+                         delayed(get_nested_implicit_n_jobs)()
+                         for _ in range(2)
+                     )
+                 for backend_type, nested_n_jobs in all_nested_n_jobs:
+                     assert backend_type == "DaskDistributedBackend"
+                     assert nested_n_jobs == -1
+
+
+ def test_errors(loop):
+     with pytest.raises(ValueError) as info:
+         with parallel_config(backend='dask'):
+             pass
+
+     assert "create a dask client" in str(info.value).lower()
+
+
+ def test_correct_nested_backend(loop):
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as client:  # noqa: F841
+             # No requirement, should be us
+             with parallel_config(backend='dask'):
+                 result = Parallel(n_jobs=2)(
+                     delayed(outer)(nested_require=None) for _ in range(1))
+                 assert isinstance(result[0][0][0], DaskDistributedBackend)
+
+             # Require threads, should be threading
+             with parallel_config(backend='dask'):
+                 result = Parallel(n_jobs=2)(
+                     delayed(outer)(nested_require='sharedmem')
+                     for _ in range(1))
+                 assert isinstance(result[0][0][0], ThreadingBackend)
+
+
+ def outer(nested_require):
+     return Parallel(n_jobs=2, prefer='threads')(
+         delayed(middle)(nested_require) for _ in range(1)
+     )
+
+
+ def middle(require):
+     return Parallel(n_jobs=2, require=require)(
+         delayed(inner)() for _ in range(1)
+     )
+
+
+ def inner():
+     return Parallel()._backend
+
+
+ def test_secede_with_no_processes(loop):
+     # https://github.com/dask/distributed/issues/1775
+     with Client(loop=loop, processes=False, set_as_default=True):
+         with parallel_config(backend='dask'):
+             Parallel(n_jobs=4)(delayed(id)(i) for i in range(2))
+
+
+ def _worker_address(_):
+     from distributed import get_worker
+     return get_worker().address
+
+
+ def test_dask_backend_keywords(loop):
+     with cluster() as (s, [a, b]):
+         with Client(s['address'], loop=loop) as client:  # noqa: F841
+             with parallel_config(backend='dask', workers=a['address']):
+                 seq = Parallel()(
+                     delayed(_worker_address)(i) for i in range(10))
+                 assert seq == [a['address']] * 10
+
+             with parallel_config(backend='dask', workers=b['address']):
+                 seq = Parallel()(
+                     delayed(_worker_address)(i) for i in range(10))
+                 assert seq == [b['address']] * 10
+
+
+ def test_scheduler_tasks_cleanup(loop):
+     with Client(processes=False, loop=loop) as client:
+         with parallel_config(backend='dask'):
+             Parallel()(delayed(inc)(i) for i in range(10))
+
+         start = time()
+         while client.cluster.scheduler.tasks:
+             sleep(0.01)
+             assert time() < start + 5
+
+         assert not client.futures
+
+
+ @pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"])
+ @pytest.mark.skipif(
+     distributed.__version__ <= '2.1.1' and distributed.__version__ >= '1.28.0',
+     reason="distributed bug - https://github.com/dask/distributed/pull/2841")
+ def test_wait_for_workers(cluster_strategy):
+     cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
+     client = Client(cluster)
+     if cluster_strategy == "adaptive":
+         cluster.adapt(minimum=0, maximum=2)
+     elif cluster_strategy == "late_scaling":
+         # Tell the cluster to start workers but this is a non-blocking call
+         # and new workers might take time to connect. In this case the Parallel
+         # call should wait for at least one worker to come up before starting
+         # to schedule work.
+         cluster.scale(2)
+     try:
+         with parallel_config(backend='dask'):
+             # The following should wait a bit for at least one worker to
+             # become available.
+             Parallel()(delayed(inc)(i) for i in range(10))
+     finally:
+         client.close()
+         cluster.close()
+
+
+ def test_wait_for_workers_timeout():
+     # Start a cluster with 0 worker:
+     cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2)
+     client = Client(cluster)
+     try:
+         with parallel_config(backend='dask', wait_for_workers_timeout=0.1):
+             # Short timeout: DaskDistributedBackend
+             msg = "DaskDistributedBackend has no worker after 0.1 seconds."
+             with pytest.raises(TimeoutError, match=msg):
+                 Parallel()(delayed(inc)(i) for i in range(10))
+
+         with parallel_config(backend='dask', wait_for_workers_timeout=0):
+             # No timeout: fallback to generic joblib failure:
+             msg = "DaskDistributedBackend has no active worker"
+             with pytest.raises(RuntimeError, match=msg):
+                 Parallel()(delayed(inc)(i) for i in range(10))
+     finally:
+         client.close()
+         cluster.close()
+
+
+ @pytest.mark.parametrize("backend", ["loky", "multiprocessing"])
+ def test_joblib_warning_inside_dask_daemonic_worker(backend):
+     cluster = LocalCluster(n_workers=2)
+     client = Client(cluster)
+     try:
+
+         def func_using_joblib_parallel():
+             # Somehow trying to check the warning type here (e.g. with
+             # pytest.warns(UserWarning)) makes the test hang. Work-around:
+             # return the warning record to the client and the warning check is
+             # done client-side.
+             with warnings.catch_warnings(record=True) as record:
+                 Parallel(n_jobs=2, backend=backend)(
+                     delayed(inc)(i) for i in range(10))
+
+             return record
+
+         fut = client.submit(func_using_joblib_parallel)
+         record = fut.result()
+
+         assert len(record) == 1
+         warning = record[0].message
+         assert isinstance(warning, UserWarning)
+         assert "distributed.worker.daemon" in str(warning)
+     finally:
+         client.close(timeout=30)
+         cluster.close(timeout=30)
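Note: the common setup shared by the tests above is that a distributed Client must already exist when `parallel_config(backend='dask')` is entered (test_errors checks the failure mode when it does not). A minimal sketch, assuming the optional `distributed` package is installed:

    from distributed import Client
    from joblib import Parallel, delayed, parallel_config

    if __name__ == "__main__":
        with Client(processes=False):  # in-process cluster, no forking
            with parallel_config(backend="dask"):
                print(Parallel()(delayed(abs)(-i) for i in range(5)))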
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_hashing.py ADDED
@@ -0,0 +1,495 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Test the hashing module.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2009 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+
9
+ import time
10
+ import hashlib
11
+ import sys
12
+ import gc
13
+ import io
14
+ import collections
15
+ import itertools
16
+ import pickle
17
+ import random
18
+ from concurrent.futures import ProcessPoolExecutor
19
+ from decimal import Decimal
20
+
21
+ from joblib.hashing import hash
22
+ from joblib.func_inspect import filter_args
23
+ from joblib.memory import Memory
24
+ from joblib.testing import raises, skipif, fixture, parametrize
25
+ from joblib.test.common import np, with_numpy
26
+
27
+
28
+ def unicode(s):
29
+ return s
30
+
31
+
32
+ ###############################################################################
33
+ # Helper functions for the tests
34
+ def time_func(func, *args):
35
+ """ Time function func on *args.
36
+ """
37
+ times = list()
38
+ for _ in range(3):
39
+ t1 = time.time()
40
+ func(*args)
41
+ times.append(time.time() - t1)
42
+ return min(times)
43
+
44
+
45
+ def relative_time(func1, func2, *args):
46
+ """ Return the relative time between func1 and func2 applied on
47
+ *args.
48
+ """
49
+ time_func1 = time_func(func1, *args)
50
+ time_func2 = time_func(func2, *args)
51
+ relative_diff = 0.5 * (abs(time_func1 - time_func2)
52
+ / (time_func1 + time_func2))
53
+ return relative_diff
54
+
55
+
56
+ class Klass(object):
57
+
58
+ def f(self, x):
59
+ return x
60
+
61
+
62
+ class KlassWithCachedMethod(object):
63
+
64
+ def __init__(self, cachedir):
65
+ mem = Memory(location=cachedir)
66
+ self.f = mem.cache(self.f)
67
+
68
+ def f(self, x):
69
+ return x
70
+
71
+
72
+ ###############################################################################
73
+ # Tests
74
+
75
+ input_list = [1, 2, 1., 2., 1 + 1j, 2. + 1j,
76
+ 'a', 'b',
77
+ (1,), (1, 1,), [1, ], [1, 1, ],
78
+ {1: 1}, {1: 2}, {2: 1},
79
+ None,
80
+ gc.collect,
81
+ [1, ].append,
82
+ # Next 2 sets have unorderable elements in python 3.
83
+ set(('a', 1)),
84
+ set(('a', 1, ('a', 1))),
85
+ # Next 2 dicts have unorderable type of keys in python 3.
86
+ {'a': 1, 1: 2},
87
+ {'a': 1, 1: 2, 'd': {'a': 1}}]
88
+
89
+
90
+ @parametrize('obj1', input_list)
91
+ @parametrize('obj2', input_list)
92
+ def test_trivial_hash(obj1, obj2):
93
+ """Smoke test hash on various types."""
94
+ # Check that 2 objects have the same hash only if they are the same.
95
+ are_hashes_equal = hash(obj1) == hash(obj2)
96
+ are_objs_identical = obj1 is obj2
97
+ assert are_hashes_equal == are_objs_identical
98
+
99
+
100
+ def test_hash_methods():
101
+ # Check that hashing instance methods works
102
+ a = io.StringIO(unicode('a'))
103
+ assert hash(a.flush) == hash(a.flush)
104
+ a1 = collections.deque(range(10))
105
+ a2 = collections.deque(range(9))
106
+ assert hash(a1.extend) != hash(a2.extend)
107
+
108
+
109
+ @fixture(scope='function')
110
+ @with_numpy
111
+ def three_np_arrays():
112
+ rnd = np.random.RandomState(0)
113
+ arr1 = rnd.random_sample((10, 10))
114
+ arr2 = arr1.copy()
115
+ arr3 = arr2.copy()
116
+ arr3[0] += 1
117
+ return arr1, arr2, arr3
118
+
119
+
120
+ def test_hash_numpy_arrays(three_np_arrays):
121
+ arr1, arr2, arr3 = three_np_arrays
122
+
123
+ for obj1, obj2 in itertools.product(three_np_arrays, repeat=2):
124
+ are_hashes_equal = hash(obj1) == hash(obj2)
125
+ are_arrays_equal = np.all(obj1 == obj2)
126
+ assert are_hashes_equal == are_arrays_equal
127
+
128
+ assert hash(arr1) != hash(arr1.T)
129
+
130
+
131
+ def test_hash_numpy_dict_of_arrays(three_np_arrays):
132
+ arr1, arr2, arr3 = three_np_arrays
133
+
134
+ d1 = {1: arr1, 2: arr2}
135
+ d2 = {1: arr2, 2: arr1}
136
+ d3 = {1: arr2, 2: arr3}
137
+
138
+ assert hash(d1) == hash(d2)
139
+ assert hash(d1) != hash(d3)
140
+
141
+
142
+ @with_numpy
143
+ @parametrize('dtype', ['datetime64[s]', 'timedelta64[D]'])
144
+ def test_numpy_datetime_array(dtype):
145
+ # memoryview is not supported for some dtypes e.g. datetime64
146
+ # see https://github.com/joblib/joblib/issues/188 for more details
147
+ a_hash = hash(np.arange(10))
148
+ array = np.arange(0, 10, dtype=dtype)
149
+ assert hash(array) != a_hash
150
+
151
+
152
+ @with_numpy
153
+ def test_hash_numpy_noncontiguous():
154
+ a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
155
+ order='F')[:, :1, :]
156
+ b = np.ascontiguousarray(a)
157
+ assert hash(a) != hash(b)
158
+
159
+ c = np.asfortranarray(a)
160
+ assert hash(a) != hash(c)
161
+
162
+
163
+ @with_numpy
164
+ @parametrize('coerce_mmap', [True, False])
165
+ def test_hash_memmap(tmpdir, coerce_mmap):
166
+ """Check that memmap and arrays hash identically if coerce_mmap is True."""
167
+ filename = tmpdir.join('memmap_temp').strpath
168
+ try:
169
+ m = np.memmap(filename, shape=(10, 10), mode='w+')
170
+ a = np.asarray(m)
171
+ are_hashes_equal = (hash(a, coerce_mmap=coerce_mmap) ==
172
+ hash(m, coerce_mmap=coerce_mmap))
173
+ assert are_hashes_equal == coerce_mmap
174
+ finally:
175
+ if 'm' in locals():
176
+ del m
177
+ # Force a garbage-collection cycle, to be certain that the
178
+ # object is deleted, and we don't run into a problem under
179
+ # Windows with a file handle still open.
180
+ gc.collect()
181
+
182
+
183
+ @with_numpy
184
+ @skipif(sys.platform == 'win32', reason='This test is not stable under windows'
185
+ ' for some reason')
186
+ def test_hash_numpy_performance():
187
+ """ Check the performance of hashing numpy arrays:
188
+
189
+ In [22]: a = np.random.random(1000000)
190
+
191
+ In [23]: %timeit hashlib.md5(a).hexdigest()
192
+ 100 loops, best of 3: 20.7 ms per loop
193
+
194
+ In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest()
195
+ 1 loops, best of 3: 73.1 ms per loop
196
+
197
+ In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest()
198
+ 10 loops, best of 3: 53.9 ms per loop
199
+
200
+ In [26]: %timeit hash(a)
201
+ 100 loops, best of 3: 20.8 ms per loop
202
+ """
203
+ rnd = np.random.RandomState(0)
204
+ a = rnd.random_sample(1000000)
205
+
206
+ def md5_hash(x):
207
+ return hashlib.md5(memoryview(x)).hexdigest()
208
+
209
+ relative_diff = relative_time(md5_hash, hash, a)
210
+ assert relative_diff < 0.3
211
+
212
+ # Check that hashing a tuple of 3 arrays takes approximately
213
+ # 3 times as much as hashing one array
214
+ time_hashlib = 3 * time_func(md5_hash, a)
215
+ time_hash = time_func(hash, (a, a, a))
216
+ relative_diff = 0.5 * (abs(time_hash - time_hashlib)
217
+ / (time_hash + time_hashlib))
218
+ assert relative_diff < 0.3
219
+
220
+
221
+ def test_bound_methods_hash():
222
+ """ Make sure that calling the same method on two different instances
223
+ of the same class does resolve to the same hashes.
224
+ """
225
+ a = Klass()
226
+ b = Klass()
227
+ assert (hash(filter_args(a.f, [], (1, ))) ==
228
+ hash(filter_args(b.f, [], (1, ))))
229
+
230
+
231
+ def test_bound_cached_methods_hash(tmpdir):
232
+ """ Make sure that calling the same _cached_ method on two different
233
+ instances of the same class does resolve to the same hashes.
234
+ """
235
+ a = KlassWithCachedMethod(tmpdir.strpath)
236
+ b = KlassWithCachedMethod(tmpdir.strpath)
237
+ assert (hash(filter_args(a.f.func, [], (1, ))) ==
238
+ hash(filter_args(b.f.func, [], (1, ))))
239
+
240
+
241
+ @with_numpy
242
+ def test_hash_object_dtype():
243
+ """ Make sure that ndarrays with dtype `object' hash correctly."""
244
+
245
+ a = np.array([np.arange(i) for i in range(6)], dtype=object)
246
+ b = np.array([np.arange(i) for i in range(6)], dtype=object)
247
+
248
+ assert hash(a) == hash(b)
249
+
250
+
251
+ @with_numpy
252
+ def test_numpy_scalar():
253
+ # Numpy scalars are built from compiled functions, and lead to strange
254
+ # pickling paths being explored, which can give hash collisions
255
+ a = np.float64(2.0)
256
+ b = np.float64(3.0)
257
+ assert hash(a) != hash(b)
258
+
259
+
260
+ def test_dict_hash(tmpdir):
261
+ # Check that dictionaries hash consistently, even though the ordering
262
+ # of the keys is not guaranteed
263
+ k = KlassWithCachedMethod(tmpdir.strpath)
264
+
265
+ d = {'#s12069__c_maps.nii.gz': [33],
266
+ '#s12158__c_maps.nii.gz': [33],
267
+ '#s12258__c_maps.nii.gz': [33],
268
+ '#s12277__c_maps.nii.gz': [33],
269
+ '#s12300__c_maps.nii.gz': [33],
270
+ '#s12401__c_maps.nii.gz': [33],
271
+ '#s12430__c_maps.nii.gz': [33],
272
+ '#s13817__c_maps.nii.gz': [33],
273
+ '#s13903__c_maps.nii.gz': [33],
274
+ '#s13916__c_maps.nii.gz': [33],
275
+ '#s13981__c_maps.nii.gz': [33],
276
+ '#s13982__c_maps.nii.gz': [33],
277
+ '#s13983__c_maps.nii.gz': [33]}
278
+
279
+ a = k.f(d)
280
+ b = k.f(a)
281
+
282
+ assert hash(a) == hash(b)
283
+
284
+
285
+ def test_set_hash(tmpdir):
286
+ # Check that sets hash consistently, even though their ordering
287
+ # is not guaranteed
288
+ k = KlassWithCachedMethod(tmpdir.strpath)
289
+
290
+ s = set(['#s12069__c_maps.nii.gz',
291
+ '#s12158__c_maps.nii.gz',
292
+ '#s12258__c_maps.nii.gz',
293
+ '#s12277__c_maps.nii.gz',
294
+ '#s12300__c_maps.nii.gz',
295
+ '#s12401__c_maps.nii.gz',
296
+ '#s12430__c_maps.nii.gz',
297
+ '#s13817__c_maps.nii.gz',
298
+ '#s13903__c_maps.nii.gz',
299
+ '#s13916__c_maps.nii.gz',
300
+ '#s13981__c_maps.nii.gz',
301
+ '#s13982__c_maps.nii.gz',
302
+ '#s13983__c_maps.nii.gz'])
303
+
304
+ a = k.f(s)
305
+ b = k.f(a)
306
+
307
+ assert hash(a) == hash(b)
308
+
309
+
310
+ def test_set_decimal_hash():
311
+ # Check that sets containing decimals hash consistently, even though
312
+ # ordering is not guaranteed
313
+ assert (hash(set([Decimal(0), Decimal('NaN')])) ==
314
+ hash(set([Decimal('NaN'), Decimal(0)])))
315
+
316
+
317
+ def test_string():
318
+ # Test that we obtain the same hash for objects owning several strings,
319
+ # whatever the history of these strings (which are immutable in Python)
320
+ string = 'foo'
321
+ a = {string: 'bar'}
322
+ b = {string: 'bar'}
323
+ c = pickle.loads(pickle.dumps(b))
324
+ assert hash([a, b]) == hash([a, c])
325
+
326
+
327
+ @with_numpy
328
+ def test_numpy_dtype_pickling():
329
+ # numpy dtype hashing is tricky to get right: see #231, #239, #251 #1080,
330
+ # #1082, and explanatory comments inside
331
+ # ``joblib.hashing.NumpyHasher.save``.
332
+
333
+ # In this test, we make sure that the pickling of numpy dtypes is robust to
334
+ # object identity and object copy.
335
+
336
+ dt1 = np.dtype('f4')
337
+ dt2 = np.dtype('f4')
338
+
339
+ # simple dtypes objects are interned
340
+ assert dt1 is dt2
341
+ assert hash(dt1) == hash(dt2)
342
+
343
+ dt1_roundtripped = pickle.loads(pickle.dumps(dt1))
344
+ assert dt1 is not dt1_roundtripped
345
+ assert hash(dt1) == hash(dt1_roundtripped)
346
+
347
+ assert hash([dt1, dt1]) == hash([dt1_roundtripped, dt1_roundtripped])
348
+ assert hash([dt1, dt1]) == hash([dt1, dt1_roundtripped])
349
+
350
+ complex_dt1 = np.dtype(
351
+ [('name', np.str_, 16), ('grades', np.float64, (2,))]
352
+ )
353
+ complex_dt2 = np.dtype(
354
+ [('name', np.str_, 16), ('grades', np.float64, (2,))]
355
+ )
356
+
357
+ # complex dtypes objects are not interned
358
+ assert hash(complex_dt1) == hash(complex_dt2)
359
+
360
+ complex_dt1_roundtripped = pickle.loads(pickle.dumps(complex_dt1))
361
+ assert complex_dt1_roundtripped is not complex_dt1
362
+ assert hash(complex_dt1) == hash(complex_dt1_roundtripped)
363
+
364
+ assert hash([complex_dt1, complex_dt1]) == hash(
365
+ [complex_dt1_roundtripped, complex_dt1_roundtripped]
366
+ )
367
+ assert hash([complex_dt1, complex_dt1]) == hash(
368
+ [complex_dt1_roundtripped, complex_dt1]
369
+ )
370
+
371
+
372
+ @parametrize('to_hash,expected',
373
+ [('This is a string to hash',
374
+ '71b3f47df22cb19431d85d92d0b230b2'),
375
+ (u"C'est l\xe9t\xe9",
376
+ '2d8d189e9b2b0b2e384d93c868c0e576'),
377
+ ((123456, 54321, -98765),
378
+ 'e205227dd82250871fa25aa0ec690aa3'),
379
+ ([random.Random(42).random() for _ in range(5)],
380
+ 'a11ffad81f9682a7d901e6edc3d16c84'),
381
+ ({'abcde': 123, 'sadfas': [-9999, 2, 3]},
382
+ 'aeda150553d4bb5c69f0e69d51b0e2ef')])
383
+ def test_hashes_stay_the_same(to_hash, expected):
384
+ # We want to make sure that hashes don't change with joblib
385
+ # version. For end users, that would mean that they have to
386
+ # regenerate their cache from scratch, which potentially means
387
+ # lengthy recomputations.
388
+ # Expected results have been generated with joblib 0.9.2
389
+ assert hash(to_hash) == expected
390
+
391
+
392
+ @with_numpy
393
+ def test_hashes_are_different_between_c_and_fortran_contiguous_arrays():
394
+ # We want to be sure that the c-contiguous and f-contiguous versions of the
395
+ # same array produce 2 different hashes.
396
+ rng = np.random.RandomState(0)
397
+ arr_c = rng.random_sample((10, 10))
398
+ arr_f = np.asfortranarray(arr_c)
399
+ assert hash(arr_c) != hash(arr_f)
400
+
401
+
402
+ @with_numpy
403
+ def test_0d_array():
404
+ hash(np.array(0))
405
+
406
+
407
+ @with_numpy
408
+ def test_0d_and_1d_array_hashing_is_different():
409
+ assert hash(np.array(0)) != hash(np.array([0]))
410
+
411
+
412
+ @with_numpy
413
+ def test_hashes_stay_the_same_with_numpy_objects():
414
+ # Note: joblib used to test numpy objects hashing by comparing the produced
415
+ # hash of an object with some hard-coded target value to guarantee that
416
+ # hashing remains the same across joblib versions. However, since numpy
417
+ # 1.20 and joblib 1.0, joblib relies on potentially unstable implementation
418
+ # details of numpy to hash np.dtype objects, which makes the stability of
419
+ # hash values across different environments hard to guarantee and to test.
420
+ # As a result, hashing stability across joblib versions becomes best-effort
421
+ # only, and we only test the consistency within a single environment by
422
+ # making sure:
423
+ # - the hash of two copies of the same objects is the same
424
+ # - hashing some object in two different python processes produces the same
425
+ # value. This should be viewed as a proxy for testing hash consistency
426
+ # through time between Python sessions (provided no change in the
427
+ # environment was done between sessions).
428
+
429
+ def create_objects_to_hash():
430
+ rng = np.random.RandomState(42)
431
+ # Being explicit about dtypes in order to avoid
432
+ # architecture-related differences. Also using 'f4' rather than
433
+ # 'f8' for float arrays because 'f8' arrays generated by
434
+ # rng.randn don't seem to be bit-identical on 32bit and
435
+ # 64bit machines.
436
+ to_hash_list = [
437
+ rng.randint(-1000, high=1000, size=50).astype('<i8'),
438
+ tuple(rng.randn(3).astype('<f4') for _ in range(5)),
439
+ [rng.randn(3).astype('<f4') for _ in range(5)],
440
+ {
441
+ -3333: rng.randn(3, 5).astype('<f4'),
442
+ 0: [
443
+ rng.randint(10, size=20).astype('<i8'),
444
+ rng.randn(10).astype('<f4')
445
+ ]
446
+ },
447
+ # Non regression cases for
448
+ # https://github.com/joblib/joblib/issues/308
449
+ np.arange(100, dtype='<i8').reshape((10, 10)),
450
+ # Fortran contiguous array
451
+ np.asfortranarray(np.arange(100, dtype='<i8').reshape((10, 10))),
452
+ # Non contiguous array
453
+ np.arange(100, dtype='<i8').reshape((10, 10))[:, :2],
454
+ ]
455
+ return to_hash_list
456
+
457
+ # Create two lists containing copies of the same objects. joblib.hash
458
+ # should return the same hash for to_hash_list_one[i] and
459
+ # to_hash_list_two[i]
460
+ to_hash_list_one = create_objects_to_hash()
461
+ to_hash_list_two = create_objects_to_hash()
462
+
463
+ e1 = ProcessPoolExecutor(max_workers=1)
464
+ e2 = ProcessPoolExecutor(max_workers=1)
465
+
466
+ try:
467
+ for obj_1, obj_2 in zip(to_hash_list_one, to_hash_list_two):
468
+ # testing consistency of hashes across python processes
469
+ hash_1 = e1.submit(hash, obj_1).result()
470
+ hash_2 = e2.submit(hash, obj_1).result()
471
+ assert hash_1 == hash_2
472
+
473
+ # testing consistency when hashing two copies of the same objects.
474
+ hash_3 = e1.submit(hash, obj_2).result()
475
+ assert hash_1 == hash_3
476
+
477
+ finally:
478
+ e1.shutdown()
479
+ e2.shutdown()
480
+
481
+
482
+ def test_hashing_pickling_error():
483
+ def non_picklable():
484
+ return 42
485
+
486
+ with raises(pickle.PicklingError) as excinfo:
487
+ hash(non_picklable)
488
+ excinfo.match('PicklingError while hashing')
489
+
490
+
491
+ def test_wrong_hash_name():
492
+ msg = "Valid options for 'hash_name' are"
493
+ with raises(ValueError, match=msg):
494
+ data = {'foo': 'bar'}
495
+ hash(data, hash_name='invalid')
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_logger.py ADDED
@@ -0,0 +1,31 @@
1
+ """
2
+ Test the logger module.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2009 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+ import re
9
+
10
+ from joblib.logger import PrintTime
11
+
12
+
13
+ def test_print_time(tmpdir, capsys):
14
+ # A simple smoke test for PrintTime.
15
+ logfile = tmpdir.join('test.log').strpath
16
+ print_time = PrintTime(logfile=logfile)
17
+ print_time('Foo')
18
+ # Create a second time, to smoke test log rotation.
19
+ print_time = PrintTime(logfile=logfile)
20
+ print_time('Foo')
21
+ # And a third time
22
+ print_time = PrintTime(logfile=logfile)
23
+ print_time('Foo')
24
+
25
+ out_printed_text, err_printed_text = capsys.readouterr()
26
+ # Use regexps to be robust to time variations
27
+ match = r"Foo: 0\..s, 0\..min\nFoo: 0\..s, 0..min\nFoo: " + \
28
+ r".\..s, 0..min\n"
29
+ if not re.match(match, err_printed_text):
30
+ raise AssertionError('Expected %s, got %s' %
31
+ (match, err_printed_text))
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_memmapping.py ADDED
@@ -0,0 +1,1191 @@
1
+ import os
2
+ import mmap
3
+ import sys
4
+ import platform
5
+ import gc
6
+ import pickle
7
+ import itertools
8
+ from time import sleep
9
+ import subprocess
10
+ import threading
11
+ import faulthandler
12
+
13
+ import pytest
14
+
15
+ from joblib.test.common import with_numpy, np
16
+ from joblib.test.common import with_multiprocessing
17
+ from joblib.test.common import with_dev_shm
18
+ from joblib.testing import raises, parametrize, skipif
19
+ from joblib.backports import make_memmap
20
+ from joblib.parallel import Parallel, delayed
21
+
22
+ from joblib.pool import MemmappingPool
23
+ from joblib.executor import _TestingMemmappingExecutor as TestExecutor
24
+ from joblib._memmapping_reducer import has_shareable_memory
25
+ from joblib._memmapping_reducer import ArrayMemmapForwardReducer
26
+ from joblib._memmapping_reducer import _strided_from_memmap
27
+ from joblib._memmapping_reducer import _get_temp_dir
28
+ from joblib._memmapping_reducer import _WeakArrayKeyMap
29
+ from joblib._memmapping_reducer import _get_backing_memmap
30
+ import joblib._memmapping_reducer as jmr
31
+
32
+
33
+ def setup_module():
34
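+ # Dump the tracebacks of all threads and exit if this test module hangs
+ # for more than 5 minutes: a guard against deadlocks in the
+ # multiprocessing tests below.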
+ faulthandler.dump_traceback_later(timeout=300, exit=True)
35
+
36
+
37
+ def teardown_module():
38
+ faulthandler.cancel_dump_traceback_later()
39
+
40
+
41
+ def check_memmap_and_send_back(array):
42
+ assert _get_backing_memmap(array) is not None
43
+ return array
44
+
45
+
46
+ def check_array(args):
47
+ """Dummy helper function to be executed in subprocesses
48
+
49
+ Check that the provided array has the expected values in the provided
50
+ range.
51
+
52
+ """
53
+ data, position, expected = args
54
+ np.testing.assert_array_equal(data[position], expected)
55
+
56
+
57
+ def inplace_double(args):
58
+ """Dummy helper function to be executed in subprocesses
59
+
60
+
61
+ Check that the input array has the right values in the provided range
62
+ and perform an in-place modification that doubles the values in that
63
+ range.
64
+
65
+ """
66
+ data, position, expected = args
67
+ assert data[position] == expected
68
+ data[position] *= 2
69
+ np.testing.assert_array_equal(data[position], 2 * expected)
70
+
71
+
72
+ @with_numpy
73
+ @with_multiprocessing
74
+ def test_memmap_based_array_reducing(tmpdir):
75
+ """Check that it is possible to reduce a memmap backed array"""
76
+ assert_array_equal = np.testing.assert_array_equal
77
+ filename = tmpdir.join('test.mmap').strpath
78
+
79
+ # Create a file larger than what will be used by the memmap a below
80
+ buffer = np.memmap(filename, dtype=np.float64, shape=500, mode='w+')
81
+
82
+ # Fill the original buffer with negative markers to detect over- or
83
+ # underflow in case of test failures
84
+ buffer[:] = - 1.0 * np.arange(buffer.shape[0], dtype=buffer.dtype)
85
+ buffer.flush()
86
+
87
+ # Memmap a 3D fortran-ordered array onto an offset subsection of the previous
88
+ # buffer
89
+ a = np.memmap(filename, dtype=np.float64, shape=(3, 5, 4),
90
+ mode='r+', order='F', offset=4)
91
+ a[:] = np.arange(60).reshape(a.shape)
92
+
93
+ # Build various views that share the buffer with the original memmap
94
+
95
+ # b is a sliced memmap view on a memmap instance
96
+ b = a[1:-1, 2:-1, 2:4]
97
+
98
+ # c and d are array views
99
+ c = np.asarray(b)
100
+ d = c.T
101
+
102
+ # Array reducer with auto dumping disabled
103
+ reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True)
104
+
105
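+ # The reducer returns a (reconstructor, args) pair, following pickle's
+ # __reduce__ protocol; calling the pair simulates a pickling round-trip
+ # without going through an actual subprocess.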
+ def reconstruct_array_or_memmap(x):
106
+ cons, args = reducer(x)
107
+ return cons(*args)
108
+
109
+ # Reconstruct original memmap
110
+ a_reconstructed = reconstruct_array_or_memmap(a)
111
+ assert has_shareable_memory(a_reconstructed)
112
+ assert isinstance(a_reconstructed, np.memmap)
113
+ assert_array_equal(a_reconstructed, a)
114
+
115
+ # Reconstruct strided memmap view
116
+ b_reconstructed = reconstruct_array_or_memmap(b)
117
+ assert has_shareable_memory(b_reconstructed)
118
+ assert_array_equal(b_reconstructed, b)
119
+
120
+ # Reconstruct array views on the memmap base
121
+ c_reconstructed = reconstruct_array_or_memmap(c)
122
+ assert not isinstance(c_reconstructed, np.memmap)
123
+ assert has_shareable_memory(c_reconstructed)
124
+ assert_array_equal(c_reconstructed, c)
125
+
126
+ d_reconstructed = reconstruct_array_or_memmap(d)
127
+ assert not isinstance(d_reconstructed, np.memmap)
128
+ assert has_shareable_memory(d_reconstructed)
129
+ assert_array_equal(d_reconstructed, d)
130
+
131
+ # Test graceful degradation on fake memmap instances with in-memory
132
+ # buffers
133
+ a3 = a * 3
134
+ assert not has_shareable_memory(a3)
135
+ a3_reconstructed = reconstruct_array_or_memmap(a3)
136
+ assert not has_shareable_memory(a3_reconstructed)
137
+ assert not isinstance(a3_reconstructed, np.memmap)
138
+ assert_array_equal(a3_reconstructed, a * 3)
139
+
140
+ # Test graceful degradation on arrays derived from fake memmap instances
141
+ b3 = np.asarray(a3)
142
+ assert not has_shareable_memory(b3)
143
+
144
+ b3_reconstructed = reconstruct_array_or_memmap(b3)
145
+ assert isinstance(b3_reconstructed, np.ndarray)
146
+ assert not has_shareable_memory(b3_reconstructed)
147
+ assert_array_equal(b3_reconstructed, b3)
148
+
149
+
150
+ @with_multiprocessing
151
+ @skipif(sys.platform != "win32",
152
+ reason="PermissionError only easily triggerable on Windows")
153
+ def test_resource_tracker_retries_when_permissionerror(tmpdir):
154
+ # Test resource_tracker retry mechanism when unlinking memmaps. See more
155
+ # thorough information in the ``unlink_file`` documentation of joblib.
156
+ filename = tmpdir.join('test.mmap').strpath
157
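+ # Wrapping the script in ``if 1:`` keeps the block uniformly indented
+ # while remaining valid input for ``python -c``.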
+ cmd = """if 1:
158
+ import os
159
+ import numpy as np
160
+ import time
161
+ from joblib.externals.loky.backend import resource_tracker
162
+ resource_tracker.VERBOSE = 1
163
+
164
+ # Start the resource tracker
165
+ resource_tracker.ensure_running()
166
+ time.sleep(1)
167
+
168
+ # Create a file containing numpy data
169
+ memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+')
170
+ memmap[:] = np.arange(10).astype(np.int8).data
171
+ memmap.flush()
172
+ assert os.path.exists(r"{filename}")
173
+ del memmap
174
+
175
+ # Create a np.memmap backed by this file
176
+ memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+')
177
+ resource_tracker.register(r"{filename}", "file")
178
+
179
+ # Ask the resource_tracker to delete the file backing the np.memmap; this
180
+ # should raise a PermissionError that the resource_tracker will log.
181
+ resource_tracker.maybe_unlink(r"{filename}", "file")
182
+
183
+ # Wait for the resource_tracker to process the maybe_unlink before cleaning
184
+ # up the memmap
185
+ time.sleep(2)
186
+ """.format(filename=filename)
187
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
188
+ stdout=subprocess.PIPE)
189
+ p.wait()
190
+ out, err = p.communicate()
191
+ assert p.returncode == 0
192
+ assert out == b''
193
+ msg = 'tried to unlink {}, got PermissionError'.format(filename)
194
+ assert msg in err.decode()
195
+
196
+
197
+ @with_numpy
198
+ @with_multiprocessing
199
+ def test_high_dimension_memmap_array_reducing(tmpdir):
200
+ assert_array_equal = np.testing.assert_array_equal
201
+
202
+ filename = tmpdir.join('test.mmap').strpath
203
+
204
+ # Create a high dimensional memmap
205
+ a = np.memmap(filename, dtype=np.float64, shape=(100, 15, 15, 3),
206
+ mode='w+')
207
+ a[:] = np.arange(100 * 15 * 15 * 3).reshape(a.shape)
208
+
209
+ # Create some slices/indices at various dimensions
210
+ b = a[0:10]
211
+ c = a[:, 5:10]
212
+ d = a[:, :, :, 0]
213
+ e = a[1:3:4]
214
+
215
+ # Array reducer with auto dumping disabled
216
+ reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, 'c', True)
217
+
218
+ def reconstruct_array_or_memmap(x):
219
+ cons, args = reducer(x)
220
+ return cons(*args)
221
+
222
+ a_reconstructed = reconstruct_array_or_memmap(a)
223
+ assert has_shareable_memory(a_reconstructed)
224
+ assert isinstance(a_reconstructed, np.memmap)
225
+ assert_array_equal(a_reconstructed, a)
226
+
227
+ b_reconstructed = reconstruct_array_or_memmap(b)
228
+ assert has_shareable_memory(b_reconstructed)
229
+ assert_array_equal(b_reconstructed, b)
230
+
231
+ c_reconstructed = reconstruct_array_or_memmap(c)
232
+ assert has_shareable_memory(c_reconstructed)
233
+ assert_array_equal(c_reconstructed, c)
234
+
235
+ d_reconstructed = reconstruct_array_or_memmap(d)
236
+ assert has_shareable_memory(d_reconstructed)
237
+ assert_array_equal(d_reconstructed, d)
238
+
239
+ e_reconstructed = reconstruct_array_or_memmap(e)
240
+ assert has_shareable_memory(e_reconstructed)
241
+ assert_array_equal(e_reconstructed, e)
242
+
243
+
244
+ @with_numpy
245
+ def test__strided_from_memmap(tmpdir):
246
+ fname = tmpdir.join('test.mmap').strpath
247
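+ # An offset just past mmap.ALLOCATIONGRANULARITY exercises the code path
+ # where numpy must split the requested offset into a page-aligned mmap
+ # offset plus an in-page remainder.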
+ size = 5 * mmap.ALLOCATIONGRANULARITY
248
+ offset = mmap.ALLOCATIONGRANULARITY + 1
249
+ # This line creates the mmap file that is reused later
250
+ memmap_obj = np.memmap(fname, mode='w+', shape=size + offset)
251
+ # filename, dtype, mode, offset, order, shape, strides, total_buffer_len
252
+ memmap_obj = _strided_from_memmap(fname, dtype='uint8', mode='r',
253
+ offset=offset, order='C', shape=size,
254
+ strides=None, total_buffer_len=None,
255
+ unlink_on_gc_collect=False)
256
+ assert isinstance(memmap_obj, np.memmap)
257
+ assert memmap_obj.offset == offset
258
+ memmap_backed_obj = _strided_from_memmap(
259
+ fname, dtype='uint8', mode='r', offset=offset, order='C',
260
+ shape=(size // 2,), strides=(2,), total_buffer_len=size,
261
+ unlink_on_gc_collect=False
262
+ )
263
+ assert _get_backing_memmap(memmap_backed_obj).offset == offset
264
+
265
+
266
+ @with_numpy
267
+ @with_multiprocessing
268
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
269
+ ids=["multiprocessing", "loky"])
270
+ def test_pool_with_memmap(factory, tmpdir):
271
+ """Check that subprocess can access and update shared memory memmap"""
272
+ assert_array_equal = np.testing.assert_array_equal
273
+
274
+ # Fork the subprocess before allocating the objects to be passed
275
+ pool_temp_folder = tmpdir.mkdir('pool').strpath
276
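+ # With such a tiny max_nbytes, any in-memory array argument would be
+ # dumped to the pool temp folder; the test checks below that
+ # pre-memmapped inputs are forwarded without creating new files.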
+ p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder)
277
+ try:
278
+ filename = tmpdir.join('test.mmap').strpath
279
+ a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
280
+ a.fill(1.0)
281
+
282
+ p.map(inplace_double, [(a, (i, j), 1.0)
283
+ for i in range(a.shape[0])
284
+ for j in range(a.shape[1])])
285
+
286
+ assert_array_equal(a, 2 * np.ones(a.shape))
287
+
288
+ # Open a copy-on-write view on the previous data
289
+ b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode='c')
290
+
291
+ p.map(inplace_double, [(b, (i, j), 2.0)
292
+ for i in range(b.shape[0])
293
+ for j in range(b.shape[1])])
294
+
295
+ # Passing memmap instances to the pool should not trigger the creation
296
+ # of new files on the FS
297
+ assert os.listdir(pool_temp_folder) == []
298
+
299
+ # the original data is untouched
300
+ assert_array_equal(a, 2 * np.ones(a.shape))
301
+ assert_array_equal(b, 2 * np.ones(b.shape))
302
+
303
+ # readonly maps can be read but not updated
304
+ c = np.memmap(filename, dtype=np.float32, shape=(10,), mode='r',
305
+ offset=5 * 4)
306
+
307
+ with raises(AssertionError):
308
+ p.map(check_array, [(c, i, 3.0) for i in range(c.shape[0])])
309
+
310
+ # depending on the version of numpy one can either get a RuntimeError
311
+ # or a ValueError
312
+ with raises((RuntimeError, ValueError)):
313
+ p.map(inplace_double, [(c, i, 2.0) for i in range(c.shape[0])])
314
+ finally:
315
+ # Clean all filehandlers held by the pool
316
+ p.terminate()
317
+ del p
318
+
319
+
320
+ @with_numpy
321
+ @with_multiprocessing
322
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
323
+ ids=["multiprocessing", "loky"])
324
+ def test_pool_with_memmap_array_view(factory, tmpdir):
325
+ """Check that subprocess can access and update shared memory array"""
326
+ assert_array_equal = np.testing.assert_array_equal
327
+
328
+ # Fork the subprocess before allocating the objects to be passed
329
+ pool_temp_folder = tmpdir.mkdir('pool').strpath
330
+ p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder)
331
+ try:
332
+
333
+ filename = tmpdir.join('test.mmap').strpath
334
+ a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode='w+')
335
+ a.fill(1.0)
336
+
337
+ # Create an ndarray view on the memmap instance
338
+ a_view = np.asarray(a)
339
+ assert not isinstance(a_view, np.memmap)
340
+ assert has_shareable_memory(a_view)
341
+
342
+ p.map(inplace_double, [(a_view, (i, j), 1.0)
343
+ for i in range(a.shape[0])
344
+ for j in range(a.shape[1])])
345
+
346
+ # Both a and the a_view have been updated
347
+ assert_array_equal(a, 2 * np.ones(a.shape))
348
+ assert_array_equal(a_view, 2 * np.ones(a.shape))
349
+
350
+ # Passing memmap array view to the pool should not trigger the
351
+ # creation of new files on the FS
352
+ assert os.listdir(pool_temp_folder) == []
353
+
354
+ finally:
355
+ p.terminate()
356
+ del p
357
+
358
+
359
+ @with_numpy
360
+ @with_multiprocessing
361
+ @parametrize("backend", ["multiprocessing", "loky"])
362
+ def test_permission_error_windows_reference_cycle(backend):
363
+ # Non regression test for:
364
+ # https://github.com/joblib/joblib/issues/806
365
+ #
366
+ # The issue happens when trying to delete a memory mapped file that has
367
+ # not yet been closed by one of the worker processes.
368
+ cmd = """if 1:
369
+ import numpy as np
370
+ from joblib import Parallel, delayed
371
+
372
+
373
+ data = np.random.rand(int(2e6)).reshape((int(1e6), 2))
374
+
375
+ # Build a complex cyclic reference that is likely to delay garbage
376
+ # collection of the memmapped array in the worker processes.
377
+ first_list = current_list = [data]
378
+ for i in range(10):
379
+ current_list = [current_list]
380
+ first_list.append(current_list)
381
+
382
+ if __name__ == "__main__":
383
+ results = Parallel(n_jobs=2, backend="{b}")(
384
+ delayed(len)(current_list) for i in range(10))
385
+ assert results == [1] * 10
386
+ """.format(b=backend)
387
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
388
+ stdout=subprocess.PIPE)
389
+ p.wait()
390
+ out, err = p.communicate()
391
+ assert p.returncode == 0, out.decode() + "\n\n" + err.decode()
392
+
393
+
394
+ @with_numpy
395
+ @with_multiprocessing
396
+ @parametrize("backend", ["multiprocessing", "loky"])
397
+ def test_permission_error_windows_memmap_sent_to_parent(backend):
398
+ # Second non-regression test for:
399
+ # https://github.com/joblib/joblib/issues/806
400
+ # Previously, child processes would not convert temporary memmaps to numpy
401
+ # arrays when sending the data back to the parent process. This would lead
402
+ # to permission errors on Windows when deleting joblib's temporary folder,
403
+ # as the memmapped file handles would still be open in the parent process.
404
+ cmd = '''if 1:
405
+ import os
406
+ import time
407
+
408
+ import numpy as np
409
+
410
+ from joblib import Parallel, delayed
411
+ from testutils import return_slice_of_data
412
+
413
+ data = np.ones(int(2e6))
414
+
415
+ if __name__ == '__main__':
416
+ # warm-up call to launch the workers and start the resource_tracker
417
+ _ = Parallel(n_jobs=2, verbose=5, backend='{b}')(
418
+ delayed(id)(i) for i in range(20))
419
+
420
+ time.sleep(0.5)
421
+
422
+ slice_of_data = Parallel(n_jobs=2, verbose=5, backend='{b}')(
423
+ delayed(return_slice_of_data)(data, 0, 20) for _ in range(10))
424
+ '''.format(b=backend)
425
+
426
+ for _ in range(3):
427
+ env = os.environ.copy()
428
+ env['PYTHONPATH'] = os.path.dirname(__file__)
429
+ p = subprocess.Popen([sys.executable, '-c', cmd],
430
+ stderr=subprocess.PIPE,
431
+ stdout=subprocess.PIPE, env=env)
432
+ p.wait()
433
+ out, err = p.communicate()
434
+ assert p.returncode == 0, err
435
+ assert out == b''
436
+ if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]:
437
+ # In early versions of Python 3.8, a reference leak
438
+ # https://github.com/cloudpipe/cloudpickle/issues/327, holds
439
+ # references to pickled objects, generating race condition during
440
+ # cleanup finalizers of joblib and noisy resource_tracker outputs.
441
+ assert b'resource_tracker' not in err
442
+
443
+
444
+ @with_numpy
445
+ @with_multiprocessing
446
+ @parametrize("backend", ["multiprocessing", "loky"])
447
+ def test_parallel_isolated_temp_folders(backend):
448
+ # Test that consecutive Parallel calls use isolated subfolders, even
449
+ # for the loky backend that reuses its executor instance across calls.
450
+ array = np.arange(int(1e2))
451
+ [filename_1] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)(
452
+ delayed(getattr)(array, 'filename') for _ in range(1)
453
+ )
454
+ [filename_2] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)(
455
+ delayed(getattr)(array, 'filename') for _ in range(1)
456
+ )
457
+ assert os.path.dirname(filename_2) != os.path.dirname(filename_1)
458
+
459
+
460
+ @with_numpy
461
+ @with_multiprocessing
462
+ @parametrize("backend", ["multiprocessing", "loky"])
463
+ def test_managed_backend_reuse_temp_folder(backend):
464
+ # Test that calls to a managed parallel object reuse the same memmaps.
465
+ array = np.arange(int(1e2))
466
+ with Parallel(n_jobs=2, backend=backend, max_nbytes=10) as p:
467
+ [filename_1] = p(
468
+ delayed(getattr)(array, 'filename') for _ in range(1)
469
+ )
470
+ [filename_2] = p(
471
+ delayed(getattr)(array, 'filename') for _ in range(1)
472
+ )
473
+ assert os.path.dirname(filename_2) == os.path.dirname(filename_1)
474
+
475
+
476
+ @with_numpy
477
+ @with_multiprocessing
478
+ def test_memmapping_temp_folder_thread_safety():
479
+ # Concurrent calls to Parallel with the loky backend will use the same
480
+ # executor, and thus the same reducers. Make sure that those reducers use
481
+ # different temporary folders depending on which Parallel objects called
482
+ # them, which is necessary to limit potential race conditions during the
483
+ # garbage collection of temporary memmaps.
484
+ array = np.arange(int(1e2))
485
+
486
+ temp_dirs_thread_1 = set()
487
+ temp_dirs_thread_2 = set()
488
+
489
+ def concurrent_get_filename(array, temp_dirs):
490
+ with Parallel(backend='loky', n_jobs=2, max_nbytes=10) as p:
491
+ for i in range(10):
492
+ [filename] = p(
493
+ delayed(getattr)(array, 'filename') for _ in range(1)
494
+ )
495
+ temp_dirs.add(os.path.dirname(filename))
496
+
497
+ t1 = threading.Thread(
498
+ target=concurrent_get_filename, args=(array, temp_dirs_thread_1)
499
+ )
500
+ t2 = threading.Thread(
501
+ target=concurrent_get_filename, args=(array, temp_dirs_thread_2)
502
+ )
503
+
504
+ t1.start()
505
+ t2.start()
506
+
507
+ t1.join()
508
+ t2.join()
509
+
510
+ assert len(temp_dirs_thread_1) == 1
511
+ assert len(temp_dirs_thread_2) == 1
512
+
513
+ assert temp_dirs_thread_1 != temp_dirs_thread_2
514
+
515
+
516
+ @with_numpy
517
+ @with_multiprocessing
518
+ def test_multithreaded_parallel_termination_resource_tracker_silent():
519
+ # Test that concurrent termination attempts on the same executor do not
520
+ # emit any spurious error from the resource_tracker. We test various
521
+ # situations where 0, 1 or both parallel calls send a task that will
522
+ # make the worker (and thus the whole Parallel call) error out.
523
+ cmd = '''if 1:
524
+ import os
525
+ import numpy as np
526
+ from joblib import Parallel, delayed
527
+ from joblib.externals.loky.backend import resource_tracker
528
+ from concurrent.futures import ThreadPoolExecutor, wait
529
+
530
+ resource_tracker.VERBOSE = 0
531
+
532
+ array = np.arange(int(1e2))
533
+
534
+ temp_dirs_thread_1 = set()
535
+ temp_dirs_thread_2 = set()
536
+
537
+
538
+ def raise_error(array):
539
+ raise ValueError
540
+
541
+
542
+ def parallel_get_filename(array, temp_dirs):
543
+ with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
544
+ for i in range(10):
545
+ [filename] = p(
546
+ delayed(getattr)(array, "filename") for _ in range(1)
547
+ )
548
+ temp_dirs.add(os.path.dirname(filename))
549
+
550
+
551
+ def parallel_raise(array, temp_dirs):
552
+ with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
553
+ for i in range(10):
554
+ [filename] = p(
555
+ delayed(raise_error)(array) for _ in range(1)
556
+ )
557
+ temp_dirs.add(os.path.dirname(filename))
558
+
559
+
560
+ executor = ThreadPoolExecutor(max_workers=2)
561
+
562
+ # both function calls will use the same loky executor, but with a
563
+ # different Parallel object.
564
+ future_1 = executor.submit({f1}, array, temp_dirs_thread_1)
565
+ future_2 = executor.submit({f2}, array, temp_dirs_thread_2)
566
+
567
+ # Wait for both threads to terminate their backend
568
+ wait([future_1, future_2])
569
+
570
+ future_1.result()
571
+ future_2.result()
572
+ '''
573
+ functions_and_returncodes = [
574
+ ("parallel_get_filename", "parallel_get_filename", 0),
575
+ ("parallel_get_filename", "parallel_raise", 1),
576
+ ("parallel_raise", "parallel_raise", 1)
577
+ ]
578
+
579
+ for f1, f2, returncode in functions_and_returncodes:
580
+ p = subprocess.Popen([sys.executable, '-c', cmd.format(f1=f1, f2=f2)],
581
+ stderr=subprocess.PIPE, stdout=subprocess.PIPE)
582
+ p.wait()
583
+ out, err = p.communicate()
584
+ assert p.returncode == returncode, out.decode()
585
+ assert b"resource_tracker" not in err, err.decode()
586
+
587
+
588
+ @with_numpy
589
+ @with_multiprocessing
590
+ @parametrize("backend", ["multiprocessing", "loky"])
591
+ def test_many_parallel_calls_on_same_object(backend):
592
+ # After #966 got merged, consecutive Parallel objects were sharing the same
593
+ # temp folder, which would lead to race conditions during the
594
+ # management of temporary resources by the resource_tracker. This is a
595
+ # non-regression test that makes sure that consecutive Parallel operations
596
+ # on the same object do not error out.
597
+ cmd = '''if 1:
598
+ import os
599
+ import time
600
+
601
+ import numpy as np
602
+
603
+ from joblib import Parallel, delayed
604
+ from testutils import return_slice_of_data
605
+
606
+ data = np.ones(100)
607
+
608
+ if __name__ == '__main__':
609
+ for i in range(5):
610
+ slice_of_data = Parallel(
611
+ n_jobs=2, max_nbytes=1, backend='{b}')(
612
+ delayed(return_slice_of_data)(data, 0, 20)
613
+ for _ in range(10)
614
+ )
615
+ '''.format(b=backend)
616
+ env = os.environ.copy()
617
+ env['PYTHONPATH'] = os.path.dirname(__file__)
618
+ p = subprocess.Popen(
619
+ [sys.executable, '-c', cmd],
620
+ stderr=subprocess.PIPE,
621
+ stdout=subprocess.PIPE,
622
+ env=env,
623
+ )
624
+ p.wait()
625
+ out, err = p.communicate()
626
+ assert p.returncode == 0, err
627
+ assert out == b''
628
+ if sys.version_info[:3] not in [(3, 8, 0), (3, 8, 1)]:
629
+ # In early versions of Python 3.8, a reference leak
630
+ # https://github.com/cloudpipe/cloudpickle/issues/327, holds
631
+ # references to pickled objects, generating race conditions during
632
+ # cleanup finalizers of joblib and noisy resource_tracker outputs.
633
+ assert b'resource_tracker' not in err
634
+
635
+
636
+ @with_numpy
637
+ @with_multiprocessing
638
+ @parametrize("backend", ["multiprocessing", "loky"])
639
+ def test_memmap_returned_as_regular_array(backend):
640
+ data = np.ones(int(1e3))
641
+ # Check that child processes send temporary memmaps back as numpy arrays.
642
+ [result] = Parallel(n_jobs=2, backend=backend, max_nbytes=100)(
643
+ delayed(check_memmap_and_send_back)(data) for _ in range(1))
644
+ assert _get_backing_memmap(result) is None
645
+
646
+
647
+ @with_numpy
648
+ @with_multiprocessing
649
+ @parametrize("backend", ["multiprocessing", "loky"])
650
+ def test_resource_tracker_silent_when_reference_cycles(backend):
651
+ # There are a variety of reasons that can make joblib with the loky backend
652
+ # output noisy warnings when a reference cycle is preventing a memmap from
653
+ # being garbage collected. In particular, joblib's main process finalizer
654
+ # deletes the temporary folder if it was not done before, which can
655
+ # interact badly with the resource_tracker. We don't risk leaking any
656
+ # resources, but this will likely make joblib output a lot of low-level
657
+ # confusing messages.
658
+ #
659
+ # This test makes sure that the resource_tracker is silent when a reference
660
+ # cycle has been collected concurrently on non-Windows platforms.
661
+ #
662
+ # Note that the script in ``cmd`` is the exact same script as in
663
+ # test_permission_error_windows_reference_cycle.
664
+ if backend == "loky" and sys.platform.startswith('win'):
665
+ # XXX: on Windows, reference cycles can delay timely garbage collection
666
+ # and make it impossible to properly delete the temporary folder in the
667
+ # main process because of permission errors.
668
+ pytest.xfail(
669
+ "The temporary folder cannot be deleted on Windows in the "
670
+ "presence of a reference cycle"
671
+ )
672
+
673
+ cmd = """if 1:
674
+ import numpy as np
675
+ from joblib import Parallel, delayed
676
+
677
+
678
+ data = np.random.rand(int(2e6)).reshape((int(1e6), 2))
679
+
680
+ # Build a complex cyclic reference that is likely to delay garbage
681
+ # collection of the memmapped array in the worker processes.
682
+ first_list = current_list = [data]
683
+ for i in range(10):
684
+ current_list = [current_list]
685
+ first_list.append(current_list)
686
+
687
+ if __name__ == "__main__":
688
+ results = Parallel(n_jobs=2, backend="{b}")(
689
+ delayed(len)(current_list) for i in range(10))
690
+ assert results == [1] * 10
691
+ """.format(b=backend)
692
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
693
+ stdout=subprocess.PIPE)
694
+ p.wait()
695
+ out, err = p.communicate()
696
+ out = out.decode()
697
+ err = err.decode()
698
+ assert p.returncode == 0, out + "\n\n" + err
699
+ assert "resource_tracker" not in err, err
700
+
701
+
702
+ @with_numpy
703
+ @with_multiprocessing
704
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
705
+ ids=["multiprocessing", "loky"])
706
+ def test_memmapping_pool_for_large_arrays(factory, tmpdir):
707
+ """Check that large arrays are not copied in memory"""
708
+
709
+ # Check that the tempfolder is empty
710
+ assert os.listdir(tmpdir.strpath) == []
711
+
712
+ # Build array reducers that automatically dump large array content
713
+ # to filesystem backed memmap instances to avoid memory explosion
714
+ p = factory(3, max_nbytes=40, temp_folder=tmpdir.strpath, verbose=2)
715
+ try:
716
+ # The temporary folder for the pool is not provisioned in advance
717
+ assert os.listdir(tmpdir.strpath) == []
718
+ assert not os.path.exists(p._temp_folder)
719
+
720
+ small = np.ones(5, dtype=np.float32)
721
+ assert small.nbytes == 20
722
+ p.map(check_array, [(small, i, 1.0) for i in range(small.shape[0])])
723
+
724
+ # Memory has been copied, the pool filesystem folder is unused
725
+ assert os.listdir(tmpdir.strpath) == []
726
+
727
+ # Try with an array larger than the memmap threshold of 40 bytes
728
+ large = np.ones(100, dtype=np.float64)
729
+ assert large.nbytes == 800
730
+ p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])
731
+
732
+ # The data has been dumped in a temp folder for subprocesses to share it
733
+ # without per-child memory copies
734
+ assert os.path.isdir(p._temp_folder)
735
+ dumped_filenames = os.listdir(p._temp_folder)
736
+ assert len(dumped_filenames) == 1
737
+
738
+ # Check that memory mapping is not triggered for arrays with
739
+ # dtype='object'
740
+ objects = np.array(['abc'] * 100, dtype='object')
741
+ results = p.map(has_shareable_memory, [objects])
742
+ assert not results[0]
743
+
744
+ finally:
745
+ # check FS garbage upon pool termination
746
+ p.terminate()
747
+ for i in range(10):
748
+ sleep(.1)
749
+ if not os.path.exists(p._temp_folder):
750
+ break
751
+ else: # pragma: no cover
752
+ raise AssertionError(
753
+ 'temporary folder {} was not deleted'.format(p._temp_folder)
754
+ )
755
+ del p
756
+
757
+
758
+ @with_numpy
759
+ @with_multiprocessing
760
+ @parametrize(
761
+ "backend",
762
+ [
763
+ pytest.param(
764
+ "multiprocessing",
765
+ marks=pytest.mark.xfail(
766
+ reason='https://github.com/joblib/joblib/issues/1086'
767
+ ),
768
+ ),
769
+ "loky",
770
+ ]
771
+ )
772
+ def test_child_raises_parent_exits_cleanly(backend):
773
+ # When a task executed by a child process raises an error, the parent
774
+ # process's backend is notified, and calls abort_everything.
775
+ # In loky, abort_everything itself calls shutdown(kill_workers=True) which
776
+ # sends SIGKILL to the worker, preventing it from running the finalizers
777
+ # that signal the resource_tracker when the worker is done using
778
+ # objects relying on a shared resource (e.g. np.memmaps). Because this
779
+ # behavior is prone to:
780
+ # - cause a resource leak
781
+ # - make the resource tracker emit noisy resource warnings
782
+ # we explicitly test that, when this situation occurs:
783
+ # - no resources are actually leaked
784
+ # - the temporary resources are deleted as soon as possible (typically, at
785
+ # the end of the failing Parallel call)
786
+ # - the resource_tracker does not emit any warnings.
787
+ cmd = """if 1:
788
+ import os
789
+ from pathlib import Path
790
+ from time import sleep
791
+
792
+ import numpy as np
793
+ from joblib import Parallel, delayed
794
+ from testutils import print_filename_and_raise
795
+
796
+ data = np.random.rand(1000)
797
+
798
+ def get_temp_folder(parallel_obj, backend):
799
+ if "{b}" == "loky":
800
+ return Path(parallel_obj._backend._workers._temp_folder)
801
+ else:
802
+ return Path(parallel_obj._backend._pool._temp_folder)
803
+
804
+
805
+ if __name__ == "__main__":
806
+ try:
807
+ with Parallel(n_jobs=2, backend="{b}", max_nbytes=100) as p:
808
+ temp_folder = get_temp_folder(p, "{b}")
809
+ p(delayed(print_filename_and_raise)(data)
810
+ for i in range(1))
811
+ except ValueError as e:
812
+ # the temporary folder should be deleted by the end of this
813
+ # call but apparently on some file systems, this takes
814
+ # some time to be visible.
815
+ #
816
+ # We attempt to write into the temporary folder to test for
817
+ # its existence and we wait for a maximum of 10 seconds.
818
+ for i in range(100):
819
+ try:
820
+ with open(temp_folder / "some_file.txt", "w") as f:
821
+ f.write("some content")
822
+ except FileNotFoundError:
823
+ # temp_folder has been deleted, all is fine
824
+ break
825
+
826
+ # ... else, wait a bit and try again
827
+ sleep(.1)
828
+ else:
829
+ raise AssertionError(
830
+ str(temp_folder) + " was not deleted"
831
+ ) from e
832
+ """.format(b=backend)
833
+ env = os.environ.copy()
834
+ env['PYTHONPATH'] = os.path.dirname(__file__)
835
+ p = subprocess.Popen([sys.executable, '-c', cmd], stderr=subprocess.PIPE,
836
+ stdout=subprocess.PIPE, env=env)
837
+ p.wait()
838
+ out, err = p.communicate()
839
+ out, err = out.decode(), err.decode()
840
+ filename = out.split('\n')[0]
841
+ assert p.returncode == 0, err or out
842
+ assert err == '' # no resource_tracker warnings.
843
+ assert not os.path.exists(filename)
844
+
845
+
846
+ @with_numpy
847
+ @with_multiprocessing
848
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
849
+ ids=["multiprocessing", "loky"])
850
+ def test_memmapping_pool_for_large_arrays_disabled(factory, tmpdir):
851
+ """Check that large arrays memmapping can be disabled"""
852
+ # Set max_nbytes to None to disable the auto memmapping feature
853
+ p = factory(3, max_nbytes=None, temp_folder=tmpdir.strpath)
854
+ try:
855
+
856
+ # Check that the tempfolder is empty
857
+ assert os.listdir(tmpdir.strpath) == []
858
+
859
+ # Try with an array larger than the memmap threshold of 40 bytes
860
+ large = np.ones(100, dtype=np.float64)
861
+ assert large.nbytes == 800
862
+ p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])
863
+
864
+ # Check that the tempfolder is still empty
865
+ assert os.listdir(tmpdir.strpath) == []
866
+
867
+ finally:
868
+ # Cleanup open file descriptors
869
+ p.terminate()
870
+ del p
871
+
872
+
873
+ @with_numpy
874
+ @with_multiprocessing
875
+ @with_dev_shm
876
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
877
+ ids=["multiprocessing", "loky"])
878
+ def test_memmapping_on_large_enough_dev_shm(factory):
879
+ """Check that memmapping uses /dev/shm when possible"""
880
+ orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE
881
+ try:
882
+ # Make joblib believe that it can use /dev/shm even when running on a
883
+ # CI container where the size of the /dev/shm is not very large (that
884
+ # is at least 32 MB instead of 2 GB by default).
885
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(32e6)
886
+ p = factory(3, max_nbytes=10)
887
+ try:
888
+ # Check that the pool has correctly detected the presence of the
889
+ # shared memory filesystem.
890
+ pool_temp_folder = p._temp_folder
891
+ folder_prefix = '/dev/shm/joblib_memmapping_folder_'
892
+ assert pool_temp_folder.startswith(folder_prefix)
893
+ assert os.path.exists(pool_temp_folder)
894
+
895
+ # Try with an array larger than the memmap threshold of 10 bytes
896
+ a = np.ones(100, dtype=np.float64)
897
+ assert a.nbytes == 800
898
+ p.map(id, [a] * 10)
899
+ # a should have been memmapped to the pool temp folder: the joblib
900
+ # pickling procedure generates one .pkl file.
901
+ assert len(os.listdir(pool_temp_folder)) == 1
902
+
903
+ # create a new array with content that is different from 'a' so
904
+ # that it is mapped to a different file in the temporary folder of
905
+ # the pool.
906
+ b = np.ones(100, dtype=np.float64) * 2
907
+ assert b.nbytes == 800
908
+ p.map(id, [b] * 10)
909
+ # A copy of both a and b are now stored in the shared memory folder
910
+ assert len(os.listdir(pool_temp_folder)) == 2
911
+ finally:
912
+ # Cleanup open file descriptors
913
+ p.terminate()
914
+ del p
915
+
916
+ for i in range(100):
917
+ # The temp folder is cleaned up upon pool termination
918
+ if not os.path.exists(pool_temp_folder):
919
+ break
920
+ sleep(.1)
921
+ else: # pragma: no cover
922
+ raise AssertionError('temporary folder of pool was not deleted')
923
+ finally:
924
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size
925
+
926
+
927
+ @with_numpy
928
+ @with_multiprocessing
929
+ @with_dev_shm
930
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
931
+ ids=["multiprocessing", "loky"])
932
+ def test_memmapping_on_too_small_dev_shm(factory):
933
+ orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE
934
+ try:
935
+ # Make joblib believe that it cannot use /dev/shm unless there is
936
+ # 42 exabytes of available shared memory in /dev/shm
937
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(42e18)
938
+
939
+ p = factory(3, max_nbytes=10)
940
+ try:
941
+ # Check that the pool has fallen back to a regular temp folder because
942
+ # the shared memory filesystem is reported as too small.
943
+ pool_temp_folder = p._temp_folder
944
+ assert not pool_temp_folder.startswith('/dev/shm')
945
+ finally:
946
+ # Cleanup open file descriptors
947
+ p.terminate()
948
+ del p
949
+
950
+ # The temp folder is cleaned up upon pool termination
951
+ assert not os.path.exists(pool_temp_folder)
952
+ finally:
953
+ jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size
954
+
955
+
956
+ @with_numpy
957
+ @with_multiprocessing
958
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
959
+ ids=["multiprocessing", "loky"])
960
+ def test_memmapping_pool_for_large_arrays_in_return(factory, tmpdir):
961
+ """Check that large arrays are not copied in memory in return"""
962
+ assert_array_equal = np.testing.assert_array_equal
963
+
964
+ # Build array reducers that automatically dump large array content,
966
+ # but check that the returned data structures are regular arrays to avoid
966
+ # passing a memmap array pointing to a pool controlled temp folder that
967
+ # might be confusing to the user
968
+
969
+ # The MemmappingPool user can always return numpy.memmap object explicitly
970
+ # to avoid memory copy
971
+ p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath)
972
+ try:
973
+ res = p.apply_async(np.ones, args=(1000,))
974
+ large = res.get()
975
+ assert not has_shareable_memory(large)
976
+ assert_array_equal(large, np.ones(1000))
977
+ finally:
978
+ p.terminate()
979
+ del p
980
+
981
+
982
+ def _worker_multiply(a, n_times):
983
+ """Multiplication function to be executed by subprocess"""
984
+ assert has_shareable_memory(a)
985
+ return a * n_times
986
+
987
+
988
+ @with_numpy
989
+ @with_multiprocessing
990
+ @parametrize("factory", [MemmappingPool, TestExecutor.get_memmapping_executor],
991
+ ids=["multiprocessing", "loky"])
992
+ def test_workaround_against_bad_memmap_with_copied_buffers(factory, tmpdir):
993
+ """Check that memmaps with a bad buffer are returned as regular arrays
994
+
995
+ Unary operations and ufuncs on memmap instances return a new memmap
996
+ instance with an in-memory buffer (probably a numpy bug).
997
+ """
998
+ assert_array_equal = np.testing.assert_array_equal
999
+
1000
+ p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath)
1001
+ try:
1002
+ # Send a complex, large-ish view on a array that will be converted to
1003
+ # a memmap in the worker process
1004
+ a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
1005
+ order='F')[:, :1, :]
1006
+
1007
+ # Call a non-inplace multiply operation in the worker on the memmap and
1008
+ # send the result back to the parent.
1009
+ b = p.apply_async(_worker_multiply, args=(a, 3)).get()
1010
+ assert not has_shareable_memory(b)
1011
+ assert_array_equal(b, 3 * a)
1012
+ finally:
1013
+ p.terminate()
1014
+ del p
1015
+
1016
+
1017
+ def identity(arg):
1018
+ return arg
1019
+
1020
+
1021
+ @with_numpy
1022
+ @with_multiprocessing
1023
+ @parametrize(
1024
+ "factory,retry_no",
1025
+ list(itertools.product(
1026
+ [MemmappingPool, TestExecutor.get_memmapping_executor], range(3))),
1027
+ ids=['{}, {}'.format(x, y) for x, y in itertools.product(
1028
+ ["multiprocessing", "loky"], map(str, range(3)))])
1029
+ def test_pool_memmap_with_big_offset(factory, retry_no, tmpdir):
1030
+ # Test that numpy memmap offset is set correctly if greater than
1031
+ # mmap.ALLOCATIONGRANULARITY, see
1032
+ # https://github.com/joblib/joblib/issues/451 and
1033
+ # https://github.com/numpy/numpy/pull/8443 for more details.
1034
+ fname = tmpdir.join('test.mmap').strpath
1035
+ size = 5 * mmap.ALLOCATIONGRANULARITY
1036
+ offset = mmap.ALLOCATIONGRANULARITY + 1
1037
+ obj = make_memmap(fname, mode='w+', shape=size, dtype='uint8',
1038
+ offset=offset)
1039
+
1040
+ p = factory(2, temp_folder=tmpdir.strpath)
1041
+ result = p.apply_async(identity, args=(obj,)).get()
1042
+ assert isinstance(result, np.memmap)
1043
+ assert result.offset == offset
1044
+ np.testing.assert_array_equal(obj, result)
1045
+ p.terminate()
1046
+
1047
+
1048
+ def test_pool_get_temp_dir(tmpdir):
1049
+ pool_folder_name = 'test.tmpdir'
1050
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, tmpdir.strpath)
1051
+ assert shared_mem is False
1052
+ assert pool_folder == tmpdir.join('test.tmpdir').strpath
1053
+
1054
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None)
1055
+ if sys.platform.startswith('win'):
1056
+ assert shared_mem is False
1057
+ assert pool_folder.endswith(pool_folder_name)
1058
+
1059
+
1060
+ def test_pool_get_temp_dir_no_statvfs(tmpdir, monkeypatch):
1061
+ """Check that _get_temp_dir works when os.statvfs is not defined
1062
+
1063
+ Regression test for #902
1064
+ """
1065
+ pool_folder_name = 'test.tmpdir'
1066
+ import joblib._memmapping_reducer
1067
+ if hasattr(joblib._memmapping_reducer.os, 'statvfs'):
1068
+ # We are on Unix, since Windows doesn't have this function
1069
+ monkeypatch.delattr(joblib._memmapping_reducer.os, 'statvfs')
1070
+
1071
+ pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None)
1072
+ if sys.platform.startswith('win'):
1073
+ assert shared_mem is False
1074
+ assert pool_folder.endswith(pool_folder_name)
1075
+
1076
+
1077
+ @with_numpy
1078
+ @skipif(sys.platform == 'win32', reason='This test fails with a '
1079
+ 'PermissionError on Windows')
1080
+ @parametrize("mmap_mode", ["r+", "w+"])
1081
+ def test_numpy_arrays_use_different_memory(mmap_mode):
1082
+ def func(arr, value):
1083
+ arr[:] = value
1084
+ return arr
1085
+
1086
+ arrays = [np.zeros((10, 10), dtype='float64') for i in range(10)]
1087
+
1088
+ results = Parallel(mmap_mode=mmap_mode, max_nbytes=0, n_jobs=2)(
1089
+ delayed(func)(arr, i) for i, arr in enumerate(arrays))
1090
+
1091
+ for i, arr in enumerate(results):
1092
+ np.testing.assert_array_equal(arr, i)
1093
+
1094
+
1095
+ @with_numpy
1096
+ def test_weak_array_key_map():
1097
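+ # _WeakArrayKeyMap keys entries on array identity rather than content,
+ # and entries vanish once the keying array is garbage collected.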
+
1098
+ def assert_empty_after_gc_collect(container, retries=100):
1099
+ for i in range(retries):
1100
+ if len(container) == 0:
1101
+ return
1102
+ gc.collect()
1103
+ sleep(.1)
1104
+ assert len(container) == 0
1105
+
1106
+ a = np.ones(42)
1107
+ m = _WeakArrayKeyMap()
1108
+ m.set(a, 'a')
1109
+ assert m.get(a) == 'a'
1110
+
1111
+ b = a
1112
+ assert m.get(b) == 'a'
1113
+ m.set(b, 'b')
1114
+ assert m.get(a) == 'b'
1115
+
1116
+ del a
1117
+ gc.collect()
1118
+ assert len(m._data) == 1
1119
+ assert m.get(b) == 'b'
1120
+
1121
+ del b
1122
+ assert_empty_after_gc_collect(m._data)
1123
+
1124
+ c = np.ones(42)
1125
+ m.set(c, 'c')
1126
+ assert len(m._data) == 1
1127
+ assert m.get(c) == 'c'
1128
+
1129
+ with raises(KeyError):
1130
+ m.get(np.ones(42))
1131
+
1132
+ del c
1133
+ assert_empty_after_gc_collect(m._data)
1134
+
1135
+ # Check that creating and dropping numpy arrays with potentially the same
1136
+ # object id will not cause the map to get confused.
1137
+ def get_set_get_collect(m, i):
1138
+ a = np.ones(42)
1139
+ with raises(KeyError):
1140
+ m.get(a)
1141
+ m.set(a, i)
1142
+ assert m.get(a) == i
1143
+ return id(a)
1144
+
1145
+ unique_ids = set([get_set_get_collect(m, i) for i in range(1000)])
1146
+ if platform.python_implementation() == 'CPython':
1147
+ # On CPython (at least) the same id is often reused many times for the
1148
+ # temporary arrays created under the local scope of the
1149
+ # get_set_get_collect function without causing any spurious lookups /
1150
+ # insertions in the map. Apparently on Python nogil, the id is not
1151
+ # reused as often.
1152
+ max_len_unique_ids = 400 if getattr(sys.flags, 'nogil', False) else 100
1153
+ assert len(unique_ids) < max_len_unique_ids
1154
+
1155
+
1156
+ def test_weak_array_key_map_no_pickling():
1157
+ m = _WeakArrayKeyMap()
1158
+ with raises(pickle.PicklingError):
1159
+ pickle.dumps(m)
1160
+
1161
+
+@with_numpy
+@with_multiprocessing
+def test_direct_mmap(tmpdir):
+    testfile = str(tmpdir.join('arr.dat'))
+    a = np.arange(10, dtype='uint8')
+    a.tofile(testfile)
+
+    def _read_array():
+        with open(testfile) as fd:
+            mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ, offset=0)
+        return np.ndarray((10,), dtype=np.uint8, buffer=mm, offset=0)
+
+    def func(x):
+        return x**2
+
+    arr = _read_array()
+
+    # this is expected to work and gives the reference
+    ref = Parallel(n_jobs=2)(delayed(func)(x) for x in [a])
+
+    # now test that it works with the mmap array
+    results = Parallel(n_jobs=2)(delayed(func)(x) for x in [arr])
+    np.testing.assert_array_equal(results, ref)
+
+    # also test with a mmap array read in the subprocess
+    def worker():
+        return _read_array()
+
+    results = Parallel(n_jobs=2)(delayed(worker)() for _ in range(1))
+    np.testing.assert_array_equal(results[0], arr)
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_memory.py ADDED
@@ -0,0 +1,1526 @@
+"""
+Test the memory module.
+"""
+
+# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
+# Copyright (c) 2009 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+
+import functools
+import gc
+import logging
+import shutil
+import os
+import os.path
+import pathlib
+import pickle
+import sys
+import time
+import datetime
+import textwrap
+
+import pytest
+
+from joblib.memory import Memory
+from joblib.memory import expires_after
+from joblib.memory import MemorizedFunc, NotMemorizedFunc
+from joblib.memory import MemorizedResult, NotMemorizedResult
+from joblib.memory import _FUNCTION_HASHES
+from joblib.memory import register_store_backend, _STORE_BACKENDS
+from joblib.memory import _build_func_identifier, _store_backend_factory
+from joblib.memory import JobLibCollisionWarning
+from joblib.parallel import Parallel, delayed
+from joblib._store_backends import StoreBackendBase, FileSystemStoreBackend
+from joblib.test.common import with_numpy, np
+from joblib.test.common import with_multiprocessing
+from joblib.testing import parametrize, raises, warns
+from joblib.hashing import hash
+
+
+###############################################################################
+# Module-level variables for the tests
+def f(x, y=1):
+    """ A module-level function for testing purposes.
+    """
+    return x ** 2 + y
+
+
+###############################################################################
+# Helper function for the tests
+def check_identity_lazy(func, accumulator, location):
+    """ Given a function and an accumulator (a list that grows every
+        time the function is called), check that the function can be
+        decorated by memory to be a lazy identity.
+    """
+    # Call each function with several arguments, and check that it is
+    # evaluated only once per argument.
+    memory = Memory(location=location, verbose=0)
+    func = memory.cache(func)
+    for i in range(3):
+        for _ in range(2):
+            assert func(i) == i
+        assert len(accumulator) == i + 1
+
+
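Reviewer note: the helper above captures the core contract the rest of this file exercises: same arguments, one computation. A minimal sketch of that contract from the caller's side (directory name and `calls` list are illustrative):

    from joblib import Memory

    memory = Memory(location='/tmp/joblib_demo', verbose=0)  # illustrative path

    calls = []

    @memory.cache
    def square(x):
        calls.append(x)        # side effect used only to observe cache hits
        return x ** 2

    assert square(3) == 9      # computed
    assert square(3) == 9      # served from the on-disk cache
    assert calls == [3]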
+def corrupt_single_cache_item(memory):
+    single_cache_item, = memory.store_backend.get_items()
+    output_filename = os.path.join(single_cache_item.path, 'output.pkl')
+    with open(output_filename, 'w') as f:
+        f.write('garbage')
+
+
+def monkeypatch_cached_func_warn(func, monkeypatch_fixture):
+    # Need monkeypatch because pytest does not
+    # capture stdlib logging output (see
+    # https://github.com/pytest-dev/pytest/issues/2079)
+
+    recorded = []
+
+    def append_to_record(item):
+        recorded.append(item)
+    monkeypatch_fixture.setattr(func, 'warn', append_to_record)
+    return recorded
+
+
+###############################################################################
+# Tests
+def test_memory_integration(tmpdir):
+    """ Simple test of memory lazy evaluation.
+    """
+    accumulator = list()
+
+    # Rmk: this function has the same name as a module-level function,
+    # thus it serves as a test to see that both are identified
+    # as different.
+    def f(arg):
+        accumulator.append(1)
+        return arg
+
+    check_identity_lazy(f, accumulator, tmpdir.strpath)
+
+    # Now test clearing
+    for compress in (False, True):
+        for mmap_mode in ('r', None):
+            memory = Memory(location=tmpdir.strpath, verbose=10,
+                            mmap_mode=mmap_mode, compress=compress)
+            # First clear the cache directory, to check that our code can
+            # handle that
+            # NOTE: this line would raise an exception, as the database file is
+            # still open; we ignore the error since we want to test what
+            # happens if the directory disappears
+            shutil.rmtree(tmpdir.strpath, ignore_errors=True)
+            g = memory.cache(f)
+            g(1)
+            g.clear(warn=False)
+            current_accumulator = len(accumulator)
+            out = g(1)
+
+    assert len(accumulator) == current_accumulator + 1
+    # Also, check that Memory.eval works similarly
+    assert memory.eval(f, 1) == out
+    assert len(accumulator) == current_accumulator + 1
+
+    # Now do a smoke test with a function defined in __main__, as the name
+    # mangling rules are more complex
+    f.__module__ = '__main__'
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    memory.cache(f)(1)
+
+
+@parametrize("call_before_reducing", [True, False])
+def test_parallel_call_cached_function_defined_in_jupyter(
+        tmpdir, call_before_reducing
+):
+    # Calling an interactively defined memory.cache()'d function inside a
+    # Parallel call used to clear the existing cache related to the said
+    # function (https://github.com/joblib/joblib/issues/1035)
+
+    # This test checks that this is no longer the case.
+
+    # TODO: test that the cache related to the function cache persists across
+    # ipython sessions (provided that no code change were made to the
+    # function's source)?
+
+    # The first part of the test makes the necessary low-level calls to emulate
+    # the definition of a function in a jupyter notebook cell. Joblib has
+    # some custom code to treat functions defined specifically in jupyter
+    # notebooks/ipython session -- we want to test this code, which requires
+    # the emulation to be rigorous.
+    for session_no in [0, 1]:
+        ipython_cell_source = '''
+        def f(x):
+            return x
+        '''
+
+        ipython_cell_id = '<ipython-input-{}-000000000000>'.format(session_no)
+
+        exec(
+            compile(
+                textwrap.dedent(ipython_cell_source),
+                filename=ipython_cell_id,
+                mode='exec'
+            )
+        )
+        # f is now accessible in the locals mapping - but for some unknown
+        # reason, f = locals()['f'] throws a KeyError at runtime, we need to
+        # bind locals()['f'] to a different name in the local namespace
+        aliased_f = locals()['f']
+        aliased_f.__module__ = "__main__"
+
+        # Preliminary sanity checks, and tests checking that joblib properly
+        # identified f as an interactive function defined in a jupyter notebook
+        assert aliased_f(1) == 1
+        assert aliased_f.__code__.co_filename == ipython_cell_id
+
+        memory = Memory(location=tmpdir.strpath, verbose=0)
+        cached_f = memory.cache(aliased_f)
+
+        assert len(os.listdir(tmpdir / 'joblib')) == 1
+        f_cache_relative_directory = os.listdir(tmpdir / 'joblib')[0]
+        assert 'ipython-input' in f_cache_relative_directory
+
+        f_cache_directory = tmpdir / 'joblib' / f_cache_relative_directory
+
+        if session_no == 0:
+            # The cache should be empty as cached_f has not been called yet.
+            assert os.listdir(f_cache_directory) == ['f']
+            assert os.listdir(f_cache_directory / 'f') == []
+
+            if call_before_reducing:
+                cached_f(3)
+                # Two files were just created, func_code.py, and a folder
+                # containing the information (input hashes/output) of
+                # cached_f(3)
+                assert len(os.listdir(f_cache_directory / 'f')) == 2
+
+                # Now, testing #1035: when calling a cached function, joblib
+                # used to dynamically inspect the underlying function to
+                # extract its source code (to verify it matches the source code
+                # of the function as last inspected by joblib) -- however,
+                # source code introspection fails for dynamic functions sent to
+                # child processes - which would eventually make joblib clear
+                # the cache associated to f
+                res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2])
+            else:
+                # Submit the function to the joblib child processes, although
+                # the function has never been called in the parent yet. This
+                # triggers a specific code branch inside
+                # MemorizedFunc.__reduce__.
+                res = Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2])
+                assert len(os.listdir(f_cache_directory / 'f')) == 3
+
+                cached_f(3)
+
+            # Making sure f's cache does not get cleared after the parallel
+            # calls, and contains ALL cached functions calls (f(1), f(2), f(3))
+            # and 'func_code.py'
+            assert len(os.listdir(f_cache_directory / 'f')) == 4
+        else:
+            # For the second session, there should be an already existing cache
+            assert len(os.listdir(f_cache_directory / 'f')) == 4
+
+            cached_f(3)
+
+            # The previous cache should not be invalidated after calling the
+            # function in a new session
+            assert len(os.listdir(f_cache_directory / 'f')) == 4
+
+
+def test_no_memory():
+    """ Test memory with location=None: no memoize """
+    accumulator = list()
+
+    def ff(arg):
+        accumulator.append(1)
+        return arg
+
+    memory = Memory(location=None, verbose=0)
+    gg = memory.cache(ff)
+    for _ in range(4):
+        current_accumulator = len(accumulator)
+        gg(1)
+        assert len(accumulator) == current_accumulator + 1
+
+
+def test_memory_kwarg(tmpdir):
+    " Test memory with a function with keyword arguments."
+    accumulator = list()
+
+    def g(arg1=None, arg2=1):
+        accumulator.append(1)
+        return arg1
+
+    check_identity_lazy(g, accumulator, tmpdir.strpath)
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    g = memory.cache(g)
+    # Smoke test with an explicit keyword argument:
+    assert g(arg1=30, arg2=2) == 30
+
+
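Reviewer note: as `test_no_memory` above shows, `location=None` turns `Memory` into a transparent no-op, which is handy for toggling caching via configuration. A hedged sketch (the `cache_dir` variable is illustrative):

    from joblib import Memory

    def expensive(x):
        return x * 2

    cache_dir = None                      # e.g. read from a config file
    memory = Memory(location=cache_dir, verbose=0)
    expensive = memory.cache(expensive)   # no-op wrapper when location is None
    assert expensive(21) == 42            # always recomputed, nothing written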
+def test_memory_lambda(tmpdir):
+    " Test memory with a function with a lambda."
+    accumulator = list()
+
+    def helper(x):
+        """ A helper function to define l as a lambda.
+        """
+        accumulator.append(1)
+        return x
+
+    check_identity_lazy(lambda x: helper(x), accumulator, tmpdir.strpath)
+
+
+def test_memory_name_collision(tmpdir):
+    " Check that name collisions with functions will raise warnings"
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache
+    def name_collision(x):
+        """ A first function called name_collision
+        """
+        return x
+
+    a = name_collision
+
+    @memory.cache
+    def name_collision(x):
+        """ A second function called name_collision
+        """
+        return x
+
+    b = name_collision
+
+    with warns(JobLibCollisionWarning) as warninfo:
+        a(1)
+        b(1)
+
+    assert len(warninfo) == 1
+    assert "collision" in str(warninfo[0].message)
+
+
+def test_memory_warning_lambda_collisions(tmpdir):
+    # Check that multiple uses of lambdas will raise collision warnings
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    a = memory.cache(lambda x: x)
+    b = memory.cache(lambda x: x + 1)
+
+    with warns(JobLibCollisionWarning) as warninfo:
+        assert a(0) == 0
+        assert b(1) == 2
+        assert a(1) == 1
+
+    # In recent Python versions, we can retrieve the code of lambdas,
+    # thus nothing is raised
+    assert len(warninfo) == 4
+
+
+def test_memory_warning_collision_detection(tmpdir):
+    # Check that collisions impossible to detect will raise appropriate
+    # warnings.
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    a1 = eval('lambda x: x')
+    a1 = memory.cache(a1)
+    b1 = eval('lambda x: x+1')
+    b1 = memory.cache(b1)
+
+    with warns(JobLibCollisionWarning) as warninfo:
+        a1(1)
+        b1(1)
+        a1(0)
+
+    assert len(warninfo) == 2
+    assert "cannot detect" in str(warninfo[0].message).lower()
+
+
+def test_memory_partial(tmpdir):
+    " Test memory with functools.partial."
+    accumulator = list()
+
+    def func(x, y):
+        """ A helper function wrapped below with functools.partial.
+        """
+        accumulator.append(1)
+        return y
+
+    import functools
+    function = functools.partial(func, 1)
+
+    check_identity_lazy(function, accumulator, tmpdir.strpath)
+
+
+def test_memory_eval(tmpdir):
+    " Smoke test memory with a function defined in an eval."
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    m = eval('lambda x: x')
+    mm = memory.cache(m)
+
+    assert mm(1) == 1
+
+
+def count_and_append(x=[]):
+    """ A function with a side effect in its arguments.
+
+    Return the length of its argument and append one element.
+    """
+    len_x = len(x)
+    x.append(None)
+    return len_x
+
+
+def test_argument_change(tmpdir):
+    """ Check that if a function has a side effect in its arguments, it
+        should use the hash of changing arguments.
+    """
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    func = memory.cache(count_and_append)
+    # call the function for the first time; it should cache it with
+    # argument x=[]
+    assert func() == 0
+    # the second time the argument is x=[None], which is not cached
+    # yet, so the function should be called a second time
+    assert func() == 1
+
+
+@with_numpy
+@parametrize('mmap_mode', [None, 'r'])
+def test_memory_numpy(tmpdir, mmap_mode):
+    " Test memory with a function with numpy arrays."
+    accumulator = list()
+
+    def n(arg=None):
+        accumulator.append(1)
+        return arg
+
+    memory = Memory(location=tmpdir.strpath, mmap_mode=mmap_mode,
+                    verbose=0)
+    cached_n = memory.cache(n)
+
+    rnd = np.random.RandomState(0)
+    for i in range(3):
+        a = rnd.random_sample((10, 10))
+        for _ in range(3):
+            assert np.all(cached_n(a) == a)
+        assert len(accumulator) == i + 1
+
+
+@with_numpy
+def test_memory_numpy_check_mmap_mode(tmpdir, monkeypatch):
+    """Check that mmap_mode is respected even at the first call"""
+
+    memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0)
+
+    @memory.cache()
+    def twice(a):
+        return a * 2
+
+    a = np.ones(3)
+
+    b = twice(a)
+    c = twice(a)
+
+    assert isinstance(c, np.memmap)
+    assert c.mode == 'r'
+
+    assert isinstance(b, np.memmap)
+    assert b.mode == 'r'
+
+    # Corrupt the file; deleting the b and c mmaps
+    # is necessary to be able to edit the file
+    del b
+    del c
+    gc.collect()
+    corrupt_single_cache_item(memory)
+
+    # Make sure that corrupting the file causes recomputation and that
+    # a warning is issued.
+    recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch)
+    d = twice(a)
+    assert len(recorded_warnings) == 1
+    exception_msg = 'Exception while loading results'
+    assert exception_msg in recorded_warnings[0]
+    # Assert that the recomputation returns a mmap
+    assert isinstance(d, np.memmap)
+    assert d.mode == 'r'
+
+
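Reviewer note: the test above documents a subtlety worth flagging: with `mmap_mode` set, even the first call returns a read-only memmap backed by the cache file, not the in-memory result. A hedged sketch (cache path illustrative):

    import numpy as np
    from joblib import Memory

    memory = Memory(location='/tmp/joblib_mmap_demo', mmap_mode='r', verbose=0)

    @memory.cache
    def twice(a):
        return a * 2

    out = twice(np.ones(3))
    # The result is memory-mapped from the cache file and is not writable.
    assert isinstance(out, np.memmap) and out.mode == 'r'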
+def test_memory_exception(tmpdir):
+    """ Smoketest the exception handling of Memory.
+    """
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    class MyException(Exception):
+        pass
+
+    @memory.cache
+    def h(exc=0):
+        if exc:
+            raise MyException
+
+    # Call once, to initialise the cache
+    h()
+
+    for _ in range(3):
+        # Call 3 times, to be sure that the Exception is always raised
+        with raises(MyException):
+            h(1)
+
+
+def test_memory_ignore(tmpdir):
+    " Test the ignore feature of memory "
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    accumulator = list()
+
+    @memory.cache(ignore=['y'])
+    def z(x, y=1):
+        accumulator.append(1)
+
+    assert z.ignore == ['y']
+
+    z(0, y=1)
+    assert len(accumulator) == 1
+    z(0, y=1)
+    assert len(accumulator) == 1
+    z(0, y=2)
+    assert len(accumulator) == 1
+
+
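Reviewer note: the `ignore` list is how callers exclude non-semantic arguments (verbosity flags, callbacks, scratch objects) from the cache key, as the test above verifies. A sketch mirroring it (names and path illustrative):

    from joblib import Memory

    memory = Memory(location='/tmp/joblib_ignore_demo', verbose=0)

    @memory.cache(ignore=['verbose'])
    def compute(x, verbose=False):
        if verbose:
            print('computing', x)
        return x + 1

    assert compute(1, verbose=True) == 2
    assert compute(1, verbose=False) == 2   # cache hit: 'verbose' is ignored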
+def test_memory_ignore_decorated(tmpdir):
+    " Test the ignore feature of memory on a decorated function "
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    accumulator = list()
+
+    def decorate(f):
+        @functools.wraps(f)
+        def wrapped(*args, **kwargs):
+            return f(*args, **kwargs)
+        return wrapped
+
+    @memory.cache(ignore=['y'])
+    @decorate
+    def z(x, y=1):
+        accumulator.append(1)
+
+    assert z.ignore == ['y']
+
+    z(0, y=1)
+    assert len(accumulator) == 1
+    z(0, y=1)
+    assert len(accumulator) == 1
+    z(0, y=2)
+    assert len(accumulator) == 1
+
+
+def test_memory_args_as_kwargs(tmpdir):
+    """Non-regression test against 0.12.0 changes.
+
+    https://github.com/joblib/joblib/pull/751
+    """
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache
+    def plus_one(a):
+        return a + 1
+
+    # It's possible to call a positional arg as a kwarg.
+    assert plus_one(1) == 2
+    assert plus_one(a=1) == 2
+
+    # However, a positional argument that joblib hadn't seen
+    # before would cause a failure if it was passed as a kwarg.
+    assert plus_one(a=2) == 3
+
+
+@parametrize('ignore, verbose, mmap_mode', [(['x'], 100, 'r'),
+                                            ([], 10, None)])
+def test_partial_decoration(tmpdir, ignore, verbose, mmap_mode):
+    "Check cache may be called with kwargs before decorating"
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache(ignore=ignore, verbose=verbose, mmap_mode=mmap_mode)
+    def z(x):
+        pass
+
+    assert z.ignore == ignore
+    assert z._verbose == verbose
+    assert z.mmap_mode == mmap_mode
+
+
+def test_func_dir(tmpdir):
+    # Test the creation of the memory cache directory for the function.
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    path = __name__.split('.')
+    path.append('f')
+    path = tmpdir.join('joblib', *path).strpath
+
+    g = memory.cache(f)
+    # Test that the function directory is created on demand
+    func_id = _build_func_identifier(f)
+    location = os.path.join(g.store_backend.location, func_id)
+    assert location == path
+    assert os.path.exists(path)
+    assert memory.location == os.path.dirname(g.store_backend.location)
+
+    # Test that the code is stored.
+    # For the following test to be robust to previous execution, we clear
+    # the in-memory store
+    _FUNCTION_HASHES.clear()
+    assert not g._check_previous_func_code()
+    assert os.path.exists(os.path.join(path, 'func_code.py'))
+    assert g._check_previous_func_code()
+
+    # Test the robustness to failure of loading previous results.
+    args_id = g._get_args_id(1)
+    output_dir = os.path.join(g.store_backend.location, g.func_id, args_id)
+    a = g(1)
+    assert os.path.exists(output_dir)
+    os.remove(os.path.join(output_dir, 'output.pkl'))
+    assert a == g(1)
+
+
+def test_persistence(tmpdir):
+    # Test the memorized functions can be pickled and restored.
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    g = memory.cache(f)
+    output = g(1)
+
+    h = pickle.loads(pickle.dumps(g))
+
+    args_id = h._get_args_id(1)
+    output_dir = os.path.join(h.store_backend.location, h.func_id, args_id)
+    assert os.path.exists(output_dir)
+    assert output == h.store_backend.load_item([h.func_id, args_id])
+    memory2 = pickle.loads(pickle.dumps(memory))
+    assert memory.store_backend.location == memory2.store_backend.location
+
+    # Smoke test that pickling a memory with location=None works
+    memory = Memory(location=None, verbose=0)
+    pickle.loads(pickle.dumps(memory))
+    g = memory.cache(f)
+    gp = pickle.loads(pickle.dumps(g))
+    gp(1)
+
+
+def test_check_call_in_cache(tmpdir):
+    for func in (MemorizedFunc(f, tmpdir.strpath),
+                 Memory(location=tmpdir.strpath, verbose=0).cache(f)):
+        result = func.check_call_in_cache(2)
+        assert not result
+        assert isinstance(result, bool)
+        assert func(2) == 5
+        result = func.check_call_in_cache(2)
+        assert result
+        assert isinstance(result, bool)
+        func.clear()
+
+
+def test_call_and_shelve(tmpdir):
+    # Test MemorizedFunc outputting a reference to cache.
+
+    for func, Result in zip((MemorizedFunc(f, tmpdir.strpath),
+                             NotMemorizedFunc(f),
+                             Memory(location=tmpdir.strpath,
+                                    verbose=0).cache(f),
+                             Memory(location=None).cache(f),
+                             ),
+                            (MemorizedResult, NotMemorizedResult,
+                             MemorizedResult, NotMemorizedResult)):
+        assert func(2) == 5
+        result = func.call_and_shelve(2)
+        assert isinstance(result, Result)
+        assert result.get() == 5
+
+        result.clear()
+        with raises(KeyError):
+            result.get()
+        result.clear()  # Do nothing if there is no cache.
+
+
+def test_call_and_shelve_argument_hash(tmpdir):
+    # Verify that a warning is raised when accessing arguments_hash
+    # attribute from MemorizedResult
+    func = Memory(location=tmpdir.strpath, verbose=0).cache(f)
+    result = func.call_and_shelve(2)
+    assert isinstance(result, MemorizedResult)
+    with warns(DeprecationWarning) as w:
+        assert result.argument_hash == result.args_id
+    assert len(w) == 1
+    assert "The 'argument_hash' attribute has been deprecated" \
+        in str(w[-1].message)
+
+
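Reviewer note: `call_and_shelve` is the deferred-load counterpart of a plain call: it returns a small `MemorizedResult` handle instead of the value, which the lazy-loading test below verifies via access times. A sketch (function and path illustrative):

    from joblib import Memory

    memory = Memory(location='/tmp/joblib_shelve_demo', verbose=0)

    @memory.cache
    def big(x):
        return [x] * 10_000

    handle = big.call_and_shelve(7)   # runs (or finds) the computation
    value = handle.get()              # only now is the result unpickled
    assert value[0] == 7
    handle.clear()                    # drops this one cached entry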
+def test_call_and_shelve_lazily_load_stored_result(tmpdir):
+    """Check that call_and_shelve only loads stored data if needed."""
+    test_access_time_file = tmpdir.join('test_access')
+    test_access_time_file.write('test_access')
+    test_access_time = os.stat(test_access_time_file.strpath).st_atime
+    # check file system access time stats resolution is lower than test wait
+    # timings.
+    time.sleep(0.5)
+    assert test_access_time_file.read() == 'test_access'
+
+    if test_access_time == os.stat(test_access_time_file.strpath).st_atime:
+        # Skip this test when access time cannot be retrieved with enough
+        # precision from the file system (e.g. NTFS on windows).
+        pytest.skip("filesystem does not support fine-grained access time "
+                    "attribute")
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    func = memory.cache(f)
+    args_id = func._get_args_id(2)
+    result_path = os.path.join(memory.store_backend.location,
+                               func.func_id, args_id, 'output.pkl')
+    assert func(2) == 5
+    first_access_time = os.stat(result_path).st_atime
+    time.sleep(1)
+
+    # Should not access the stored data
+    result = func.call_and_shelve(2)
+    assert isinstance(result, MemorizedResult)
+    assert os.stat(result_path).st_atime == first_access_time
+    time.sleep(1)
+
+    # Read the stored data => last access time is greater than first_access
+    assert result.get() == 5
+    assert os.stat(result_path).st_atime > first_access_time
+
+
+def test_memorized_pickling(tmpdir):
+    for func in (MemorizedFunc(f, tmpdir.strpath), NotMemorizedFunc(f)):
+        filename = tmpdir.join('pickling_test.dat').strpath
+        result = func.call_and_shelve(2)
+        with open(filename, 'wb') as fp:
+            pickle.dump(result, fp)
+        with open(filename, 'rb') as fp:
+            result2 = pickle.load(fp)
+        assert result2.get() == result.get()
+        os.remove(filename)
+
+
+def test_memorized_repr(tmpdir):
+    func = MemorizedFunc(f, tmpdir.strpath)
+    result = func.call_and_shelve(2)
+
+    func2 = MemorizedFunc(f, tmpdir.strpath)
+    result2 = func2.call_and_shelve(2)
+    assert result.get() == result2.get()
+    assert repr(func) == repr(func2)
+
+    # Smoke test with NotMemorizedFunc
+    func = NotMemorizedFunc(f)
+    repr(func)
+    repr(func.call_and_shelve(2))
+
+    # Smoke test for message output (increase code coverage)
+    func = MemorizedFunc(f, tmpdir.strpath, verbose=11, timestamp=time.time())
+    result = func.call_and_shelve(11)
+    result.get()
+
+    func = MemorizedFunc(f, tmpdir.strpath, verbose=11)
+    result = func.call_and_shelve(11)
+    result.get()
+
+    func = MemorizedFunc(f, tmpdir.strpath, verbose=5, timestamp=time.time())
+    result = func.call_and_shelve(11)
+    result.get()
+
+    func = MemorizedFunc(f, tmpdir.strpath, verbose=5)
+    result = func.call_and_shelve(11)
+    result.get()
+
+
+def test_memory_file_modification(capsys, tmpdir, monkeypatch):
+    # Test that modifying a Python file after loading it does not lead to
+    # recomputation
+    dir_name = tmpdir.mkdir('tmp_import').strpath
+    filename = os.path.join(dir_name, 'tmp_joblib_.py')
+    content = 'def f(x):\n    print(x)\n    return x\n'
+    with open(filename, 'w') as module_file:
+        module_file.write(content)
+
+    # Load the module:
+    monkeypatch.syspath_prepend(dir_name)
+    import tmp_joblib_ as tmp
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    f = memory.cache(tmp.f)
+    # First call f a few times
+    f(1)
+    f(2)
+    f(1)
+
+    # Now modify the module where f is stored without modifying f
+    with open(filename, 'w') as module_file:
+        module_file.write('\n\n' + content)
+
+    # And call f a couple more times
+    f(1)
+    f(1)
+
+    # Flush the .pyc files
+    shutil.rmtree(dir_name)
+    os.mkdir(dir_name)
+    # Now modify the module where f is stored, modifying f
+    content = 'def f(x):\n    print("x=%s" % x)\n    return x\n'
+    with open(filename, 'w') as module_file:
+        module_file.write(content)
+
+    # And call f more times prior to reloading: the cache should not be
+    # invalidated at this point as the active function definition has not
+    # changed in memory yet.
+    f(1)
+    f(1)
+
+    # Now reload
+    sys.stdout.write('Reloading\n')
+    sys.modules.pop('tmp_joblib_')
+    import tmp_joblib_ as tmp
+    f = memory.cache(tmp.f)
+
+    # And call f more times
+    f(1)
+    f(1)
+
+    out, err = capsys.readouterr()
+    assert out == '1\n2\nReloading\nx=1\n'
+
+
+def _function_to_cache(a, b):
+    # Just a placeholder function to be mutated by tests
+    pass
+
+
+def _sum(a, b):
+    return a + b
+
+
+def _product(a, b):
+    return a * b
+
+
+def test_memory_in_memory_function_code_change(tmpdir):
+    _function_to_cache.__code__ = _sum.__code__
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    f = memory.cache(_function_to_cache)
+
+    assert f(1, 2) == 3
+    assert f(1, 2) == 3
+
+    with warns(JobLibCollisionWarning):
+        # Check that inline function modification triggers a cache invalidation
+        _function_to_cache.__code__ = _product.__code__
+        assert f(1, 2) == 2
+        assert f(1, 2) == 2
+
+
+def test_clear_memory_with_none_location():
+    memory = Memory(location=None)
+    memory.clear()
+
+
+def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
+    return a, b, kw1, kw2
+
+
+def func_with_signature(a: int, b: float) -> float:
+    return a + b
+
+
+def test_memory_func_with_kwonly_args(tmpdir):
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    func_cached = memory.cache(func_with_kwonly_args)
+
+    assert func_cached(1, 2, kw1=3) == (1, 2, 3, 'kw2')
+
+    # Making sure that providing a keyword-only argument by
+    # position raises an exception
+    with raises(ValueError) as excinfo:
+        func_cached(1, 2, 3, kw2=4)
+    excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
+                  "parameter")
+
+    # Keyword-only parameter passed by position with cached call
+    # should still raise ValueError
+    func_cached(1, 2, kw1=3, kw2=4)
+
+    with raises(ValueError) as excinfo:
+        func_cached(1, 2, 3, kw2=4)
+    excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
+                  "parameter")
+
+    # Test 'ignore' parameter
+    func_cached = memory.cache(func_with_kwonly_args, ignore=['kw2'])
+    assert func_cached(1, 2, kw1=3, kw2=4) == (1, 2, 3, 4)
+    assert func_cached(1, 2, kw1=3, kw2='ignored') == (1, 2, 3, 4)
+
+
+def test_memory_func_with_signature(tmpdir):
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    func_cached = memory.cache(func_with_signature)
+
+    assert func_cached(1, 2.) == 3.
+
+
+def _setup_toy_cache(tmpdir, num_inputs=10):
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache()
+    def get_1000_bytes(arg):
+        return 'a' * 1000
+
+    inputs = list(range(num_inputs))
+    for arg in inputs:
+        get_1000_bytes(arg)
+
+    func_id = _build_func_identifier(get_1000_bytes)
+    hash_dirnames = [get_1000_bytes._get_args_id(arg)
+                     for arg in inputs]
+
+    full_hashdirs = [os.path.join(get_1000_bytes.store_backend.location,
+                                  func_id, dirname)
+                     for dirname in hash_dirnames]
+    return memory, full_hashdirs, get_1000_bytes
+
+
+def test__get_items(tmpdir):
+    memory, expected_hash_dirs, _ = _setup_toy_cache(tmpdir)
+    items = memory.store_backend.get_items()
+    hash_dirs = [ci.path for ci in items]
+    assert set(hash_dirs) == set(expected_hash_dirs)
+
+    def get_files_size(directory):
+        full_paths = [os.path.join(directory, fn)
+                      for fn in os.listdir(directory)]
+        return sum(os.path.getsize(fp) for fp in full_paths)
+
+    expected_hash_cache_sizes = [get_files_size(hash_dir)
+                                 for hash_dir in hash_dirs]
+    hash_cache_sizes = [ci.size for ci in items]
+    assert hash_cache_sizes == expected_hash_cache_sizes
+
+    output_filenames = [os.path.join(hash_dir, 'output.pkl')
+                        for hash_dir in hash_dirs]
+
+    expected_last_accesses = [
+        datetime.datetime.fromtimestamp(os.path.getatime(fn))
+        for fn in output_filenames]
+    last_accesses = [ci.last_access for ci in items]
+    assert last_accesses == expected_last_accesses
+
+
+def test__get_items_to_delete(tmpdir):
+    # test empty cache
+    memory, _, _ = _setup_toy_cache(tmpdir, num_inputs=0)
+    items_to_delete = memory.store_backend._get_items_to_delete('1K')
+    assert items_to_delete == []
+
+    memory, expected_hash_cachedirs, _ = _setup_toy_cache(tmpdir)
+    items = memory.store_backend.get_items()
+    # bytes_limit set to keep only one cache item (each hash cache
+    # folder is about 1000 bytes + metadata)
+    items_to_delete = memory.store_backend._get_items_to_delete('2K')
+    nb_hashes = len(expected_hash_cachedirs)
+    assert set.issubset(set(items_to_delete), set(items))
+    assert len(items_to_delete) == nb_hashes - 1
+
+    # Sanity check bytes_limit=2048 is the same as bytes_limit='2K'
+    items_to_delete_2048b = memory.store_backend._get_items_to_delete(2048)
+    assert sorted(items_to_delete) == sorted(items_to_delete_2048b)
+
+    # bytes_limit greater than the size of the cache
+    items_to_delete_empty = memory.store_backend._get_items_to_delete('1M')
+    assert items_to_delete_empty == []
+
+    # All the cache items need to be deleted
+    bytes_limit_too_small = 500
+    items_to_delete_500b = memory.store_backend._get_items_to_delete(
+        bytes_limit_too_small
+    )
+    assert set(items_to_delete_500b) == set(items)
+
+    # Test LRU property: surviving cache items should all have a more
+    # recent last_access than the ones that have been deleted
+    items_to_delete_6000b = memory.store_backend._get_items_to_delete(6000)
+    surviving_items = set(items).difference(items_to_delete_6000b)
+
+    assert (max(ci.last_access for ci in items_to_delete_6000b) <=
+            min(ci.last_access for ci in surviving_items))
+
+
+def test_memory_reduce_size_bytes_limit(tmpdir):
+    memory, _, _ = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default memory.bytes_limit is None and reduce_size is a noop
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items deleted if bytes_limit greater than the size of
+    # the cache
+    memory.reduce_size(bytes_limit='1M')
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # bytes_limit is set so that only two cache items are kept
+    memory.reduce_size(bytes_limit='3K')
+    cache_items = memory.store_backend.get_items()
+    assert set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # bytes_limit set so that no cache item is kept
+    bytes_limit_too_small = 500
+    memory.reduce_size(bytes_limit=bytes_limit_too_small)
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_reduce_size_items_limit(tmpdir):
+    memory, _, _ = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default reduce_size is a noop
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items deleted if items_limit greater than the size of
+    # the cache
+    memory.reduce_size(items_limit=10)
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # items_limit is set so that only two cache items are kept
+    memory.reduce_size(items_limit=2)
+    cache_items = memory.store_backend.get_items()
+    assert set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # items_limit set so that no cache item is kept
+    memory.reduce_size(items_limit=0)
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_reduce_size_age_limit(tmpdir):
+    import time
+    import datetime
+    memory, _, put_cache = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default reduce_size is a noop
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items deleted if age_limit is big.
+    memory.reduce_size(age_limit=datetime.timedelta(days=1))
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # age_limit is set so that only two cache items are kept
+    time.sleep(1)
+    put_cache(-1)
+    put_cache(-2)
+    memory.reduce_size(age_limit=datetime.timedelta(seconds=1))
+    cache_items = memory.store_backend.get_items()
+    assert not set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # age_limit set so that no cache item is kept
+    memory.reduce_size(age_limit=datetime.timedelta(seconds=0))
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_clear(tmpdir):
+    memory, _, g = _setup_toy_cache(tmpdir)
+    memory.clear()
+
+    assert os.listdir(memory.store_backend.location) == []
+
+    # Check that the cache for functions hash is also reset.
+    assert not g._check_previous_func_code(stacklevel=4)
+
+
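Reviewer note: the three tests above cover the eviction knobs of `Memory.reduce_size`, which applies LRU deletion under a size, count, or age budget. A sketch matching the toy-cache sizing used in the tests (path illustrative):

    import datetime
    from joblib import Memory

    memory = Memory(location='/tmp/joblib_evict_demo', verbose=0)

    @memory.cache
    def blob(i):
        return 'a' * 1000

    for i in range(10):
        blob(i)

    memory.reduce_size(bytes_limit='3K')                        # keeps ~2 items
    memory.reduce_size(items_limit=1)                           # keeps 1 item
    memory.reduce_size(age_limit=datetime.timedelta(hours=1))   # drops old items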
+def fast_func_with_complex_output():
+    complex_obj = ['a' * 1000] * 1000
+    return complex_obj
+
+
+def fast_func_with_conditional_complex_output(complex_output=True):
+    complex_obj = {str(i): i for i in range(int(1e5))}
+    return complex_obj if complex_output else 'simple output'
+
+
+@with_multiprocessing
+def test_cached_function_race_condition_when_persisting_output(tmpdir, capfd):
+    # Test race condition where multiple processes are writing into
+    # the same output.pkl. See
+    # https://github.com/joblib/joblib/issues/490 for more details.
+    memory = Memory(location=tmpdir.strpath)
+    func_cached = memory.cache(fast_func_with_complex_output)
+
+    Parallel(n_jobs=2)(delayed(func_cached)() for i in range(3))
+
+    stdout, stderr = capfd.readouterr()
+
+    # Checking both stdout and stderr (ongoing PR #434 may change
+    # logging destination) to make sure there is no exception while
+    # loading the results
+    exception_msg = 'Exception while loading results'
+    assert exception_msg not in stdout
+    assert exception_msg not in stderr
+
+
+@with_multiprocessing
+def test_cached_function_race_condition_when_persisting_output_2(tmpdir,
+                                                                 capfd):
+    # Test race condition in first attempt at solving
+    # https://github.com/joblib/joblib/issues/490. The race condition
+    # was due to the delay between seeing the cache directory created
+    # (interpreted as the result being cached) and the output.pkl being
+    # pickled.
+    memory = Memory(location=tmpdir.strpath)
+    func_cached = memory.cache(fast_func_with_conditional_complex_output)
+
+    Parallel(n_jobs=2)(delayed(func_cached)(True if i % 2 == 0 else False)
+                       for i in range(3))
+
+    stdout, stderr = capfd.readouterr()
+
+    # Checking both stdout and stderr (ongoing PR #434 may change
+    # logging destination) to make sure there is no exception while
+    # loading the results
+    exception_msg = 'Exception while loading results'
+    assert exception_msg not in stdout
+    assert exception_msg not in stderr
+
+
+def test_memory_recomputes_after_an_error_while_loading_results(
+        tmpdir, monkeypatch):
+    memory = Memory(location=tmpdir.strpath)
+
+    def func(arg):
+        # This makes sure that the timestamp returned by two calls of
+        # func are different. This is needed on Windows where
+        # time.time resolution may not be accurate enough
+        time.sleep(0.01)
+        return arg, time.time()
+
+    cached_func = memory.cache(func)
+    input_arg = 'arg'
+    arg, timestamp = cached_func(input_arg)
+
+    # Make sure the function is correctly cached
+    assert arg == input_arg
+
+    # Corrupting output.pkl to make sure that an error happens when
+    # loading the cached result
+    corrupt_single_cache_item(memory)
+
+    # Make sure that corrupting the file causes recomputation and that
+    # a warning is issued.
+    recorded_warnings = monkeypatch_cached_func_warn(cached_func, monkeypatch)
+    recomputed_arg, recomputed_timestamp = cached_func(arg)
+    assert len(recorded_warnings) == 1
+    exception_msg = 'Exception while loading results'
+    assert exception_msg in recorded_warnings[0]
+    assert recomputed_arg == arg
+    assert recomputed_timestamp > timestamp
+
+    # Corrupting output.pkl to make sure that an error happens when
+    # loading the cached result
+    corrupt_single_cache_item(memory)
+    reference = cached_func.call_and_shelve(arg)
+    try:
+        reference.get()
+        raise AssertionError(
+            "It should normally not be possible to load a corrupted"
+            " MemorizedResult"
+        )
+    except KeyError as e:
+        message = "is corrupted"
+        assert message in str(e.args)
+
+
+class IncompleteStoreBackend(StoreBackendBase):
+    """This backend cannot be instantiated and should raise a TypeError."""
+    pass
+
+
+class DummyStoreBackend(StoreBackendBase):
+    """A dummy store backend that does nothing."""
+
+    def _open_item(self, *args, **kwargs):
+        """Open an item on store."""
+        "Does nothing"
+
+    def _item_exists(self, location):
+        """Check if an item location exists."""
+        "Does nothing"
+
+    def _move_item(self, src, dst):
+        """Move an item from src to dst in store."""
+        "Does nothing"
+
+    def create_location(self, location):
+        """Create location on store."""
+        "Does nothing"
+
+    def exists(self, obj):
+        """Check if an object exists in the store"""
+        return False
+
+    def clear_location(self, obj):
+        """Clear object on store"""
+        "Does nothing"
+
+    def get_items(self):
+        """Returns the whole list of items available in cache."""
+        return []
+
+    def configure(self, location, *args, **kwargs):
+        """Configure the store"""
+        "Does nothing"
+
+
+@parametrize("invalid_prefix", [None, dict(), list()])
+def test_register_invalid_store_backends_key(invalid_prefix):
+    # verify the right exceptions are raised when passing a wrong backend key.
+    with raises(ValueError) as excinfo:
+        register_store_backend(invalid_prefix, None)
+    excinfo.match(r'Store backend name should be a string*')
+
+
+def test_register_invalid_store_backends_object():
+    # verify the right exceptions are raised when passing a wrong backend
+    # object.
+    with raises(ValueError) as excinfo:
+        register_store_backend("fs", None)
+    excinfo.match(r'Store backend should inherit StoreBackendBase*')
+
+
+def test_memory_default_store_backend():
+    # test that an unknown backend raises a TypeError instead of silently
+    # falling back to a FileSystemStoreBackend
+    with raises(TypeError) as excinfo:
+        Memory(location='/tmp/joblib', backend='unknown')
+    excinfo.match(r"Unknown location*")
+
+
+def test_warning_on_unknown_location_type():
+    class NonSupportedLocationClass:
+        pass
+    unsupported_location = NonSupportedLocationClass()
+
+    with warns(UserWarning) as warninfo:
+        _store_backend_factory("local", location=unsupported_location)
+
+    expected_message = ("Instantiating a backend using a "
+                        "NonSupportedLocationClass as a location is not "
+                        "supported by joblib")
+    assert expected_message in str(warninfo[0].message)
+
+
+def test_instanciate_incomplete_store_backend():
+    # Verify that registering an external incomplete store backend raises an
+    # exception when one tries to instantiate it.
+    backend_name = "isb"
+    register_store_backend(backend_name, IncompleteStoreBackend)
+    assert (backend_name, IncompleteStoreBackend) in _STORE_BACKENDS.items()
+    with raises(TypeError) as excinfo:
+        _store_backend_factory(backend_name, "fake_location")
+    excinfo.match(r"Can't instantiate abstract class IncompleteStoreBackend "
+                  "(without an implementation for|with) abstract methods*")
+
+
+def test_dummy_store_backend():
+    # Verify that registering an external store backend works.
+
+    backend_name = "dsb"
+    register_store_backend(backend_name, DummyStoreBackend)
+    assert (backend_name, DummyStoreBackend) in _STORE_BACKENDS.items()
+
+    backend_obj = _store_backend_factory(backend_name, "dummy_location")
+    assert isinstance(backend_obj, DummyStoreBackend)
+
+
+def test_instanciate_store_backend_with_pathlib_path():
+    # Instantiate a FileSystemStoreBackend using a pathlib.Path object
+    path = pathlib.Path("some_folder")
+    backend_obj = _store_backend_factory("local", path)
+    assert backend_obj.location == "some_folder"
+
+
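Reviewer note: registering a backend is a two-step dance, as the tests above exercise: subclass `StoreBackendBase` (implementing all its abstract methods) and map a name to it, after which `Memory(backend=...)` can resolve it. A skeleton mirroring `DummyStoreBackend` (the `NullBackend` class and `'null'` name are illustrative; a real backend must actually persist items):

    from joblib import Memory
    from joblib.memory import register_store_backend
    from joblib._store_backends import StoreBackendBase

    class NullBackend(StoreBackendBase):
        """Illustrative backend that stores nothing."""
        def _open_item(self, *args, **kwargs): pass
        def _item_exists(self, location): pass
        def _move_item(self, src, dst): pass
        def create_location(self, location): pass
        def exists(self, obj): return False
        def clear_location(self, obj): pass
        def get_items(self): return []
        def configure(self, location, *args, **kwargs): pass

    register_store_backend('null', NullBackend)
    memory = Memory(location='anywhere', backend='null', verbose=0)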
+def test_filesystem_store_backend_repr(tmpdir):
+    # Verify string representation of a filesystem store backend.
+
+    repr_pattern = 'FileSystemStoreBackend(location="{location}")'
+    backend = FileSystemStoreBackend()
+    assert backend.location is None
+
+    repr(backend)  # Should not raise an exception
+
+    assert str(backend) == repr_pattern.format(location=None)
+
+    # backend location is passed explicitly via the configure method (called
+    # by the internal _store_backend_factory function)
+    backend.configure(tmpdir.strpath)
+
+    assert str(backend) == repr_pattern.format(location=tmpdir.strpath)
+
+    repr(backend)  # Should not raise an exception
+
+
+def test_memory_objects_repr(tmpdir):
+    # Verify printable reprs of MemorizedResult, MemorizedFunc and Memory.
+
+    def my_func(a, b):
+        return a + b
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    memorized_func = memory.cache(my_func)
+
+    memorized_func_repr = 'MemorizedFunc(func={func}, location={location})'
+
+    assert str(memorized_func) == memorized_func_repr.format(
+        func=my_func,
+        location=memory.store_backend.location)
+
+    memorized_result = memorized_func.call_and_shelve(42, 42)
+
+    memorized_result_repr = ('MemorizedResult(location="{location}", '
+                             'func="{func}", args_id="{args_id}")')
+
+    assert str(memorized_result) == memorized_result_repr.format(
+        location=memory.store_backend.location,
+        func=memorized_result.func_id,
+        args_id=memorized_result.args_id)
+
+    assert str(memory) == 'Memory(location={location})'.format(
+        location=memory.store_backend.location)
+
+
+def test_memorized_result_pickle(tmpdir):
+    # Verify a MemorizedResult object can be pickled/depickled. Non-regression
+    # test introduced following issue
+    # https://github.com/joblib/joblib/issues/747
+
+    memory = Memory(location=tmpdir.strpath)
+
+    @memory.cache
+    def g(x):
+        return x**2
+
+    memorized_result = g.call_and_shelve(4)
+    memorized_result_pickle = pickle.dumps(memorized_result)
+    memorized_result_loads = pickle.loads(memorized_result_pickle)
+
+    assert memorized_result.store_backend.location == \
+        memorized_result_loads.store_backend.location
+    assert memorized_result.func == memorized_result_loads.func
+    assert memorized_result.args_id == memorized_result_loads.args_id
+    assert str(memorized_result) == str(memorized_result_loads)
+
+
+def compare(left, right, ignored_attrs=None):
+    if ignored_attrs is None:
+        ignored_attrs = []
+
+    left_vars = vars(left)
+    right_vars = vars(right)
+    assert set(left_vars.keys()) == set(right_vars.keys())
+    for attr in left_vars.keys():
+        if attr in ignored_attrs:
+            continue
+        assert left_vars[attr] == right_vars[attr]
+
+
+@pytest.mark.parametrize('memory_kwargs',
+                         [{'compress': 3, 'verbose': 2},
+                          {'mmap_mode': 'r', 'verbose': 5,
+                           'backend_options': {'parameter': 'unused'}}])
+def test_memory_pickle_dump_load(tmpdir, memory_kwargs):
+    memory = Memory(location=tmpdir.strpath, **memory_kwargs)
+
+    memory_reloaded = pickle.loads(pickle.dumps(memory))
+
+    # Compare Memory instance before and after pickle roundtrip
+    compare(memory.store_backend, memory_reloaded.store_backend)
+    compare(memory, memory_reloaded,
+            ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id']))
+    assert hash(memory) == hash(memory_reloaded)
+
+    func_cached = memory.cache(f)
+
+    func_cached_reloaded = pickle.loads(pickle.dumps(func_cached))
+
+    # Compare MemorizedFunc instance before/after pickle roundtrip
+    compare(func_cached.store_backend, func_cached_reloaded.store_backend)
+    compare(func_cached, func_cached_reloaded,
+            ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id']))
+    assert hash(func_cached) == hash(func_cached_reloaded)
+
+    # Compare MemorizedResult instance before/after pickle roundtrip
+    memorized_result = func_cached.call_and_shelve(1)
+    memorized_result_reloaded = pickle.loads(pickle.dumps(memorized_result))
+
+    compare(memorized_result.store_backend,
+            memorized_result_reloaded.store_backend)
+    compare(memorized_result, memorized_result_reloaded,
+            ignored_attrs=set(['store_backend', 'timestamp', '_func_code_id']))
+    assert hash(memorized_result) == hash(memorized_result_reloaded)
+
+
+def test_info_log(tmpdir, caplog):
+    caplog.set_level(logging.INFO)
+    x = 3
+
+    memory = Memory(location=tmpdir.strpath, verbose=20)
+
+    @memory.cache
+    def f(x):
+        return x ** 2
+
+    _ = f(x)
+    assert "Querying" in caplog.text
+    caplog.clear()
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache
+    def f(x):
+        return x ** 2
+
+    _ = f(x)
+    assert "Querying" not in caplog.text
+    caplog.clear()
+
+
+def test_deprecated_bytes_limit(tmpdir):
+    from joblib import __version__
+    if __version__ >= "1.5":
+        raise DeprecationWarning(
+            "Bytes limit is deprecated and should be removed by 1.4"
+        )
+    with pytest.warns(DeprecationWarning, match="bytes_limit"):
+        _ = Memory(location=tmpdir.strpath, bytes_limit='1K')
+
+
+class TestCacheValidationCallback:
+    "Tests on parameter `cache_validation_callback`"
+
+    def foo(self, x, d, delay=None):
+        d["run"] = True
+        if delay is not None:
+            time.sleep(delay)
+        return x * 2
+
+    def test_invalid_cache_validation_callback(self, memory):
+        "Test invalid values for `cache_validation_callback`"
+        match = "cache_validation_callback needs to be callable. Got True."
+        with pytest.raises(ValueError, match=match):
+            memory.cache(cache_validation_callback=True)
+
+    @pytest.mark.parametrize("consider_cache_valid", [True, False])
+    def test_constant_cache_validation_callback(
+            self, memory, consider_cache_valid
+    ):
+        "Test expiry of old results"
+        f = memory.cache(
+            self.foo, cache_validation_callback=lambda _: consider_cache_valid,
+            ignore=["d"]
+        )
+
+        d1, d2 = {"run": False}, {"run": False}
+        assert f(2, d1) == 4
+        assert f(2, d2) == 4
+
+        assert d1["run"]
+        assert d2["run"] != consider_cache_valid
+
+    def test_memory_only_cache_long_run(self, memory):
+        "Test cache validity based on run duration."
+
+        def cache_validation_callback(metadata):
+            duration = metadata['duration']
+            if duration > 0.1:
+                return True
+
+        f = memory.cache(
+            self.foo, cache_validation_callback=cache_validation_callback,
+            ignore=["d"]
+        )
+
+        # Short runs are not cached
+        d1, d2 = {"run": False}, {"run": False}
+        assert f(2, d1, delay=0) == 4
+        assert f(2, d2, delay=0) == 4
+        assert d1["run"]
+        assert d2["run"]
+
+        # Longer runs are cached
+        d1, d2 = {"run": False}, {"run": False}
+        assert f(2, d1, delay=0.2) == 4
+        assert f(2, d2, delay=0.2) == 4
+        assert d1["run"]
+        assert not d2["run"]
+
+    def test_memory_expires_after(self, memory):
+        "Test expiry of old cached results"
+
+        f = memory.cache(
+            self.foo, cache_validation_callback=expires_after(seconds=.3),
+            ignore=["d"]
+        )
+
+        d1, d2, d3 = {"run": False}, {"run": False}, {"run": False}
+        assert f(2, d1) == 4
+        assert f(2, d2) == 4
+        time.sleep(.5)
+        assert f(2, d3) == 4
+
+        assert d1["run"]
+        assert not d2["run"]
+        assert d3["run"]
+
+
1490
+ class TestMemorizedFunc:
1491
+ "Tests for the MemorizedFunc and NotMemorizedFunc classes"
1492
+
1493
+ @staticmethod
1494
+ def f(x, counter):
1495
+ counter[x] = counter.get(x, 0) + 1
1496
+ return counter[x]
1497
+
1498
+ def test_call_method_memorized(self, memory):
1499
+ "Test calling the function"
1500
+
1501
+ f = memory.cache(self.f, ignore=['counter'])
1502
+
1503
+ counter = {}
1504
+ assert f(2, counter) == 1
1505
+ assert f(2, counter) == 1
1506
+
1507
+ x, meta = f.call(2, counter)
1508
+ assert x == 2, "f has not been called properly"
1509
+ assert isinstance(meta, dict), (
1510
+ "Metadata are not returned by MemorizedFunc.call."
1511
+ )
1512
+
1513
+ def test_call_method_not_memorized(self, memory):
1514
+ "Test calling the function"
1515
+
1516
+ f = NotMemorizedFunc(self.f)
1517
+
1518
+ counter = {}
1519
+ assert f(2, counter) == 1
1520
+ assert f(2, counter) == 2
1521
+
1522
+ x, meta = f.call(2, counter)
1523
+ assert x == 3, "f has not been called properly"
1524
+ assert isinstance(meta, dict), (
1525
+ "Metadata are not returned by MemorizedFunc.call."
1526
+ )
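The two test classes above pin down the `cache_validation_callback` contract: the callback receives the cache-entry metadata (including `duration`, the runtime of the original call) and returns True to keep the cached value, while `expires_after` builds such a callback from a time delta. A minimal usage sketch, assuming `expires_after` is importable from `joblib.memory` as in these tests; the cache location is illustrative:

import time
from joblib import Memory
from joblib.memory import expires_after  # import path assumed from these tests

memory = Memory(location="/tmp/joblib_cache_demo", verbose=0)  # illustrative path

def compute(x):
    time.sleep(0.2)  # stand-in for real work
    return x * 2

# Keep cached results for one hour, then recompute on the next call.
compute_cached = memory.cache(
    compute, cache_validation_callback=expires_after(hours=1))

assert compute_cached(3) == 6  # computed and stored
assert compute_cached(3) == 6  # served from the cache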
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_memory_async.py ADDED
@@ -0,0 +1,170 @@
+import asyncio
+import gc
+import shutil
+
+import pytest
+
+from joblib.memory import (AsyncMemorizedFunc, AsyncNotMemorizedFunc,
+                           MemorizedResult, Memory, NotMemorizedResult)
+from joblib.test.common import np, with_numpy
+from joblib.testing import raises
+
+from .test_memory import (corrupt_single_cache_item,
+                          monkeypatch_cached_func_warn)
+
+
+async def check_identity_lazy_async(func, accumulator, location):
+    """Similar to check_identity_lazy, but for coroutine functions."""
+    memory = Memory(location=location, verbose=0)
+    func = memory.cache(func)
+    for i in range(3):
+        for _ in range(2):
+            value = await func(i)
+            assert value == i
+            assert len(accumulator) == i + 1
+
+
+@pytest.mark.asyncio
+async def test_memory_integration_async(tmpdir):
+    accumulator = list()
+
+    async def f(n):
+        await asyncio.sleep(0.1)
+        accumulator.append(1)
+        return n
+
+    await check_identity_lazy_async(f, accumulator, tmpdir.strpath)
+
+    # Now test clearing
+    for compress in (False, True):
+        for mmap_mode in ('r', None):
+            memory = Memory(location=tmpdir.strpath, verbose=10,
+                            mmap_mode=mmap_mode, compress=compress)
+            # First clear the cache directory, to check that our code can
+            # handle that.
+            # NOTE: this line would raise an exception, as the database
+            # file is still open; we ignore the error since we want to
+            # test what happens if the directory disappears
+            shutil.rmtree(tmpdir.strpath, ignore_errors=True)
+            g = memory.cache(f)
+            await g(1)
+            g.clear(warn=False)
+            current_accumulator = len(accumulator)
+            out = await g(1)
+
+            assert len(accumulator) == current_accumulator + 1
+            # Also, check that Memory.eval works similarly
+            evaled = await memory.eval(f, 1)
+            assert evaled == out
+            assert len(accumulator) == current_accumulator + 1
+
+    # Now do a smoke test with a function defined in __main__, as the name
+    # mangling rules are more complex
+    f.__module__ = '__main__'
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    await memory.cache(f)(1)
+
+
+@pytest.mark.asyncio
+async def test_no_memory_async():
+    accumulator = list()
+
+    async def ff(x):
+        await asyncio.sleep(0.1)
+        accumulator.append(1)
+        return x
+
+    memory = Memory(location=None, verbose=0)
+    gg = memory.cache(ff)
+    for _ in range(4):
+        current_accumulator = len(accumulator)
+        await gg(1)
+        assert len(accumulator) == current_accumulator + 1
+
+
+@with_numpy
+@pytest.mark.asyncio
+async def test_memory_numpy_check_mmap_mode_async(tmpdir, monkeypatch):
+    """Check that mmap_mode is respected even at the first call"""
+
+    memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0)
+
+    @memory.cache()
+    async def twice(a):
+        return a * 2
+
+    a = np.ones(3)
+    b = await twice(a)
+    c = await twice(a)
+
+    assert isinstance(c, np.memmap)
+    assert c.mode == 'r'
+
+    assert isinstance(b, np.memmap)
+    assert b.mode == 'r'
+
+    # Corrupt the file. Deleting the b and c mmaps first is necessary
+    # to be able to edit the file.
+    del b
+    del c
+    gc.collect()
+    corrupt_single_cache_item(memory)
+
+    # Make sure that corrupting the file causes recomputation and that
+    # a warning is issued.
+    recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch)
+    d = await twice(a)
+    assert len(recorded_warnings) == 1
+    exception_msg = 'Exception while loading results'
+    assert exception_msg in recorded_warnings[0]
+    # Assert that the recomputation returns a mmap
+    assert isinstance(d, np.memmap)
+    assert d.mode == 'r'
+
+
+@pytest.mark.asyncio
+async def test_call_and_shelve_async(tmpdir):
+    async def f(x, y=1):
+        await asyncio.sleep(0.1)
+        return x ** 2 + y
+
+    # Test MemorizedFunc outputting a reference to the cache.
+    for func, Result in zip((AsyncMemorizedFunc(f, tmpdir.strpath),
+                             AsyncNotMemorizedFunc(f),
+                             Memory(location=tmpdir.strpath,
+                                    verbose=0).cache(f),
+                             Memory(location=None).cache(f),
+                             ),
+                            (MemorizedResult, NotMemorizedResult,
+                             MemorizedResult, NotMemorizedResult,
+                             )):
+        for _ in range(2):
+            result = await func.call_and_shelve(2)
+            assert isinstance(result, Result)
+            assert result.get() == 5
+
+        result.clear()
+        with raises(KeyError):
+            result.get()
+        result.clear()  # Do nothing if there is no cache.
+
+
+@pytest.mark.asyncio
+async def test_memorized_func_call_async(memory):
+
+    async def ff(x, counter):
+        await asyncio.sleep(0.1)
+        counter[x] = counter.get(x, 0) + 1
+        return counter[x]
+
+    gg = memory.cache(ff, ignore=['counter'])
+
+    counter = {}
+    assert await gg(2, counter) == 1
+    assert await gg(2, counter) == 1
+
+    x, meta = await gg.call(2, counter)
+    assert x == 2, "f has not been called properly"
+    assert isinstance(meta, dict), (
+        "Metadata are not returned by MemorizedFunc.call."
+    )
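As these tests show, `Memory.cache` also wraps coroutine functions (via `AsyncMemorizedFunc`), so the cached value is obtained with `await`. A minimal sketch of that usage, with an illustrative cache location:

import asyncio
from joblib import Memory

memory = Memory(location="/tmp/joblib_async_demo", verbose=0)  # illustrative path

@memory.cache
async def slow_double(x):
    await asyncio.sleep(0.1)  # stand-in for real async work
    return x * 2

async def main():
    first = await slow_double(21)   # runs the coroutine and stores the result
    second = await slow_double(21)  # replays the cached value without sleeping
    assert first == second == 42

asyncio.run(main())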
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py ADDED
@@ -0,0 +1,32 @@
+"""
+Pyodide and other single-threaded Python builds will be missing the
+_multiprocessing module. Test that joblib still works in this environment.
+"""
+
+import os
+import subprocess
+import sys
+
+
+def test_missing_multiprocessing(tmp_path):
+    """
+    Test that import joblib works even if _multiprocessing is missing.
+
+    pytest has already imported everything from joblib. The most reasonable
+    way to test importing joblib with a modified environment is to invoke a
+    separate Python process. This also ensures that we don't break other
+    tests by importing a bad `_multiprocessing` module.
+    """
+    (tmp_path / "_multiprocessing.py").write_text(
+        'raise ImportError("No _multiprocessing module!")'
+    )
+    env = dict(os.environ)
+    # For the subprocess, use the current sys.path with our stub version of
+    # _multiprocessing inserted first.
+    env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path)
+    subprocess.check_call(
+        [sys.executable, "-c",
+         "import joblib, math; "
+         "joblib.Parallel(n_jobs=1)("
+         "joblib.delayed(math.sqrt)(i**2) for i in range(10))"
+         ], env=env)
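The docstring above spells out a general pattern: import-time behavior can only be tested in a fresh interpreter, with the module search path rigged through PYTHONPATH. A reusable sketch of that pattern (the helper name is hypothetical; note that the test above hard-codes ':' while os.pathsep is the portable separator):

import os
import subprocess
import sys

def run_in_clean_interpreter(snippet, shadow_dir=None):
    """Run a Python snippet in a fresh process; stub modules placed in
    shadow_dir take precedence over the real ones. Raises
    CalledProcessError if the snippet fails."""
    env = dict(os.environ)
    if shadow_dir is not None:
        # Put the stub directory first so its modules win the import lookup.
        env["PYTHONPATH"] = os.pathsep.join([str(shadow_dir)] + sys.path)
    subprocess.check_call([sys.executable, "-c", snippet], env=env)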
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_module.py ADDED
@@ -0,0 +1,53 @@
+import sys
+import joblib
+from joblib.testing import check_subprocess_call
+from joblib.test.common import with_multiprocessing
+
+
+def test_version():
+    assert hasattr(joblib, '__version__'), (
+        "There is no __version__ attribute on the joblib module")
+
+
+@with_multiprocessing
+def test_no_start_method_side_effect_on_import():
+    # check that importing joblib does not implicitly set the global
+    # start_method for multiprocessing.
+    code = """if True:
+        import joblib
+        import multiprocessing as mp
+        # The following line would raise RuntimeError if the
+        # start_method is already set.
+        mp.set_start_method("loky")
+    """
+    check_subprocess_call([sys.executable, '-c', code])
+
+
+@with_multiprocessing
+def test_no_semaphore_tracker_on_import():
+    # check that importing joblib does not implicitly spawn a resource
+    # tracker or a semaphore tracker
+    code = """if True:
+        import joblib
+        from multiprocessing import semaphore_tracker
+        # The following assertion would fail if a semaphore tracker
+        # had already been spawned on import.
+        msg = "multiprocessing.semaphore_tracker has been spawned on import"
+        assert semaphore_tracker._semaphore_tracker._fd is None, msg"""
+    if sys.version_info >= (3, 8):
+        # semaphore_tracker was renamed in Python 3.8:
+        code = code.replace("semaphore_tracker", "resource_tracker")
+    check_subprocess_call([sys.executable, '-c', code])
+
+
+@with_multiprocessing
+def test_no_resource_tracker_on_import():
+    code = """if True:
+        import joblib
+        from joblib.externals.loky.backend import resource_tracker
+        # The following assertion would fail if a resource tracker
+        # had already been spawned on import.
+        msg = "loky.resource_tracker has been spawned on import"
+        assert resource_tracker._resource_tracker._fd is None, msg
+    """
+    check_subprocess_call([sys.executable, '-c', code])
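The three tests above share one idea: run `import joblib` in a pristine interpreter and assert that process-global multiprocessing state is untouched. A compact sketch of the same check for an arbitrary package (the helper and the default package name are illustrative):

import subprocess
import sys

def assert_no_start_method_side_effect(package="joblib"):
    # get_start_method(allow_none=True) returns None only while no code has
    # called set_start_method, i.e. importing the package had no side effect.
    snippet = (
        f"import {package}; "
        "import multiprocessing as mp; "
        "assert mp.get_start_method(allow_none=True) is None"
    )
    subprocess.check_call([sys.executable, "-c", snippet])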
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py ADDED
@@ -0,0 +1,1159 @@
+"""Test the numpy pickler as a replacement of the standard pickler."""
+
+import copy
+import os
+import random
+import re
+import io
+import sys
+import warnings
+import gzip
+import zlib
+import bz2
+import pickle
+import socket
+from contextlib import closing
+import mmap
+from pathlib import Path
+
+try:
+    import lzma
+except ImportError:
+    lzma = None
+
+import pytest
+
+from joblib.test.common import np, with_numpy, with_lz4, without_lz4
+from joblib.test.common import with_memory_profiler, memory_used
+from joblib.testing import parametrize, raises, warns
+
+# numpy_pickle is not a drop-in replacement of pickle, as it takes
+# filenames instead of open files as arguments.
+from joblib import numpy_pickle, register_compressor
+from joblib.test import data
+
+from joblib.numpy_pickle_utils import _IO_BUFFER_SIZE
+from joblib.numpy_pickle_utils import _detect_compressor
+from joblib.numpy_pickle_utils import _is_numpy_array_byte_order_mismatch
+from joblib.numpy_pickle_utils import _ensure_native_byte_order
+from joblib.compressor import (_COMPRESSORS, _LZ4_PREFIX, CompressorWrapper,
+                               LZ4_NOT_INSTALLED_ERROR, BinaryZlibFile)
+
+
+###############################################################################
+# Define a list of standard types.
+# Borrowed from dill, initial author: Michael McKerns:
+# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py
+
+typelist = []
+
+# testing types
+_none = None
+typelist.append(_none)
+_type = type
+typelist.append(_type)
+_bool = bool(1)
+typelist.append(_bool)
+_int = int(1)
+typelist.append(_int)
+_float = float(1)
+typelist.append(_float)
+_complex = complex(1)
+typelist.append(_complex)
+_string = str(1)
+typelist.append(_string)
+_tuple = ()
+typelist.append(_tuple)
+_list = []
+typelist.append(_list)
+_dict = {}
+typelist.append(_dict)
+_builtin = len
+typelist.append(_builtin)
+
+
+def _function(x):
+    yield x
+
+
+class _class:
+    def _method(self):
+        pass
+
+
+class _newclass(object):
+    def _method(self):
+        pass
+
+
+typelist.append(_function)
+typelist.append(_class)
+typelist.append(_newclass)  # <type 'type'>
+_instance = _class()
+typelist.append(_instance)
+_object = _newclass()
+typelist.append(_object)  # <type 'class'>
+
+
+###############################################################################
+# Tests
+
+@parametrize('compress', [0, 1])
+@parametrize('member', typelist)
+def test_standard_types(tmpdir, compress, member):
+    # Test pickling and saving with standard types.
+    filename = tmpdir.join('test.pkl').strpath
+    numpy_pickle.dump(member, filename, compress=compress)
+    _member = numpy_pickle.load(filename)
+    # We compare the pickled instance to the reloaded one only if it
+    # can be compared to a copied one
+    if member == copy.deepcopy(member):
+        assert member == _member
+
+
+def test_value_error():
+    # Test inverting the input arguments to dump
+    with raises(ValueError):
+        numpy_pickle.dump('foo', dict())
+
+
+@parametrize('wrong_compress', [-1, 10, dict()])
+def test_compress_level_error(wrong_compress):
+    # Verify that passing an invalid compress argument raises an error.
+    exception_msg = ('Non valid compress level given: '
+                     '"{0}"'.format(wrong_compress))
+    with raises(ValueError) as excinfo:
+        numpy_pickle.dump('dummy', 'foo', compress=wrong_compress)
+    excinfo.match(exception_msg)
+
+
+@with_numpy
+@parametrize('compress', [False, True, 0, 3, 'zlib'])
+def test_numpy_persistence(tmpdir, compress):
+    filename = tmpdir.join('test.pkl').strpath
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample((10, 2))
+    # We use 'a.T' to have a non C-contiguous array.
+    for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])):
+        filenames = numpy_pickle.dump(obj, filename, compress=compress)
+
+        # All is cached in one file
+        assert len(filenames) == 1
+        # Check that only one file was created
+        assert filenames[0] == filename
+        # Check that this file does exist
+        assert os.path.exists(filenames[0])
+
+        # Unpickle the object
+        obj_ = numpy_pickle.load(filename)
+        # Check that the items are indeed arrays
+        for item in obj_:
+            assert isinstance(item, np.ndarray)
+        # And finally, check that all the values are equal.
+        np.testing.assert_array_equal(np.array(obj), np.array(obj_))
+
+    # Now test with an array subclass
+    obj = np.memmap(filename + 'mmap', mode='w+', shape=4, dtype=np.float64)
+    filenames = numpy_pickle.dump(obj, filename, compress=compress)
+    # All is cached in one file
+    assert len(filenames) == 1
+
+    obj_ = numpy_pickle.load(filename)
+    if (type(obj) is not np.memmap and
+            hasattr(obj, '__array_prepare__')):
+        # We don't reconstruct memmaps
+        assert isinstance(obj_, type(obj))
+
+    np.testing.assert_array_equal(obj_, obj)
+
+    # Test with an object containing multiple numpy arrays
+    obj = ComplexTestObject()
+    filenames = numpy_pickle.dump(obj, filename, compress=compress)
+    # All is cached in one file
+    assert len(filenames) == 1
+
+    obj_loaded = numpy_pickle.load(filename)
+    assert isinstance(obj_loaded, type(obj))
+    np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)
+    np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)
+    np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)
+
+
+@with_numpy
+def test_numpy_persistence_bufferred_array_compression(tmpdir):
+    big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8)
+    filename = tmpdir.join('test.pkl').strpath
+    numpy_pickle.dump(big_array, filename, compress=True)
+    arr_reloaded = numpy_pickle.load(filename)
+
+    np.testing.assert_array_equal(big_array, arr_reloaded)
+
+
+@with_numpy
+def test_memmap_persistence(tmpdir):
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample(10)
+    filename = tmpdir.join('test1.pkl').strpath
+    numpy_pickle.dump(a, filename)
+    b = numpy_pickle.load(filename, mmap_mode='r')
+
+    assert isinstance(b, np.memmap)
+
+    # Test with an object containing multiple numpy arrays
+    filename = tmpdir.join('test2.pkl').strpath
+    obj = ComplexTestObject()
+    numpy_pickle.dump(obj, filename)
+    obj_loaded = numpy_pickle.load(filename, mmap_mode='r')
+    assert isinstance(obj_loaded, type(obj))
+    assert isinstance(obj_loaded.array_float, np.memmap)
+    assert not obj_loaded.array_float.flags.writeable
+    assert isinstance(obj_loaded.array_int, np.memmap)
+    assert not obj_loaded.array_int.flags.writeable
+    # Memory map not allowed for numpy object arrays
+    assert not isinstance(obj_loaded.array_obj, np.memmap)
+    np.testing.assert_array_equal(obj_loaded.array_float,
+                                  obj.array_float)
+    np.testing.assert_array_equal(obj_loaded.array_int,
+                                  obj.array_int)
+    np.testing.assert_array_equal(obj_loaded.array_obj,
+                                  obj.array_obj)
+
+    # Test we can write in memmapped arrays
+    obj_loaded = numpy_pickle.load(filename, mmap_mode='r+')
+    assert obj_loaded.array_float.flags.writeable
+    obj_loaded.array_float[0:10] = 10.0
+    assert obj_loaded.array_int.flags.writeable
+    obj_loaded.array_int[0:10] = 10
+
+    obj_reloaded = numpy_pickle.load(filename, mmap_mode='r')
+    np.testing.assert_array_equal(obj_reloaded.array_float,
+                                  obj_loaded.array_float)
+    np.testing.assert_array_equal(obj_reloaded.array_int,
+                                  obj_loaded.array_int)
+
+    # Test w+ mode is caught and the mode has switched to r+
+    numpy_pickle.load(filename, mmap_mode='w+')
+    assert obj_loaded.array_int.flags.writeable
+    assert obj_loaded.array_int.mode == 'r+'
+    assert obj_loaded.array_float.flags.writeable
+    assert obj_loaded.array_float.mode == 'r+'
+
+
+@with_numpy
+def test_memmap_persistence_mixed_dtypes(tmpdir):
+    # loading datastructures that have sub-arrays with dtype=object
+    # should not prevent memmapping on fixed size dtype sub-arrays.
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample(10)
+    b = np.array([1, 'b'], dtype=object)
+    construct = (a, b)
+    filename = tmpdir.join('test.pkl').strpath
+    numpy_pickle.dump(construct, filename)
+    a_clone, b_clone = numpy_pickle.load(filename, mmap_mode='r')
+
+    # the floating point array has been memory mapped
+    assert isinstance(a_clone, np.memmap)
+
+    # the object-dtype array has been loaded in memory
+    assert not isinstance(b_clone, np.memmap)
+
+
+@with_numpy
+def test_masked_array_persistence(tmpdir):
+    # The special-case pickler fails, because saving masked_array
+    # is not implemented, but it just delegates to the standard pickler.
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample(10)
+    a = np.ma.masked_greater(a, 0.5)
+    filename = tmpdir.join('test.pkl').strpath
+    numpy_pickle.dump(a, filename)
+    b = numpy_pickle.load(filename, mmap_mode='r')
+    assert isinstance(b, np.ma.masked_array)
+
+
+@with_numpy
+def test_compress_mmap_mode_warning(tmpdir):
+    # Test the warning in case of compress + mmap_mode
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample(10)
+    this_filename = tmpdir.join('test.pkl').strpath
+    numpy_pickle.dump(a, this_filename, compress=1)
+    with warns(UserWarning) as warninfo:
+        numpy_pickle.load(this_filename, mmap_mode='r+')
+    debug_msg = "\n".join([str(w) for w in warninfo])
+    warninfo = [w.message for w in warninfo]
+    assert len(warninfo) == 1, debug_msg
+    assert (
+        str(warninfo[0]) ==
+        'mmap_mode "r+" is not compatible with compressed '
+        f'file {this_filename}. "r+" flag will be ignored.'
+    )
+
+
+@with_numpy
+@parametrize('cache_size', [None, 0, 10])
+def test_cache_size_warning(tmpdir, cache_size):
+    # Check deprecation warning raised when cache size is not None
+    filename = tmpdir.join('test.pkl').strpath
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample((10, 2))
+
+    warnings.simplefilter("always")
+    with warnings.catch_warnings(record=True) as warninfo:
+        numpy_pickle.dump(a, filename, cache_size=cache_size)
+    expected_nb_warnings = 1 if cache_size is not None else 0
+    assert len(warninfo) == expected_nb_warnings
+    for w in warninfo:
+        assert w.category == DeprecationWarning
+        assert (str(w.message) ==
+                "Please do not set 'cache_size' in joblib.dump, this "
+                "parameter has no effect and will be removed. You "
+                "used 'cache_size={0}'".format(cache_size))
+
+
+@with_numpy
+@with_memory_profiler
+@parametrize('compress', [True, False])
+def test_memory_usage(tmpdir, compress):
+    # Verify memory stays within expected bounds.
+    filename = tmpdir.join('test.pkl').strpath
+    small_array = np.ones((10, 10))
+    big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8)
+
+    for obj in (small_array, big_array):
+        size = obj.nbytes / 1e6
+        obj_filename = filename + str(np.random.randint(0, 1000))
+        mem_used = memory_used(numpy_pickle.dump,
+                               obj, obj_filename, compress=compress)
+
+        # The memory used to dump the object shouldn't exceed the buffer
+        # size used to write array chunks (16MB).
+        write_buf_size = _IO_BUFFER_SIZE + 16 * 1024 ** 2 / 1e6
+        assert mem_used <= write_buf_size
+
+        mem_used = memory_used(numpy_pickle.load, obj_filename)
+        # memory used should be less than array size + buffer size used to
+        # read the array chunk by chunk.
+        read_buf_size = 32 + _IO_BUFFER_SIZE  # MiB
+        assert mem_used < size + read_buf_size
+
+
+@with_numpy
+def test_compressed_pickle_dump_and_load(tmpdir):
+    expected_list = [np.arange(5, dtype=np.dtype('<i8')),
+                     np.arange(5, dtype=np.dtype('>i8')),
+                     np.arange(5, dtype=np.dtype('<f8')),
+                     np.arange(5, dtype=np.dtype('>f8')),
+                     np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
+                     np.arange(256, dtype=np.uint8).tobytes(),
+                     u"C'est l'\xe9t\xe9 !"]
+
+    fname = tmpdir.join('temp.pkl.gz').strpath
+
+    dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1)
+    assert len(dumped_filenames) == 1
+    result_list = numpy_pickle.load(fname)
+    for result, expected in zip(result_list, expected_list):
+        if isinstance(expected, np.ndarray):
+            expected = _ensure_native_byte_order(expected)
+            assert result.dtype == expected.dtype
+            np.testing.assert_equal(result, expected)
+        else:
+            assert result == expected
+
+
+def _check_pickle(filename, expected_list, mmap_mode=None):
+    """Helper function to test joblib pickle content.
+
+    Note: currently only pickles containing an iterable are supported
+    by this function.
+    """
+    version_match = re.match(r'.+py(\d)(\d).+', filename)
+    py_version_used_for_writing = int(version_match.group(1))
+
+    py_version_to_default_pickle_protocol = {2: 2, 3: 3}
+    pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4)
+    pickle_writing_protocol = py_version_to_default_pickle_protocol.get(
+        py_version_used_for_writing, 4)
+    if pickle_reading_protocol >= pickle_writing_protocol:
+        try:
+            with warnings.catch_warnings(record=True) as warninfo:
+                warnings.simplefilter('always')
+                warnings.filterwarnings(
+                    'ignore', module='numpy',
+                    message='The compiler package is deprecated')
+                result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode)
+            filename_base = os.path.basename(filename)
+            expected_nb_deprecation_warnings = 1 if (
+                "_0.9" in filename_base or "_0.8.4" in filename_base) else 0
+
+            expected_nb_user_warnings = 3 if (
+                re.search("_0.1.+.pkl$", filename_base) and
+                mmap_mode is not None) else 0
+            expected_nb_warnings = \
+                expected_nb_deprecation_warnings + expected_nb_user_warnings
+            assert len(warninfo) == expected_nb_warnings
+
+            deprecation_warnings = [
+                w for w in warninfo if issubclass(
+                    w.category, DeprecationWarning)]
+            user_warnings = [
+                w for w in warninfo if issubclass(
+                    w.category, UserWarning)]
+            for w in deprecation_warnings:
+                assert (str(w.message) ==
+                        "The file '{0}' has been generated with a joblib "
+                        "version less than 0.10. Please regenerate this "
+                        "pickle file.".format(filename))
+
+            for w in user_warnings:
+                escaped_filename = re.escape(filename)
+                assert re.search(
+                    f"memmapped.+{escaped_filename}.+segmentation fault",
+                    str(w.message))
+
+            for result, expected in zip(result_list, expected_list):
+                if isinstance(expected, np.ndarray):
+                    expected = _ensure_native_byte_order(expected)
+                    assert result.dtype == expected.dtype
+                    np.testing.assert_equal(result, expected)
+                else:
+                    assert result == expected
+        except Exception as exc:
+            # When trying to read with python 3 a pickle generated
+            # with python 2 we expect a user-friendly error
+            if py_version_used_for_writing == 2:
+                assert isinstance(exc, ValueError)
+                message = ('You may be trying to read with '
+                           'python 3 a joblib pickle generated with python 2.')
+                assert message in str(exc)
+            elif filename.endswith('.lz4') and with_lz4.args[0]:
+                assert isinstance(exc, ValueError)
+                assert LZ4_NOT_INSTALLED_ERROR in str(exc)
+            else:
+                raise
+    else:
+        # Pickle protocol used for writing is too high. We expect a
+        # "unsupported pickle protocol" error message
+        try:
+            numpy_pickle.load(filename)
+            raise AssertionError('Numpy pickle loading should '
+                                 'have raised a ValueError exception')
+        except ValueError as e:
+            message = 'unsupported pickle protocol: {0}'.format(
+                pickle_writing_protocol)
+            assert message in str(e.args)
+
+
+@with_numpy
+def test_joblib_pickle_across_python_versions():
+    # We need to be specific about dtypes in particular endianness
+    # because the pickles can be generated on one architecture and
+    # the tests run on another one. See
+    # https://github.com/joblib/joblib/issues/279.
+    expected_list = [np.arange(5, dtype=np.dtype('<i8')),
+                     np.arange(5, dtype=np.dtype('<f8')),
+                     np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
+                     np.arange(256, dtype=np.uint8).tobytes(),
+                     # np.matrix is a subclass of np.ndarray, here we want
+                     # to verify this type of object is correctly unpickled
+                     # among versions.
+                     np.matrix([0, 1, 2], dtype=np.dtype('<i8')),
+                     u"C'est l'\xe9t\xe9 !"]
+
+    # Testing all the compressed and non compressed
+    # pickles in joblib/test/data. These pickles were generated by
+    # the joblib/test/data/create_numpy_pickle.py script for the
+    # relevant python, joblib and numpy versions.
+    test_data_dir = os.path.dirname(os.path.abspath(data.__file__))
+
+    pickle_extensions = ('.pkl', '.gz', '.gzip', '.bz2', '.lz4')
+    if lzma is not None:
+        pickle_extensions += ('.xz', '.lzma')
+    pickle_filenames = [os.path.join(test_data_dir, fn)
+                        for fn in os.listdir(test_data_dir)
+                        if any(fn.endswith(ext) for ext in pickle_extensions)]
+
+    for fname in pickle_filenames:
+        _check_pickle(fname, expected_list)
+
+
+@with_numpy
+def test_joblib_pickle_across_python_versions_with_mmap():
+    expected_list = [np.arange(5, dtype=np.dtype('<i8')),
+                     np.arange(5, dtype=np.dtype('<f8')),
+                     np.array([1, 'abc', {'a': 1, 'b': 2}], dtype='O'),
+                     np.arange(256, dtype=np.uint8).tobytes(),
+                     # np.matrix is a subclass of np.ndarray, here we want
+                     # to verify this type of object is correctly unpickled
+                     # among versions.
+                     np.matrix([0, 1, 2], dtype=np.dtype('<i8')),
+                     u"C'est l'\xe9t\xe9 !"]
+
+    test_data_dir = os.path.dirname(os.path.abspath(data.__file__))
+
+    pickle_filenames = [
+        os.path.join(test_data_dir, fn)
+        for fn in os.listdir(test_data_dir) if fn.endswith('.pkl')]
+    for fname in pickle_filenames:
+        _check_pickle(fname, expected_list, mmap_mode='r')
+
+
+@with_numpy
+def test_numpy_array_byte_order_mismatch_detection():
+    # List of numpy arrays with big endian byteorder.
+    be_arrays = [np.array([(1, 2.0), (3, 4.0)],
+                          dtype=[('', '>i8'), ('', '>f8')]),
+                 np.arange(3, dtype=np.dtype('>i8')),
+                 np.arange(3, dtype=np.dtype('>f8'))]
+
+    # Verify the byteorder mismatch is correctly detected.
+    for array in be_arrays:
+        if sys.byteorder == 'big':
+            assert not _is_numpy_array_byte_order_mismatch(array)
+        else:
+            assert _is_numpy_array_byte_order_mismatch(array)
+        converted = _ensure_native_byte_order(array)
+        if converted.dtype.fields:
+            for f in converted.dtype.fields.values():
+                assert f[0].byteorder == '='
+        else:
+            assert converted.dtype.byteorder == "="
+
+    # List of numpy arrays with little endian byteorder.
+    le_arrays = [np.array([(1, 2.0), (3, 4.0)],
+                          dtype=[('', '<i8'), ('', '<f8')]),
+                 np.arange(3, dtype=np.dtype('<i8')),
+                 np.arange(3, dtype=np.dtype('<f8'))]
+
+    # Verify the byteorder mismatch is correctly detected.
+    for array in le_arrays:
+        if sys.byteorder == 'little':
+            assert not _is_numpy_array_byte_order_mismatch(array)
+        else:
+            assert _is_numpy_array_byte_order_mismatch(array)
+        converted = _ensure_native_byte_order(array)
+        if converted.dtype.fields:
+            for f in converted.dtype.fields.values():
+                assert f[0].byteorder == '='
+        else:
+            assert converted.dtype.byteorder == "="
+
+
+@parametrize('compress_tuple', [('zlib', 3), ('gzip', 3)])
+def test_compress_tuple_argument(tmpdir, compress_tuple):
+    # Verify the tuple is correctly taken into account.
+    filename = tmpdir.join('test.pkl').strpath
+    numpy_pickle.dump("dummy", filename,
+                      compress=compress_tuple)
+    # Verify the file contains the right magic number
+    with open(filename, 'rb') as f:
+        assert _detect_compressor(f) == compress_tuple[0]
+
+
+@parametrize('compress_tuple,message',
+             [(('zlib', 3, 'extra'),  # wrong compress tuple
+               'Compress argument tuple should contain exactly 2 elements'),
+              (('wrong', 3),  # wrong compress method
+               'Non valid compression method given: "{}"'.format('wrong')),
+              (('zlib', 'wrong'),  # wrong compress level
+               'Non valid compress level given: "{}"'.format('wrong'))])
+def test_compress_tuple_argument_exception(tmpdir, compress_tuple, message):
+    filename = tmpdir.join('test.pkl').strpath
+    # Verify setting a wrong compress tuple raises a ValueError.
+    with raises(ValueError) as excinfo:
+        numpy_pickle.dump('dummy', filename, compress=compress_tuple)
+    excinfo.match(message)
+
+
+@parametrize('compress_string', ['zlib', 'gzip'])
+def test_compress_string_argument(tmpdir, compress_string):
+    # Verify the string is correctly taken into account.
+    filename = tmpdir.join('test.pkl').strpath
+    numpy_pickle.dump("dummy", filename,
+                      compress=compress_string)
+    # Verify the file contains the right magic number
+    with open(filename, 'rb') as f:
+        assert _detect_compressor(f) == compress_string
+
+
+@with_numpy
+@parametrize('compress', [1, 3, 6])
+@parametrize('cmethod', _COMPRESSORS)
+def test_joblib_compression_formats(tmpdir, compress, cmethod):
+    filename = tmpdir.join('test.pkl').strpath
+    objects = (np.ones(shape=(100, 100), dtype='f8'),
+               range(10),
+               {'a': 1, 2: 'b'}, [], (), {}, 0, 1.0)
+
+    if cmethod in ("lzma", "xz") and lzma is None:
+        pytest.skip("lzma support is not available")
+
+    elif cmethod == 'lz4' and with_lz4.args[0]:
+        # Skip the test if lz4 is not installed. We here use the with_lz4
+        # skipif fixture whose argument is True when lz4 is not installed
+        pytest.skip("lz4 is not installed.")
+
+    dump_filename = filename + "." + cmethod
+    for obj in objects:
+        numpy_pickle.dump(obj, dump_filename, compress=(cmethod, compress))
+        # Verify the file contains the right magic number
+        with open(dump_filename, 'rb') as f:
+            assert _detect_compressor(f) == cmethod
+        # Verify the reloaded object is correct
+        obj_reloaded = numpy_pickle.load(dump_filename)
+        assert isinstance(obj_reloaded, type(obj))
+        if isinstance(obj, np.ndarray):
+            np.testing.assert_array_equal(obj_reloaded, obj)
+        else:
+            assert obj_reloaded == obj
+
+
+def _gzip_file_decompress(source_filename, target_filename):
+    """Decompress a gzip file."""
+    with closing(gzip.GzipFile(source_filename, "rb")) as fo:
+        buf = fo.read()
+
+    with open(target_filename, "wb") as fo:
+        fo.write(buf)
+
+
+def _zlib_file_decompress(source_filename, target_filename):
+    """Decompress a zlib file."""
+    with open(source_filename, 'rb') as fo:
+        buf = zlib.decompress(fo.read())
+
+    with open(target_filename, 'wb') as fo:
+        fo.write(buf)
+
+
+@parametrize('extension,decompress',
+             [('.z', _zlib_file_decompress),
+              ('.gz', _gzip_file_decompress)])
+def test_load_externally_decompressed_files(tmpdir, extension, decompress):
+    # Test that BinaryZlibFile generates valid gzip and zlib compressed files.
+    obj = "a string to persist"
+    filename_raw = tmpdir.join('test.pkl').strpath
+
+    filename_compressed = filename_raw + extension
+    # Use automatic extension detection to compress with the right method.
+    numpy_pickle.dump(obj, filename_compressed)
+
+    # Decompress with the corresponding method
+    decompress(filename_compressed, filename_raw)
+
+    # Test that the uncompressed pickle can be loaded and
+    # that the result is correct.
+    obj_reloaded = numpy_pickle.load(filename_raw)
+    assert obj == obj_reloaded
+
+
+@parametrize('extension,cmethod',
+             # valid compressor extensions
+             [('.z', 'zlib'),
+              ('.gz', 'gzip'),
+              ('.bz2', 'bz2'),
+              ('.lzma', 'lzma'),
+              ('.xz', 'xz'),
+              # invalid compressor extensions
+              ('.pkl', 'not-compressed'),
+              ('', 'not-compressed')])
+def test_compression_using_file_extension(tmpdir, extension, cmethod):
+    if cmethod in ("lzma", "xz") and lzma is None:
+        pytest.skip("lzma is missing")
+    # test that the compression method corresponds to the filename extension.
+    filename = tmpdir.join('test.pkl').strpath
+    obj = "object to dump"
+
+    dump_fname = filename + extension
+    numpy_pickle.dump(obj, dump_fname)
+    # Verify the file contains the right magic number
+    with open(dump_fname, 'rb') as f:
+        assert _detect_compressor(f) == cmethod
+    # Verify the reloaded object is correct
+    obj_reloaded = numpy_pickle.load(dump_fname)
+    assert isinstance(obj_reloaded, type(obj))
+    assert obj_reloaded == obj
+
+
+@with_numpy
+def test_file_handle_persistence(tmpdir):
+    objs = [np.random.random((10, 10)), "some data"]
+    fobjs = [bz2.BZ2File, gzip.GzipFile]
+    if lzma is not None:
+        fobjs += [lzma.LZMAFile]
+    filename = tmpdir.join('test.pkl').strpath
+
+    for obj in objs:
+        for fobj in fobjs:
+            with fobj(filename, 'wb') as f:
+                numpy_pickle.dump(obj, f)
+
+            # using the same decompressor prevents from internally
+            # decompressing again.
+            with fobj(filename, 'rb') as f:
+                obj_reloaded = numpy_pickle.load(f)
+
+            # when needed, the correct decompressor should be used when
+            # passing a raw file handle.
+            with open(filename, 'rb') as f:
+                obj_reloaded_2 = numpy_pickle.load(f)
+
+            if isinstance(obj, np.ndarray):
+                np.testing.assert_array_equal(obj_reloaded, obj)
+                np.testing.assert_array_equal(obj_reloaded_2, obj)
+            else:
+                assert obj_reloaded == obj
+                assert obj_reloaded_2 == obj
+
+
+@with_numpy
+def test_in_memory_persistence():
+    objs = [np.random.random((10, 10)), "some data"]
+    for obj in objs:
+        f = io.BytesIO()
+        numpy_pickle.dump(obj, f)
+        obj_reloaded = numpy_pickle.load(f)
+        if isinstance(obj, np.ndarray):
+            np.testing.assert_array_equal(obj_reloaded, obj)
+        else:
+            assert obj_reloaded == obj
+
+
+@with_numpy
+def test_file_handle_persistence_mmap(tmpdir):
+    obj = np.random.random((10, 10))
+    filename = tmpdir.join('test.pkl').strpath
+
+    with open(filename, 'wb') as f:
+        numpy_pickle.dump(obj, f)
+
+    with open(filename, 'rb') as f:
+        obj_reloaded = numpy_pickle.load(f, mmap_mode='r+')
+
+    np.testing.assert_array_equal(obj_reloaded, obj)
+
+
+@with_numpy
+def test_file_handle_persistence_compressed_mmap(tmpdir):
+    obj = np.random.random((10, 10))
+    filename = tmpdir.join('test.pkl').strpath
+
+    with open(filename, 'wb') as f:
+        numpy_pickle.dump(obj, f, compress=('gzip', 3))
+
+    with closing(gzip.GzipFile(filename, 'rb')) as f:
+        with warns(UserWarning) as warninfo:
+            numpy_pickle.load(f, mmap_mode='r+')
+        assert len(warninfo) == 1
+        assert (str(warninfo[0].message) ==
+                '"%(fileobj)r" is not a raw file, mmap_mode "%(mmap_mode)s" '
+                'flag will be ignored.' % {'fileobj': f, 'mmap_mode': 'r+'})
+
+
+@with_numpy
+def test_file_handle_persistence_in_memory_mmap():
+    obj = np.random.random((10, 10))
+    buf = io.BytesIO()
+
+    numpy_pickle.dump(obj, buf)
+
+    with warns(UserWarning) as warninfo:
+        numpy_pickle.load(buf, mmap_mode='r+')
+    assert len(warninfo) == 1
+    assert (str(warninfo[0].message) ==
+            'In memory persistence is not compatible with mmap_mode '
+            '"%(mmap_mode)s" flag passed. mmap_mode option will be '
+            'ignored.' % {'mmap_mode': 'r+'})
+
+
+@parametrize('data', [b'a little data as bytes.',
+                      # More bytes
+                      10000 * "{}".format(
+                          random.randint(0, 1000) * 1000).encode('latin-1')],
+             ids=["a little data as bytes.", "a large data as bytes."])
+@parametrize('compress_level', [1, 3, 9])
+def test_binary_zlibfile(tmpdir, data, compress_level):
+    filename = tmpdir.join('test.pkl').strpath
+    # Regular cases
+    with open(filename, 'wb') as f:
+        with BinaryZlibFile(f, 'wb',
+                            compresslevel=compress_level) as fz:
+            assert fz.writable()
+            fz.write(data)
+            assert fz.fileno() == f.fileno()
+            with raises(io.UnsupportedOperation):
+                fz._check_can_read()
+
+            with raises(io.UnsupportedOperation):
+                fz._check_can_seek()
+        assert fz.closed
+        with raises(ValueError):
+            fz._check_not_closed()
+
+    with open(filename, 'rb') as f:
+        with BinaryZlibFile(f) as fz:
+            assert fz.readable()
+            assert fz.seekable()
+            assert fz.fileno() == f.fileno()
+            assert fz.read() == data
+            with raises(io.UnsupportedOperation):
+                fz._check_can_write()
+            assert fz.seekable()
+            fz.seek(0)
+            assert fz.tell() == 0
+        assert fz.closed
+
+    # Test with a filename as input
+    with BinaryZlibFile(filename, 'wb',
+                        compresslevel=compress_level) as fz:
+        assert fz.writable()
+        fz.write(data)
+
+    with BinaryZlibFile(filename, 'rb') as fz:
+        assert fz.read() == data
+        assert fz.seekable()
+
+    # Test without context manager
+    fz = BinaryZlibFile(filename, 'wb', compresslevel=compress_level)
+    assert fz.writable()
+    fz.write(data)
+    fz.close()
+
+    fz = BinaryZlibFile(filename, 'rb')
+    assert fz.read() == data
+    fz.close()
+
+
+@parametrize('bad_value', [-1, 10, 15, 'a', (), {}])
+def test_binary_zlibfile_bad_compression_levels(tmpdir, bad_value):
+    filename = tmpdir.join('test.pkl').strpath
+    with raises(ValueError) as excinfo:
+        BinaryZlibFile(filename, 'wb', compresslevel=bad_value)
+    pattern = re.escape("'compresslevel' must be an integer between 1 and 9. "
+                        "You provided 'compresslevel={}'".format(bad_value))
+    excinfo.match(pattern)
+
+
+@parametrize('bad_mode', ['a', 'x', 'r', 'w', 1, 2])
+def test_binary_zlibfile_invalid_modes(tmpdir, bad_mode):
+    filename = tmpdir.join('test.pkl').strpath
+    with raises(ValueError) as excinfo:
+        BinaryZlibFile(filename, bad_mode)
+    excinfo.match("Invalid mode")
+
+
+@parametrize('bad_file', [1, (), {}])
+def test_binary_zlibfile_invalid_filename_type(bad_file):
+    with raises(TypeError) as excinfo:
+        BinaryZlibFile(bad_file, 'rb')
+    excinfo.match("filename must be a str or bytes object, or a file")
+
+
+###############################################################################
+# Test dumping array subclasses
+if np is not None:
+
+    class SubArray(np.ndarray):
+
+        def __reduce__(self):
+            return _load_sub_array, (np.asarray(self), )
+
+    def _load_sub_array(arr):
+        d = SubArray(arr.shape)
+        d[:] = arr
+        return d
+
+    class ComplexTestObject:
+        """A complex object containing numpy arrays as attributes."""
+
+        def __init__(self):
+            self.array_float = np.arange(100, dtype='float64')
+            self.array_int = np.ones(100, dtype='int32')
+            self.array_obj = np.array(['a', 10, 20.0], dtype='object')
+
+
+@with_numpy
+def test_numpy_subclass(tmpdir):
+    filename = tmpdir.join('test.pkl').strpath
+    a = SubArray((10,))
+    numpy_pickle.dump(a, filename)
+    c = numpy_pickle.load(filename)
+    assert isinstance(c, SubArray)
+    np.testing.assert_array_equal(c, a)
+
+
+def test_pathlib(tmpdir):
+    filename = tmpdir.join('test.pkl').strpath
+    value = 123
+    numpy_pickle.dump(value, Path(filename))
+    assert numpy_pickle.load(filename) == value
+    numpy_pickle.dump(value, filename)
+    assert numpy_pickle.load(Path(filename)) == value
+
+
+@with_numpy
+def test_non_contiguous_array_pickling(tmpdir):
+    filename = tmpdir.join('test.pkl').strpath
+
+    for array in [  # Array that triggers a contiguousness issue with nditer,
+                    # see https://github.com/joblib/joblib/pull/352 and see
+                    # https://github.com/joblib/joblib/pull/353
+                    np.asfortranarray([[1, 2], [3, 4]])[1:],
+                    # Non contiguous array that works fine with nditer
+                    np.ones((10, 50, 20), order='F')[:, :1, :]]:
+        assert not array.flags.c_contiguous
+        assert not array.flags.f_contiguous
+        numpy_pickle.dump(array, filename)
+        array_reloaded = numpy_pickle.load(filename)
+        np.testing.assert_array_equal(array_reloaded, array)
+
+
+@with_numpy
+def test_pickle_highest_protocol(tmpdir):
+    # ensure persistence of a numpy array is valid even when using
+    # the pickle HIGHEST_PROTOCOL.
+    # see https://github.com/joblib/joblib/issues/362
+
+    filename = tmpdir.join('test.pkl').strpath
+    test_array = np.zeros(10)
+
+    numpy_pickle.dump(test_array, filename, protocol=pickle.HIGHEST_PROTOCOL)
+    array_reloaded = numpy_pickle.load(filename)
+
+    np.testing.assert_array_equal(array_reloaded, test_array)
+
+
+@with_numpy
+def test_pickle_in_socket():
+    # test that joblib can pickle in sockets
+    test_array = np.arange(10)
+    _ADDR = ("localhost", 12345)
+    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    listener.bind(_ADDR)
+    listener.listen(1)
+
+    with socket.create_connection(_ADDR) as client:
+        server, client_addr = listener.accept()
+
+        with server.makefile("wb") as sf:
+            numpy_pickle.dump(test_array, sf)
+
+        with client.makefile("rb") as cf:
+            array_reloaded = numpy_pickle.load(cf)
+
+        np.testing.assert_array_equal(array_reloaded, test_array)
+
+        # Check that a byte-aligned numpy array written to a file can be sent
+        # over a socket and then read on the other side
+        bytes_to_send = io.BytesIO()
+        numpy_pickle.dump(test_array, bytes_to_send)
+        server.send(bytes_to_send.getvalue())
+
+        with client.makefile("rb") as cf:
+            array_reloaded = numpy_pickle.load(cf)
+
+        np.testing.assert_array_equal(array_reloaded, test_array)
+
+
+@with_numpy
+def test_load_memmap_with_big_offset(tmpdir):
+    # Test that numpy memmap offset is set correctly if greater than
+    # mmap.ALLOCATIONGRANULARITY, see
+    # https://github.com/joblib/joblib/issues/451 and
+    # https://github.com/numpy/numpy/pull/8443 for more details.
+    fname = tmpdir.join('test.mmap').strpath
+    size = mmap.ALLOCATIONGRANULARITY
+    obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')]
+    numpy_pickle.dump(obj, fname)
+    memmaps = numpy_pickle.load(fname, mmap_mode='r')
+    assert isinstance(memmaps[1], np.memmap)
+    assert memmaps[1].offset > size
+    np.testing.assert_array_equal(obj, memmaps)
+
+
+def test_register_compressor(tmpdir):
+    # Check that registering a compressor works.
+    compressor_name = 'test-name'
+    compressor_prefix = 'test-prefix'
+
+    class BinaryCompressorTestFile(io.BufferedIOBase):
+        pass
+
+    class BinaryCompressorTestWrapper(CompressorWrapper):
+
+        def __init__(self):
+            CompressorWrapper.__init__(self, obj=BinaryCompressorTestFile,
+                                       prefix=compressor_prefix)
+
+    register_compressor(compressor_name, BinaryCompressorTestWrapper())
+
+    assert (_COMPRESSORS[compressor_name].fileobj_factory ==
+            BinaryCompressorTestFile)
+    assert _COMPRESSORS[compressor_name].prefix == compressor_prefix
+
+    # Remove this dummy compressor from the registered compressors,
+    # because other tests might fail because of it.
+    _COMPRESSORS.pop(compressor_name)
+
+
+@parametrize('invalid_name', [1, (), {}])
+def test_register_compressor_invalid_name(invalid_name):
+    # Test that registering an invalid compressor name is not allowed.
+    with raises(ValueError) as excinfo:
+        register_compressor(invalid_name, None)
+    excinfo.match("Compressor name should be a string")
+
+
+def test_register_compressor_invalid_fileobj():
+    # Test that registering an invalid file object is not allowed.
+
+    class InvalidFileObject():
+        pass
+
+    class InvalidFileObjectWrapper(CompressorWrapper):
+        def __init__(self):
+            CompressorWrapper.__init__(self, obj=InvalidFileObject,
+                                       prefix=b'prefix')
+
+    with raises(ValueError) as excinfo:
+        register_compressor('invalid', InvalidFileObjectWrapper())
+
+    excinfo.match("Compressor 'fileobj_factory' attribute should implement "
+                  "the file object interface")
+
+
+class AnotherZlibCompressorWrapper(CompressorWrapper):
+
+    def __init__(self):
+        CompressorWrapper.__init__(self, obj=BinaryZlibFile, prefix=b'prefix')
+
+
+class StandardLibGzipCompressorWrapper(CompressorWrapper):
+
+    def __init__(self):
+        CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b'prefix')
+
+
+def test_register_compressor_already_registered():
+    # Test registration of already registered compressors.
+    compressor_name = 'test-name'
+
+    # register a test compressor
+    register_compressor(compressor_name, AnotherZlibCompressorWrapper())
+
+    with raises(ValueError) as excinfo:
+        register_compressor(compressor_name,
+                            StandardLibGzipCompressorWrapper())
+    excinfo.match("Compressor '{}' already registered."
+                  .format(compressor_name))
+
+    register_compressor(compressor_name, StandardLibGzipCompressorWrapper(),
+                        force=True)
+
+    assert compressor_name in _COMPRESSORS
+    assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile
+
+    # Remove this dummy compressor from the registered compressors,
+    # because other tests might fail because of it.
+    _COMPRESSORS.pop(compressor_name)
+
+
+@with_lz4
+def test_lz4_compression(tmpdir):
+    # Check that lz4 can be used when the dependency is available.
+    import lz4.frame
+    compressor = 'lz4'
+    assert compressor in _COMPRESSORS
+    assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile
+
+    fname = tmpdir.join('test.pkl').strpath
+    data = 'test data'
+    numpy_pickle.dump(data, fname, compress=compressor)
+
+    with open(fname, 'rb') as f:
+        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
+    assert numpy_pickle.load(fname) == data
+
+    # Test that LZ4 is applied based on the file extension
+    numpy_pickle.dump(data, fname + '.lz4')
+    with open(fname + '.lz4', 'rb') as f:
+        assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX
+    assert numpy_pickle.load(fname + '.lz4') == data
+
+
+@without_lz4
+def test_lz4_compression_without_lz4(tmpdir):
+    # Check that lz4 cannot be used when the dependency is not available.
+    fname = tmpdir.join('test.nolz4').strpath
+    data = 'test data'
+    msg = LZ4_NOT_INSTALLED_ERROR
+    with raises(ValueError) as excinfo:
+        numpy_pickle.dump(data, fname, compress='lz4')
+    excinfo.match(msg)
+
+    with raises(ValueError) as excinfo:
+        numpy_pickle.dump(data, fname + '.lz4')
+    excinfo.match(msg)
+
+
+protocols = [pickle.DEFAULT_PROTOCOL]
+if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL:
+    protocols.append(pickle.HIGHEST_PROTOCOL)
+
+
+@with_numpy
+@parametrize('protocol', protocols)
+def test_memmap_alignment_padding(tmpdir, protocol):
+    # Test that memmapped arrays returned by numpy.load are correctly aligned
+    fname = tmpdir.join('test.mmap').strpath
+
+    a = np.random.randn(2)
+    numpy_pickle.dump(a, fname, protocol=protocol)
+    memmap = numpy_pickle.load(fname, mmap_mode='r')
+    assert isinstance(memmap, np.memmap)
+    np.testing.assert_array_equal(a, memmap)
+    assert (
+        memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
+    assert memmap.flags.aligned
+
+    array_list = [
+        np.random.randn(2), np.random.randn(2),
+        np.random.randn(2), np.random.randn(2)
+    ]
+
+    # On Windows OSError 22 if reusing the same path for memmap ...
+    fname = tmpdir.join('test1.mmap').strpath
+    numpy_pickle.dump(array_list, fname, protocol=protocol)
+    l_reloaded = numpy_pickle.load(fname, mmap_mode='r')
+
+    for idx, memmap in enumerate(l_reloaded):
+        assert isinstance(memmap, np.memmap)
+        np.testing.assert_array_equal(array_list[idx], memmap)
+        assert (
+            memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
+        assert memmap.flags.aligned
+
+    array_dict = {
+        'a0': np.arange(2, dtype=np.uint8),
+        'a1': np.arange(3, dtype=np.uint8),
+        'a2': np.arange(5, dtype=np.uint8),
+        'a3': np.arange(7, dtype=np.uint8),
+        'a4': np.arange(11, dtype=np.uint8),
+        'a5': np.arange(13, dtype=np.uint8),
+        'a6': np.arange(17, dtype=np.uint8),
+        'a7': np.arange(19, dtype=np.uint8),
+        'a8': np.arange(23, dtype=np.uint8),
+    }
+
+    # On Windows OSError 22 if reusing the same path for memmap ...
+    fname = tmpdir.join('test2.mmap').strpath
+    numpy_pickle.dump(array_dict, fname, protocol=protocol)
+    d_reloaded = numpy_pickle.load(fname, mmap_mode='r')
+
+    for key, memmap in d_reloaded.items():
+        assert isinstance(memmap, np.memmap)
+        np.testing.assert_array_equal(array_dict[key], memmap)
+        assert (
+            memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0)
+        assert memmap.flags.aligned
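Taken together, these tests document the numpy_pickle workflow behind the public joblib.dump/joblib.load pair: the compress argument (or the filename extension) selects a compressor, and mmap_mode memory-maps arrays on load, which only works for uncompressed dumps. A minimal round-trip sketch (file paths are illustrative):

import numpy as np
from joblib import numpy_pickle  # joblib.dump / joblib.load wrap this module

a = np.arange(1000, dtype=np.float64)

# Explicit compressor and level; a '.z', '.gz' or '.bz2' extension would
# select the compressor automatically instead.
numpy_pickle.dump(a, '/tmp/demo.pkl', compress=('zlib', 3))
assert np.array_equal(numpy_pickle.load('/tmp/demo.pkl'), a)

# Memory-mapping on load requires an uncompressed dump.
numpy_pickle.dump(a, '/tmp/demo_raw.pkl')
b = numpy_pickle.load('/tmp/demo_raw.pkl', mmap_mode='r')
assert isinstance(b, np.memmap) and not b.flags.writeable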
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py ADDED
@@ -0,0 +1,16 @@
+"""Test the old numpy pickler, compatibility version."""
+
+# numpy_pickle is not a drop-in replacement of pickle, as it takes
+# filenames instead of open files as arguments.
+from joblib import numpy_pickle_compat
+
+
+def test_z_file(tmpdir):
+    # Test saving and loading data with Zfiles.
+    filename = tmpdir.join('test.pkl').strpath
+    data = numpy_pickle_compat.asbytes('Foo, \n Bar, baz, \n\nfoobar')
+    with open(filename, 'wb') as f:
+        numpy_pickle_compat.write_zfile(f, data)
+    with open(filename, 'rb') as f:
+        data_read = numpy_pickle_compat.read_zfile(f)
+    assert data == data_read
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_parallel.py ADDED
@@ -0,0 +1,2056 @@
1
+ """
2
+ Test the parallel module.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2010-2011 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+
9
+ import os
10
+ import sys
11
+ import time
12
+ import mmap
13
+ import weakref
14
+ import warnings
15
+ import threading
16
+ from traceback import format_exception
17
+ from math import sqrt
18
+ from time import sleep
19
+ from pickle import PicklingError
20
+ from contextlib import nullcontext
21
+ from multiprocessing import TimeoutError
22
+ import pytest
23
+
24
+ import joblib
25
+ from joblib import parallel
26
+ from joblib import dump, load
27
+
28
+ from joblib._multiprocessing_helpers import mp
29
+
30
+ from joblib.test.common import np, with_numpy
31
+ from joblib.test.common import with_multiprocessing
32
+ from joblib.test.common import IS_PYPY, force_gc_pypy
33
+ from joblib.testing import (parametrize, raises, check_subprocess_call,
34
+ skipif, warns)
35
+
36
+ if mp is not None:
37
+ # Loky is not available if multiprocessing is not
38
+ from joblib.externals.loky import get_reusable_executor
39
+
40
+ from queue import Queue
41
+
42
+ try:
43
+ import posix
44
+ except ImportError:
45
+ posix = None
46
+
47
+ try:
48
+ from ._openmp_test_helper.parallel_sum import parallel_sum
49
+ except ImportError:
50
+ parallel_sum = None
51
+
52
+ try:
53
+ import distributed
54
+ except ImportError:
55
+ distributed = None
56
+
57
+ from joblib._parallel_backends import SequentialBackend
58
+ from joblib._parallel_backends import ThreadingBackend
59
+ from joblib._parallel_backends import MultiprocessingBackend
60
+ from joblib._parallel_backends import ParallelBackendBase
61
+ from joblib._parallel_backends import LokyBackend
62
+
63
+ from joblib.parallel import Parallel, delayed
64
+ from joblib.parallel import parallel_config
65
+ from joblib.parallel import parallel_backend
66
+ from joblib.parallel import register_parallel_backend
67
+ from joblib.parallel import effective_n_jobs, cpu_count
68
+
69
+ from joblib.parallel import mp, BACKENDS, DEFAULT_BACKEND
70
+
71
+
72
+ RETURN_GENERATOR_BACKENDS = BACKENDS.copy()
73
+ RETURN_GENERATOR_BACKENDS.pop("multiprocessing", None)
74
+
75
+ ALL_VALID_BACKENDS = [None] + sorted(BACKENDS.keys())
76
+ # Add instances of backend classes deriving from ParallelBackendBase
77
+ ALL_VALID_BACKENDS += [BACKENDS[backend_str]() for backend_str in BACKENDS]
78
+ if mp is None:
79
+ PROCESS_BACKENDS = []
80
+ else:
81
+ PROCESS_BACKENDS = ['multiprocessing', 'loky']
82
+ PARALLEL_BACKENDS = PROCESS_BACKENDS + ['threading']
83
+
84
+ if hasattr(mp, 'get_context'):
85
+ # Custom multiprocessing context in Python 3.4+
86
+ ALL_VALID_BACKENDS.append(mp.get_context('spawn'))
87
+
88
+ DefaultBackend = BACKENDS[DEFAULT_BACKEND]
89
+
90
+
91
+ def get_workers(backend):
92
+ return getattr(backend, '_pool', getattr(backend, '_workers', None))
93
+
94
+
95
+ def division(x, y):
96
+ return x / y
97
+
98
+
99
+ def square(x):
100
+ return x ** 2
101
+
102
+
103
+ class MyExceptionWithFinickyInit(Exception):
104
+ """An exception class with non trivial __init__
105
+ """
106
+ def __init__(self, a, b, c, d):
107
+ pass
108
+
109
+
110
+ def exception_raiser(x, custom_exception=False):
111
+ if x == 7:
112
+ raise (MyExceptionWithFinickyInit('a', 'b', 'c', 'd')
113
+ if custom_exception else ValueError)
114
+ return x
115
+
116
+
117
+ def interrupt_raiser(x):
118
+ time.sleep(.05)
119
+ raise KeyboardInterrupt
120
+
121
+
122
+ def f(x, y=0, z=0):
123
+ """ A module-level function so that it can be spawn with
124
+ multiprocessing.
125
+ """
126
+ return x ** 2 + y + z
127
+
128
+
129
+ def _active_backend_type():
130
+ return type(parallel.get_active_backend()[0])
131
+
132
+
133
+ def parallel_func(inner_n_jobs, backend):
134
+ return Parallel(n_jobs=inner_n_jobs, backend=backend)(
135
+ delayed(square)(i) for i in range(3))
136
+
137
+
138
+ ###############################################################################
139
+ def test_cpu_count():
140
+ assert cpu_count() > 0
141
+
142
+
143
+ def test_effective_n_jobs():
144
+ assert effective_n_jobs() > 0
145
+
146
+
147
+ @parametrize("context", [parallel_config, parallel_backend])
148
+ @pytest.mark.parametrize(
149
+ "backend_n_jobs, expected_n_jobs",
150
+ [(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)],
151
+ ids=["positive-int", "negative-int", "None"]
152
+ )
153
+ @with_multiprocessing
154
+ def test_effective_n_jobs_None(context, backend_n_jobs, expected_n_jobs):
155
+ # check the number of effective jobs when `n_jobs=None`
156
+ # non-regression test for https://github.com/joblib/joblib/issues/984
157
+ with context("threading", n_jobs=backend_n_jobs):
158
+ # when using a backend, the default number of jobs will be the one set
159
+ # in the backend
160
+ assert effective_n_jobs(n_jobs=None) == expected_n_jobs
161
+ # without any backend, None will default to a single job
162
+ assert effective_n_jobs(n_jobs=None) == 1
163
+
164
+
165
+ ###############################################################################
166
+ # Test parallel
167
+
168
+ @parametrize('backend', ALL_VALID_BACKENDS)
169
+ @parametrize('n_jobs', [1, 2, -1, -2])
170
+ @parametrize('verbose', [2, 11, 100])
171
+ def test_simple_parallel(backend, n_jobs, verbose):
172
+ assert ([square(x) for x in range(5)] ==
173
+ Parallel(n_jobs=n_jobs, backend=backend,
174
+ verbose=verbose)(
175
+ delayed(square)(x) for x in range(5)))
176
+
177
+
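For orientation, the Parallel/delayed pattern these tests exercise boils down to the following minimal sketch (assuming only that joblib is importable):

    from joblib import Parallel, delayed

    def square(x):
        return x ** 2

    # Two workers; the backend defaults to 'loky' when multiprocessing
    # is available.
    results = Parallel(n_jobs=2)(delayed(square)(x) for x in range(5))
    assert results == [0, 1, 4, 9, 16]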
178
+ @parametrize('backend', ALL_VALID_BACKENDS)
179
+ def test_main_thread_renamed_no_warning(backend, monkeypatch):
180
+ # Check that no default backend relies on the name of the main thread:
181
+ # https://github.com/joblib/joblib/issues/180#issuecomment-253266247
182
+ # Some programs use a different name for the main thread. This is the case
183
+ # for uWSGI apps for instance.
184
+ monkeypatch.setattr(target=threading.current_thread(), name='name',
185
+ value='some_new_name_for_the_main_thread')
186
+
187
+ with warnings.catch_warnings(record=True) as warninfo:
188
+ results = Parallel(n_jobs=2, backend=backend)(
189
+ delayed(square)(x) for x in range(3))
190
+ assert results == [0, 1, 4]
191
+
192
+ # Due to the default parameters of LokyBackend, there is a chance that
193
+ # warninfo catches warnings from worker timeouts. We remove them if they exist
194
+ warninfo = [w for w in warninfo if "worker timeout" not in str(w.message)]
195
+
196
+ # The multiprocessing backend will raise a warning when detecting that it is
197
+ # started from the non-main thread. Let's check that there is no false
198
+ # positive because of the name change.
199
+ assert len(warninfo) == 0
200
+
201
+
202
+ def _assert_warning_nested(backend, inner_n_jobs, expected):
203
+ with warnings.catch_warnings(record=True) as warninfo:
204
+ warnings.simplefilter("always")
205
+ parallel_func(backend=backend, inner_n_jobs=inner_n_jobs)
206
+
207
+ warninfo = [w.message for w in warninfo]
208
+ if expected:
209
+ if warninfo:
210
+ warnings_are_correct = all(
211
+ 'backed parallel loops cannot' in each.args[0]
212
+ for each in warninfo
213
+ )
214
+ # With Python nogil, when the outer backend is threading, we might
215
+ # see more than one warning
216
+ warnings_have_the_right_length = (
217
+ len(warninfo) >= 1 if getattr(sys.flags, 'nogil', False)
218
+ else len(warninfo) == 1)
219
+ return warnings_are_correct and warnings_have_the_right_length
220
+
221
+ return False
222
+ else:
223
+ assert not warninfo
224
+ return True
225
+
226
+
227
+ @with_multiprocessing
228
+ @parametrize('parent_backend,child_backend,expected', [
229
+ ('loky', 'multiprocessing', True),
230
+ ('loky', 'loky', False),
231
+ ('multiprocessing', 'multiprocessing', True),
232
+ ('multiprocessing', 'loky', True),
233
+ ('threading', 'multiprocessing', True),
234
+ ('threading', 'loky', True),
235
+ ])
236
+ def test_nested_parallel_warnings(parent_backend, child_backend, expected):
237
+
238
+ # no warnings if inner_n_jobs=1
239
+ Parallel(n_jobs=2, backend=parent_backend)(
240
+ delayed(_assert_warning_nested)(
241
+ backend=child_backend, inner_n_jobs=1,
242
+ expected=False)
243
+ for _ in range(5))
244
+
245
+ # warnings if inner_n_jobs != 1 and expected
246
+ res = Parallel(n_jobs=2, backend=parent_backend)(
247
+ delayed(_assert_warning_nested)(
248
+ backend=child_backend, inner_n_jobs=2,
249
+ expected=expected)
250
+ for _ in range(5))
251
+
252
+ # warning handling is not thread-safe. One thread might see multiple
253
+ # warnings or no warning at all.
254
+ if parent_backend == "threading":
255
+ if IS_PYPY and not any(res):
256
+ # Related to joblib#1426, should be removed once it is solved.
257
+ pytest.xfail(reason="This test often fails in PyPy.")
258
+ assert any(res)
259
+ else:
260
+ assert all(res)
261
+
262
+
263
+ @with_multiprocessing
264
+ @parametrize('backend', ['loky', 'multiprocessing', 'threading'])
265
+ def test_background_thread_parallelism(backend):
266
+ is_run_parallel = [False]
267
+
268
+ def background_thread(is_run_parallel):
269
+ with warnings.catch_warnings(record=True) as warninfo:
270
+ Parallel(n_jobs=2)(
271
+ delayed(sleep)(.1) for _ in range(4))
272
+ print(len(warninfo))
273
+ is_run_parallel[0] = len(warninfo) == 0
274
+
275
+ t = threading.Thread(target=background_thread, args=(is_run_parallel,))
276
+ t.start()
277
+ t.join()
278
+ assert is_run_parallel[0]
279
+
280
+
281
+ def nested_loop(backend):
282
+ Parallel(n_jobs=2, backend=backend)(
283
+ delayed(square)(.01) for _ in range(2))
284
+
285
+
286
+ @parametrize('child_backend', BACKENDS)
287
+ @parametrize('parent_backend', BACKENDS)
288
+ def test_nested_loop(parent_backend, child_backend):
289
+ Parallel(n_jobs=2, backend=parent_backend)(
290
+ delayed(nested_loop)(child_backend) for _ in range(2))
291
+
292
+
293
+ def raise_exception(backend):
294
+ raise ValueError
295
+
296
+
297
+ @with_multiprocessing
298
+ def test_nested_loop_with_exception_with_loky():
299
+ with raises(ValueError):
300
+ with Parallel(n_jobs=2, backend="loky") as parallel:
301
+ parallel([delayed(nested_loop)("loky"),
302
+ delayed(raise_exception)("loky")])
303
+
304
+
305
+ def test_mutate_input_with_threads():
306
+ """Input is mutable when using the threading backend"""
307
+ q = Queue(maxsize=5)
308
+ Parallel(n_jobs=2, backend="threading")(
309
+ delayed(q.put)(1) for _ in range(5))
310
+ assert q.full()
311
+
312
+
313
+ @parametrize('n_jobs', [1, 2, 3])
314
+ def test_parallel_kwargs(n_jobs):
315
+ """Check the keyword argument processing of pmap."""
316
+ lst = range(10)
317
+ assert ([f(x, y=1) for x in lst] ==
318
+ Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst))
319
+
320
+
321
+ @parametrize('backend', PARALLEL_BACKENDS)
322
+ def test_parallel_as_context_manager(backend):
323
+ lst = range(10)
324
+ expected = [f(x, y=1) for x in lst]
325
+
326
+ with Parallel(n_jobs=4, backend=backend) as p:
327
+ # Internally a pool instance has been eagerly created and is managed
328
+ # via the context manager protocol
329
+ managed_backend = p._backend
330
+
331
+ # We make call with the managed parallel object several times inside
332
+ # the managed block:
333
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
334
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
335
+
336
+ # Those calls have all used the same pool instance:
337
+ if mp is not None:
338
+ assert get_workers(managed_backend) is get_workers(p._backend)
339
+
340
+ # As soon as we exit the context manager block, the pool is terminated and
341
+ # no longer referenced from the parallel object:
342
+ if mp is not None:
343
+ assert get_workers(p._backend) is None
344
+
345
+ # It's still possible to use the parallel instance in non-managed mode:
346
+ assert expected == p(delayed(f)(x, y=1) for x in lst)
347
+ if mp is not None:
348
+ assert get_workers(p._backend) is None
349
+
350
+
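The context-manager behaviour checked above is also the recommended way to amortize worker startup across several calls; a sketch of the usage pattern (not part of the test file):

    from joblib import Parallel, delayed

    # One managed pool is reused for both calls and terminated on exit.
    with Parallel(n_jobs=4) as p:
        first = p(delayed(abs)(x) for x in [-1, -2, -3])
        second = p(delayed(len)(s) for s in ["a", "bc"])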
351
+ @with_multiprocessing
352
+ def test_parallel_pickling():
353
+ """ Check that pmap captures the errors when it is passed an object
354
+ that cannot be pickled.
355
+ """
356
+ class UnpicklableObject(object):
357
+ def __reduce__(self):
358
+ raise RuntimeError('123')
359
+
360
+ with raises(PicklingError, match=r"the task to send"):
361
+ Parallel(n_jobs=2, backend='loky')(delayed(id)(
362
+ UnpicklableObject()) for _ in range(10))
363
+
364
+
365
+ @with_numpy
366
+ @with_multiprocessing
367
+ @parametrize('byteorder', ['<', '>', '='])
368
+ def test_parallel_byteorder_corruption(byteorder):
369
+
370
+ def inspect_byteorder(x):
371
+ return x, x.dtype.byteorder
372
+
373
+ x = np.arange(6).reshape((2, 3)).view(f'{byteorder}i4')
374
+
375
+ initial_np_byteorder = x.dtype.byteorder
376
+
377
+ result = Parallel(n_jobs=2, backend='loky')(
378
+ delayed(inspect_byteorder)(x) for _ in range(3)
379
+ )
380
+
381
+ for x_returned, byteorder_in_worker in result:
382
+ assert byteorder_in_worker == initial_np_byteorder
383
+ assert byteorder_in_worker == x_returned.dtype.byteorder
384
+ np.testing.assert_array_equal(x, x_returned)
385
+
386
+
387
+ @parametrize('backend', PARALLEL_BACKENDS)
388
+ def test_parallel_timeout_success(backend):
389
+ # Check that timeout isn't thrown when function is fast enough
390
+ assert len(Parallel(n_jobs=2, backend=backend, timeout=30)(
391
+ delayed(sleep)(0.001) for x in range(10))) == 10
392
+
393
+
394
+ @with_multiprocessing
395
+ @parametrize('backend', PARALLEL_BACKENDS)
396
+ def test_parallel_timeout_fail(backend):
397
+ # Check that timeout properly fails when function is too slow
398
+ with raises(TimeoutError):
399
+ Parallel(n_jobs=2, backend=backend, timeout=0.01)(
400
+ delayed(sleep)(10) for x in range(10))
401
+
402
+
403
+ @with_multiprocessing
404
+ @parametrize('backend', PROCESS_BACKENDS)
405
+ def test_error_capture(backend):
406
+ # Check that errors are captured, and that correct exceptions
407
+ # are raised.
408
+ if mp is not None:
409
+ with raises(ZeroDivisionError):
410
+ Parallel(n_jobs=2, backend=backend)(
411
+ [delayed(division)(x, y)
412
+ for x, y in zip((0, 1), (1, 0))])
413
+
414
+ with raises(KeyboardInterrupt):
415
+ Parallel(n_jobs=2, backend=backend)(
416
+ [delayed(interrupt_raiser)(x) for x in (1, 0)])
417
+
418
+ # Try again with the context manager API
419
+ with Parallel(n_jobs=2, backend=backend) as parallel:
420
+ assert get_workers(parallel._backend) is not None
421
+ original_workers = get_workers(parallel._backend)
422
+
423
+ with raises(ZeroDivisionError):
424
+ parallel([delayed(division)(x, y)
425
+ for x, y in zip((0, 1), (1, 0))])
426
+
427
+ # The managed pool should still be available and be in a working
428
+ # state despite the previously raised (and caught) exception
429
+ assert get_workers(parallel._backend) is not None
430
+
431
+ # The pool should have been interrupted and restarted:
432
+ assert get_workers(parallel._backend) is not original_workers
433
+
434
+ assert ([f(x, y=1) for x in range(10)] ==
435
+ parallel(delayed(f)(x, y=1) for x in range(10)))
436
+
437
+ original_workers = get_workers(parallel._backend)
438
+ with raises(KeyboardInterrupt):
439
+ parallel([delayed(interrupt_raiser)(x) for x in (1, 0)])
440
+
441
+ # The pool should still be available despite the exception
442
+ assert get_workers(parallel._backend) is not None
443
+
444
+ # The pool should have been interrupted and restarted:
445
+ assert get_workers(parallel._backend) is not original_workers
446
+
447
+ assert ([f(x, y=1) for x in range(10)] ==
448
+ parallel(delayed(f)(x, y=1) for x in range(10))), (
449
+ parallel._iterating, parallel.n_completed_tasks,
450
+ parallel.n_dispatched_tasks, parallel._aborting
451
+ )
452
+
453
+ # Check that the inner pool has been terminated when exiting the
454
+ # context manager
455
+ assert get_workers(parallel._backend) is None
456
+ else:
457
+ with raises(KeyboardInterrupt):
458
+ Parallel(n_jobs=2)(
459
+ [delayed(interrupt_raiser)(x) for x in (1, 0)])
460
+
461
+ # wrapped exceptions should inherit from the class of the original
462
+ # exception to make it easy to catch them
463
+ with raises(ZeroDivisionError):
464
+ Parallel(n_jobs=2)(
465
+ [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))])
466
+
467
+ with raises(MyExceptionWithFinickyInit):
468
+ Parallel(n_jobs=2, verbose=0)(
469
+ (delayed(exception_raiser)(i, custom_exception=True)
470
+ for i in range(30)))
471
+
472
+
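The error-propagation behaviour tested above is what makes worker failures catchable with ordinary except clauses; a minimal sketch:

    from joblib import Parallel, delayed

    def divide(x, y):
        return x / y

    try:
        Parallel(n_jobs=2)(delayed(divide)(1, d) for d in (1, 0))
    except ZeroDivisionError:
        # The exception raised in the worker is re-raised here with
        # its original type preserved.
        print("caught worker failure")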
473
+ @with_multiprocessing
474
+ @parametrize('backend', BACKENDS)
475
+ def test_error_in_task_iterator(backend):
476
+
477
+ def my_generator(raise_at=0):
478
+ for i in range(20):
479
+ if i == raise_at:
480
+ raise ValueError("Iterator Raising Error")
481
+ yield i
482
+
483
+ with Parallel(n_jobs=2, backend=backend) as p:
484
+ # The error is raised in the pre-dispatch phase
485
+ with raises(ValueError, match="Iterator Raising Error"):
486
+ p(delayed(square)(i) for i in my_generator(raise_at=0))
487
+
488
+ # The error is raised when dispatching a new task after the
489
+ # pre-dispatch (likely to happen in a different thread)
490
+ with raises(ValueError, match="Iterator Raising Error"):
491
+ p(delayed(square)(i) for i in my_generator(raise_at=5))
492
+
493
+ # Same, but raises long after the pre-dispatch phase
494
+ with raises(ValueError, match="Iterator Raising Error"):
495
+ p(delayed(square)(i) for i in my_generator(raise_at=19))
496
+
497
+
498
+ def consumer(queue, item):
499
+ queue.append('Consumed %s' % item)
500
+
501
+
502
+ @parametrize('backend', BACKENDS)
503
+ @parametrize('batch_size, expected_queue',
504
+ [(1, ['Produced 0', 'Consumed 0',
505
+ 'Produced 1', 'Consumed 1',
506
+ 'Produced 2', 'Consumed 2',
507
+ 'Produced 3', 'Consumed 3',
508
+ 'Produced 4', 'Consumed 4',
509
+ 'Produced 5', 'Consumed 5']),
510
+ (4, [ # First Batch
511
+ 'Produced 0', 'Produced 1', 'Produced 2', 'Produced 3',
512
+ 'Consumed 0', 'Consumed 1', 'Consumed 2', 'Consumed 3',
513
+ # Second batch
514
+ 'Produced 4', 'Produced 5', 'Consumed 4', 'Consumed 5'])])
515
+ def test_dispatch_one_job(backend, batch_size, expected_queue):
516
+ """ Test that with only one job, Parallel does act as a iterator.
517
+ """
518
+ queue = list()
519
+
520
+ def producer():
521
+ for i in range(6):
522
+ queue.append('Produced %i' % i)
523
+ yield i
524
+
525
+ Parallel(n_jobs=1, batch_size=batch_size, backend=backend)(
526
+ delayed(consumer)(queue, x) for x in producer())
527
+ assert queue == expected_queue
528
+ assert len(queue) == 12
529
+
530
+
531
+ @with_multiprocessing
532
+ @parametrize('backend', PARALLEL_BACKENDS)
533
+ def test_dispatch_multiprocessing(backend):
534
+ """ Check that using pre_dispatch Parallel does indeed dispatch items
535
+ lazily.
536
+ """
537
+ manager = mp.Manager()
538
+ queue = manager.list()
539
+
540
+ def producer():
541
+ for i in range(6):
542
+ queue.append('Produced %i' % i)
543
+ yield i
544
+
545
+ Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)(
546
+ delayed(consumer)(queue, 'any') for _ in producer())
547
+
548
+ queue_contents = list(queue)
549
+ assert queue_contents[0] == 'Produced 0'
550
+
551
+ # Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only
552
+ # after any of the first 3 jobs have completed.
553
+ first_consumption_index = queue_contents[:4].index('Consumed any')
554
+ assert first_consumption_index > -1
555
+
556
+ produced_3_index = queue_contents.index('Produced 3') # 4th task produced
557
+ assert produced_3_index > first_consumption_index
558
+
559
+ assert len(queue) == 12
560
+
561
+
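As the two dispatch tests above suggest, pre_dispatch caps how far ahead of the workers the input iterable is consumed; a hedged sketch of the knob (the value shown is just an example):

    from joblib import Parallel, delayed

    # Only about 2 * n_jobs tasks are materialized from the generator at
    # any time, keeping memory flat for long task streams.
    Parallel(n_jobs=2, pre_dispatch='2 * n_jobs')(
        delayed(str)(i) for i in range(10_000))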
562
+ def test_batching_auto_threading():
563
+ # batching='auto' with the threading backend leaves the effective batch
564
+ # size at 1 (no batching) as it has been found to never be beneficial with
565
+ # this low-overhead backend.
566
+
567
+ with Parallel(n_jobs=2, batch_size='auto', backend='threading') as p:
568
+ p(delayed(id)(i) for i in range(5000)) # many very fast tasks
569
+ assert p._backend.compute_batch_size() == 1
570
+
571
+
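Besides batch_size='auto', a fixed batch size can be forced; groups of tasks are then sent to a worker as one message, which amortizes dispatch overhead for many tiny tasks. A minimal sketch (the value 8 is arbitrary):

    from joblib import Parallel, delayed

    # Each worker receives chunks of 8 tasks instead of one at a time.
    Parallel(n_jobs=2, batch_size=8)(
        delayed(pow)(i, 2) for i in range(1000))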
572
+ @with_multiprocessing
573
+ @parametrize('backend', PROCESS_BACKENDS)
574
+ def test_batching_auto_subprocesses(backend):
575
+ with Parallel(n_jobs=2, batch_size='auto', backend=backend) as p:
576
+ p(delayed(id)(i) for i in range(5000)) # many very fast tasks
577
+
578
+ # It should be strictly larger than 1 but, as we don't want heisen
579
+ # failures on clogged CI worker environments, we stay safe and only check that
580
+ # it's a strictly positive number.
581
+ assert p._backend.compute_batch_size() > 0
582
+
583
+
584
+ def test_exception_dispatch():
585
+ """Make sure that exception raised during dispatch are indeed captured"""
586
+ with raises(ValueError):
587
+ Parallel(n_jobs=2, pre_dispatch=16, verbose=0)(
588
+ delayed(exception_raiser)(i) for i in range(30))
589
+
590
+
591
+ def nested_function_inner(i):
592
+ Parallel(n_jobs=2)(
593
+ delayed(exception_raiser)(j) for j in range(30))
594
+
595
+
596
+ def nested_function_outer(i):
597
+ Parallel(n_jobs=2)(
598
+ delayed(nested_function_inner)(j) for j in range(30))
599
+
600
+
601
+ @with_multiprocessing
602
+ @parametrize('backend', PARALLEL_BACKENDS)
603
+ @pytest.mark.xfail(reason="https://github.com/joblib/loky/pull/255")
604
+ def test_nested_exception_dispatch(backend):
605
+ """Ensure errors for nested joblib cases gets propagated
606
+
607
+ We rely on the Python 3 built-in __cause__ system that already
608
+ report this kind of information to the user.
609
+ """
610
+ with raises(ValueError) as excinfo:
611
+ Parallel(n_jobs=2, backend=backend)(
612
+ delayed(nested_function_outer)(i) for i in range(30))
613
+
614
+ # Check that important information such as function names are visible
615
+ # in the final error message reported to the user
616
+ report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb)
617
+ report = "".join(report_lines)
618
+ assert 'nested_function_outer' in report
619
+ assert 'nested_function_inner' in report
620
+ assert 'exception_raiser' in report
621
+
622
+ assert type(excinfo.value) is ValueError
623
+
624
+
625
+ class FakeParallelBackend(SequentialBackend):
626
+ """Pretends to run concurrently while running sequentially."""
627
+
628
+ def configure(self, n_jobs=1, parallel=None, **backend_args):
629
+ self.n_jobs = self.effective_n_jobs(n_jobs)
630
+ self.parallel = parallel
631
+ return n_jobs
632
+
633
+ def effective_n_jobs(self, n_jobs=1):
634
+ if n_jobs < 0:
635
+ n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
636
+ return n_jobs
637
+
638
+
639
+ def test_invalid_backend():
640
+ with raises(ValueError, match="Invalid backend:"):
641
+ Parallel(backend='unit-testing')
642
+
643
+ with raises(ValueError, match="Invalid backend:"):
644
+ with parallel_config(backend='unit-testing'):
645
+ pass
646
+
647
+ with raises(ValueError, match="Invalid backend:"):
648
+ with parallel_backend(backend='unit-testing'):
649
+ pass
650
+
651
+
652
+ @parametrize('backend', ALL_VALID_BACKENDS)
653
+ def test_invalid_njobs(backend):
654
+ with raises(ValueError) as excinfo:
655
+ Parallel(n_jobs=0, backend=backend)._initialize_backend()
656
+ assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value)
657
+
658
+ with raises(ValueError) as excinfo:
659
+ Parallel(n_jobs=0.5, backend=backend)._initialize_backend()
660
+ assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value)
661
+
662
+ with raises(ValueError) as excinfo:
663
+ Parallel(n_jobs="2.3", backend=backend)._initialize_backend()
664
+ assert "n_jobs could not be converted to int" in str(excinfo.value)
665
+
666
+ with raises(ValueError) as excinfo:
667
+ Parallel(n_jobs="invalid_str", backend=backend)._initialize_backend()
668
+ assert "n_jobs could not be converted to int" in str(excinfo.value)
669
+
670
+
671
+ @with_multiprocessing
672
+ @parametrize('backend', PARALLEL_BACKENDS)
673
+ @parametrize('n_jobs', ['2', 2.3, 2])
674
+ def test_njobs_converted_to_int(backend, n_jobs):
675
+ p = Parallel(n_jobs=n_jobs, backend=backend)
676
+ assert p._effective_n_jobs() == 2
677
+
678
+ res = p(delayed(square)(i) for i in range(10))
679
+ assert all(r == square(i) for i, r in enumerate(res))
680
+
681
+
682
+ def test_register_parallel_backend():
683
+ try:
684
+ register_parallel_backend("test_backend", FakeParallelBackend)
685
+ assert "test_backend" in BACKENDS
686
+ assert BACKENDS["test_backend"] == FakeParallelBackend
687
+ finally:
688
+ del BACKENDS["test_backend"]
689
+
690
+
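Outside the test suite, registration is how third-party backends plug in; a sketch assuming a hypothetical ThreadingBackend subclass:

    from joblib import Parallel, delayed, parallel_config
    from joblib import register_parallel_backend
    from joblib._parallel_backends import ThreadingBackend

    class VerboseThreadingBackend(ThreadingBackend):
        pass  # hypothetical customization point

    register_parallel_backend("verbose-threading", VerboseThreadingBackend)
    with parallel_config(backend="verbose-threading"):
        Parallel(n_jobs=2)(delayed(ord)(c) for c in "abc")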
691
+ def test_overwrite_default_backend():
692
+ assert _active_backend_type() == DefaultBackend
693
+ try:
694
+ register_parallel_backend("threading", BACKENDS["threading"],
695
+ make_default=True)
696
+ assert _active_backend_type() == ThreadingBackend
697
+ finally:
698
+ # Restore the global default manually
699
+ parallel.DEFAULT_BACKEND = DEFAULT_BACKEND
700
+ assert _active_backend_type() == DefaultBackend
701
+
702
+
703
+ @skipif(mp is not None, reason="Only without multiprocessing")
704
+ def test_backend_no_multiprocessing():
705
+ with warns(UserWarning,
706
+ match="joblib backend '.*' is not available on.*"):
707
+ Parallel(backend='loky')(delayed(square)(i) for i in range(3))
708
+
709
+ # The below should now work without problems
710
+ with parallel_config(backend='loky'):
711
+ Parallel()(delayed(square)(i) for i in range(3))
712
+
713
+
714
+ def check_backend_context_manager(context, backend_name):
715
+ with context(backend_name, n_jobs=3):
716
+ active_backend, active_n_jobs = parallel.get_active_backend()
717
+ assert active_n_jobs == 3
718
+ assert effective_n_jobs(3) == 3
719
+ p = Parallel()
720
+ assert p.n_jobs == 3
721
+ if backend_name == 'multiprocessing':
722
+ assert type(active_backend) is MultiprocessingBackend
723
+ assert type(p._backend) is MultiprocessingBackend
724
+ elif backend_name == 'loky':
725
+ assert type(active_backend) is LokyBackend
726
+ assert type(p._backend) is LokyBackend
727
+ elif backend_name == 'threading':
728
+ assert type(active_backend) is ThreadingBackend
729
+ assert type(p._backend) is ThreadingBackend
730
+ elif backend_name.startswith('test_'):
731
+ assert type(active_backend) is FakeParallelBackend
732
+ assert type(p._backend) is FakeParallelBackend
733
+
734
+
735
+ all_backends_for_context_manager = PARALLEL_BACKENDS[:]
736
+ all_backends_for_context_manager.extend(
737
+ ['test_backend_%d' % i for i in range(3)]
738
+ )
739
+
740
+
741
+ @with_multiprocessing
742
+ @parametrize('backend', all_backends_for_context_manager)
743
+ @parametrize('context', [parallel_backend, parallel_config])
744
+ def test_backend_context_manager(monkeypatch, backend, context):
745
+ if backend not in BACKENDS:
746
+ monkeypatch.setitem(BACKENDS, backend, FakeParallelBackend)
747
+
748
+ assert _active_backend_type() == DefaultBackend
749
+ # check that it is possible to switch parallel backends sequentially
750
+ check_backend_context_manager(context, backend)
751
+
752
+ # The default backend is restored
753
+ assert _active_backend_type() == DefaultBackend
754
+
755
+ # Check that context manager switching is thread safe:
756
+ Parallel(n_jobs=2, backend='threading')(
757
+ delayed(check_backend_context_manager)(context, b)
758
+ for b in all_backends_for_context_manager if not b)
759
+
760
+ # The default backend is again restored
761
+ assert _active_backend_type() == DefaultBackend
762
+
763
+
764
+ class ParameterizedParallelBackend(SequentialBackend):
765
+ """Pretends to run conncurrently while running sequentially."""
766
+
767
+ def __init__(self, param=None):
768
+ if param is None:
769
+ raise ValueError('param should not be None')
770
+ self.param = param
771
+
772
+
773
+ @parametrize("context", [parallel_config, parallel_backend])
774
+ def test_parameterized_backend_context_manager(monkeypatch, context):
775
+ monkeypatch.setitem(BACKENDS, 'param_backend',
776
+ ParameterizedParallelBackend)
777
+ assert _active_backend_type() == DefaultBackend
778
+
779
+ with context('param_backend', param=42, n_jobs=3):
780
+ active_backend, active_n_jobs = parallel.get_active_backend()
781
+ assert type(active_backend) is ParameterizedParallelBackend
782
+ assert active_backend.param == 42
783
+ assert active_n_jobs == 3
784
+ p = Parallel()
785
+ assert p.n_jobs == 3
786
+ assert p._backend is active_backend
787
+ results = p(delayed(sqrt)(i) for i in range(5))
788
+ assert results == [sqrt(i) for i in range(5)]
789
+
790
+ # The default backend is again restored
791
+ assert _active_backend_type() == DefaultBackend
792
+
793
+
794
+ @parametrize("context", [parallel_config, parallel_backend])
795
+ def test_directly_parameterized_backend_context_manager(context):
796
+ assert _active_backend_type() == DefaultBackend
797
+
798
+ # Check that it's possible to pass a backend instance directly,
799
+ # without registration
800
+ with context(ParameterizedParallelBackend(param=43), n_jobs=5):
801
+ active_backend, active_n_jobs = parallel.get_active_backend()
802
+ assert type(active_backend) is ParameterizedParallelBackend
803
+ assert active_backend.param == 43
804
+ assert active_n_jobs == 5
805
+ p = Parallel()
806
+ assert p.n_jobs == 5
807
+ assert p._backend is active_backend
808
+ results = p(delayed(sqrt)(i) for i in range(5))
809
+ assert results == [sqrt(i) for i in range(5)]
810
+
811
+ # The default backend is again restored
812
+ assert _active_backend_type() == DefaultBackend
813
+
814
+
815
+ def sleep_and_return_pid():
816
+ sleep(.1)
817
+ return os.getpid()
818
+
819
+
820
+ def get_nested_pids():
821
+ assert _active_backend_type() == ThreadingBackend
822
+ # Assert that the nested backend does not change the default number of
823
+ # jobs used in Parallel
824
+ assert Parallel()._effective_n_jobs() == 1
825
+
826
+ # Assert that the tasks are running only on one process
827
+ return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)()
828
+ for _ in range(2))
829
+
830
+
831
+ class MyBackend(joblib._parallel_backends.LokyBackend):
832
+ """Backend to test backward compatibility with older backends"""
833
+ def get_nested_backend(self, ):
834
+ # Older backends only return a backend, without n_jobs indications.
835
+ return super(MyBackend, self).get_nested_backend()[0]
836
+
837
+
838
+ register_parallel_backend('back_compat_backend', MyBackend)
839
+
840
+
841
+ @with_multiprocessing
842
+ @parametrize('backend', ['threading', 'loky', 'multiprocessing',
843
+ 'back_compat_backend'])
844
+ @parametrize("context", [parallel_config, parallel_backend])
845
+ def test_nested_backend_context_manager(context, backend):
846
+ # Check that by default, nested parallel calls will always use the
847
+ # ThreadingBackend
848
+
849
+ with context(backend):
850
+ pid_groups = Parallel(n_jobs=2)(
851
+ delayed(get_nested_pids)()
852
+ for _ in range(10)
853
+ )
854
+ for pid_group in pid_groups:
855
+ assert len(set(pid_group)) == 1
856
+
857
+
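The nesting rule exercised here (process-based outer level, threading fallback inside the workers) can be observed directly; a small sketch, not taken from the test file:

    from joblib import Parallel, delayed, parallel_config

    def inner(_):
        # Inside a worker process this nested call falls back to the
        # threading backend by default, avoiding process explosion.
        return Parallel(n_jobs=2)(delayed(abs)(i) for i in range(3))

    with parallel_config(backend="loky", n_jobs=2):
        Parallel()(delayed(inner)(i) for i in range(4))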
858
+ @with_multiprocessing
859
+ @parametrize('n_jobs', [2, -1, None])
860
+ @parametrize('backend', PARALLEL_BACKENDS)
861
+ @parametrize("context", [parallel_config, parallel_backend])
862
+ def test_nested_backend_in_sequential(backend, n_jobs, context):
863
+ # Check that by default, nested parallel calls will always use the
864
+ # ThreadingBackend
865
+
866
+ def check_nested_backend(expected_backend_type, expected_n_job):
867
+ # Assert that the sequential backend at top level, does not change the
868
+ # backend for nested calls.
869
+ assert _active_backend_type() == BACKENDS[expected_backend_type]
870
+
871
+ # Assert that the nested backend in SequentialBackend does not change
872
+ # the default number of jobs used in Parallel
873
+ expected_n_job = effective_n_jobs(expected_n_job)
874
+ assert Parallel()._effective_n_jobs() == expected_n_job
875
+
876
+ Parallel(n_jobs=1)(
877
+ delayed(check_nested_backend)(DEFAULT_BACKEND, 1)
878
+ for _ in range(10)
879
+ )
880
+
881
+ with context(backend, n_jobs=n_jobs):
882
+ Parallel(n_jobs=1)(
883
+ delayed(check_nested_backend)(backend, n_jobs)
884
+ for _ in range(10)
885
+ )
886
+
887
+
888
+ def check_nesting_level(context, inner_backend, expected_level):
889
+ with context(inner_backend) as ctx:
890
+ if context is parallel_config:
891
+ backend = ctx["backend"]
892
+ if context is parallel_backend:
893
+ backend = ctx[0]
894
+ assert backend.nesting_level == expected_level
895
+
896
+
897
+ @with_multiprocessing
898
+ @parametrize('outer_backend', PARALLEL_BACKENDS)
899
+ @parametrize('inner_backend', PARALLEL_BACKENDS)
900
+ @parametrize("context", [parallel_config, parallel_backend])
901
+ def test_backend_nesting_level(context, outer_backend, inner_backend):
902
+ # Check that the nesting level for the backend is correctly set
903
+ check_nesting_level(context, outer_backend, 0)
904
+
905
+ Parallel(n_jobs=2, backend=outer_backend)(
906
+ delayed(check_nesting_level)(context, inner_backend, 1)
907
+ for _ in range(10)
908
+ )
909
+
910
+ with context(inner_backend, n_jobs=2):
911
+ Parallel()(delayed(check_nesting_level)(context, inner_backend, 1)
912
+ for _ in range(10))
913
+
914
+
915
+ @with_multiprocessing
916
+ @parametrize("context", [parallel_config, parallel_backend])
917
+ @parametrize('with_retrieve_callback', [True, False])
918
+ def test_retrieval_context(context, with_retrieve_callback):
919
+ import contextlib
920
+
921
+ class MyBackend(ThreadingBackend):
922
+ i = 0
923
+ supports_retrieve_callback = with_retrieve_callback
924
+
925
+ @contextlib.contextmanager
926
+ def retrieval_context(self):
927
+ self.i += 1
928
+ yield
929
+
930
+ register_parallel_backend("retrieval", MyBackend)
931
+
932
+ def nested_call(n):
933
+ return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n))
934
+
935
+ with context("retrieval") as ctx:
936
+ Parallel(n_jobs=2)(
937
+ delayed(nested_call)(i)
938
+ for i in range(5)
939
+ )
940
+ if context is parallel_config:
941
+ assert ctx["backend"].i == 1
942
+ if context is parallel_backend:
943
+ assert ctx[0].i == 1
944
+
945
+
946
+ ###############################################################################
947
+ # Test helpers
948
+
949
+ @parametrize('batch_size', [0, -1, 1.42])
950
+ def test_invalid_batch_size(batch_size):
951
+ with raises(ValueError):
952
+ Parallel(batch_size=batch_size)
953
+
954
+
955
+ @parametrize('n_tasks, n_jobs, pre_dispatch, batch_size',
956
+ [(2, 2, 'all', 'auto'),
957
+ (2, 2, 'n_jobs', 'auto'),
958
+ (10, 2, 'n_jobs', 'auto'),
959
+ (517, 2, 'n_jobs', 'auto'),
960
+ (10, 2, 'n_jobs', 'auto'),
961
+ (10, 4, 'n_jobs', 'auto'),
962
+ (200, 12, 'n_jobs', 'auto'),
963
+ (25, 12, '2 * n_jobs', 1),
964
+ (250, 12, 'all', 1),
965
+ (250, 12, '2 * n_jobs', 7),
966
+ (200, 12, '2 * n_jobs', 'auto')])
967
+ def test_dispatch_race_condition(n_tasks, n_jobs, pre_dispatch, batch_size):
968
+ # Check that using (async-)dispatch does not yield a race condition on the
969
+ # iterable generator that is not thread-safe natively.
970
+ # This is a non-regression test for the "Pool seems closed" class of error
971
+ params = {'n_jobs': n_jobs, 'pre_dispatch': pre_dispatch,
972
+ 'batch_size': batch_size}
973
+ expected = [square(i) for i in range(n_tasks)]
974
+ results = Parallel(**params)(delayed(square)(i) for i in range(n_tasks))
975
+ assert results == expected
976
+
977
+
978
+ @with_multiprocessing
979
+ def test_default_mp_context():
980
+ mp_start_method = mp.get_start_method()
981
+ p = Parallel(n_jobs=2, backend='multiprocessing')
982
+ context = p._backend_args.get('context')
983
+ start_method = context.get_start_method()
984
+ assert start_method == mp_start_method
985
+
986
+
987
+ @with_numpy
988
+ @with_multiprocessing
989
+ @parametrize('backend', PROCESS_BACKENDS)
990
+ def test_no_blas_crash_or_freeze_with_subprocesses(backend):
991
+ if backend == 'multiprocessing':
992
+ # Use the spawn backend that is both robust and available on all
993
+ # platforms
994
+ backend = mp.get_context('spawn')
995
+
996
+ # Check that on recent Python versions, the 'spawn' start method can make
997
+ # it possible to use multiprocessing in conjunction with any BLAS
998
+ # implementation that happens to be used by numpy without causing a freeze
999
+ # or a crash
1000
+ rng = np.random.RandomState(42)
1001
+
1002
+ # call BLAS DGEMM to force the initialization of the internal thread-pool
1003
+ # in the main process
1004
+ a = rng.randn(1000, 1000)
1005
+ np.dot(a, a.T)
1006
+
1007
+ # check that the internal BLAS thread-pool is not in an inconsistent state
1008
+ # in the worker processes managed by multiprocessing
1009
+ Parallel(n_jobs=2, backend=backend)(
1010
+ delayed(np.dot)(a, a.T) for i in range(2))
1011
+
1012
+
1013
+ UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN = """\
1014
+ from joblib import Parallel, delayed
1015
+
1016
+ def square(x):
1017
+ return x ** 2
1018
+
1019
+ backend = "{}"
1020
+ if backend == "spawn":
1021
+ from multiprocessing import get_context
1022
+ backend = get_context(backend)
1023
+
1024
+ print(Parallel(n_jobs=2, backend=backend)(
1025
+ delayed(square)(i) for i in range(5)))
1026
+ """
1027
+
1028
+
1029
+ @with_multiprocessing
1030
+ @parametrize('backend', PROCESS_BACKENDS)
1031
+ def test_parallel_with_interactively_defined_functions(backend):
1032
+ # When using the "-c" flag, interactive functions defined in __main__
1033
+ # should work with any backend.
1034
+ if backend == "multiprocessing" and mp.get_start_method() != "fork":
1035
+ pytest.skip("Require fork start method to use interactively defined "
1036
+ "functions with multiprocessing.")
1037
+ code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN.format(backend)
1038
+ check_subprocess_call(
1039
+ [sys.executable, '-c', code], timeout=10,
1040
+ stdout_regex=r'\[0, 1, 4, 9, 16\]')
1041
+
1042
+
1043
+ UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN = """\
1044
+ import sys
1045
+ # Make sure that joblib is importable in the subprocess launching this
1046
+ # script. This is needed in case we run the tests from the joblib root
1047
+ # folder without having installed joblib
1048
+ sys.path.insert(0, {joblib_root_folder!r})
1049
+
1050
+ from joblib import Parallel, delayed
1051
+
1052
+ def run(f, x):
1053
+ return f(x)
1054
+
1055
+ {define_func}
1056
+
1057
+ if __name__ == "__main__":
1058
+ backend = "{backend}"
1059
+ if backend == "spawn":
1060
+ from multiprocessing import get_context
1061
+ backend = get_context(backend)
1062
+
1063
+ callable_position = "{callable_position}"
1064
+ if callable_position == "delayed":
1065
+ print(Parallel(n_jobs=2, backend=backend)(
1066
+ delayed(square)(i) for i in range(5)))
1067
+ elif callable_position == "args":
1068
+ print(Parallel(n_jobs=2, backend=backend)(
1069
+ delayed(run)(square, i) for i in range(5)))
1070
+ else:
1071
+ print(Parallel(n_jobs=2, backend=backend)(
1072
+ delayed(run)(f=square, x=i) for i in range(5)))
1073
+ """
1074
+
1075
+ SQUARE_MAIN = """\
1076
+ def square(x):
1077
+ return x ** 2
1078
+ """
1079
+ SQUARE_LOCAL = """\
1080
+ def gen_square():
1081
+ def square(x):
1082
+ return x ** 2
1083
+ return square
1084
+ square = gen_square()
1085
+ """
1086
+ SQUARE_LAMBDA = """\
1087
+ square = lambda x: x ** 2
1088
+ """
1089
+
1090
+
1091
+ @with_multiprocessing
1092
+ @parametrize('backend', PROCESS_BACKENDS + ([] if mp is None else ['spawn']))
1093
+ @parametrize('define_func', [SQUARE_MAIN, SQUARE_LOCAL, SQUARE_LAMBDA])
1094
+ @parametrize('callable_position', ['delayed', 'args', 'kwargs'])
1095
+ def test_parallel_with_unpicklable_functions_in_args(
1096
+ backend, define_func, callable_position, tmpdir):
1097
+ if backend in ['multiprocessing', 'spawn'] and (
1098
+ define_func != SQUARE_MAIN or sys.platform == "win32"):
1099
+ pytest.skip("Not picklable with pickle")
1100
+ code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN.format(
1101
+ define_func=define_func, backend=backend,
1102
+ callable_position=callable_position,
1103
+ joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__)))
1104
+ code_file = tmpdir.join("unpicklable_func_script.py")
1105
+ code_file.write(code)
1106
+ check_subprocess_call(
1107
+ [sys.executable, code_file.strpath], timeout=10,
1108
+ stdout_regex=r'\[0, 1, 4, 9, 16\]')
1109
+
1110
+
1111
+ INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT = """\
1112
+ import sys
1113
+ import faulthandler
1114
+ # Make sure that joblib is importable in the subprocess launching this
1115
+ # script. This is needed in case we run the tests from the joblib root
1116
+ # folder without having installed joblib
1117
+ sys.path.insert(0, {joblib_root_folder!r})
1118
+
1119
+ from joblib import Parallel, delayed
1120
+ from functools import partial
1121
+
1122
+ class MyClass:
1123
+ '''Class defined in the __main__ namespace'''
1124
+ def __init__(self, value):
1125
+ self.value = value
1126
+
1127
+
1128
+ def square(x, ignored=None, ignored2=None):
1129
+ '''Function defined in the __main__ namespace'''
1130
+ return x.value ** 2
1131
+
1132
+
1133
+ square2 = partial(square, ignored2='something')
1134
+
1135
+ # Here, we do not need the `if __name__ == "__main__":` safeguard when
1136
+ # using the default `loky` backend (even on Windows).
1137
+
1138
+ # To make debugging easier
1139
+ faulthandler.dump_traceback_later(30, exit=True)
1140
+
1141
+ # The following baroque function call is meant to check that joblib
1142
+ # introspection rightfully uses cloudpickle instead of the (faster) pickle
1143
+ # module of the standard library when necessary. In particular cloudpickle is
1144
+ # necessary for functions and instances of classes interactively defined in the
1145
+ # __main__ module.
1146
+
1147
+ print(Parallel(backend="loky", n_jobs=2)(
1148
+ delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))])
1149
+ for i in range(5)
1150
+ ))
1151
+ """.format(joblib_root_folder=os.path.dirname(
1152
+ os.path.dirname(joblib.__file__)))
1153
+
1154
+
1155
+ @with_multiprocessing
1156
+ def test_parallel_with_interactively_defined_functions_loky(tmpdir):
1157
+ # loky accepts interactive functions defined in __main__ and does not
1158
+ # require if __name__ == '__main__' even when the __main__ module is
1159
+ # defined by the result of the execution of a filesystem script.
1160
+ script = tmpdir.join('joblib_interactively_defined_function.py')
1161
+ script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT)
1162
+ check_subprocess_call(
1163
+ [sys.executable, script.strpath],
1164
+ stdout_regex=r'\[0, 1, 4, 9, 16\]',
1165
+ timeout=None, # rely on faulthandler to kill the process
1166
+ )
1167
+
1168
+
1169
+ INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\
1170
+ import sys
1171
+ # Make sure that joblib is importable in the subprocess launching this
1172
+ # script. This is needed in case we run the tests from the joblib root
1173
+ # folder without having installed joblib
1174
+ sys.path.insert(0, {joblib_root_folder!r})
1175
+
1176
+ from joblib import Parallel, delayed, hash
1177
+ import multiprocessing as mp
1178
+ mp.util.log_to_stderr(5)
1179
+
1180
+ class MyList(list):
1181
+ '''MyList is interactively defined but MyList.append is a built-in'''
1182
+ def __hash__(self):
1183
+ # XXX: workaround limitation in cloudpickle
1184
+ return hash(self).__hash__()
1185
+
1186
+ l = MyList()
1187
+
1188
+ print(Parallel(backend="loky", n_jobs=2)(
1189
+ delayed(l.append)(i) for i in range(3)
1190
+ ))
1191
+ """.format(joblib_root_folder=os.path.dirname(
1192
+ os.path.dirname(joblib.__file__)))
1193
+
1194
+
1195
+ @with_multiprocessing
1196
+ def test_parallel_with_interactively_defined_bound_method_loky(tmpdir):
1197
+ script = tmpdir.join('joblib_interactive_bound_method_script.py')
1198
+ script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT)
1199
+ check_subprocess_call([sys.executable, script.strpath],
1200
+ stdout_regex=r'\[None, None, None\]',
1201
+ stderr_regex=r'LokyProcess',
1202
+ timeout=15)
1203
+
1204
+
1205
+ def test_parallel_with_exhausted_iterator():
1206
+ exhausted_iterator = iter([])
1207
+ assert Parallel(n_jobs=2)(exhausted_iterator) == []
1208
+
1209
+
1210
+ def _cleanup_worker():
1211
+ """Helper function to force gc in each worker."""
1212
+ force_gc_pypy()
1213
+ time.sleep(.1)
1214
+
1215
+
1216
+ def check_memmap(a):
1217
+ if not isinstance(a, np.memmap):
1218
+ raise TypeError('Expected np.memmap instance, got %r'
1219
+ % type(a))
1220
+ return a.copy() # return a regular array instead of a memmap
1221
+
1222
+
1223
+ @with_numpy
1224
+ @with_multiprocessing
1225
+ @parametrize('backend', PROCESS_BACKENDS)
1226
+ def test_auto_memmap_on_arrays_from_generator(backend):
1227
+ # Non-regression test for a problem with a bad interaction between the
1228
+ # GC collecting arrays recently created during iteration inside the
1229
+ # parallel dispatch loop and the auto-memmap feature of Parallel.
1230
+ # See: https://github.com/joblib/joblib/pull/294
1231
+ def generate_arrays(n):
1232
+ for i in range(n):
1233
+ yield np.ones(10, dtype=np.float32) * i
1234
+ # Use max_nbytes=1 to force the use of memory-mapping even for small
1235
+ # arrays
1236
+ results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)(
1237
+ delayed(check_memmap)(a) for a in generate_arrays(100))
1238
+ for result, expected in zip(results, generate_arrays(len(results))):
1239
+ np.testing.assert_array_equal(expected, result)
1240
+
1241
+ # Second call to force loky to adapt the executor by growing the number
1242
+ # of worker processes. This is a non-regression test for:
1243
+ # https://github.com/joblib/joblib/issues/629.
1244
+ results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)(
1245
+ delayed(check_memmap)(a) for a in generate_arrays(100))
1246
+ for result, expected in zip(results, generate_arrays(len(results))):
1247
+ np.testing.assert_array_equal(expected, result)
1248
+
1249
+
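The max_nbytes threshold used above to force memory-mapping is normally left at its default ('1M'); a sketch of the intended production usage (the array shape is arbitrary):

    import numpy as np
    from joblib import Parallel, delayed

    data = np.ones((2000, 2000))  # ~32 MB, above the default threshold

    # Large inputs are dumped once to a memmap and shared with the
    # worker processes instead of being pickled for every task.
    Parallel(n_jobs=2, max_nbytes='1M')(
        delayed(np.sum)(data) for _ in range(4))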
1250
+ def identity(arg):
1251
+ return arg
1252
+
1253
+
1254
+ @with_numpy
1255
+ @with_multiprocessing
1256
+ def test_memmap_with_big_offset(tmpdir):
1257
+ fname = tmpdir.join('test.mmap').strpath
1258
+ size = mmap.ALLOCATIONGRANULARITY
1259
+ obj = [np.zeros(size, dtype='uint8'), np.ones(size, dtype='uint8')]
1260
+ dump(obj, fname)
1261
+ memmap = load(fname, mmap_mode='r')
1262
+ result, = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0])
1263
+ assert isinstance(memmap[1], np.memmap)
1264
+ assert memmap[1].offset > size
1265
+ np.testing.assert_array_equal(obj, result)
1266
+
1267
+
1268
+ def test_warning_about_timeout_not_supported_by_backend():
1269
+ with warnings.catch_warnings(record=True) as warninfo:
1270
+ Parallel(n_jobs=1, timeout=1)(delayed(square)(i) for i in range(50))
1271
+ assert len(warninfo) == 1
1272
+ w = warninfo[0]
1273
+ assert isinstance(w.message, UserWarning)
1274
+ assert str(w.message) == (
1275
+ "The backend class 'SequentialBackend' does not support timeout. "
1276
+ "You have set 'timeout=1' in Parallel but the 'timeout' parameter "
1277
+ "will not be used.")
1278
+
1279
+
1280
+ def set_list_value(input_list, index, value):
1281
+ input_list[index] = value
1282
+ return value
1283
+
1284
+
1285
+ @pytest.mark.parametrize('n_jobs', [1, 2, 4])
1286
+ def test_parallel_return_order_with_return_as_generator_parameter(n_jobs):
1287
+ # This test inserts values in a list in some expected order
1288
+ # in sequential computing, and then checks that this order has been
1289
+ # respected by the Parallel output generator.
1290
+ input_list = [0] * 5
1291
+ result = Parallel(n_jobs=n_jobs, return_as="generator",
1292
+ backend='threading')(
1293
+ delayed(set_list_value)(input_list, i, i) for i in range(5))
1294
+
1295
+ # Ensure that all the tasks are completed before checking the result
1296
+ result = list(result)
1297
+
1298
+ assert all(v == r for v, r in zip(input_list, result))
1299
+
1300
+
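For readers unfamiliar with the return_as options tested here and below: "generator" yields results lazily in submission order, while "generator_unordered" yields them as they complete. A minimal sketch:

    from joblib import Parallel, delayed

    gen = Parallel(n_jobs=2, return_as="generator")(
        delayed(abs)(x) for x in range(4))
    # Consumption can start before all tasks have finished.
    for value in gen:
        print(value)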
1301
+ def _sqrt_with_delay(e, delay):
1302
+ if delay:
1303
+ sleep(30)
1304
+ return sqrt(e)
1305
+
1306
+
1307
+ def _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
1308
+ # This test submits 10 tasks, but the second task is super slow. This test
1309
+ # checks that the 9 other tasks return before the slow task is done, when
1310
+ # the `return_as` parameter is set to `'generator_unordered'`
1311
+ result = Parallel(n_jobs=n_jobs, return_as="generator_unordered",
1312
+ backend=backend)(
1313
+ delayed(_sqrt_with_delay)(i**2, (i == 1)) for i in range(10))
1314
+
1315
+ quickly_returned = sorted(next(result) for _ in range(9))
1316
+
1317
+ expected_quickly_returned = [0] + list(range(2, 10))
1318
+
1319
+ assert all(
1320
+ v == r for v, r in zip(expected_quickly_returned, quickly_returned)
1321
+ )
1322
+
1323
+ del result
1324
+ force_gc_pypy()
1325
+
1326
+
1327
+ @pytest.mark.parametrize('n_jobs', [2, 4])
1328
+ # NB: for this test to work, the backend must be allowed to process tasks
1329
+ # concurrently, so at least two jobs with a non-sequential backend are
1330
+ # mandatory.
1331
+ @with_multiprocessing
1332
+ @parametrize('backend', set(RETURN_GENERATOR_BACKENDS) - {"sequential"})
1333
+ def test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
1334
+ _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs)
1335
+
1336
+
1337
+ @pytest.mark.parametrize('n_jobs', [2, -1])
1338
+ @parametrize("context", [parallel_config, parallel_backend])
1339
+ @skipif(distributed is None, reason='This test requires dask')
1340
+ def test_parallel_unordered_generator_returns_fastest_first_with_dask(
1341
+ n_jobs, context
1342
+ ):
1343
+ with distributed.Client(
1344
+ n_workers=2, threads_per_worker=2
1345
+ ), context("dask"):
1346
+ _test_parallel_unordered_generator_returns_fastest_first(None, n_jobs)
1347
+
1348
+
1349
+ @parametrize('backend', ALL_VALID_BACKENDS)
1350
+ @parametrize('n_jobs', [1, 2, -2, -1])
1351
+ def test_abort_backend(n_jobs, backend):
1352
+ delays = ["a"] + [10] * 100
1353
+ with raises(TypeError):
1354
+ t_start = time.time()
1355
+ Parallel(n_jobs=n_jobs, backend=backend)(
1356
+ delayed(time.sleep)(i) for i in delays)
1357
+ dt = time.time() - t_start
1358
+ assert dt < 20
1359
+
1360
+
1361
+ def get_large_object(arg):
1362
+ result = np.ones(int(5 * 1e5), dtype=bool)
1363
+ result[0] = False
1364
+ return result
1365
+
1366
+
1367
+ def _test_deadlock_with_generator(backend, return_as, n_jobs):
1368
+ # Non-regression test for a race condition in the backends when the pickler
1369
+ # is delayed by a large object.
1370
+ with Parallel(n_jobs=n_jobs, backend=backend,
1371
+ return_as=return_as) as parallel:
1372
+ result = parallel(delayed(get_large_object)(i) for i in range(10))
1373
+ next(result)
1374
+ next(result)
1375
+ del result
1376
+ # The gc in pypy can be delayed. Force it to make sure this test does
1377
+ # not cause timeout on the CI.
1378
+ force_gc_pypy()
1379
+
1380
+
1381
+ @with_numpy
1382
+ @parametrize('backend', RETURN_GENERATOR_BACKENDS)
1383
+ @parametrize('return_as', ["generator", "generator_unordered"])
1384
+ @parametrize('n_jobs', [1, 2, -2, -1])
1385
+ def test_deadlock_with_generator(backend, return_as, n_jobs):
1386
+ _test_deadlock_with_generator(backend, return_as, n_jobs)
1387
+
1388
+
1389
+ @with_numpy
1390
+ @pytest.mark.parametrize('n_jobs', [2, -1])
1391
+ @parametrize('return_as', ["generator", "generator_unordered"])
1392
+ @parametrize("context", [parallel_config, parallel_backend])
1393
+ @skipif(distributed is None, reason='This test requires dask')
1394
+ def test_deadlock_with_generator_and_dask(context, return_as, n_jobs):
1395
+ with distributed.Client(
1396
+ n_workers=2, threads_per_worker=2
1397
+ ), context("dask"):
1398
+ _test_deadlock_with_generator(None, return_as, n_jobs)
1399
+
1400
+
1401
+ @parametrize('backend', RETURN_GENERATOR_BACKENDS)
+ @parametrize('return_as', ["generator", "generator_unordered"])
+ @parametrize('n_jobs', [1, 2, -2, -1])
+ def test_multiple_generator_call(backend, return_as, n_jobs):
+     # Non-regression test that ensures the dispatch of the tasks starts
+     # immediately when Parallel.__call__ is called. This test relies on the
+     # assumption that only one generator can be submitted at a time.
+     with raises(RuntimeError,
+                 match="This Parallel instance is already running"):
+         parallel = Parallel(n_jobs, backend=backend, return_as=return_as)
+         g = parallel(delayed(sleep)(1) for _ in range(10))  # noqa: F841
+         t_start = time.time()
+         gen2 = parallel(delayed(id)(i) for i in range(100))  # noqa: F841
+
+     # Make sure that the error is raised quickly
+     assert time.time() - t_start < 2, (
+         "The error should be raised immediately when submitting a new task "
+         "but it took more than 2s."
+     )
+
+     del g
+     # The gc in pypy can be delayed. Force it to make sure this test does not
+     # cause timeout on the CI.
+     force_gc_pypy()
+
+
+ @parametrize('backend', RETURN_GENERATOR_BACKENDS)
+ @parametrize('return_as', ["generator", "generator_unordered"])
+ @parametrize('n_jobs', [1, 2, -2, -1])
+ def test_multiple_generator_call_managed(backend, return_as, n_jobs):
+     # Non-regression test that ensures the dispatch of the tasks starts
+     # immediately when Parallel.__call__ is called. This test relies on the
+     # assumption that only one generator can be submitted at a time.
+     with Parallel(n_jobs, backend=backend,
+                   return_as=return_as) as parallel:
+         g = parallel(delayed(sleep)(10) for _ in range(10))  # noqa: F841
+         t_start = time.time()
+         with raises(RuntimeError,
+                     match="This Parallel instance is already running"):
+             g2 = parallel(delayed(id)(i) for i in range(100))  # noqa: F841
+
+         # Make sure that the error is raised quickly
+         assert time.time() - t_start < 2, (
+             "The error should be raised immediately when submitting a new task "
+             "but it took more than 2s."
+         )
+
+         # The gc in pypy can be delayed. Force it to make sure this test does
+         # not cause timeout on the CI.
+         del g
+         force_gc_pypy()
+
+
+ @parametrize('backend', RETURN_GENERATOR_BACKENDS)
+ @parametrize('return_as_1', ["generator", "generator_unordered"])
+ @parametrize('return_as_2', ["generator", "generator_unordered"])
+ @parametrize('n_jobs', [1, 2, -2, -1])
+ def test_multiple_generator_call_separated(
+         backend, return_as_1, return_as_2, n_jobs
+ ):
+     # Check that for separated Parallel, both tasks are correctly returned.
+     g = Parallel(n_jobs, backend=backend, return_as=return_as_1)(
+         delayed(sqrt)(i ** 2) for i in range(10)
+     )
+     g2 = Parallel(n_jobs, backend=backend, return_as=return_as_2)(
+         delayed(sqrt)(i ** 2) for i in range(10, 20)
+     )
+
+     if return_as_1 == "generator_unordered":
+         g = sorted(g)
+
+     if return_as_2 == "generator_unordered":
+         g2 = sorted(g2)
+
+     assert all(res == i for res, i in zip(g, range(10)))
+     assert all(res == i for res, i in zip(g2, range(10, 20)))
+
+
+ @parametrize('backend, error', [
+     ('loky', True),
+     ('threading', False),
+     ('sequential', False),
+ ])
+ @parametrize('return_as_1', ["generator", "generator_unordered"])
+ @parametrize('return_as_2', ["generator", "generator_unordered"])
+ def test_multiple_generator_call_separated_gc(
+         backend, return_as_1, return_as_2, error
+ ):
+     if (backend == 'loky') and (mp is None):
+         pytest.skip("Requires multiprocessing")
+
+     # Check that in loky, only one call can be run at a time with
+     # a single executor.
+     parallel = Parallel(2, backend=backend, return_as=return_as_1)
+     g = parallel(delayed(sleep)(10) for i in range(10))
+     g_wr = weakref.finalize(g, lambda: print("Generator collected"))
+     ctx = (
+         raises(RuntimeError, match="The executor underlying Parallel")
+         if error else nullcontext()
+     )
+     with ctx:
+         # For loky, this call will raise an error as the gc of the previous
+         # generator will shutdown the shared executor.
+         # For the other backends, as the worker pools are not shared between
+         # the two calls, this should proceed correctly.
+         t_start = time.time()
+         g = Parallel(2, backend=backend, return_as=return_as_2)(
+             delayed(sqrt)(i ** 2) for i in range(10, 20)
+         )
+
+         # The gc in pypy can be delayed. Force it to test the behavior when it
+         # will eventually be collected.
+         force_gc_pypy()
+
+         if return_as_2 == "generator_unordered":
+             g = sorted(g)
+
+         assert all(res == i for res, i in zip(g, range(10, 20)))
+
+         assert time.time() - t_start < 5
+
+     # Make sure that the computations are stopped for the gc'ed generator
+     retry = 0
+     while g_wr.alive and retry < 3:
+         retry += 1
+         time.sleep(.5)
+     assert time.time() - t_start < 5
+
+     if parallel._effective_n_jobs() != 1:
+         # check that the first parallel object is aborting (the final _aborted
+         # state might be delayed).
+         assert parallel._aborting
+
+
+ @with_numpy
+ @with_multiprocessing
+ @parametrize('backend', PROCESS_BACKENDS)
+ def test_memmapping_leaks(backend, tmpdir):
+     # Non-regression test for memmapping backends. Ensure that the data
+     # does not stay too long in memory
+     tmpdir = tmpdir.strpath
+
+     # Use max_nbytes=1 to force the use of memory-mapping even for small
+     # arrays
+     with Parallel(n_jobs=2, max_nbytes=1, backend=backend,
+                   temp_folder=tmpdir) as p:
+         p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+
+         # The memmap folder should not be cleaned in the context scope
+         assert len(os.listdir(tmpdir)) > 0
+
+         # Cleaning of the memmap folder is triggered by the garbage
+         # collection. With pypy the garbage collection has been observed to be
+         # delayed, sometimes up until the shutdown of the interpreter. This
+         # cleanup job executed in the worker ensures that it's triggered
+         # immediately.
+         p(delayed(_cleanup_worker)() for _ in range(2))
+
+     # Make sure that the shared memory is cleaned at the end when we exit
+     # the context
+     for _ in range(100):
+         if not os.listdir(tmpdir):
+             break
+         sleep(.1)
+     else:
+         raise AssertionError('temporary directory of Parallel was not removed')
+
+     # Make sure that the shared memory is cleaned at the end of a call
+     p = Parallel(n_jobs=2, max_nbytes=1, backend=backend)
+     p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+     p(delayed(_cleanup_worker)() for _ in range(2))
+
+     for _ in range(100):
+         if not os.listdir(tmpdir):
+             break
+         sleep(.1)
+     else:
+         raise AssertionError('temporary directory of Parallel was not removed')
+
+
+ @parametrize('backend',
+              ([None, 'threading'] if mp is None
+               else [None, 'loky', 'threading'])
+              )
+ def test_lambda_expression(backend):
+     # cloudpickle is used to pickle delayed callables
+     results = Parallel(n_jobs=2, backend=backend)(
+         delayed(lambda x: x ** 2)(i) for i in range(10))
+     assert results == [i ** 2 for i in range(10)]
+
+
+ @with_multiprocessing
+ @parametrize('backend', PROCESS_BACKENDS)
+ def test_backend_batch_statistics_reset(backend):
+     """Test that a parallel backend correctly resets its batch statistics."""
+     n_jobs = 2
+     n_inputs = 500
+     task_time = 2. / n_inputs
+
+     p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend)
+     p(delayed(time.sleep)(task_time) for i in range(n_inputs))
+     assert (p._backend._effective_batch_size ==
+             p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
+     assert (p._backend._smoothed_batch_duration ==
+             p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
+
+     p(delayed(time.sleep)(task_time) for i in range(n_inputs))
+     assert (p._backend._effective_batch_size ==
+             p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE)
+     assert (p._backend._smoothed_batch_duration ==
+             p._backend._DEFAULT_SMOOTHED_BATCH_DURATION)
+
+
+ @with_multiprocessing
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_backend_hinting_and_constraints(context):
+     for n_jobs in [1, 2, -1]:
+         assert type(Parallel(n_jobs=n_jobs)._backend) is DefaultBackend
+
+         p = Parallel(n_jobs=n_jobs, prefer='threads')
+         assert type(p._backend) is ThreadingBackend
+
+         p = Parallel(n_jobs=n_jobs, prefer='processes')
+         assert type(p._backend) is DefaultBackend
+
+         p = Parallel(n_jobs=n_jobs, require='sharedmem')
+         assert type(p._backend) is ThreadingBackend
+
+     # Explicit backend selection can override backend hinting although it
+     # is useless to pass a hint when selecting a backend.
+     p = Parallel(n_jobs=2, backend='loky', prefer='threads')
+     assert type(p._backend) is LokyBackend
+
+     with context('loky', n_jobs=2):
+         # Explicit backend selection by the user with the context manager
+         # should be respected when combined with backend hints only.
+         p = Parallel(prefer='threads')
+         assert type(p._backend) is LokyBackend
+         assert p.n_jobs == 2
+
+     with context('loky', n_jobs=2):
+         # Locally hard-coded n_jobs value is respected.
+         p = Parallel(n_jobs=3, prefer='threads')
+         assert type(p._backend) is LokyBackend
+         assert p.n_jobs == 3
+
+     with context('loky', n_jobs=2):
+         # Explicit backend selection by the user with the context manager
+         # should be ignored when the Parallel call has hard constraints.
+         # In this case, the default backend that supports shared mem is
+         # used and the default number of processes is used.
+         p = Parallel(require='sharedmem')
+         assert type(p._backend) is ThreadingBackend
+         assert p.n_jobs == 1
+
+     with context('loky', n_jobs=2):
+         p = Parallel(n_jobs=3, require='sharedmem')
+         assert type(p._backend) is ThreadingBackend
+         assert p.n_jobs == 3
+
+
+ @parametrize("context", [parallel_config, parallel_backend])
1664
+ def test_backend_hinting_and_constraints_with_custom_backends(
1665
+ capsys, context
1666
+ ):
1667
+ # Custom backends can declare that they use threads and have shared memory
1668
+ # semantics:
1669
+ class MyCustomThreadingBackend(ParallelBackendBase):
1670
+ supports_sharedmem = True
1671
+ use_threads = True
1672
+
1673
+ def apply_async(self):
1674
+ pass
1675
+
1676
+ def effective_n_jobs(self, n_jobs):
1677
+ return n_jobs
1678
+
1679
+ with context(MyCustomThreadingBackend()):
1680
+ p = Parallel(n_jobs=2, prefer='processes') # ignored
1681
+ assert type(p._backend) is MyCustomThreadingBackend
1682
+
1683
+ p = Parallel(n_jobs=2, require='sharedmem')
1684
+ assert type(p._backend) is MyCustomThreadingBackend
1685
+
1686
+ class MyCustomProcessingBackend(ParallelBackendBase):
1687
+ supports_sharedmem = False
1688
+ use_threads = False
1689
+
1690
+ def apply_async(self):
1691
+ pass
1692
+
1693
+ def effective_n_jobs(self, n_jobs):
1694
+ return n_jobs
1695
+
1696
+ with context(MyCustomProcessingBackend()):
1697
+ p = Parallel(n_jobs=2, prefer='processes')
1698
+ assert type(p._backend) is MyCustomProcessingBackend
1699
+
1700
+ out, err = capsys.readouterr()
1701
+ assert out == ""
1702
+ assert err == ""
1703
+
1704
+ p = Parallel(n_jobs=2, require='sharedmem', verbose=10)
1705
+ assert type(p._backend) is ThreadingBackend
1706
+
1707
+ out, err = capsys.readouterr()
1708
+ expected = ("Using ThreadingBackend as joblib backend "
1709
+ "instead of MyCustomProcessingBackend as the latter "
1710
+ "does not provide shared memory semantics.")
1711
+ assert out.strip() == expected
1712
+ assert err == ""
1713
+
1714
+ with raises(ValueError):
1715
+ Parallel(backend=MyCustomProcessingBackend(), require='sharedmem')
1716
+
1717
+
1718
+ def test_invalid_backend_hinting_and_constraints():
+     with raises(ValueError):
+         Parallel(prefer='invalid')
+
+     with raises(ValueError):
+         Parallel(require='invalid')
+
+     with raises(ValueError):
+         # It is inconsistent to prefer process-based parallelism while
+         # requiring shared memory semantics.
+         Parallel(prefer='processes', require='sharedmem')
+
+     if mp is not None:
+         # It is inconsistent to ask explicitly for process-based
+         # parallelism while requiring shared memory semantics.
+         with raises(ValueError):
+             Parallel(backend='loky', require='sharedmem')
+         with raises(ValueError):
+             Parallel(backend='multiprocessing', require='sharedmem')
+
+
+ def _recursive_backend_info(limit=3, **kwargs):
+     """Perform nested parallel calls and introspect the backend on the way"""
+
+     with Parallel(n_jobs=2) as p:
+         this_level = [(type(p._backend).__name__, p._backend.nesting_level)]
+         if limit == 0:
+             return this_level
+         results = p(delayed(_recursive_backend_info)(limit=limit - 1, **kwargs)
+                     for i in range(1))
+         return this_level + results[0]
+
+
+ @with_multiprocessing
+ @parametrize('backend', ['loky', 'threading'])
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_nested_parallelism_limit(context, backend):
+     with context(backend, n_jobs=2):
+         backend_types_and_levels = _recursive_backend_info()
+
+     if cpu_count() == 1:
+         second_level_backend_type = 'SequentialBackend'
+         max_level = 1
+     else:
+         second_level_backend_type = 'ThreadingBackend'
+         max_level = 2
+
+     top_level_backend_type = backend.title() + 'Backend'
+     expected_types_and_levels = [
+         (top_level_backend_type, 0),
+         (second_level_backend_type, 1),
+         ('SequentialBackend', max_level),
+         ('SequentialBackend', max_level)
+     ]
+     assert backend_types_and_levels == expected_types_and_levels
+
+
+ @with_numpy
+ @parametrize("context", [parallel_config, parallel_backend])
+ @skipif(distributed is None, reason='This test requires dask')
+ def test_nested_parallelism_with_dask(context):
+     with distributed.Client(n_workers=2, threads_per_worker=2):
+         # 10 MB of data as argument to trigger implicit scattering
+         data = np.ones(int(1e7), dtype=np.uint8)
+         for i in range(2):
+             with context('dask'):
+                 backend_types_and_levels = _recursive_backend_info(data=data)
+             assert len(backend_types_and_levels) == 4
+             assert all(name == 'DaskDistributedBackend'
+                        for name, _ in backend_types_and_levels)
+
+         # No argument
+         with context('dask'):
+             backend_types_and_levels = _recursive_backend_info()
+         assert len(backend_types_and_levels) == 4
+         assert all(name == 'DaskDistributedBackend'
+                    for name, _ in backend_types_and_levels)
+
+
+ def _recursive_parallel(nesting_limit=None):
+     """A horrible function that does recursive parallel calls"""
+     return Parallel()(delayed(_recursive_parallel)() for i in range(2))
+
+
+ @pytest.mark.no_cover
+ @parametrize("context", [parallel_config, parallel_backend])
+ @parametrize(
+     'backend', (['threading'] if mp is None else ['loky', 'threading'])
+ )
+ def test_thread_bomb_mitigation(context, backend):
+     # Test that recursive parallelism raises a RecursionError rather than
+     # saturating the operating system resources by creating an unbounded
+     # number of threads.
+     with context(backend, n_jobs=2):
+         with raises(BaseException) as excinfo:
+             _recursive_parallel()
+     exc = excinfo.value
+     if backend == "loky":
+         # Local import because loky may not be importable for lack of
+         # multiprocessing
+         from joblib.externals.loky.process_executor import TerminatedWorkerError  # noqa
+         if isinstance(exc, (TerminatedWorkerError, PicklingError)):
+             # The recursion exception can itself cause an error when
+             # pickling it to be sent back to the parent process. In this
+             # case the worker crashes but the original traceback is still
+             # printed on stderr. This could be improved but does not seem
+             # simple to do and this is not critical for users (as long
+             # as there is no process or thread bomb happening).
+             pytest.xfail("Loky worker crash when serializing RecursionError")
+
+     assert isinstance(exc, RecursionError)
+
+
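+ # Helper run inside the workers: it reports the threadpool-related
+ # environment variables visible in the worker together with the number of
+ # OpenMP threads used by the compiled parallel_sum helper.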
+ def _run_parallel_sum():
+     env_vars = {}
+     for var in ['OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
+                 'VECLIB_MAXIMUM_THREADS', 'NUMEXPR_NUM_THREADS',
+                 'NUMBA_NUM_THREADS', 'ENABLE_IPC']:
+         env_vars[var] = os.environ.get(var)
+     return env_vars, parallel_sum(100)
+
+
+ @parametrize("backend", ([None, 'loky'] if mp is not None else [None]))
1841
+ @skipif(parallel_sum is None, reason="Need OpenMP helper compiled")
1842
+ def test_parallel_thread_limit(backend):
1843
+ results = Parallel(n_jobs=2, backend=backend)(
1844
+ delayed(_run_parallel_sum)() for _ in range(2)
1845
+ )
1846
+ expected_num_threads = max(cpu_count() // 2, 1)
1847
+ for worker_env_vars, omp_num_threads in results:
1848
+ assert omp_num_threads == expected_num_threads
1849
+ for name, value in worker_env_vars.items():
1850
+ if name.endswith("_THREADS"):
1851
+ assert value == str(expected_num_threads)
1852
+ else:
1853
+ assert name == "ENABLE_IPC"
1854
+ assert value == "1"
1855
+
1856
+
1857
+ @parametrize("context", [parallel_config, parallel_backend])
1858
+ @skipif(distributed is not None, reason='This test requires dask')
1859
+ def test_dask_backend_when_dask_not_installed(context):
1860
+ with raises(ValueError, match='Please install dask'):
1861
+ context('dask')
1862
+
1863
+
1864
+ @parametrize("context", [parallel_config, parallel_backend])
1865
+ def test_zero_worker_backend(context):
1866
+ # joblib.Parallel should reject with an explicit error message parallel
1867
+ # backends that have no worker.
1868
+ class ZeroWorkerBackend(ThreadingBackend):
1869
+ def configure(self, *args, **kwargs):
1870
+ return 0
1871
+
1872
+ def apply_async(self, func, callback=None): # pragma: no cover
1873
+ raise TimeoutError("No worker available")
1874
+
1875
+ def effective_n_jobs(self, n_jobs): # pragma: no cover
1876
+ return 0
1877
+
1878
+ expected_msg = "ZeroWorkerBackend has no active worker"
1879
+ with context(ZeroWorkerBackend()):
1880
+ with pytest.raises(RuntimeError, match=expected_msg):
1881
+ Parallel(n_jobs=2)(delayed(id)(i) for i in range(2))
1882
+
1883
+
1884
+ def test_globals_update_at_each_parallel_call():
+     # This is a non-regression test related to joblib issues #836 and #833.
+     # Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where
+     # changes to global variables in a parent process between two calls to
+     # joblib.Parallel would not be propagated into the workers.
+     global MY_GLOBAL_VARIABLE
+     MY_GLOBAL_VARIABLE = "original value"
+
+     def check_globals():
+         global MY_GLOBAL_VARIABLE
+         return MY_GLOBAL_VARIABLE
+
+     assert check_globals() == "original value"
+
+     workers_global_variable = Parallel(n_jobs=2)(
+         delayed(check_globals)() for i in range(2))
+     assert set(workers_global_variable) == {"original value"}
+
+     # Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets
+     # propagated into the workers environment
+     MY_GLOBAL_VARIABLE = "changed value"
+     assert check_globals() == "changed value"
+
+     workers_global_variable = Parallel(n_jobs=2)(
+         delayed(check_globals)() for i in range(2))
+     assert set(workers_global_variable) == {"changed value"}
+
+
+ ##############################################################################
+ # Test environment variables in the child env, in particular for limiting
+ # the maximal number of threads in C-library threadpools.
+ #
+
+ def _check_numpy_threadpool_limits():
+     import numpy as np
+     # Let's call BLAS on a matrix-matrix multiplication with dimensions
+     # large enough to ensure that the threadpool managed by the underlying
+     # BLAS implementation is actually used so as to force its initialization.
+     a = np.random.randn(100, 100)
+     np.dot(a, a)
+     from threadpoolctl import threadpool_info
+     return threadpool_info()
+
+
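+ # Look up, in the parent process threadpool_info, the module matching
+ # `child_module` (matched by the filepath of the loaded library) and return
+ # its thread count.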
+ def _parent_max_num_threads_for(child_module, parent_info):
+     for parent_module in parent_info:
+         if parent_module['filepath'] == child_module['filepath']:
+             return parent_module['num_threads']
+     raise ValueError("An unexpected module was loaded in child:\n{}"
+                      .format(child_module))
+
+
+ def check_child_num_threads(workers_info, parent_info, num_threads):
+     # Check that the number of threads reported in workers_info is consistent
+     # with the expectation. We need to be careful to handle the cases where
+     # the requested number of threads is below max_num_threads for the library.
+     for child_threadpool_info in workers_info:
+         for child_module in child_threadpool_info:
+             parent_max_num_threads = _parent_max_num_threads_for(
+                 child_module, parent_info)
+             expected = {min(num_threads, parent_max_num_threads), num_threads}
+             assert child_module['num_threads'] in expected
+
+
+ @with_numpy
+ @with_multiprocessing
+ @parametrize('n_jobs', [2, 4, -2, -1])
+ def test_threadpool_limitation_in_child_loky(n_jobs):
+     # Check that the protection against oversubscription in workers is working
+     # using threadpoolctl functionalities.
+
+     # Skip this test if numpy is not linked to a BLAS library
+     parent_info = _check_numpy_threadpool_limits()
+     if len(parent_info) == 0:
+         pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+     workers_threadpool_infos = Parallel(backend="loky", n_jobs=n_jobs)(
+         delayed(_check_numpy_threadpool_limits)() for i in range(2))
+
+     n_jobs = effective_n_jobs(n_jobs)
+     expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+
+     check_child_num_threads(workers_threadpool_infos, parent_info,
+                             expected_child_num_threads)
+
+
+ @with_numpy
+ @with_multiprocessing
+ @parametrize('inner_max_num_threads', [1, 2, 4, None])
+ @parametrize('n_jobs', [2, -1])
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_threadpool_limitation_in_child_context(
+         context, n_jobs, inner_max_num_threads
+ ):
+     # Check that the protection against oversubscription in workers is working
+     # using threadpoolctl functionalities.
+
+     # Skip this test if numpy is not linked to a BLAS library
+     parent_info = _check_numpy_threadpool_limits()
+     if len(parent_info) == 0:
+         pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+     with context('loky', inner_max_num_threads=inner_max_num_threads):
+         workers_threadpool_infos = Parallel(n_jobs=n_jobs)(
+             delayed(_check_numpy_threadpool_limits)() for i in range(2))
+
+     n_jobs = effective_n_jobs(n_jobs)
+     if inner_max_num_threads is None:
+         expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+     else:
+         expected_child_num_threads = inner_max_num_threads
+
+     check_child_num_threads(workers_threadpool_infos, parent_info,
+                             expected_child_num_threads)
+
+
+ @with_multiprocessing
+ @parametrize('n_jobs', [2, -1])
+ @parametrize('var_name', ["OPENBLAS_NUM_THREADS",
+                           "MKL_NUM_THREADS",
+                           "OMP_NUM_THREADS"])
+ @parametrize("context", [parallel_config, parallel_backend])
+ def test_threadpool_limitation_in_child_override(context, n_jobs, var_name):
+     # Check that environment variables set by the user on the main process
+     # always have the priority.
+
+     # Clean up the existing executor because we change the environment of the
+     # parent at runtime and it is not detected in loky intentionally.
+     get_reusable_executor(reuse=True).shutdown()
+
+     def _get_env(var_name):
+         return os.environ.get(var_name)
+
+     original_var_value = os.environ.get(var_name)
+     try:
+         os.environ[var_name] = "4"
+         results = Parallel(n_jobs=n_jobs)(
+             delayed(_get_env)(var_name) for i in range(2))
+         assert results == ["4", "4"]
+
+         with context('loky', inner_max_num_threads=1):
+             results = Parallel(n_jobs=n_jobs)(
+                 delayed(_get_env)(var_name) for i in range(2))
+         assert results == ["1", "1"]
+
+     finally:
+         if original_var_value is None:
+             del os.environ[var_name]
+         else:
+             os.environ[var_name] = original_var_value
+
+
+ @with_multiprocessing
+ @parametrize('n_jobs', [2, 4, -1])
+ def test_loky_reuse_workers(n_jobs):
+     # Non-regression test for issue #967 where the workers are not reused when
+     # calling multiple Parallel loops.
+
+     def parallel_call(n_jobs):
+         x = range(10)
+         Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10))
+
+     # Run a parallel loop and get the workers used for computations
+     parallel_call(n_jobs)
+     first_executor = get_reusable_executor(reuse=True)
+
+     # Ensure that the workers are reused for the next calls, as the executor is
+     # not restarted.
+     for _ in range(10):
+         parallel_call(n_jobs)
+         executor = get_reusable_executor(reuse=True)
+         assert executor == first_executor
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/test_store_backends.py ADDED
@@ -0,0 +1,94 @@
+
+ try:
+     # Python 2.7: use the C pickle to speed up
+     # test_concurrency_safe_write which pickles big python objects
+     import cPickle as cpickle
+ except ImportError:
+     import pickle as cpickle
+ import functools
+ from pickle import PicklingError
+ import time
+
+ import pytest
+
+ from joblib.testing import parametrize, timeout
+ from joblib.test.common import with_multiprocessing
+ from joblib.backports import concurrency_safe_rename
+ from joblib import Parallel, delayed
+ from joblib._store_backends import (
+     concurrency_safe_write,
+     FileSystemStoreBackend,
+     CacheWarning,
+ )
+
+
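+ # Pickle `output` to `filename`; called concurrently from several workers in
+ # test_concurrency_safe_write below.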
+ def write_func(output, filename):
+     with open(filename, 'wb') as f:
+         cpickle.dump(output, f)
+
+
+ def load_func(expected, filename):
+     for i in range(10):
+         try:
+             with open(filename, 'rb') as f:
+                 reloaded = cpickle.load(f)
+             break
+         except (OSError, IOError):
+             # On Windows you can have WindowsError ([Error 5] Access
+             # is denied or [Error 13] Permission denied) when reading the file,
+             # probably because a writer process has a lock on the file
+             time.sleep(0.1)
+     else:
+         raise
+     assert expected == reloaded
+
+
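+ # Write to a temporary file first, then atomically rename it into place, as
+ # the store backends do to remain safe under concurrent writers.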
+ def concurrency_safe_write_rename(to_write, filename, write_func):
+     temporary_filename = concurrency_safe_write(to_write,
+                                                 filename, write_func)
+     concurrency_safe_rename(temporary_filename, filename)
+
+
+ @timeout(0)  # No timeout as this test can be long
+ @with_multiprocessing
+ @parametrize('backend', ['multiprocessing', 'loky', 'threading'])
+ def test_concurrency_safe_write(tmpdir, backend):
+     # Add one item to cache
+     filename = tmpdir.join('test.pkl').strpath
+
+     obj = {str(i): i for i in range(int(1e5))}
+     funcs = [functools.partial(concurrency_safe_write_rename,
+                                write_func=write_func)
+              if i % 3 != 2 else load_func for i in range(12)]
+     Parallel(n_jobs=2, backend=backend)(
+         delayed(func)(obj, filename) for func in funcs)
+
+
+ def test_warning_on_dump_failure(tmpdir):
+     # Check that a warning is raised when the dump fails for any reason
+     # other than a PicklingError.
+     class UnpicklableObject(object):
+         def __reduce__(self):
+             raise RuntimeError("some exception")
+
+     backend = FileSystemStoreBackend()
+     backend.location = tmpdir.join('test_warning_on_pickling_error').strpath
+     backend.compress = None
+
+     with pytest.warns(CacheWarning, match="some exception"):
+         backend.dump_item("testpath", UnpicklableObject())
+
+
+ def test_warning_on_pickling_error(tmpdir):
+     # This is separate from test_warning_on_dump_failure because in the
+     # future we will turn this into an exception.
+     class UnpicklableObject(object):
+         def __reduce__(self):
+             raise PicklingError("not picklable")
+
+     backend = FileSystemStoreBackend()
+     backend.location = tmpdir.join('test_warning_on_pickling_error').strpath
+     backend.compress = None
+
+     with pytest.warns(FutureWarning, match="not picklable"):
+         backend.dump_item("testpath", UnpicklableObject())
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/joblib/test/testutils.py ADDED
@@ -0,0 +1,8 @@
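+ # Standalone helpers for the joblib tests: defined in an importable module
+ # so that worker processes can unpickle them by reference.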
+ def return_slice_of_data(arr, start_idx, end_idx):
+     return arr[start_idx:end_idx]
+
+
+ def print_filename_and_raise(arr):
+     from joblib._memmapping_reducer import _get_backing_memmap
+     print(_get_backing_memmap(arr).filename)
+     raise ValueError
material/dataset/xpsdeeplearning/.venv/lib/python3.10/site-packages/jupyter_console-6.6.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip