| id | prompt | docstring |
|---|---|---|
172,267 | from mmap import mmap
import errno
import os
import stat
import threading
import atexit
import tempfile
import time
import warnings
import weakref
from uuid import uuid4
from multiprocessing import util
from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError
from .numpy_pickle import dump, load, load_temporary_memmap
from .backports import make_memmap
from .disk import delete_folder
from .externals.loky.backend import resource_tracker
SYSTEM_SHARED_MEM_FS = '/dev/shm'
SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(2e9)
import os
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
The provided code snippet includes necessary dependencies for implementing the `_get_temp_dir` function. Write a Python function `def _get_temp_dir(pool_folder_name, temp_folder=None)` to solve the following problem:
Get the full path to a subfolder inside the temporary folder. Parameters ---------- pool_folder_name : str Sub-folder name used for the serialization of a pool instance. temp_folder: str, optional Folder to be used by the pool for memmapping large arrays for sharing memory with worker processes. If None, this will try in order: - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. Returns ------- pool_folder : str full path to the temporary folder use_shared_mem : bool whether the temporary folder is written to the system shared memory folder or some other temporary folder.
Here is the function:
def _get_temp_dir(pool_folder_name, temp_folder=None):
"""Get the full path to a subfolder inside the temporary folder.
Parameters
----------
pool_folder_name : str
Sub-folder name used for the serialization of a pool instance.
temp_folder: str, optional
Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAMdisk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Returns
-------
pool_folder : str
full path to the temporary folder
use_shared_mem : bool
whether the temporary folder is written to the system shared memory
folder or some other temporary folder.
"""
use_shared_mem = False
if temp_folder is None:
temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
if temp_folder is None:
if os.path.exists(SYSTEM_SHARED_MEM_FS):
try:
shm_stats = os.statvfs(SYSTEM_SHARED_MEM_FS)
available_nbytes = shm_stats.f_bsize * shm_stats.f_bavail
if available_nbytes > SYSTEM_SHARED_MEM_FS_MIN_SIZE:
# Try to see if we have write access to the shared mem
# folder only if it is reasonably large (that is 2GB or
# more).
temp_folder = SYSTEM_SHARED_MEM_FS
pool_folder = os.path.join(temp_folder, pool_folder_name)
if not os.path.exists(pool_folder):
os.makedirs(pool_folder)
use_shared_mem = True
except (IOError, OSError):
# Missing rights in the /dev/shm partition, fallback to regular
# temp folder.
temp_folder = None
if temp_folder is None:
# Fallback to the default tmp folder, typically /tmp
temp_folder = tempfile.gettempdir()
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
pool_folder = os.path.join(temp_folder, pool_folder_name)
return pool_folder, use_shared_mem | Get the full path to a subfolder inside the temporary folder. Parameters ---------- pool_folder_name : str Sub-folder name used for the serialization of a pool instance. temp_folder: str, optional Folder to be used by the pool for memmapping large arrays for sharing memory with worker processes. If None, this will try in order: - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable, - /dev/shm if the folder exists and is writable: this is a RAMdisk filesystem available by default on modern Linux distributions, - the default system temporary folder that can be overridden with TMP, TMPDIR or TEMP environment variables, typically /tmp under Unix operating systems. Returns ------- pool_folder : str full path to the temporary folder use_shared_mem : bool whether the temporary folder is written to the system shared memory folder or some other temporary folder. |
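A minimal usage sketch of the function above (the pool folder name and the explicit temp_folder below are made up for illustration; it assumes the module-level constants and imports shown earlier are in scope):

# With no explicit temp_folder the resolution order is: JOBLIB_TEMP_FOLDER,
# then /dev/shm (if it exists, is writable and is at least ~2 GB), then the
# system temp dir returned by tempfile.gettempdir().
pool_folder, use_shared_mem = _get_temp_dir("joblib_memmapping_folder_1234")
print(pool_folder, use_shared_mem)

# Passing temp_folder explicitly bypasses the /dev/shm detection entirely,
# so use_shared_mem is always False in that case.
pool_folder, use_shared_mem = _get_temp_dir("joblib_memmapping_folder_1234",
                                            temp_folder="/var/tmp")
assert use_shared_mem is False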
172,268 | from mmap import mmap
import errno
import os
import stat
import threading
import atexit
import tempfile
import time
import warnings
import weakref
from uuid import uuid4
from multiprocessing import util
from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError
from .numpy_pickle import dump, load, load_temporary_memmap
from .backports import make_memmap
from .disk import delete_folder
from .externals.loky.backend import resource_tracker
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any."""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
# neither a nor its descendants have a memmap base
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
The provided code snippet includes necessary dependencies for implementing the `has_shareable_memory` function. Write a Python function `def has_shareable_memory(a)` to solve the following problem:
Return True if a is directly or indirectly backed by an mmap buffer.
Here is the function:
def has_shareable_memory(a):
"""Return True if a is backed by some mmap buffer directly or not."""
return _get_backing_memmap(a) is not None | Return True if a is backed by some mmap buffer directly or not. |
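A short check of the two helpers above (assumes numpy is installed and that _get_backing_memmap and has_shareable_memory are in scope):

import os
import tempfile
import numpy as np

plain = np.arange(10)
print(has_shareable_memory(plain))      # False: a regular in-memory array

path = os.path.join(tempfile.mkdtemp(), "buf.dat")
mm = np.memmap(path, dtype="float64", mode="w+", shape=(10,))
print(has_shareable_memory(mm))         # True: backed by an mmap buffer
print(has_shareable_memory(mm[2:5]))    # True: views keep the memmap in their base chain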
172,269 | from mmap import mmap
import errno
import os
import stat
import threading
import atexit
import tempfile
import time
import warnings
import weakref
from uuid import uuid4
from multiprocessing import util
from pickle import whichmodule, loads, dumps, HIGHEST_PROTOCOL, PicklingError
from .numpy_pickle import dump, load, load_temporary_memmap
from .backports import make_memmap
from .disk import delete_folder
from .externals.loky.backend import resource_tracker
def reduce_array_memmap_backward(a):
"""reduce a np.array or a np.memmap from a child process"""
m = _get_backing_memmap(a)
if isinstance(m, np.memmap) and m.filename not in JOBLIB_MMAPS:
# if a is backed by a memmaped file, reconstruct a using the
# memmaped file.
return _reduce_memmap_backed(a, m)
else:
# a is either a regular (not memmap-backed) numpy array, or an array
# backed by a shared temporary file created by joblib. In the latter
# case, in order to limit the lifespan of these temporary files, we
# serialize the memmap as a regular numpy array, and decref the
# file backing the memmap (done implicitly in a previously registered
# finalizer, see ``unlink_on_gc_collect`` for more details)
return (
loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL), )
)
class ArrayMemmapForwardReducer(object):
"""Reducer callable to dump large arrays to memmap files.
Parameters
----------
max_nbytes: int
Threshold to trigger memmapping of large arrays to files created
in a folder.
temp_folder_resolver: callable
A callable in charge of resolving a temporary folder name where files
for backing memmapped arrays are created.
mmap_mode: 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
verbose: int, optional, 0 by default
If verbose > 0, memmap creations are logged.
If verbose > 1, both memmap creations, reuse and array pickling are
logged.
prewarm: bool, optional, False by default.
Force a read on newly memmapped arrays to make sure that the OS
pre-caches them in memory. This can be useful to avoid concurrent disk access when the
same data array is passed to different worker processes.
"""
def __init__(self, max_nbytes, temp_folder_resolver, mmap_mode,
unlink_on_gc_collect, verbose=0, prewarm=True):
self._max_nbytes = max_nbytes
self._temp_folder_resolver = temp_folder_resolver
self._mmap_mode = mmap_mode
self.verbose = int(verbose)
if prewarm == "auto":
self._prewarm = not self._temp_folder.startswith(
SYSTEM_SHARED_MEM_FS
)
else:
self._prewarm = prewarm
self._memmaped_arrays = _WeakArrayKeyMap()
self._temporary_memmaped_filenames = set()
self._unlink_on_gc_collect = unlink_on_gc_collect
@property
def _temp_folder(self):
return self._temp_folder_resolver()
def __reduce__(self):
# The ArrayMemmapForwardReducer is passed to the children processes: it
# needs to be pickled but the _WeakArrayKeyMap needs to be skipped as
# it's only guaranteed to be consistent with the parent process memory
# garbage collection.
# Although this reducer is pickled, it is not needed in its destination
# process (child processes), as we only use this reducer to send
# memmaps from the parent process to the children processes. For this
# reason, we can afford skipping the resolver, (which would otherwise
# be unpicklable), and pass it as None instead.
args = (self._max_nbytes, None, self._mmap_mode,
self._unlink_on_gc_collect)
kwargs = {
'verbose': self.verbose,
'prewarm': self._prewarm,
}
return ArrayMemmapForwardReducer, args, kwargs
def __call__(self, a):
m = _get_backing_memmap(a)
if m is not None and isinstance(m, np.memmap):
# a is already backed by a memmap file, let's reuse it directly
return _reduce_memmap_backed(a, m)
if (not a.dtype.hasobject and self._max_nbytes is not None and
a.nbytes > self._max_nbytes):
# check that the folder exists (lazily create the pool temp folder
# if required)
try:
os.makedirs(self._temp_folder)
os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
try:
basename = self._memmaped_arrays.get(a)
except KeyError:
# Generate a new unique random filename. The process and thread
# ids are only useful for debugging purpose and to make it
# easier to cleanup orphaned files in case of hard process
# kill (e.g. by "kill -9" or segfault).
basename = "{}-{}-{}.pkl".format(
os.getpid(), id(threading.current_thread()), uuid4().hex)
self._memmaped_arrays.set(a, basename)
filename = os.path.join(self._temp_folder, basename)
# In case the same array with the same content is passed several
# times to the pool subprocess children, serialize it only once
is_new_memmap = filename not in self._temporary_memmaped_filenames
# add the memmap to the list of temporary memmaps created by joblib
self._temporary_memmaped_filenames.add(filename)
if self._unlink_on_gc_collect:
# Bump reference count of the memmap by 1 to account for
# shared usage of the memmap by a child process. The
# corresponding decref call will be executed upon calling
# resource_tracker.maybe_unlink, registered as a finalizer in
# the child.
# the incref/decref calls here are only possible when the child
# and the parent share the same resource_tracker. It is not the
# case for the multiprocessing backend, but it does not matter
# because unlinking a memmap from a child process is only
# useful to control the memory usage of long-lasting child
# processes, while the multiprocessing-based pools terminate
# their workers at the end of a map() call.
resource_tracker.register(filename, "file")
if is_new_memmap:
# Incref each temporary memmap created by joblib one extra
# time. This means that these memmaps will only be deleted
# once an extra maybe_unlink() is called, which is done once
# all the jobs have completed (or been canceled) in the
# Parallel._terminate_backend() method.
resource_tracker.register(filename, "file")
if not os.path.exists(filename):
util.debug(
"[ARRAY DUMP] Pickling new array (shape={}, dtype={}) "
"creating a new memmap at {}".format(
a.shape, a.dtype, filename))
for dumped_filename in dump(a, filename):
os.chmod(dumped_filename, FILE_PERMISSIONS)
if self._prewarm:
# Warm up the data by accessing it. This operation ensures
# that the disk access required to create the memmapping
# file is performed in the reducing process and avoids
# concurrent memmap creation in multiple children
# processes.
load(filename, mmap_mode=self._mmap_mode).max()
else:
util.debug(
"[ARRAY DUMP] Pickling known array (shape={}, dtype={}) "
"reusing memmap file: {}".format(
a.shape, a.dtype, os.path.basename(filename)))
# The worker process will use joblib.load to memmap the data
return (
(load_temporary_memmap, (filename, self._mmap_mode,
self._unlink_on_gc_collect))
)
else:
# do not convert a into memmap, let pickler do its usual copy with
# the default system pickler
util.debug(
'[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, '
' dtype={}).'.format(a.shape, a.dtype))
return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
The provided code snippet includes necessary dependencies for implementing the `get_memmapping_reducers` function. Write a Python function `def get_memmapping_reducers( forward_reducers=None, backward_reducers=None, temp_folder_resolver=None, max_nbytes=1e6, mmap_mode='r', verbose=0, prewarm=False, unlink_on_gc_collect=True, **kwargs)` to solve the following problem:
Construct a pair of memmapping reducers linked to a tmpdir. This function manages the creation and cleanup of the temporary folders underlying the memory maps and should be used to get the reducers necessary to construct a joblib pool or executor.
Here is the function:
def get_memmapping_reducers(
forward_reducers=None, backward_reducers=None,
temp_folder_resolver=None, max_nbytes=1e6, mmap_mode='r', verbose=0,
prewarm=False, unlink_on_gc_collect=True, **kwargs):
"""Construct a pair of memmapping reducer linked to a tmpdir.
This function manage the creation and the clean up of the temporary folders
underlying the memory maps and should be use to get the reducers necessary
to construct joblib pool or executor.
"""
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
if np is not None:
# Register smart numpy.ndarray reducers that detects memmap backed
# arrays and that is also able to dump to memmap large in-memory
# arrays over the max_nbytes threshold
forward_reduce_ndarray = ArrayMemmapForwardReducer(
max_nbytes, temp_folder_resolver, mmap_mode, unlink_on_gc_collect,
verbose, prewarm=prewarm)
forward_reducers[np.ndarray] = forward_reduce_ndarray
forward_reducers[np.memmap] = forward_reduce_ndarray
# Communication from child process to the parent process always
# pickles in-memory numpy.ndarray without dumping them as memmap
# to avoid confusing the caller and make it tricky to collect the
# temporary folder
backward_reducers[np.ndarray] = reduce_array_memmap_backward
backward_reducers[np.memmap] = reduce_array_memmap_backward
return forward_reducers, backward_reducers | Construct a pair of memmapping reducers linked to a tmpdir. This function manages the creation and cleanup of the temporary folders underlying the memory maps and should be used to get the reducers necessary to construct a joblib pool or executor. |
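A small sketch of constructing the reducers (the temporary folder is created just for the example; assumes numpy is importable as np together with the definitions above):

import tempfile
import numpy as np

temp_folder = tempfile.mkdtemp()
forward, backward = get_memmapping_reducers(
    temp_folder_resolver=lambda: temp_folder,  # where memmap files would be dumped
    max_nbytes=int(1e6),                       # arrays above ~1 MB get memmapped
    mmap_mode="r",
)
# Both dicts map array types to reducer callables meant to be plugged into the
# pickler used by a joblib pool or executor.
print(sorted(klass.__name__ for klass in forward))             # ['memmap', 'ndarray']
print(backward[np.ndarray] is reduce_array_memmap_backward)    # True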
172,270 | from ._memmapping_reducer import get_memmapping_reducers
from ._memmapping_reducer import TemporaryResourcesManager
from .externals.loky.reusable_executor import _ReusablePoolExecutor
class MemmappingExecutor(_ReusablePoolExecutor):
def get_memmapping_executor(cls, n_jobs, timeout=300, initializer=None,
initargs=(), env=None, temp_folder=None,
context_id=None, **backend_args):
def terminate(self, kill_workers=False):
def _temp_folder(self):
def get_memmapping_executor(n_jobs, **kwargs):
return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs) | null |
172,271 | import gc
import os
import warnings
import threading
import functools
import contextlib
from abc import ABCMeta, abstractmethod
from .my_exceptions import WorkerInterrupt
from ._multiprocessing_helpers import mp
The provided code snippet includes necessary dependencies for implementing the `inside_dask_worker` function. Write a Python function `def inside_dask_worker()` to solve the following problem:
Check whether the current function is executed inside a Dask worker.
Here is the function:
def inside_dask_worker():
"""Check whether the current function is executed inside a Dask worker.
"""
# This function can not be in joblib._dask because there would be a
# circular import:
# _dask imports _parallel_backend that imports _dask ...
try:
from distributed import get_worker
except ImportError:
return False
try:
get_worker()
return True
except ValueError:
return False | Check whether the current function is executed inside a Dask worker. |
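A hedged illustration (assumes the function above is in scope; the Dask part is shown as comments because it needs a running distributed cluster):

# In a plain script or REPL there is no Dask worker context:
print(inside_dask_worker())   # False

# Inside a task submitted to a Dask cluster the same call returns True,
# because distributed.get_worker() succeeds there:
#
#   from dask.distributed import Client
#   client = Client(processes=False)
#   print(client.submit(inside_dask_worker).result())   # True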
172,272 | import inspect
import warnings
import re
import os
import collections
from itertools import islice
from tokenize import open as open_py_source
from .logger import pformat
import os
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
The provided code snippet includes necessary dependencies for implementing the `get_func_code` function. Write a Python function `def get_func_code(func)` to solve the following problem:
Attempts to retrieve a reliable function code hash. The reason we don't use inspect.getsource is that it caches the source, whereas we want this to be modified on the fly when the function is modified. Returns ------- func_code: string The function code source_file: string The path to the file in which the function is defined. first_line: int The first line of the code in the source file. Notes ------ This function does a bit more magic than inspect, and is thus more robust.
Here is the function:
def get_func_code(func):
""" Attempts to retrieve a reliable function code hash.
The reason we don't use inspect.getsource is that it caches the
source, whereas we want this to be modified on the fly when the
function is modified.
Returns
-------
func_code: string
The function code
source_file: string
The path to the file in which the function is defined.
first_line: int
The first line of the code in the source file.
Notes
------
This function does a bit more magic than inspect, and is thus
more robust.
"""
source_file = None
try:
code = func.__code__
source_file = code.co_filename
if not os.path.exists(source_file):
# Use inspect for lambda functions and functions defined in an
# interactive shell, or in doctests
source_code = ''.join(inspect.getsourcelines(func)[0])
line_no = 1
if source_file.startswith('<doctest '):
source_file, line_no = re.match(
r'\<doctest (.*\.rst)\[(.*)\]\>', source_file).groups()
line_no = int(line_no)
source_file = '<doctest %s>' % source_file
return source_code, source_file, line_no
# Try to retrieve the source code.
with open_py_source(source_file) as source_file_obj:
first_line = code.co_firstlineno
# All the lines after the function definition:
source_lines = list(islice(source_file_obj, first_line - 1, None))
return ''.join(inspect.getblock(source_lines)), source_file, first_line
except:
# If the source code fails, we use the hash. This is fragile and
# might change from one session to another.
if hasattr(func, '__code__'):
# Python 3.X
return str(func.__code__.__hash__()), source_file, -1
else:
# Weird objects like numpy ufunc don't have __code__
# This is fragile, as quite often the id of the object is
# in the repr, so it might not persist across sessions,
# however it will work for ufuncs.
return repr(func), source_file, -1 | Attempts to retrieve a reliable function code hash. The reason we don't use inspect.getsource is that it caches the source, whereas we want this to be modified on the fly when the function is modified. Returns ------- func_code: string The function code source_file: string The path to the file in which the function is defined. first_line: int The first line of the code in the source file. Notes ------ This function does a bit more magic than inspect, and is thus more robust. |
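A quick illustration of the helper above (assumes it is in scope and that the calling code lives in a real file on disk rather than a bare REPL, so the source can be re-read):

def example(x, y=2):
    return x + y

func_code, source_file, first_line = get_func_code(example)
print(source_file)                  # path of the file that defines `example`
print(first_line)                   # line number of the `def example(...)` statement
print(func_code.splitlines()[0])    # 'def example(x, y=2):'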
172,273 | import inspect
import warnings
import re
import os
import collections
from itertools import islice
from tokenize import open as open_py_source
from .logger import pformat
def get_func_name(func, resolv_alias=True, win_characters=True):
""" Return the function import path (as a list of module names), and
a name for the function.
Parameters
----------
func: callable
The func to inspect
resolv_alias: boolean, optional
If true, possible local aliases are indicated.
win_characters: boolean, optional
If true, substitute special characters using urllib.quote
This is useful in Windows, as it cannot encode some filenames
"""
if hasattr(func, '__module__'):
module = func.__module__
else:
try:
module = inspect.getmodule(func)
except TypeError:
if hasattr(func, '__class__'):
module = func.__class__.__module__
else:
module = 'unknown'
if module is None:
# Happens in doctests, eg
module = ''
if module == '__main__':
try:
filename = os.path.abspath(inspect.getsourcefile(func))
except:
filename = None
if filename is not None:
# mangling of full path to filename
parts = filename.split(os.sep)
if parts[-1].startswith('<ipython-input'):
# We're in a IPython (or notebook) session. parts[-1] comes
# from func.__code__.co_filename and is of the form
# <ipython-input-N-XYZ>, where:
# - N is the cell number where the function was defined
# - XYZ is a hash representing the function's code (and name).
# It will be consistent across sessions and kernel restarts,
# and will change if the function's code/name changes
# We remove N so that cache is properly hit if the cell where
# the func is defined is re-executed.
# The XYZ hash should avoid collisions between functions with
# the same name, both within the same notebook but also across
# notebooks
splitted = parts[-1].split('-')
parts[-1] = '-'.join(splitted[:2] + splitted[3:])
elif len(parts) > 2 and parts[-2].startswith('ipykernel_'):
# In a notebook session (ipykernel). Filename seems to be 'xyz'
# of above. parts[-2] has the structure ipykernel_XXXXXX where
# XXXXXX is a six-digit number identifying the current run (?).
# If we split it off, the function again has the same
# identifier across runs.
parts[-2] = 'ipykernel'
filename = '-'.join(parts)
if filename.endswith('.py'):
filename = filename[:-3]
module = module + '-' + filename
module = module.split('.')
if hasattr(func, 'func_name'):
name = func.func_name
elif hasattr(func, '__name__'):
name = func.__name__
else:
name = 'unknown'
# Hack to detect functions not defined at the module-level
if resolv_alias:
# TODO: Maybe add a warning here?
if hasattr(func, 'func_globals') and name in func.func_globals:
if not func.func_globals[name] is func:
name = '%s-alias' % name
if inspect.ismethod(func):
# We need to add the name of the class
if hasattr(func, 'im_class'):
klass = func.im_class
module.append(klass.__name__)
if os.name == 'nt' and win_characters:
# Windows can't encode certain characters in filenames
name = _clean_win_chars(name)
module = [_clean_win_chars(s) for s in module]
return module, name
def _signature_str(function_name, arg_sig):
"""Helper function to output a function signature"""
return '{}{}'.format(function_name, arg_sig)
def _function_called_str(function_name, args, kwargs):
"""Helper function to output a function call"""
template_str = '{0}({1}, {2})'
args_str = repr(args)[1:-1]
kwargs_str = ', '.join('%s=%s' % (k, v)
for k, v in kwargs.items())
return template_str.format(function_name, args_str,
kwargs_str)
The provided code snippet includes necessary dependencies for implementing the `filter_args` function. Write a Python function `def filter_args(func, ignore_lst, args=(), kwargs=dict())` to solve the following problem:
Filters the given args and kwargs using a list of arguments to ignore, and a function specification. Parameters ---------- func: callable Function giving the argument specification ignore_lst: list of strings List of arguments to ignore (either a name of an argument in the function spec, or '*', or '**') *args: list Positional arguments passed to the function. **kwargs: dict Keyword arguments passed to the function Returns ------- filtered_args: list List of filtered positional and keyword arguments.
Here is the function:
def filter_args(func, ignore_lst, args=(), kwargs=dict()):
""" Filters the given args and kwargs using a list of arguments to
ignore, and a function specification.
Parameters
----------
func: callable
Function giving the argument specification
ignore_lst: list of strings
List of arguments to ignore (either a name of an argument
in the function spec, or '*', or '**')
*args: list
Positional arguments passed to the function.
**kwargs: dict
Keyword arguments passed to the function
Returns
-------
filtered_args: list
List of filtered positional and keyword arguments.
"""
args = list(args)
if isinstance(ignore_lst, str):
# Catch a common mistake
raise ValueError(
'ignore_lst must be a list of parameters to ignore '
'%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
# Special case for functools.partial objects
if (not inspect.ismethod(func) and not inspect.isfunction(func)):
if ignore_lst:
warnings.warn('Cannot inspect object %s, ignore list will '
'not work.' % func, stacklevel=2)
return {'*': args, '**': kwargs}
arg_sig = inspect.signature(func)
arg_names = []
arg_defaults = []
arg_kwonlyargs = []
arg_varargs = None
arg_varkw = None
for param in arg_sig.parameters.values():
if param.kind is param.POSITIONAL_OR_KEYWORD:
arg_names.append(param.name)
elif param.kind is param.KEYWORD_ONLY:
arg_names.append(param.name)
arg_kwonlyargs.append(param.name)
elif param.kind is param.VAR_POSITIONAL:
arg_varargs = param.name
elif param.kind is param.VAR_KEYWORD:
arg_varkw = param.name
if param.default is not param.empty:
arg_defaults.append(param.default)
if inspect.ismethod(func):
# First argument is 'self', it has been removed by Python
# we need to add it back:
args = [func.__self__, ] + args
# func is an instance method, inspect.signature(func) does not
# include self, we need to fetch it from the class method, i.e
# func.__func__
class_method_sig = inspect.signature(func.__func__)
self_name = next(iter(class_method_sig.parameters))
arg_names = [self_name] + arg_names
# XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
# as on ndarrays.
_, name = get_func_name(func, resolv_alias=False)
arg_dict = dict()
arg_position = -1
for arg_position, arg_name in enumerate(arg_names):
if arg_position < len(args):
# Positional argument or keyword argument given as positional
if arg_name not in arg_kwonlyargs:
arg_dict[arg_name] = args[arg_position]
else:
raise ValueError(
"Keyword-only parameter '%s' was passed as "
'positional parameter for %s:\n'
' %s was called.'
% (arg_name,
_signature_str(name, arg_sig),
_function_called_str(name, args, kwargs))
)
else:
position = arg_position - len(arg_names)
if arg_name in kwargs:
arg_dict[arg_name] = kwargs[arg_name]
else:
try:
arg_dict[arg_name] = arg_defaults[position]
except (IndexError, KeyError) as e:
# Missing argument
raise ValueError(
'Wrong number of arguments for %s:\n'
' %s was called.'
% (_signature_str(name, arg_sig),
_function_called_str(name, args, kwargs))
) from e
varkwargs = dict()
for arg_name, arg_value in sorted(kwargs.items()):
if arg_name in arg_dict:
arg_dict[arg_name] = arg_value
elif arg_varkw is not None:
varkwargs[arg_name] = arg_value
else:
raise TypeError("Ignore list for %s() contains an unexpected "
"keyword argument '%s'" % (name, arg_name))
if arg_varkw is not None:
arg_dict['**'] = varkwargs
if arg_varargs is not None:
varargs = args[arg_position + 1:]
arg_dict['*'] = varargs
# Now remove the arguments to be ignored
for item in ignore_lst:
if item in arg_dict:
arg_dict.pop(item)
else:
raise ValueError("Ignore list: argument '%s' is not defined for "
"function %s"
% (item,
_signature_str(name, arg_sig))
)
# XXX: Return a sorted list of pairs?
return arg_dict | Filters the given args and kwargs using a list of arguments to ignore, and a function specification. Parameters ---------- func: callable Function giving the argument specification ignore_lst: list of strings List of arguments to ignore (either a name of an argument in the function spec, or '*', or '**') *args: list Positional arguments passed to the function. **kwargs: dict Keyword arguments passed to the function Returns ------- filtered_args: list List of filtered positional and keyword arguments. |
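A short example of the filtering behaviour (assumes filter_args and the helpers it relies on, get_func_name, _signature_str and _function_called_str, are all in scope):

def f(a, b, c=3, *, d=4):
    return a + b + c + d

# Drop `b` from the resolved call so it does not take part in, e.g., a cache key:
print(filter_args(f, ['b'], args=(1, 2), kwargs={'d': 10}))
# {'a': 1, 'c': 3, 'd': 10}

# Ignoring an argument that is not in the signature raises ValueError:
# filter_args(f, ['z'], args=(1, 2))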
172,274 | import inspect
import warnings
import re
import os
import collections
from itertools import islice
from tokenize import open as open_py_source
from .logger import pformat
def format_signature(func, *args, **kwargs):
# XXX: Should this use inspect.formatargvalues/formatargspec?
module, name = get_func_name(func)
module = [m for m in module if m]
if module:
module.append(name)
module_path = '.'.join(module)
else:
module_path = name
arg_str = list()
previous_length = 0
for arg in args:
formatted_arg = _format_arg(arg)
if previous_length > 80:
formatted_arg = '\n%s' % formatted_arg
previous_length = len(formatted_arg)
arg_str.append(formatted_arg)
arg_str.extend(['%s=%s' % (v, _format_arg(i)) for v, i in kwargs.items()])
arg_str = ', '.join(arg_str)
signature = '%s(%s)' % (name, arg_str)
return module_path, signature
The provided code snippet includes necessary dependencies for implementing the `format_call` function. Write a Python function `def format_call(func, args, kwargs, object_name="Memory")` to solve the following problem:
Returns a nicely formatted statement displaying the function call with the given arguments.
Here is the function:
def format_call(func, args, kwargs, object_name="Memory"):
""" Returns a nicely formatted statement displaying the function
call with the given arguments.
"""
path, signature = format_signature(func, *args, **kwargs)
msg = '%s\n[%s] Calling %s...\n%s' % (80 * '_', object_name,
path, signature)
return msg
# XXX: Not using logging framework
# self.debug(msg) | Returns a nicely formatted statement displaying the function call with the given arguments. |
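A hedged usage sketch (assumes the full joblib.func_inspect helpers are in scope, including the private _format_arg used by format_signature, which is not shown in the snippet):

def add(a, b=0):
    return a + b

msg = format_call(add, (1,), {'b': 2}, object_name="Memory")
print(msg)
# ________________________________________________________________________________
# [Memory] Calling <module path>.add...
# add(1, b=2)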
172,275 | import io
import zlib
from joblib.backports import LooseVersion
_COMPRESSORS = {}
class CompressorWrapper():
"""A wrapper around a compressor file object.
Attributes
----------
obj: a file-like object
The object must implement the buffer interface and will be used
internally to compress/decompress the data.
prefix: bytestring
A bytestring corresponding to the magic number that identifies the
file format associated to the compressor.
extension: str
The file extension used to automatically select this compressor during
a dump to a file.
"""
def __init__(self, obj, prefix=b'', extension=''):
self.fileobj_factory = obj
self.prefix = prefix
self.extension = extension
def compressor_file(self, fileobj, compresslevel=None):
"""Returns an instance of a compressor file object."""
if compresslevel is None:
return self.fileobj_factory(fileobj, 'wb')
else:
return self.fileobj_factory(fileobj, 'wb',
compresslevel=compresslevel)
def decompressor_file(self, fileobj):
"""Returns an instance of a decompressor file object."""
return self.fileobj_factory(fileobj, 'rb')
The provided code snippet includes necessary dependencies for implementing the `register_compressor` function. Write a Python function `def register_compressor(compressor_name, compressor, force=False)` to solve the following problem:
Register a new compressor. Parameters ----------- compressor_name: str. The name of the compressor. compressor: CompressorWrapper An instance of a 'CompressorWrapper'.
Here is the function:
def register_compressor(compressor_name, compressor,
force=False):
"""Register a new compressor.
Parameters
-----------
compressor_name: str.
The name of the compressor.
compressor: CompressorWrapper
An instance of a 'CompressorWrapper'.
"""
global _COMPRESSORS
if not isinstance(compressor_name, str):
raise ValueError("Compressor name should be a string, "
"'{}' given.".format(compressor_name))
if not isinstance(compressor, CompressorWrapper):
raise ValueError("Compressor should implement the CompressorWrapper "
"interface, '{}' given.".format(compressor))
if (compressor.fileobj_factory is not None and
(not hasattr(compressor.fileobj_factory, 'read') or
not hasattr(compressor.fileobj_factory, 'write') or
not hasattr(compressor.fileobj_factory, 'seek') or
not hasattr(compressor.fileobj_factory, 'tell'))):
raise ValueError("Compressor 'fileobj_factory' attribute should "
"implement the file object interface, '{}' given."
.format(compressor.fileobj_factory))
if compressor_name in _COMPRESSORS and not force:
raise ValueError("Compressor '{}' already registered."
.format(compressor_name))
_COMPRESSORS[compressor_name] = compressor | Register a new compressor. Parameters ----------- compressor_name: str. The name of the compressor. compressor: CompressorWrapper An instance of a 'CompressorWrapper'. |
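A hedged sketch of registering a custom compressor on top of Python's built-in lzma module (the 'my-xz' name and '.myxz' extension are invented for the example; joblib already ships an LZMA wrapper, so this is purely illustrative):

import lzma

class MyXZCompressorWrapper(CompressorWrapper):
    def __init__(self):
        CompressorWrapper.__init__(self, obj=lzma.LZMAFile,
                                   prefix=b'\xfd7zXZ\x00',   # xz magic bytes
                                   extension='.myxz')

register_compressor('my-xz', MyXZCompressorWrapper())
# Once registered, the name can typically be passed to joblib.dump via
# compress=('my-xz', 3), and the '.myxz' extension is matched on dump.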
172,276 | import pickle
import os
import zlib
import inspect
from io import BytesIO
from .numpy_pickle_utils import _ZFILE_PREFIX
from .numpy_pickle_utils import Unpickler
from .numpy_pickle_utils import _ensure_native_byte_order
_MAX_LEN = len(hex_str(2 ** 64))
The provided code snippet includes necessary dependencies for implementing the `read_zfile` function. Write a Python function `def read_zfile(file_handle)` to solve the following problem:
Read the z-file and return the content as a string. Z-files are raw data compressed with zlib used internally by joblib for persistence. Backward compatibility is not guaranteed. Do not use for external purposes.
Here is the function:
def read_zfile(file_handle):
"""Read the z-file and return the content as a string.
Z-files are raw data compressed with zlib used internally by joblib
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.seek(0)
header_length = len(_ZFILE_PREFIX) + _MAX_LEN
length = file_handle.read(header_length)
length = length[len(_ZFILE_PREFIX):]
length = int(length, 16)
# With python2 and joblib version <= 0.8.4 compressed pickle header is one
# character wider so we need to ignore an additional space if present.
# Note: the first byte of the zlib data is guaranteed not to be a
# space according to
# https://tools.ietf.org/html/rfc6713#section-2.1
next_byte = file_handle.read(1)
if next_byte != b' ':
# The zlib compressed data has started and we need to go back
# one byte
file_handle.seek(header_length)
# We use the known length of the data to tell Zlib the size of the
# buffer to allocate.
data = zlib.decompress(file_handle.read(), 15, length)
assert len(data) == length, (
"Incorrect data length while decompressing %s."
"The file could be corrupted." % file_handle)
return data | Read the z-file and return the content as a string. Z-files are raw data compressed with zlib used internally by joblib for persistence. Backward compatibility is not guaranteed. Do not use for external purposes. |
172,277 | import pickle
import os
import zlib
import inspect
from io import BytesIO
from .numpy_pickle_utils import _ZFILE_PREFIX
from .numpy_pickle_utils import Unpickler
from .numpy_pickle_utils import _ensure_native_byte_order
def hex_str(an_int):
"""Convert an int to an hexadecimal string."""
return '{:#x}'.format(an_int)
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
_MAX_LEN = len(hex_str(2 ** 64))
The provided code snippet includes necessary dependencies for implementing the `write_zfile` function. Write a Python function `def write_zfile(file_handle, data, compress=1)` to solve the following problem:
Write the data in the given file as a Z-file. Z-files are raw data compressed with zlib used internally by joblib for persistence. Backward compatibility is not guaranteed. Do not use for external purposes.
Here is the function:
def write_zfile(file_handle, data, compress=1):
"""Write the data in the given file as a Z-file.
Z-files are raw data compressed with zlib used internally by joblib
for persistence. Backward compatibility is not guaranteed. Do not
use for external purposes.
"""
file_handle.write(_ZFILE_PREFIX)
length = hex_str(len(data))
# Store the length of the data
file_handle.write(asbytes(length.ljust(_MAX_LEN)))
file_handle.write(zlib.compress(asbytes(data), compress)) | Write the data in the given file as a Z-file. Z-files are raw data compressed with zlib used internally by joblib for persistence. Backward compatibility is not guaranteed. Do not use for external purposes. |
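A round trip through the two z-file helpers above, kept in memory with BytesIO (assumes write_zfile, read_zfile and the _ZFILE_PREFIX/_MAX_LEN constants are in scope):

from io import BytesIO

buf = BytesIO()
write_zfile(buf, b'some raw joblib payload', compress=3)
buf.seek(0)
assert read_zfile(buf) == b'some raw joblib payload'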
172,278 | import re
import os
import os.path
import datetime
import json
import shutil
import warnings
import collections
import operator
import threading
from abc import ABCMeta, abstractmethod
from .backports import concurrency_safe_rename
from .disk import mkdirp, memstr_to_bytes, rm_subdirs
from . import numpy_pickle
import os
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
The provided code snippet includes necessary dependencies for implementing the `concurrency_safe_write` function. Write a Python function `def concurrency_safe_write(object_to_write, filename, write_func)` to solve the following problem:
Writes an object into a unique file in a concurrency-safe way.
Here is the function:
def concurrency_safe_write(object_to_write, filename, write_func):
"""Writes an object into a unique file in a concurrency-safe way."""
thread_id = id(threading.current_thread())
temporary_filename = '{}.thread-{}-pid-{}'.format(
filename, thread_id, os.getpid())
write_func(object_to_write, temporary_filename)
return temporary_filename | Writes an object into a unique file in a concurrency-safe way. |
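A minimal sketch (the JSON writer and the /tmp path are made up for the example; assumes the function above is in scope):

import json

def write_json(obj, path):
    with open(path, 'w') as f:
        json.dump(obj, f)

tmp_name = concurrency_safe_write({'a': 1}, '/tmp/result.json', write_json)
print(tmp_name)   # e.g. /tmp/result.json.thread-140...-pid-12345
# The caller is then expected to move the unique file into place atomically,
# e.g. with the concurrency_safe_rename imported at the top of this snippet.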
172,279 | import pickle
import io
import sys
import warnings
import contextlib
from .compressor import _ZFILE_PREFIX
from .compressor import _COMPRESSORS
def _is_numpy_array_byte_order_mismatch(array):
"""Check if numpy array is having byte order mismatch"""
return ((sys.byteorder == 'big' and
(array.dtype.byteorder == '<' or
(array.dtype.byteorder == '|' and array.dtype.fields and
all(e[0].byteorder == '<'
for e in array.dtype.fields.values())))) or
(sys.byteorder == 'little' and
(array.dtype.byteorder == '>' or
(array.dtype.byteorder == '|' and array.dtype.fields and
all(e[0].byteorder == '>'
for e in array.dtype.fields.values())))))
The provided code snippet includes necessary dependencies for implementing the `_ensure_native_byte_order` function. Write a Python function `def _ensure_native_byte_order(array)` to solve the following problem:
Use the byte order of the host while preserving values Does nothing if array already uses the system byte order.
Here is the function:
def _ensure_native_byte_order(array):
"""Use the byte order of the host while preserving values
Does nothing if array already uses the system byte order.
"""
if _is_numpy_array_byte_order_mismatch(array):
array = array.byteswap().newbyteorder('=')
return array | Use the byte order of the host while preserving values Does nothing if array already uses the system byte order. |
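A small check of the byte-order helpers above (assumes numpy is importable as np; note the snippet relies on ndarray.newbyteorder, which only exists in NumPy versions before 2.0):

import numpy as np

native = np.arange(4, dtype=np.int32)
swapped = native.astype(native.dtype.newbyteorder())    # non-native byte order

fixed = _ensure_native_byte_order(swapped)
assert not _is_numpy_array_byte_order_mismatch(fixed)   # back to native order
assert (fixed == native).all()                          # values are preserved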
172,280 | import pickle
import io
import sys
import warnings
import contextlib
from .compressor import _ZFILE_PREFIX
from .compressor import _COMPRESSORS
The provided code snippet includes necessary dependencies for implementing the `_read_bytes` function. Write a Python function `def _read_bytes(fp, size, error_template="ran out of data")` to solve the following problem:
Read from file-like object until size bytes are read. TODO python2_drop: is it still needed? The docstring mentions python 2.6 and it looks like this can be at least simplified ... Raises ValueError if not EOF is encountered before size bytes are read. Non-blocking objects only supported if they derive from io objects. Required as e.g. ZipExtFile in python 2.6 can return less data than requested. This function was taken from numpy/lib/format.py in version 1.10.2. Parameters ---------- fp: file-like object size: int error_template: str Returns ------- a bytes object The data read in bytes.
Here is the function:
def _read_bytes(fp, size, error_template="ran out of data"):
"""Read from file-like object until size bytes are read.
TODO python2_drop: is it still needed? The docstring mentions python 2.6
and it looks like this can be at least simplified ...
Raises ValueError if EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
This function was taken from numpy/lib/format.py in version 1.10.2.
Parameters
----------
fp: file-like object
size: int
error_template: str
Returns
-------
a bytes object
The data read in bytes.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data | Read from file-like object until size bytes are read. TODO python2_drop: is it still needed? The docstring mentions python 2.6 and it looks like this can be at least simplified ... Raises ValueError if not EOF is encountered before size bytes are read. Non-blocking objects only supported if they derive from io objects. Required as e.g. ZipExtFile in python 2.6 can return less data than requested. This function was taken from numpy/lib/format.py in version 1.10.2. Parameters ---------- fp: file-like object size: int error_template: str Returns ------- a bytes object The data read in bytes. |
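A tiny demonstration of the exact-read behaviour (assumes the function above is in scope):

from io import BytesIO

fp = BytesIO(b'0123456789')
print(_read_bytes(fp, 4))    # b'0123'
print(_read_bytes(fp, 6))    # b'456789'

short = BytesIO(b'abc')
try:
    _read_bytes(short, 10, "array data")
except ValueError as exc:
    print(exc)   # EOF: reading array data, expected 10 bytes got 3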
172,281 | import pickle
import hashlib
import sys
import types
import struct
import io
import decimal
class Hasher(Pickler):
""" A subclass of pickler, to do cryptographic hashing, rather than
pickling.
"""
def __init__(self, hash_name='md5'):
self.stream = io.BytesIO()
# By default we want a pickle protocol that only changes with
# the major python version and not the minor one
protocol = 3
Pickler.__init__(self, self.stream, protocol=protocol)
# Initialise the hash obj
self._hash = hashlib.new(hash_name)
def hash(self, obj, return_digest=True):
try:
self.dump(obj)
except pickle.PicklingError as e:
e.args += ('PicklingError while hashing %r: %r' % (obj, e),)
raise
dumps = self.stream.getvalue()
self._hash.update(dumps)
if return_digest:
return self._hash.hexdigest()
def save(self, obj):
if isinstance(obj, (types.MethodType, type({}.pop))):
# the Pickler cannot pickle instance methods; here we decompose
# them into components that make them uniquely identifiable
if hasattr(obj, '__func__'):
func_name = obj.__func__.__name__
else:
func_name = obj.__name__
inst = obj.__self__
if type(inst) == type(pickle):
obj = _MyHash(func_name, inst.__name__)
elif inst is None:
# type(None) or type(module) do not pickle
obj = _MyHash(func_name, inst)
else:
cls = obj.__self__.__class__
obj = _MyHash(func_name, inst, cls)
Pickler.save(self, obj)
def memoize(self, obj):
# We want hashing to be sensitive to value instead of reference.
# For example we want ['aa', 'aa'] and ['aa', 'aaZ'[:2]]
# to hash to the same value and that's why we disable memoization
# for strings
if isinstance(obj, (bytes, str)):
return
Pickler.memoize(self, obj)
# The dispatch table of the pickler is not accessible in Python
# 3, as these lines are only bugware for IPython, we skip them.
def save_global(self, obj, name=None, pack=struct.pack):
# We have to override this method in order to deal with objects
# defined interactively in IPython that are not injected in
# __main__
kwargs = dict(name=name, pack=pack)
del kwargs['pack']
try:
Pickler.save_global(self, obj, **kwargs)
except pickle.PicklingError:
Pickler.save_global(self, obj, **kwargs)
module = getattr(obj, "__module__", None)
if module == '__main__':
my_name = name
if my_name is None:
my_name = obj.__name__
mod = sys.modules[module]
if not hasattr(mod, my_name):
# IPython doesn't inject the variables defined
# interactively in __main__
setattr(mod, my_name, obj)
dispatch = Pickler.dispatch.copy()
# builtin
dispatch[type(len)] = save_global
# type
dispatch[type(object)] = save_global
# classobj
dispatch[type(Pickler)] = save_global
# function
dispatch[type(pickle.dump)] = save_global
def _batch_setitems(self, items):
# forces order of keys in dict to ensure consistent hash.
try:
# Trying first to compare dict assuming the type of keys is
# consistent and orderable.
# This fails on python 3 when keys are unorderable
# but we keep it in a try as it's faster.
Pickler._batch_setitems(self, iter(sorted(items)))
except TypeError:
# If keys are unorderable, sorting them using their hash. This is
# slower but works in any case.
Pickler._batch_setitems(self, iter(sorted((hash(k), v)
for k, v in items)))
def save_set(self, set_items):
# forces order of items in Set to ensure consistent hash
Pickler.save(self, _ConsistentSet(set_items))
dispatch[type(set())] = save_set
class NumpyHasher(Hasher):
""" Special case the hasher for when numpy is loaded.
"""
def __init__(self, hash_name='md5', coerce_mmap=False):
"""
Parameters
----------
hash_name: string
The hash algorithm to be used
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
objects.
"""
self.coerce_mmap = coerce_mmap
Hasher.__init__(self, hash_name=hash_name)
# delayed import of numpy, to avoid tight coupling
import numpy as np
self.np = np
if hasattr(np, 'getbuffer'):
self._getbuffer = np.getbuffer
else:
self._getbuffer = memoryview
def save(self, obj):
""" Subclass the save method, to hash ndarray subclass, rather
than pickling them. Off course, this is a total abuse of
the Pickler class.
"""
if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
# Compute a hash of the object
# The update function of the hash requires a c_contiguous buffer.
if obj.shape == ():
# 0d arrays need to be flattened because viewing them as bytes
# raises a ValueError exception.
obj_c_contiguous = obj.flatten()
elif obj.flags.c_contiguous:
obj_c_contiguous = obj
elif obj.flags.f_contiguous:
obj_c_contiguous = obj.T
else:
# Cater for non-single-segment arrays: this creates a
# copy, and thus alleviates this issue.
# XXX: There might be a more efficient way of doing this
obj_c_contiguous = obj.flatten()
# memoryview is not supported for some dtypes, e.g. datetime64, see
# https://github.com/numpy/numpy/issues/4983. The
# workaround is to view the array as bytes before
# taking the memoryview.
self._hash.update(
self._getbuffer(obj_c_contiguous.view(self.np.uint8)))
# We store the class, to be able to distinguish between
# Objects with the same binary content, but different
# classes.
if self.coerce_mmap and isinstance(obj, self.np.memmap):
# We don't make the difference between memmap and
# normal ndarrays, to be able to reload previously
# computed results with memmap.
klass = self.np.ndarray
else:
klass = obj.__class__
# We also return the dtype and the shape, to distinguish
# different views on the same data with different dtypes.
# The object will be pickled by the pickler hashed at the end.
obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
elif isinstance(obj, self.np.dtype):
# numpy.dtype consistent hashing is tricky to get right. This comes
# from the fact that atomic np.dtype objects are interned:
# ``np.dtype('f4') is np.dtype('f4')``. The situation is
# complicated by the fact that this interning does not resist a
# simple pickle.load/dump roundtrip:
# ``pickle.loads(pickle.dumps(np.dtype('f4'))) is not
# np.dtype('f4') Because pickle relies on memoization during
# pickling, it is easy to
# produce different hashes for seemingly identical objects, such as
# ``[np.dtype('f4'), np.dtype('f4')]``
# and ``[np.dtype('f4'), pickle.loads(pickle.dumps('f4'))]``.
# To prevent memoization from interfering with hashing, we isolate
# the serialization (and thus the pickle memoization) of each dtype
# using each time a different ``pickle.dumps`` call unrelated to
# the current Hasher instance.
self._hash.update("_HASHED_DTYPE".encode('utf-8'))
self._hash.update(pickle.dumps(obj))
return
Hasher.save(self, obj)
The provided code snippet includes necessary dependencies for implementing the `hash` function. Write a Python function `def hash(obj, hash_name='md5', coerce_mmap=False)` to solve the following problem:
Quick calculation of a hash to identify uniquely Python objects containing numpy arrays. Parameters ----------- hash_name: 'md5' or 'sha1' Hashing algorithm used. sha1 is supposedly safer, but md5 is faster. coerce_mmap: boolean Make no difference between np.memmap and np.ndarray
Here is the function:
def hash(obj, hash_name='md5', coerce_mmap=False):
""" Quick calculation of a hash to identify uniquely Python objects
containing numpy arrays.
Parameters
-----------
hash_name: 'md5' or 'sha1'
Hashing algorithm used. sha1 is supposedly safer, but md5 is
faster.
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
"""
valid_hash_names = ('md5', 'sha1')
if hash_name not in valid_hash_names:
raise ValueError("Valid options for 'hash_name' are {}. "
"Got hash_name={!r} instead."
.format(valid_hash_names, hash_name))
if 'numpy' in sys.modules:
hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
else:
hasher = Hasher(hash_name=hash_name)
return hasher.hash(obj) | Quick calculation of a hash to identify uniquely Python objects containing numpy arrays. Parameters ----------- hash_name: 'md5' or 'sha1' Hashing algorithm used. sha1 is supposedly safer, but md5 is faster. coerce_mmap: boolean Make no difference between np.memmap and np.ndarray |
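A few hedged examples of the hashing entry point above (assumes numpy is installed and Hasher/NumpyHasher are in scope; note that this `hash` shadows the builtin):

import numpy as np

a = np.arange(10)
b = np.arange(10)
print(hash(a) == hash(b))                      # True: same dtype, shape and content
print(hash(a) == hash(a.astype(np.float64)))   # False: the dtype is part of the hash

digest = hash({'x': a, 'y': 'text'}, hash_name='sha1')
print(len(digest))                             # 40 hex characters for sha1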
172,282 | from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from uuid import uuid4
from numbers import Integral
import warnings
import queue
from ._multiprocessing_helpers import mp
from .logger import Logger, short_format_time
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend,
LokyBackend)
from .externals.cloudpickle import dumps, loads
from ._utils import eval_expr
from ._parallel_backends import AutoBatchingMixin
from ._parallel_backends import ParallelBackendBase
def register_parallel_backend(name, factory, make_default=False):
"""Register a new Parallel backend factory.
The new backend can then be selected by passing its name as the backend
argument to the :class:`~Parallel` class. Moreover, the default backend can
be overwritten globally by setting make_default=True.
The factory can be any callable that takes no argument and return an
instance of ``ParallelBackendBase``.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
BACKENDS[name] = factory
if make_default:
global DEFAULT_BACKEND
DEFAULT_BACKEND = name
class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase):
MIN_IDEAL_BATCH_DURATION = 0.2
MAX_IDEAL_BATCH_DURATION = 1.0
supports_timeout = True
def __init__(self, scheduler_host=None, scatter=None,
client=None, loop=None, wait_for_workers_timeout=10,
**submit_kwargs):
super().__init__()
if distributed is None:
msg = ("You are trying to use 'dask' as a joblib parallel backend "
"but dask is not installed. Please install dask "
"to fix this error.")
raise ValueError(msg)
if client is None:
if scheduler_host:
client = Client(scheduler_host, loop=loop,
set_as_default=False)
else:
try:
client = get_client()
except ValueError as e:
msg = ("To use Joblib with Dask first create a Dask Client"
"\n\n"
" from dask.distributed import Client\n"
" client = Client()\n"
"or\n"
" client = Client('scheduler-address:8786')")
raise ValueError(msg) from e
self.client = client
if scatter is not None and not isinstance(scatter, (list, tuple)):
raise TypeError("scatter must be a list/tuple, got "
"`%s`" % type(scatter).__name__)
if scatter is not None and len(scatter) > 0:
# Keep a reference to the scattered data to keep the ids the same
self._scatter = list(scatter)
scattered = self.client.scatter(scatter, broadcast=True)
self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
else:
self._scatter = []
self.data_futures = {}
self.wait_for_workers_timeout = wait_for_workers_timeout
self.submit_kwargs = submit_kwargs
self.waiting_futures = as_completed(
[],
loop=client.loop,
with_results=True,
raise_errors=False
)
self._results = {}
self._callbacks = {}
async def _collect(self):
while self._continue:
async for future, result in self.waiting_futures:
cf_future = self._results.pop(future)
callback = self._callbacks.pop(future)
if future.status == "error":
typ, exc, tb = result
cf_future.set_exception(exc)
else:
cf_future.set_result(result)
callback(result)
await asyncio.sleep(0.01)
def __reduce__(self):
return (DaskDistributedBackend, ())
def get_nested_backend(self):
return DaskDistributedBackend(client=self.client), -1
def configure(self, n_jobs=1, parallel=None, **backend_args):
self.parallel = parallel
return self.effective_n_jobs(n_jobs)
def start_call(self):
self._continue = True
self.client.loop.add_callback(self._collect)
self.call_data_futures = _WeakKeyDictionary()
def stop_call(self):
# The explicit call to clear is required to break a cycling reference
# to the futures.
self._continue = False
# wait for the future collection routine (self._backend._collect) to
# finish in order to limit asyncio warnings due to aborting _collect
# during a following backend termination call
time.sleep(0.01)
self.call_data_futures.clear()
def effective_n_jobs(self, n_jobs):
effective_n_jobs = sum(self.client.ncores().values())
if effective_n_jobs != 0 or not self.wait_for_workers_timeout:
return effective_n_jobs
# If there is no worker, schedule a probe task to wait for the workers
# to come up and be available. If the dask cluster is in adaptive mode
# task might cause the cluster to provision some workers.
try:
self.client.submit(_joblib_probe_task).result(
timeout=self.wait_for_workers_timeout)
except _TimeoutError as e:
error_msg = (
"DaskDistributedBackend has no worker after {} seconds. "
"Make sure that workers are started and can properly connect "
"to the scheduler and increase the joblib/dask connection "
"timeout with:\n\n"
"parallel_backend('dask', wait_for_workers_timeout={})"
).format(self.wait_for_workers_timeout,
max(10, 2 * self.wait_for_workers_timeout))
raise TimeoutError(error_msg) from e
return sum(self.client.ncores().values())
async def _to_func_args(self, func):
itemgetters = dict()
# Futures that are dynamically generated during a single call to
# Parallel.__call__.
call_data_futures = getattr(self, 'call_data_futures', None)
async def maybe_to_futures(args):
out = []
for arg in args:
arg_id = id(arg)
if arg_id in itemgetters:
out.append(itemgetters[arg_id])
continue
f = self.data_futures.get(arg_id, None)
if f is None and call_data_futures is not None:
try:
f = await call_data_futures[arg]
except KeyError:
pass
if f is None:
if is_weakrefable(arg) and sizeof(arg) > 1e3:
# Automatically scatter large objects to some of
# the workers to avoid duplicated data transfers.
# Rely on automated inter-worker data stealing if
# more workers need to reuse this data
# concurrently.
# set hash=False - nested scatter calls (i.e
# calling client.scatter inside a dask worker)
# using hash=True often raise CancelledError,
# see dask/distributed#3703
_coro = self.client.scatter(
arg,
asynchronous=True,
hash=False
)
# Centralize the scattering of identical arguments
# between concurrent apply_async callbacks by
# exposing the running coroutine in
# call_data_futures before it completes.
t = asyncio.Task(_coro)
call_data_futures[arg] = t
f = await t
if f is not None:
out.append(f)
else:
out.append(arg)
return out
tasks = []
for f, args, kwargs in func.items:
args = list(await maybe_to_futures(args))
kwargs = dict(zip(kwargs.keys(),
await maybe_to_futures(kwargs.values())))
tasks.append((f, args, kwargs))
return (Batch(tasks), tasks)
def apply_async(self, func, callback=None):
cf_future = concurrent.futures.Future()
cf_future.get = cf_future.result # achieve AsyncResult API
async def f(func, callback):
batch, tasks = await self._to_func_args(func)
key = f'{repr(batch)}-{uuid4().hex}'
dask_future = self.client.submit(
batch, tasks=tasks, key=key, **self.submit_kwargs
)
self.waiting_futures.add(dask_future)
self._callbacks[dask_future] = callback
self._results[dask_future] = cf_future
self.client.loop.add_callback(f, func, callback)
return cf_future
def abort_everything(self, ensure_ready=True):
""" Tell the client to cancel any task submitted via this instance
joblib.Parallel will never access those results
"""
with self.waiting_futures.lock:
self.waiting_futures.futures.clear()
while not self.waiting_futures.queue.empty():
self.waiting_futures.queue.get()
def retrieval_context(self):
"""Override ParallelBackendBase.retrieval_context to avoid deadlocks.
This removes thread from the worker's thread pool (using 'secede').
Seceding avoids deadlock in nested parallelism settings.
"""
# See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
# this is used.
if hasattr(thread_state, 'execution_state'):
# we are in a worker. Secede to avoid deadlock.
secede()
yield
if hasattr(thread_state, 'execution_state'):
rejoin()
The provided code snippet includes necessary dependencies for implementing the `_register_dask` function. Write a Python function `def _register_dask()` to solve the following problem:
Register Dask Backend if called with parallel_backend("dask")
Here is the function:
def _register_dask():
""" Register Dask Backend if called with parallel_backend("dask") """
try:
from ._dask import DaskDistributedBackend
register_parallel_backend('dask', DaskDistributedBackend)
except ImportError as e:
msg = ("To use the dask.distributed backend you must install both "
"the `dask` and distributed modules.\n\n"
"See https://dask.pydata.org/en/latest/install.html for more "
"information.")
raise ImportError(msg) from e | Register Dask Backend if called with parallel_backend("dask") |
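A minimal usage sketch of the backend this helper registers, assuming both `dask` and `distributed` are installed; the in-process `Client` and the `inc` helper are illustrative only and not part of the original snippet:
from distributed import Client
from joblib import Parallel, delayed, parallel_backend

def inc(x):
    # toy task used purely for illustration
    return x + 1

if __name__ == "__main__":
    client = Client(processes=False)      # in-process scheduler, no extra worker processes
    with parallel_backend("dask"):        # triggers the lazy 'dask' backend registration
        results = Parallel(verbose=1)(delayed(inc)(i) for i in range(8))
    print(results)                        # [1, 2, 3, 4, 5, 6, 7, 8]
    client.close()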
172,283 | from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from uuid import uuid4
from numbers import Integral
import warnings
import queue
from ._multiprocessing_helpers import mp
from .logger import Logger, short_format_time
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend,
LokyBackend)
from .externals.cloudpickle import dumps, loads
from ._utils import eval_expr
from ._parallel_backends import AutoBatchingMixin
from ._parallel_backends import ParallelBackendBase
if mp is not None:
BACKENDS['multiprocessing'] = MultiprocessingBackend
from .externals import loky
BACKENDS['loky'] = LokyBackend
DEFAULT_BACKEND = 'loky'
mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
if mp:
try:
import multiprocessing as mp
import _multiprocessing # noqa
except ImportError:
mp = None
if mp is not None:
try:
# try to create a named semaphore using SemLock to make sure they are
# available on this platform. We use the low level object
# _multiprocessing.SemLock to avoid spawning a resource tracker on
# Unix system or changing the default backend.
import tempfile
from _multiprocessing import SemLock
_rand = tempfile._RandomNameSequence()
for i in range(100):
try:
name = '/joblib-{}-{}' .format(
os.getpid(), next(_rand))
_sem = SemLock(0, 0, 1, name=name, unlink=True)
del _sem # cleanup
break
except FileExistsError as e: # pragma: no cover
if i >= 99:
raise FileExistsError(
'cannot find name for semaphore') from e
except (FileExistsError, AttributeError, ImportError, OSError) as e:
mp = None
warnings.warn('%s. joblib will operate in serial mode' % (e,))
if mp is not None:
from multiprocessing.context import assert_spawning
else:
assert_spawning = None
The provided code snippet includes necessary dependencies for implementing the `cpu_count` function. Write a Python function `def cpu_count(only_physical_cores=False)` to solve the following problem:
Return the number of CPUs. This delegates to loky.cpu_count that takes into account additional constraints such as Linux CFS scheduler quotas (typically set by container runtimes such as docker) and CPU affinity (for instance using the taskset command on Linux). If only_physical_cores is True, do not take hyperthreading / SMT logical cores into account.
Here is the function:
def cpu_count(only_physical_cores=False):
"""Return the number of CPUs.
This delegates to loky.cpu_count that takes into account additional
constraints such as Linux CFS scheduler quotas (typically set by container
runtimes such as docker) and CPU affinity (for instance using the taskset
command on Linux).
If only_physical_cores is True, do not take hyperthreading / SMT logical
cores into account.
"""
if mp is None:
return 1
return loky.cpu_count(only_physical_cores=only_physical_cores) | Return the number of CPUs. This delegates to loky.cpu_count that takes into account additional constraints such as Linux CFS scheduler quotas (typically set by container runtimes such as docker) and CPU affinity (for instance using the taskset command on Linux). If only_physical_cores is True, do not take hyperthreading / SMT logical cores into account. |
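A quick usage sketch; the printed values depend on the host and on any container CPU quota, so the numbers in the comments are only indicative:
import joblib

# On a typical 4-core / 8-thread laptop this prints 8 then 4; inside a
# container with a CPU quota both calls reflect the quota instead.
print(joblib.cpu_count())
print(joblib.cpu_count(only_physical_cores=True))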
172,284 | from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from uuid import uuid4
from numbers import Integral
import warnings
import queue
from ._multiprocessing_helpers import mp
from .logger import Logger, short_format_time
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend,
LokyBackend)
from .externals.cloudpickle import dumps, loads
from ._utils import eval_expr
from ._parallel_backends import AutoBatchingMixin
from ._parallel_backends import ParallelBackendBase
def sqrt(__x: SupportsFloat) -> float: ...
The provided code snippet includes necessary dependencies for implementing the `_verbosity_filter` function. Write a Python function `def _verbosity_filter(index, verbose)` to solve the following problem:
Returns False for indices increasingly apart, the distance depending on the value of verbose. We use a lag increasing as the square of index
Here is the function:
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale)) | Returns False for indices increasingly apart, the distance depending on the value of verbose. We use a lag increasing as the square of index |
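A small sketch of the filtering behaviour; importing the private helper from `joblib.parallel` is an assumption about the module layout, not a public API:
from joblib.parallel import _verbosity_filter

# Indices for which a progress message would actually be emitted at verbose=5.
reported = [i for i in range(200) if not _verbosity_filter(i, verbose=5)]
print(reported)  # the gaps between consecutive reported indices keep growing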
172,285 | from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from uuid import uuid4
from numbers import Integral
import warnings
import queue
from ._multiprocessing_helpers import mp
from .logger import Logger, short_format_time
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend,
LokyBackend)
from .externals.cloudpickle import dumps, loads
from ._utils import eval_expr
from ._parallel_backends import AutoBatchingMixin
from ._parallel_backends import ParallelBackendBase
The provided code snippet includes necessary dependencies for implementing the `delayed` function. Write a Python function `def delayed(function)` to solve the following problem:
Decorator used to capture the arguments of a function.
Here is the function:
def delayed(function):
"""Decorator used to capture the arguments of a function."""
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function | Decorator used to capture the arguments of a function. |
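A minimal sketch of what the decorator returns and how `Parallel` consumes it:
from joblib import Parallel, delayed

def add(a, b=0):
    return a + b

# delayed() only records the call; nothing runs yet.
task = delayed(add)(2, b=3)
print(task)  # (<function add at ...>, (2,), {'b': 3})

# Parallel expects an iterable of such (function, args, kwargs) triples.
print(Parallel(n_jobs=2)(delayed(add)(i, b=1) for i in range(4)))  # [1, 2, 3, 4]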
172,286 | from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from uuid import uuid4
from numbers import Integral
import warnings
import queue
from ._multiprocessing_helpers import mp
from .logger import Logger, short_format_time
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend,
LokyBackend)
from .externals.cloudpickle import dumps, loads
from ._utils import eval_expr
from ._parallel_backends import AutoBatchingMixin
from ._parallel_backends import ParallelBackendBase
def get_active_backend(prefer=None, require=None, verbose=0):
"""Return the active default backend"""
if prefer not in VALID_BACKEND_HINTS:
raise ValueError("prefer=%r is not a valid backend hint, "
"expected one of %r" % (prefer, VALID_BACKEND_HINTS))
if require not in VALID_BACKEND_CONSTRAINTS:
raise ValueError("require=%r is not a valid backend constraint, "
"expected one of %r"
% (require, VALID_BACKEND_CONSTRAINTS))
if prefer == 'processes' and require == 'sharedmem':
raise ValueError("prefer == 'processes' and require == 'sharedmem'"
" are inconsistent settings")
backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
if backend_and_jobs is not None:
# Try to use the backend set by the user with the context manager.
backend, n_jobs = backend_and_jobs
nesting_level = backend.nesting_level
supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
if require == 'sharedmem' and not supports_sharedmem:
# This backend does not match the shared memory constraint:
# fallback to the default thead-based backend.
sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND](
nesting_level=nesting_level)
if verbose >= 10:
print("Using %s as joblib.Parallel backend instead of %s "
"as the latter does not provide shared memory semantics."
% (sharedmem_backend.__class__.__name__,
backend.__class__.__name__))
return sharedmem_backend, DEFAULT_N_JOBS
else:
return backend_and_jobs
# We are outside of the scope of any parallel_backend context manager,
# create the default backend instance now.
backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0)
supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
uses_threads = getattr(backend, 'uses_threads', False)
if ((require == 'sharedmem' and not supports_sharedmem) or
(prefer == 'threads' and not uses_threads)):
# Make sure the selected default backend match the soft hints and
# hard constraints:
backend = BACKENDS[DEFAULT_THREAD_BACKEND](nesting_level=0)
return backend, DEFAULT_N_JOBS
The provided code snippet includes necessary dependencies for implementing the `effective_n_jobs` function. Write a Python function `def effective_n_jobs(n_jobs=-1)` to solve the following problem:
Determine the number of jobs that can actually run in parallel n_jobs is the number of workers requested by the callers. Passing n_jobs=-1 means requesting all available workers for instance matching the number of CPU cores on the worker host(s). This method should return a guesstimate of the number of workers that can actually perform work concurrently with the currently enabled default backend. The primary use case is to make it possible for the caller to know in how many chunks to slice the work. In general working on larger data chunks is more efficient (less scheduling overhead and better use of CPU cache prefetching heuristics) as long as all the workers have enough work to do. Warning: this function is experimental and subject to change in a future version of joblib. .. versionadded:: 0.10
Here is the function:
def effective_n_jobs(n_jobs=-1):
"""Determine the number of jobs that can actually run in parallel
n_jobs is the number of workers requested by the callers. Passing n_jobs=-1
means requesting all available workers for instance matching the number of
CPU cores on the worker host(s).
This method should return a guesstimate of the number of workers that can
actually perform work concurrently with the currently enabled default
backend. The primary use case is to make it possible for the caller to know
in how many chunks to slice the work.
In general working on larger data chunks is more efficient (less scheduling
overhead and better use of CPU cache prefetching heuristics) as long as all
the workers have enough work to do.
Warning: this function is experimental and subject to change in a future
version of joblib.
.. versionadded:: 0.10
"""
backend, backend_n_jobs = get_active_backend()
if n_jobs is None:
n_jobs = backend_n_jobs
return backend.effective_n_jobs(n_jobs=n_jobs) | Determine the number of jobs that can actually run in parallel n_jobs is the number of workers requested by the callers. Passing n_jobs=-1 means requesting all available workers for instance matching the number of CPU cores on the worker host(s). This method should return a guesstimate of the number of workers that can actually perform work concurrently with the currently enabled default backend. The primary use case is to make it possible for the caller to know in how many chunks to slice the work. In general working on larger data chunks is more efficient (less scheduling overhead and better use of CPU cache prefetching heuristics) as long as all the workers have enough work to do. Warning: this function is experimental and subject to change in a future version of joblib. .. versionadded:: 0.10 |
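A short sketch contrasting requested and effective worker counts; the exact numbers depend on the machine:
import joblib
from joblib import parallel_backend

print(joblib.effective_n_jobs(-1))   # all workers of the default (loky) backend
print(joblib.effective_n_jobs(2))    # 2

with parallel_backend("threading", n_jobs=4):
    # None defers to the n_jobs configured on the active backend, here 4
    print(joblib.effective_n_jobs(None))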
172,287 | import os
import sys
import time
import errno
import shutil
from multiprocessing import util
import os
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
The provided code snippet includes necessary dependencies for implementing the `disk_used` function. Write a Python function `def disk_used(path)` to solve the following problem:
Return the disk usage in a directory.
Here is the function:
def disk_used(path):
""" Return the disk usage in a directory."""
size = 0
for file in os.listdir(path) + ['.']:
stat = os.stat(os.path.join(path, file))
if hasattr(stat, 'st_blocks'):
size += stat.st_blocks * 512
else:
# on some platform st_blocks is not available (e.g., Windows)
# approximate by rounding to next multiple of 512
size += (stat.st_size // 512 + 1) * 512
    # We need to convert to int to avoid having longs on some systems (we
    # don't want longs to avoid problems with SQLite)
return int(size / 1024.) | Return the disk usage in a directory. |
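A throwaway sketch exercising the helper above on a temporary directory; note that the return value is in kilobytes:
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, "payload.bin"), "wb") as f:
        f.write(b"\0" * 4096)   # 4 KiB of data
    # Reports block-rounded usage of the directory, expressed in kilobytes.
    print(disk_used(tmp))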
172,288 | import os
import sys
import time
import errno
import shutil
from multiprocessing import util
The provided code snippet includes necessary dependencies for implementing the `memstr_to_bytes` function. Write a Python function `def memstr_to_bytes(text)` to solve the following problem:
Convert a memory text to its value in bytes.
Here is the function:
def memstr_to_bytes(text):
""" Convert a memory text to its value in bytes.
"""
kilo = 1024
units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3)
try:
size = int(units[text[-1]] * float(text[:-1]))
except (KeyError, ValueError) as e:
raise ValueError(
"Invalid literal for size give: %s (type %s) should be "
"alike '10G', '500M', '50K'." % (text, type(text))) from e
return size | Convert a memory text to its value in bytes. |
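A few worked values for the conversion above; for instance '500M' resolves to 500 * 1024**2 = 524288000 bytes:
print(memstr_to_bytes("50K"))   # 50 * 1024       -> 51200
print(memstr_to_bytes("500M"))  # 500 * 1024 ** 2 -> 524288000
print(memstr_to_bytes("10G"))   # 10 * 1024 ** 3  -> 10737418240
try:
    memstr_to_bytes("10 gigabytes")
except ValueError as exc:
    print(exc)                  # malformed strings raise ValueError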
172,289 | import os
import sys
import time
import errno
import shutil
from multiprocessing import util
import os
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
The provided code snippet includes necessary dependencies for implementing the `mkdirp` function. Write a Python function `def mkdirp(d)` to solve the following problem:
Ensure directory d exists (like mkdir -p on Unix) No guarantee that the directory is writable.
Here is the function:
def mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise | Ensure directory d exists (like mkdir -p on Unix) No guarantee that the directory is writable. |
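A quick sketch of the idempotent behaviour the helper above provides:
import os
import tempfile

base = tempfile.mkdtemp()
target = os.path.join(base, "a", "b", "c")
mkdirp(target)                 # creates the whole chain of directories
mkdirp(target)                 # second call is a silent no-op, not an error
print(os.path.isdir(target))   # True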
172,290 | import os
import sys
import time
import errno
import shutil
from multiprocessing import util
def delete_folder(folder_path, onerror=None, allow_non_empty=True):
"""Utility function to cleanup a temporary folder if it still exists."""
if os.path.isdir(folder_path):
if onerror is not None:
shutil.rmtree(folder_path, False, onerror)
else:
# allow the rmtree to fail once, wait and re-try.
# if the error is raised again, fail
err_count = 0
while True:
files = os.listdir(folder_path)
try:
if len(files) == 0 or allow_non_empty:
shutil.rmtree(
folder_path, ignore_errors=False, onerror=None
)
util.debug(
"Successfully deleted {}".format(folder_path))
break
else:
raise OSError(
"Expected empty folder {} but got {} "
"files.".format(folder_path, len(files))
)
except (OSError, WindowsError):
err_count += 1
if err_count > RM_SUBDIRS_N_RETRY:
# the folder cannot be deleted right now. It maybe
# because some temporary files have not been deleted
# yet.
raise
time.sleep(RM_SUBDIRS_RETRY_TIME)
import os
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
The provided code snippet includes necessary dependencies for implementing the `rm_subdirs` function. Write a Python function `def rm_subdirs(path, onerror=None)` to solve the following problem:
Remove all subdirectories in this path. The directory indicated by `path` is left in place, and its subdirectories are erased. If onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If onerror is None, an exception is raised.
Here is the function:
def rm_subdirs(path, onerror=None):
"""Remove all subdirectories in this path.
The directory indicated by `path` is left in place, and its subdirectories
are erased.
If onerror is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If onerror is None,
an exception is raised.
"""
# NOTE this code is adapted from the one in shutil.rmtree, and is
# just as fast
names = []
try:
names = os.listdir(path)
except os.error:
if onerror is not None:
onerror(os.listdir, path, sys.exc_info())
else:
raise
for name in names:
fullname = os.path.join(path, name)
delete_folder(fullname, onerror=onerror) | Remove all subdirectories in this path. The directory indicated by `path` is left in place, and its subdirectories are erased. If onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If onerror is None, an exception is raised. |
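A sketch of the intended behaviour, assuming the retry constants used by `delete_folder` (RM_SUBDIRS_N_RETRY, RM_SUBDIRS_RETRY_TIME) are defined as in the full joblib.disk module:
import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "sub1", "nested"))
os.makedirs(os.path.join(root, "sub2"))

rm_subdirs(root)

print(os.path.isdir(root))   # True - the top-level directory is kept
print(os.listdir(root))      # []   - all of its subdirectories were removed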
172,291 | from sys import version_info
from warnings import warn
from joblib import _deprecated_format_stack
_deprecated_names = [
name for name in dir(_deprecated_format_stack) if
not name.startswith("__") # special attributes
]
def __getattr__(name):
if not name.startswith("__") and name in _deprecated_names:
warn("{} is deprecated and will be removed from joblib "
"in 0.16".format(name), DeprecationWarning)
return getattr(_deprecated_format_stack, name)
raise AttributeError | null |
172,292 | import inspect
import keyword
import linecache
import os
import pydoc
import sys
import time
import tokenize
import traceback
def _fixed_getframes(etb, context=1, tb_offset=0):
LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
# If the error is at the console, don't build any context, since it would
# otherwise produce 5 blank lines printed out (there is no file at the
# console)
rec_check = records[tb_offset:]
try:
rname = rec_check[0][1]
if rname == '<ipython console>' or rname.endswith('<string>'):
return rec_check
except IndexError:
pass
aux = traceback.extract_tb(etb)
assert len(records) == len(aux)
for i, (file, lnum, _, _) in enumerate(aux):
maybe_start = lnum - 1 - context // 2
start = max(maybe_start, 0)
end = start + context
lines = linecache.getlines(file)[start:end]
buf = list(records[i])
buf[LNUM_POS] = lnum
buf[INDEX_POS] = lnum - 1 - start
buf[LINES_POS] = lines
records[i] = tuple(buf)
return records[tb_offset:]
def format_records(records): # , print_globals=False):
# Loop over all records printing context and info
frames = []
abspath = os.path.abspath
for frame, file, lnum, func, lines, index in records:
try:
file = file and abspath(file) or '?'
except OSError:
# if file is '<console>' or something not in the filesystem,
# the abspath call will throw an OSError. Just ignore it and
# keep the original file string.
pass
if file.endswith('.pyc'):
file = file[:-4] + '.py'
link = file
args, varargs, varkw, locals = inspect.getargvalues(frame)
if func == '?':
call = ''
else:
# Decide whether to include variable details or not
try:
call = 'in %s%s' % (func, inspect.formatargvalues(args,
varargs, varkw, locals,
formatvalue=eq_repr))
except KeyError:
# Very odd crash from inspect.formatargvalues(). The
# scenario under which it appeared was a call to
# view(array,scale) in NumTut.view.view(), where scale had
# been defined as a scalar (it should be a tuple). Somehow
# inspect messes up resolving the argument list of view()
# and barfs out. At some point I should dig into this one
# and file a bug report about it.
print("\nJoblib's exception reporting continues...\n")
call = 'in %s(***failed resolving arguments***)' % func
# Initialize a list of names on the current line, which the
# tokenizer below will populate.
names = []
def tokeneater(token_type, token, start, end, line):
"""Stateful tokeneater which builds dotted names.
The list of names it appends to (from the enclosing scope) can
contain repeated composite names. This is unavoidable, since
there is no way to disambiguate partial dotted structures until
the full list is known. The caller is responsible for pruning
the final list of duplicates before using it."""
# build composite names
if token == '.':
try:
names[-1] += '.'
# store state so the next token is added for x.y.z names
tokeneater.name_cont = True
return
except IndexError:
pass
if token_type == tokenize.NAME and token not in keyword.kwlist:
if tokeneater.name_cont:
# Dotted names
names[-1] += token
tokeneater.name_cont = False
else:
# Regular new names. We append everything, the caller
# will be responsible for pruning the list later. It's
# very tricky to try to prune as we go, b/c composite
# names can fool us. The pruning at the end is easy
# to do (or the caller can print a list with repeated
                    # names if so desired).
names.append(token)
elif token_type == tokenize.NEWLINE:
raise IndexError
# we need to store a bit of state in the tokenizer to build
# dotted names
tokeneater.name_cont = False
def linereader(file=file, lnum=[lnum], getline=linecache.getline):
line = getline(file, lnum[0])
lnum[0] += 1
return line
# Build the list of names on this line of code where the exception
# occurred.
try:
# This builds the names list in-place by capturing it from the
# enclosing scope.
for token in tokenize.generate_tokens(linereader):
tokeneater(*token)
except (IndexError, UnicodeDecodeError, SyntaxError):
# signals exit of tokenizer
# SyntaxError can happen when trying to tokenize
# a compiled (e.g. .so or .pyd) extension
pass
except tokenize.TokenError as msg:
_m = ("An unexpected error occurred while tokenizing input file %s\n"
"The following traceback may be corrupted or invalid\n"
"The error message is: %s\n" % (file, msg))
print(_m)
# prune names list of duplicates, but keep the right order
unique_names = uniq_stable(names)
# Start loop over vars
lvals = []
for name_full in unique_names:
name_base = name_full.split('.', 1)[0]
if name_base in frame.f_code.co_varnames:
if name_base in locals.keys():
try:
value = safe_repr(eval(name_full, locals))
except:
value = "undefined"
else:
value = "undefined"
name = name_full
lvals.append('%s = %s' % (name, value))
#elif print_globals:
# if frame.f_globals.has_key(name_base):
# try:
# value = safe_repr(eval(name_full,frame.f_globals))
# except:
# value = "undefined"
# else:
# value = "undefined"
# name = 'global %s' % name_full
# lvals.append('%s = %s' % (name,value))
if lvals:
lvals = '%s%s' % (INDENT, ('\n%s' % INDENT).join(lvals))
else:
lvals = ''
level = '%s\n%s %s\n' % (75 * '.', link, call)
if index is None:
frames.append(level)
else:
frames.append('%s%s' % (level, ''.join(
_format_traceback_lines(lnum, index, lines, lvals))))
return frames
import os
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
The provided code snippet includes necessary dependencies for implementing the `format_exc` function. Write a Python function `def format_exc(etype, evalue, etb, context=5, tb_offset=0)` to solve the following problem:
Return a nice text document describing the traceback. Parameters ----------- etype, evalue, etb: as returned by sys.exc_info context: number of lines of the source file to plot tb_offset: the number of stack frame not to use (0 = use all)
Here is the function:
def format_exc(etype, evalue, etb, context=5, tb_offset=0):
""" Return a nice text document describing the traceback.
Parameters
-----------
etype, evalue, etb: as returned by sys.exc_info
context: number of lines of the source file to plot
tb_offset: the number of stack frame not to use (0 = use all)
"""
# some locals
try:
etype = etype.__name__
except AttributeError:
pass
# Header with the exception type, python version, and date
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
pid = 'PID: %i' % os.getpid()
head = '%s%s%s\n%s%s%s' % (
etype, ' ' * (75 - len(str(etype)) - len(date)),
date, pid, ' ' * (75 - len(str(pid)) - len(pyver)),
pyver)
# Drop topmost frames if requested
records = _fixed_getframes(etb, context, tb_offset)
# Get (safely) a string form of the exception info
try:
etype_str, evalue_str = map(str, (etype, evalue))
except BaseException:
# User exception is improperly defined.
etype, evalue = str, sys.exc_info()[:2]
etype_str, evalue_str = map(str, (etype, evalue))
# ... and format it
exception = ['%s: %s' % (etype_str, evalue_str)]
frames = format_records(records)
return '%s\n%s\n%s' % (head, '\n'.join(frames), ''.join(exception[0])) | Return a nice text document describing the traceback. Parameters ----------- etype, evalue, etb: as returned by sys.exc_info context: number of lines of the source file to plot tb_offset: the number of stack frame not to use (0 = use all) |
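A hedged usage sketch; it assumes the remaining joblib.format_stack helpers referenced above (eq_repr, uniq_stable, safe_repr, INDENT, _format_traceback_lines, fix_frame_records_filenames) are available as in the full module:
import sys

def divide(a, b):
    return a / b   # raises ZeroDivisionError when b == 0

try:
    divide(1, 0)
except Exception:
    etype, evalue, etb = sys.exc_info()
    # Renders a multi-line report: header, per-frame source context and locals.
    print(format_exc(etype, evalue, etb, context=3))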
172,293 | import inspect
import keyword
import linecache
import os
import pydoc
import sys
import time
import tokenize
import traceback
def format_records(records): # , print_globals=False):
# Loop over all records printing context and info
frames = []
abspath = os.path.abspath
for frame, file, lnum, func, lines, index in records:
try:
file = file and abspath(file) or '?'
except OSError:
# if file is '<console>' or something not in the filesystem,
# the abspath call will throw an OSError. Just ignore it and
# keep the original file string.
pass
if file.endswith('.pyc'):
file = file[:-4] + '.py'
link = file
args, varargs, varkw, locals = inspect.getargvalues(frame)
if func == '?':
call = ''
else:
# Decide whether to include variable details or not
try:
call = 'in %s%s' % (func, inspect.formatargvalues(args,
varargs, varkw, locals,
formatvalue=eq_repr))
except KeyError:
# Very odd crash from inspect.formatargvalues(). The
# scenario under which it appeared was a call to
# view(array,scale) in NumTut.view.view(), where scale had
# been defined as a scalar (it should be a tuple). Somehow
# inspect messes up resolving the argument list of view()
# and barfs out. At some point I should dig into this one
# and file a bug report about it.
print("\nJoblib's exception reporting continues...\n")
call = 'in %s(***failed resolving arguments***)' % func
# Initialize a list of names on the current line, which the
# tokenizer below will populate.
names = []
def tokeneater(token_type, token, start, end, line):
"""Stateful tokeneater which builds dotted names.
The list of names it appends to (from the enclosing scope) can
contain repeated composite names. This is unavoidable, since
there is no way to disambiguate partial dotted structures until
the full list is known. The caller is responsible for pruning
the final list of duplicates before using it."""
# build composite names
if token == '.':
try:
names[-1] += '.'
# store state so the next token is added for x.y.z names
tokeneater.name_cont = True
return
except IndexError:
pass
if token_type == tokenize.NAME and token not in keyword.kwlist:
if tokeneater.name_cont:
# Dotted names
names[-1] += token
tokeneater.name_cont = False
else:
# Regular new names. We append everything, the caller
# will be responsible for pruning the list later. It's
# very tricky to try to prune as we go, b/c composite
# names can fool us. The pruning at the end is easy
# to do (or the caller can print a list with repeated
# names if so desired.
names.append(token)
elif token_type == tokenize.NEWLINE:
raise IndexError
# we need to store a bit of state in the tokenizer to build
# dotted names
tokeneater.name_cont = False
def linereader(file=file, lnum=[lnum], getline=linecache.getline):
line = getline(file, lnum[0])
lnum[0] += 1
return line
# Build the list of names on this line of code where the exception
# occurred.
try:
# This builds the names list in-place by capturing it from the
# enclosing scope.
for token in tokenize.generate_tokens(linereader):
tokeneater(*token)
except (IndexError, UnicodeDecodeError, SyntaxError):
# signals exit of tokenizer
# SyntaxError can happen when trying to tokenize
# a compiled (e.g. .so or .pyd) extension
pass
except tokenize.TokenError as msg:
_m = ("An unexpected error occurred while tokenizing input file %s\n"
"The following traceback may be corrupted or invalid\n"
"The error message is: %s\n" % (file, msg))
print(_m)
# prune names list of duplicates, but keep the right order
unique_names = uniq_stable(names)
# Start loop over vars
lvals = []
for name_full in unique_names:
name_base = name_full.split('.', 1)[0]
if name_base in frame.f_code.co_varnames:
if name_base in locals.keys():
try:
value = safe_repr(eval(name_full, locals))
except:
value = "undefined"
else:
value = "undefined"
name = name_full
lvals.append('%s = %s' % (name, value))
#elif print_globals:
# if frame.f_globals.has_key(name_base):
# try:
# value = safe_repr(eval(name_full,frame.f_globals))
# except:
# value = "undefined"
# else:
# value = "undefined"
# name = 'global %s' % name_full
# lvals.append('%s = %s' % (name,value))
if lvals:
lvals = '%s%s' % (INDENT, ('\n%s' % INDENT).join(lvals))
else:
lvals = ''
level = '%s\n%s %s\n' % (75 * '.', link, call)
if index is None:
frames.append(level)
else:
frames.append('%s%s' % (level, ''.join(
_format_traceback_lines(lnum, index, lines, lvals))))
return frames
import os
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
def format_outer_frames(context=5, stack_start=None, stack_end=None,
ignore_ipython=True):
LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
records = inspect.getouterframes(inspect.currentframe())
output = list()
for i, (frame, filename, line_no, func_name, lines, index) \
in enumerate(records):
# Look inside the frame's globals dictionary for __file__, which should
# be better.
better_fn = frame.f_globals.get('__file__', None)
if isinstance(better_fn, str):
# Check the type just in case someone did something weird with
# __file__. It might also be None if the error occurred during
# import.
filename = better_fn
if filename.endswith('.pyc'):
filename = filename[:-4] + '.py'
if ignore_ipython:
# Hack to avoid printing the internals of IPython
if (os.path.basename(filename) in ('iplib.py', 'py3compat.py')
and func_name in ('execfile', 'safe_execfile', 'runcode')):
break
maybe_start = line_no - 1 - context // 2
start = max(maybe_start, 0)
end = start + context
lines = linecache.getlines(filename)[start:end]
buf = list(records[i])
buf[LNUM_POS] = line_no
buf[INDEX_POS] = line_no - 1 - start
buf[LINES_POS] = lines
output.append(tuple(buf))
return '\n'.join(format_records(output[stack_end:stack_start:-1])) | null |
172,294 | from sys import version_info
from warnings import warn
from . import _deprecated_my_exceptions
_deprecated_names = [
name for name in dir(_deprecated_my_exceptions) if
not name.startswith("__")
]
def __getattr__(name):
if not name.startswith("__") and name in _deprecated_names:
warn("{} is deprecated and will be removed from joblib "
"in 0.16".format(name), DeprecationWarning)
return getattr(_deprecated_my_exceptions, name)
raise AttributeError | null |
172,295 | from ._multiprocessing_helpers import mp
def my_wrap_non_picklable_objects(obj, keep_wrapper=True):
return obj | null |
172,296 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def f():
a = 1
def g():
return a
return g | null |
172,297 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
if sys.version_info >= (3, 8):
from types import CellType
else:
CellType = type(f().__closure__[0])
_PICKLE_BY_VALUE_MODULES = set()
if sys.version_info[:2] < (3, 7):
_cell_set_template_code = _make_cell_set_template_code()
if sys.version_info[:2] < (3, 7): # pragma: no branch
else:
_is_parametrized_type_hint = None
_create_parametrized_type_hint = None
The provided code snippet includes necessary dependencies for implementing the `register_pickle_by_value` function. Write a Python function `def register_pickle_by_value(module)` to solve the following problem:
Register a module to make its functions and classes picklable by value. By default, functions and classes that are attributes of an importable module are to be pickled by reference, that is relying on re-importing the attribute from the module at load time. If `register_pickle_by_value(module)` is called, all its functions and classes are subsequently to be pickled by value, meaning that they can be loaded in Python processes where the module is not importable. This is especially useful when developing a module in a distributed execution environment: restarting the client Python process with the new source code is enough: there is no need to re-install the new version of the module on all the worker nodes nor to restart the workers. Note: this feature is considered experimental. See the cloudpickle README.md file for more details and limitations.
Here is the function:
def register_pickle_by_value(module):
"""Register a module to make it functions and classes picklable by value.
By default, functions and classes that are attributes of an importable
module are to be pickled by reference, that is relying on re-importing
the attribute from the module at load time.
If `register_pickle_by_value(module)` is called, all its functions and
classes are subsequently to be pickled by value, meaning that they can
be loaded in Python processes where the module is not importable.
This is especially useful when developing a module in a distributed
execution environment: restarting the client Python process with the new
source code is enough: there is no need to re-install the new version
of the module on all the worker nodes nor to restart the workers.
Note: this feature is considered experimental. See the cloudpickle
README.md file for more details and limitations.
"""
if not isinstance(module, types.ModuleType):
raise ValueError(
f"Input should be a module object, got {str(module)} instead"
)
# In the future, cloudpickle may need a way to access any module registered
# for pickling by value in order to introspect relative imports inside
# functions pickled by value. (see
# https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
# This access can be ensured by checking that module is present in
# sys.modules at registering time and assuming that it will still be in
# there when accessed during pickling. Another alternative would be to
# store a weakref to the module. Even though cloudpickle does not implement
# this introspection yet, in order to avoid a possible breaking change
# later, we still enforce the presence of module inside sys.modules.
if module.__name__ not in sys.modules:
raise ValueError(
f"{module} was not imported correctly, have you used an "
f"`import` statement to access it?"
)
    _PICKLE_BY_VALUE_MODULES.add(module.__name__) | Register a module to make its functions and classes picklable by value. By default, functions and classes that are attributes of an importable module are to be pickled by reference, that is relying on re-importing the attribute from the module at load time. If `register_pickle_by_value(module)` is called, all its functions and classes are subsequently to be pickled by value, meaning that they can be loaded in Python processes where the module is not importable. This is especially useful when developing a module in a distributed execution environment: restarting the client Python process with the new source code is enough: there is no need to re-install the new version of the module on all the worker nodes nor to restart the workers. Note: this feature is considered experimental. See the cloudpickle README.md file for more details and limitations. |
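A self-contained sketch of the registration round-trip, written against the standalone `cloudpickle` package version 2.0 or later (the vendored copy exposes the same names); the throwaway `mylib` module is fabricated in-process purely for illustration:
import sys
import types

import cloudpickle

# Build a throwaway module so the example does not depend on any real package.
mylib = types.ModuleType("mylib")
exec("def greet(name):\n    return 'hello ' + name", mylib.__dict__)
sys.modules["mylib"] = mylib

cloudpickle.register_pickle_by_value(mylib)          # greet is now pickled by value
payload = cloudpickle.dumps(mylib.greet)
print(cloudpickle.list_registry_pickle_by_value())   # {'mylib'}
cloudpickle.unregister_pickle_by_value(mylib)        # restore pickling by reference

print(cloudpickle.loads(payload)("world"))           # 'hello world'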
172,298 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
_PICKLE_BY_VALUE_MODULES = set()
The provided code snippet includes necessary dependencies for implementing the `unregister_pickle_by_value` function. Write a Python function `def unregister_pickle_by_value(module)` to solve the following problem:
Unregister that the input module should be pickled by value.
Here is the function:
def unregister_pickle_by_value(module):
"""Unregister that the input module should be pickled by value."""
if not isinstance(module, types.ModuleType):
raise ValueError(
f"Input should be a module object, got {str(module)} instead"
)
if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
raise ValueError(f"{module} is not registered for pickle by value")
else:
_PICKLE_BY_VALUE_MODULES.remove(module.__name__) | Unregister that the input module should be pickled by value. |
172,299 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
_PICKLE_BY_VALUE_MODULES = set()
def list_registry_pickle_by_value():
return _PICKLE_BY_VALUE_MODULES.copy() | null |
172,300 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _make_cell_set_template_code():
def _cell_set_factory(value):
lambda: cell
cell = value
co = _cell_set_factory.__code__
_cell_set_template_code = types.CodeType(
co.co_argcount,
co.co_kwonlyargcount, # Python 3 only argument
co.co_nlocals,
co.co_stacksize,
co.co_flags,
co.co_code,
co.co_consts,
co.co_names,
co.co_varnames,
co.co_filename,
co.co_name,
co.co_firstlineno,
co.co_lnotab,
co.co_cellvars, # co_freevars is initialized with co_cellvars
(), # co_cellvars is made empty
)
return _cell_set_template_code | null |
172,301 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _is_parametrized_type_hint(obj):
# This is very cheap but might generate false positives. So try to
    # narrow it down as much as possible.
type_module = getattr(type(obj), '__module__', None)
from_typing_extensions = type_module == 'typing_extensions'
from_typing = type_module == 'typing'
# general typing Constructs
is_typing = getattr(obj, '__origin__', None) is not None
# typing_extensions.Literal
is_literal = (
(getattr(obj, '__values__', None) is not None)
and from_typing_extensions
)
# typing_extensions.Final
is_final = (
(getattr(obj, '__type__', None) is not None)
and from_typing_extensions
)
# typing.ClassVar
is_classvar = (
(getattr(obj, '__type__', None) is not None) and from_typing
)
# typing.Union/Tuple for old Python 3.5
is_union = getattr(obj, '__union_params__', None) is not None
is_tuple = getattr(obj, '__tuple_params__', None) is not None
is_callable = (
getattr(obj, '__result__', None) is not None and
getattr(obj, '__args__', None) is not None
)
return any((is_typing, is_literal, is_final, is_classvar, is_union,
is_tuple, is_callable)) | null |
172,302 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _create_parametrized_type_hint(origin, args):
return origin[args] | null |
172,303 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
Union: _SpecialForm = ...
ClassVar: _SpecialForm = ...
class Callable(BaseTypingInstance):
def py__call__(self, arguments):
def py__get__(self, instance, class_value):
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
def py__simple_getitem__(self, index):
def py__iter__(self, contextualized_node=None):
def py__getitem__(self, index_value_set, contextualized_node):
def _get_wrapped_value(self):
def name(self):
def infer_type_vars(self, value_set):
class Generic(BaseTypingInstance):
Final: _SpecialForm = ...
Literal: _SpecialForm = ...
def parametrized_type_hint_getinitargs(obj):
    # The distorted type check semantic for typing constructs becomes:
# ``type(obj) is type(TypeHint)``, which means "obj is a
# parametrized TypeHint"
if type(obj) is type(Literal): # pragma: no branch
initargs = (Literal, obj.__values__)
elif type(obj) is type(Final): # pragma: no branch
initargs = (Final, obj.__type__)
elif type(obj) is type(ClassVar):
initargs = (ClassVar, obj.__type__)
elif type(obj) is type(Generic):
initargs = (obj.__origin__, obj.__args__)
elif type(obj) is type(Union):
initargs = (Union, obj.__args__)
elif type(obj) is type(Tuple):
initargs = (Tuple, obj.__args__)
elif type(obj) is type(Callable):
(*args, result) = obj.__args__
if len(args) == 1 and args[0] is Ellipsis:
args = Ellipsis
else:
args = list(args)
initargs = (Callable, (args, result))
else: # pragma: no cover
raise pickle.PicklingError(
f"Cloudpickle Error: Unknown type {type(obj)}"
)
return initargs | null |
172,304 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
if sys.version_info >= (3, 8):
from types import CellType
else:
CellType = type(f().__closure__[0])
if sys.version_info[:2] < (3, 7):
_cell_set_template_code = _make_cell_set_template_code()
if sys.version_info[:2] < (3, 7): # pragma: no branch
else:
_is_parametrized_type_hint = None
_create_parametrized_type_hint = None
The provided code snippet includes necessary dependencies for implementing the `is_tornado_coroutine` function. Write a Python function `def is_tornado_coroutine(func)` to solve the following problem:
Return whether *func* is a Tornado coroutine function. Running coroutines are not supported.
Here is the function:
def is_tornado_coroutine(func):
"""
Return whether *func* is a Tornado coroutine function.
Running coroutines are not supported.
"""
if 'tornado.gen' not in sys.modules:
return False
gen = sys.modules['tornado.gen']
if not hasattr(gen, "is_coroutine_function"):
# Tornado version is too old
return False
return gen.is_coroutine_function(func) | Return whether *func* is a Tornado coroutine function. Running coroutines are not supported. |
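A tiny sketch; without tornado imported the check short-circuits to False, so the positive case is only hinted at in comments:
def plain(x):
    return x

# 'tornado.gen' is not in sys.modules here, so this returns False immediately.
print(is_tornado_coroutine(plain))   # False

# With tornado installed one would instead expect:
#   from tornado import gen
#   @gen.coroutine
#   def coro():
#       yield
#   is_tornado_coroutine(coro)  -> True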
172,305 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _rebuild_tornado_coroutine(func):
from tornado import gen
return gen.coroutine(func) | null |
172,306 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _gen_ellipsis():
return Ellipsis | null |
172,307 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _gen_not_implemented():
return NotImplemented | null |
172,308 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
The provided code snippet includes necessary dependencies for implementing the `instance` function. Write a Python function `def instance(cls)` to solve the following problem:
Create a new instance of a class. Parameters ---------- cls : type The class to create an instance of. Returns ------- instance : cls A new instance of ``cls``.
Here is the function:
def instance(cls):
"""Create a new instance of a class.
Parameters
----------
cls : type
The class to create an instance of.
Returns
-------
instance : cls
A new instance of ``cls``.
"""
return cls() | Create a new instance of a class. Parameters ---------- cls : type The class to create an instance of. Returns ------- instance : cls A new instance of ``cls``. |
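A short sketch of the decorator-style use this helper enables, turning a class definition directly into a module-level singleton:
@instance
class _sentinel:
    """Singleton created at class-definition time."""
    def __repr__(self):
        return "<sentinel>"

print(type(_sentinel).__name__)  # '_sentinel' - the name is now bound to an instance
print(_sentinel)                 # <sentinel>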
172,309 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def cell_set(cell, value):
"""Set the value of a closure cell.
The point of this function is to set the cell_contents attribute of a cell
after its creation. This operation is necessary in case the cell contains a
reference to the function the cell belongs to, as when calling the
function's constructor
``f = types.FunctionType(code, globals, name, argdefs, closure)``,
closure will not be able to contain the yet-to-be-created f.
In Python3.7, cell_contents is writeable, so setting the contents of a cell
can be done simply using
>>> cell.cell_contents = value
In earlier Python3 versions, the cell_contents attribute of a cell is read
only, but this limitation can be worked around by leveraging the Python 3
``nonlocal`` keyword.
In Python2 however, this attribute is read only, and there is no
``nonlocal`` keyword. For this reason, we need to come up with more
complicated hacks to set this attribute.
The chosen approach is to create a function with a STORE_DEREF opcode,
which sets the content of a closure variable. Typically:
>>> def inner(value):
... lambda: cell # the lambda makes cell a closure
... cell = value # cell is a closure, so this triggers a STORE_DEREF
    (Note that in Python2, a STORE_DEREF can never be triggered from an inner
function. The function g for example here
>>> def f(var):
... def g():
... var += 1
... return g
    will not modify the closure variable ``var`` in place, but instead try to
load a local variable var and increment it. As g does not assign the local
variable ``var`` any initial value, calling f(1)() will fail at runtime.)
Our objective is to set the value of a given cell ``cell``. So we need to
somewhat reference our ``cell`` object into the ``inner`` function so that
this object (and not the smoke cell of the lambda function) gets affected
by the STORE_DEREF operation.
In inner, ``cell`` is referenced as a cell variable (an enclosing variable
that is referenced by the inner function). If we create a new function
cell_set with the exact same code as ``inner``, but with ``cell`` marked as
a free variable instead, the STORE_DEREF will be applied on its closure -
``cell``, which we can specify explicitly during construction! The new
cell_set variable thus actually sets the contents of a specified cell!
Note: we do not make use of the ``nonlocal`` keyword to set the contents of
a cell in early python3 versions to limit possible syntax errors in case
test and checker libraries decide to parse the whole file.
"""
if sys.version_info[:2] >= (3, 7): # pragma: no branch
cell.cell_contents = value
else:
_cell_set = types.FunctionType(
_cell_set_template_code, {}, '_cell_set', (), (cell,),)
_cell_set(value)
class _empty_cell_value:
"""sentinel for empty closures
"""
def __reduce__(cls):
return cls.__name__
The provided code snippet includes necessary dependencies for implementing the `_fill_function` function. Write a Python function `def _fill_function(*args)` to solve the following problem:
Fills in the rest of function data into the skeleton function object. The skeleton itself is created by _make_skel_func().
Here is the function:
def _fill_function(*args):
"""Fills in the rest of function data into the skeleton function object
The skeleton itself is create by _make_skel_func().
"""
if len(args) == 2:
func = args[0]
state = args[1]
elif len(args) == 5:
# Backwards compat for cloudpickle v0.4.0, after which the `module`
# argument was introduced
func = args[0]
keys = ['globals', 'defaults', 'dict', 'closure_values']
state = dict(zip(keys, args[1:]))
elif len(args) == 6:
# Backwards compat for cloudpickle v0.4.1, after which the function
# state was passed as a dict to the _fill_function it-self.
func = args[0]
keys = ['globals', 'defaults', 'dict', 'module', 'closure_values']
state = dict(zip(keys, args[1:]))
else:
raise ValueError(f'Unexpected _fill_value arguments: {args!r}')
# - At pickling time, any dynamic global variable used by func is
# serialized by value (in state['globals']).
# - At unpickling time, func's __globals__ attribute is initialized by
# first retrieving an empty isolated namespace that will be shared
# with other functions pickled from the same original module
# by the same CloudPickler instance and then updated with the
# content of state['globals'] to populate the shared isolated
# namespace with all the global variables that are specifically
# referenced for this function.
func.__globals__.update(state['globals'])
func.__defaults__ = state['defaults']
func.__dict__ = state['dict']
if 'annotations' in state:
func.__annotations__ = state['annotations']
if 'doc' in state:
func.__doc__ = state['doc']
if 'name' in state:
func.__name__ = state['name']
if 'module' in state:
func.__module__ = state['module']
if 'qualname' in state:
func.__qualname__ = state['qualname']
if 'kwdefaults' in state:
func.__kwdefaults__ = state['kwdefaults']
# _cloudpickle_subimports is a set of submodules that must be loaded for
# the pickled function to work correctly at unpickling time. Now that these
# submodules are depickled (hence imported), they can be removed from the
# object's state (the object state only served as a reference holder to
# these submodules)
if '_cloudpickle_submodules' in state:
state.pop('_cloudpickle_submodules')
cells = func.__closure__
if cells is not None:
for cell, value in zip(cells, state['closure_values']):
if value is not _empty_cell_value:
cell_set(cell, value)
    return func | Fills in the rest of function data into the skeleton function object. The skeleton itself is created by _make_skel_func(). |
172,310 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _make_function(code, globals, name, argdefs, closure):
# Setting __builtins__ in globals is needed for nogil CPython.
globals["__builtins__"] = __builtins__
return types.FunctionType(code, globals, name, argdefs, closure) | null |
172,311 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _make_empty_cell():
if False:
# trick the compiler into creating an empty cell in our lambda
cell = None
raise AssertionError('this route should not be executed')
return (lambda: cell).__closure__[0]
The provided code snippet includes necessary dependencies for implementing the `_make_skel_func` function. Write a Python function `def _make_skel_func(code, cell_count, base_globals=None)` to solve the following problem:
Creates a skeleton function object that contains just the provided code and the correct number of cells in func_closure. All other func attributes (e.g. func_globals) are empty.
Here is the function:
def _make_skel_func(code, cell_count, base_globals=None):
""" Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty.
"""
# This function is deprecated and should be removed in cloudpickle 1.7
warnings.warn(
"A pickle file created using an old (<=1.4.1) version of cloudpickle "
"is currently being loaded. This is not supported by cloudpickle and "
"will break in cloudpickle 1.7", category=UserWarning
)
# This is backward-compatibility code: for cloudpickle versions between
# 0.5.4 and 0.7, base_globals could be a string or None. base_globals
# should now always be a dictionary.
if base_globals is None or isinstance(base_globals, str):
base_globals = {}
base_globals['__builtins__'] = __builtins__
closure = (
tuple(_make_empty_cell() for _ in range(cell_count))
if cell_count >= 0 else
None
)
return types.FunctionType(code, base_globals, None, None, closure) | Creates a skeleton function object that contains just the provided code and the correct number of cells in func_closure. All other func attributes (e.g. func_globals) are empty. |
172,312 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
The provided code snippet includes necessary dependencies for implementing the `_rehydrate_skeleton_class` function. Write a Python function `def _rehydrate_skeleton_class(skeleton_class, class_dict)` to solve the following problem:
Put attributes from `class_dict` back on `skeleton_class`. See CloudPickler.save_dynamic_class for more info.
Here is the function:
def _rehydrate_skeleton_class(skeleton_class, class_dict):
"""Put attributes from `class_dict` back on `skeleton_class`.
See CloudPickler.save_dynamic_class for more info.
"""
registry = None
for attrname, attr in class_dict.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(skeleton_class, attrname, attr)
if registry is not None:
for subclass in registry:
skeleton_class.register(subclass)
return skeleton_class | Put attributes from `class_dict` back on `skeleton_class`. See CloudPickler.save_dynamic_class for more info. |
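A minimal sketch with made-up attribute names: re-attaching a class dict onto a bare skeleton class with setattr, which is the core of _rehydrate_skeleton_class (the _abc_impl branch is skipped here).
```
Skeleton = type("Point", (), {})
class_dict = {
    "dims": 2,
    "norm": lambda self: (self.x ** 2 + self.y ** 2) ** 0.5,
}
for attrname, attr in class_dict.items():
    setattr(Skeleton, attrname, attr)

p = Skeleton()
p.x, p.y = 3, 4
assert Skeleton.dims == 2 and p.norm() == 5.0
```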
172,313 | import builtins
import dis
import opcode
import platform
import sys
import types
import weakref
import uuid
import threading
import typing
import warnings
from .compat import pickle
from collections import OrderedDict
from typing import ClassVar, Generic, Union, Tuple, Callable
from pickle import _getattribute
from importlib._bootstrap import _find_spec
def _is_registered_pickle_by_value(module):
module_name = module.__name__
if module_name in _PICKLE_BY_VALUE_MODULES:
return True
while True:
parent_name = module_name.rsplit(".", 1)[0]
if parent_name == module_name:
break
if parent_name in _PICKLE_BY_VALUE_MODULES:
return True
module_name = parent_name
return False
def _lookup_module_and_qualname(obj, name=None):
if name is None:
name = getattr(obj, '__qualname__', None)
if name is None: # pragma: no cover
# This used to be needed for Python 2.7 support but is probably not
# needed anymore. However we keep the __name__ introspection in case
# users of cloudpickle rely on this old behavior for unknown reasons.
name = getattr(obj, '__name__', None)
module_name = _whichmodule(obj, name)
if module_name is None:
# In this case, obj.__module__ is None AND obj was not found in any
# imported module. obj is thus treated as dynamic.
return None
if module_name == "__main__":
return None
# Note: if module_name is in sys.modules, the corresponding module is
# assumed importable at unpickling time. See #357
module = sys.modules.get(module_name, None)
if module is None:
# The main reason why obj's module would not be imported is that this
# module has been dynamically created, using for example
# types.ModuleType. The other possibility is that module was removed
# from sys.modules after obj was created/imported. But this case is not
# supported, as the standard pickle does not support it either.
return None
try:
obj2, parent = _getattribute(module, name)
except AttributeError:
# obj was not found inside the module it points to
return None
if obj2 is not obj:
return None
return module, name
def _make_typevar(name, bound, constraints, covariant, contravariant,
class_tracker_id):
tv = typing.TypeVar(
name, *constraints, bound=bound,
covariant=covariant, contravariant=contravariant
)
if class_tracker_id is not None:
return _lookup_class_or_track(class_tracker_id, tv)
else: # pragma: nocover
# Only for Python 3.5.3 compat.
return tv
def _decompose_typevar(obj):
return (
obj.__name__, obj.__bound__, obj.__constraints__,
obj.__covariant__, obj.__contravariant__,
_get_or_create_tracker_id(obj),
)
def _typevar_reduce(obj):
# TypeVar instances require the module information hence why we
# are not using the _should_pickle_by_reference directly
module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)
if module_and_name is None:
return (_make_typevar, _decompose_typevar(obj))
elif _is_registered_pickle_by_value(module_and_name[0]):
return (_make_typevar, _decompose_typevar(obj))
return (getattr, module_and_name) | null |
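An illustrative round trip (the class-tracker id is deliberately left out) showing the decompose/rebuild split that _decompose_typevar and _make_typevar implement for TypeVar instances.
```
import typing

T = typing.TypeVar("T", bound=int, covariant=True)
name, bound, constraints, covariant, contravariant = (
    T.__name__, T.__bound__, T.__constraints__,
    T.__covariant__, T.__contravariant__,
)
rebuilt = typing.TypeVar(name, *constraints, bound=bound,
                         covariant=covariant, contravariant=contravariant)
assert rebuilt.__name__ == "T" and rebuilt.__bound__ is int
```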
172,314 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
if pickle.HIGHEST_PROTOCOL >= 5:
# Shorthands similar to pickle.dump/pickle.dumps
def dump(obj, file, protocol=None, buffer_callback=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(
file, protocol=protocol, buffer_callback=buffer_callback
).dump(obj)
else:
# Shorthands similar to pickle.dump/pickle.dumps
def dump(obj, file, protocol=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(file, protocol=protocol).dump(obj)
class CloudPickler(Pickler):
# set of reducers defined and used by cloudpickle (private)
_dispatch_table = {}
_dispatch_table[classmethod] = _classmethod_reduce
_dispatch_table[io.TextIOWrapper] = _file_reduce
_dispatch_table[logging.Logger] = _logger_reduce
_dispatch_table[logging.RootLogger] = _root_logger_reduce
_dispatch_table[memoryview] = _memoryview_reduce
_dispatch_table[property] = _property_reduce
_dispatch_table[staticmethod] = _classmethod_reduce
_dispatch_table[CellType] = _cell_reduce
_dispatch_table[types.CodeType] = _code_reduce
_dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
_dispatch_table[types.ModuleType] = _module_reduce
_dispatch_table[types.MethodType] = _method_reduce
_dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
_dispatch_table[weakref.WeakSet] = _weakset_reduce
_dispatch_table[typing.TypeVar] = _typevar_reduce
_dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
_dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
_dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
_dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
_dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
_dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
_dispatch_table[abc.abstractmethod] = _classmethod_reduce
_dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
_dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
_dispatch_table[abc.abstractproperty] = _property_reduce
dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)
# function reducers are defined as instance methods of CloudPickler
# objects, as they rely on a CloudPickler attribute (globals_ref)
def _dynamic_function_reduce(self, func):
"""Reduce a function that is not pickleable via attribute lookup."""
newargs = self._function_getnewargs(func)
state = _function_getstate(func)
return (_make_function, newargs, state, None, None,
_function_setstate)
def _function_reduce(self, obj):
"""Reducer for function objects.
If obj is a top-level attribute of a file-backed module, this
reducer returns NotImplemented, making the CloudPickler fallback to
traditional _pickle.Pickler routines to save obj. Otherwise, it reduces
obj using a custom cloudpickle reducer designed specifically to handle
dynamic functions.
As opposed to cloudpickle.py, there is no special handling for builtin
pypy functions because cloudpickle_fast is CPython-specific.
"""
if _should_pickle_by_reference(obj):
return NotImplemented
else:
return self._dynamic_function_reduce(obj)
def _function_getnewargs(self, func):
code = func.__code__
# base_globals represents the future global namespace of func at
# unpickling time. Looking it up and storing it in
# CloudPickler.globals_ref allows functions sharing the same globals
# at pickling time to also share them once unpickled, on one condition:
# since globals_ref is an attribute of a CloudPickler instance, and
# that a new CloudPickler is created each time pickle.dump or
# pickle.dumps is called, functions also need to be saved within the
# same invocation of cloudpickle.dump/cloudpickle.dumps (for example:
# cloudpickle.dumps([f1, f2])). There is no such limitation when using
# CloudPickler.dump, as long as the multiple invocations are bound to
# the same CloudPickler.
base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
if base_globals == {}:
# Add module attributes used to resolve relative imports
# instructions inside func.
for k in ["__package__", "__name__", "__path__", "__file__"]:
if k in func.__globals__:
base_globals[k] = func.__globals__[k]
# Do not bind the free variables before the function is created to
# avoid infinite recursion.
if func.__closure__ is None:
closure = None
else:
closure = tuple(
_make_empty_cell() for _ in range(len(code.co_freevars)))
return code, base_globals, None, None, closure
def dump(self, obj):
try:
return Pickler.dump(self, obj)
except RuntimeError as e:
if "recursion" in e.args[0]:
msg = (
"Could not pickle object as excessively deep recursion "
"required."
)
raise pickle.PicklingError(msg) from e
else:
raise
if pickle.HIGHEST_PROTOCOL >= 5:
def __init__(self, file, protocol=None, buffer_callback=None):
if protocol is None:
protocol = DEFAULT_PROTOCOL
Pickler.__init__(
self, file, protocol=protocol, buffer_callback=buffer_callback
)
# map functions __globals__ attribute ids, to ensure that functions
# sharing the same global namespace at pickling time also share
# their global namespace at unpickling time.
self.globals_ref = {}
self.proto = int(protocol)
else:
def __init__(self, file, protocol=None):
if protocol is None:
protocol = DEFAULT_PROTOCOL
Pickler.__init__(self, file, protocol=protocol)
# map functions __globals__ attribute ids, to ensure that functions
# sharing the same global namespace at pickling time also share
# their global namespace at unpickling time.
self.globals_ref = {}
assert hasattr(self, 'proto')
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
# Pickler is the C implementation of the CPython pickler and therefore
# we rely on the reducer_override method to customize the pickler behavior.
# `CloudPickler.dispatch` is only left for backward compatibility - note
# that when using protocol 5, `CloudPickler.dispatch` is not an
# extension of `Pickler.dispatch` dictionary, because CloudPickler
# subclasses the C-implemented Pickler, which does not expose a
# `dispatch` attribute. Earlier versions of the protocol 5 CloudPickler
# used `CloudPickler.dispatch` as a class-level attribute storing all
# reducers implemented by cloudpickle, but the attribute name was not a
# great choice given the meaning of `CloudPickler.dispatch` when
# `CloudPickler` extends the pure-python pickler.
dispatch = dispatch_table
# Implementation of the reducer_override callback, in order to
# efficiently serialize dynamic functions and classes by subclassing
# the C-implemented Pickler.
# TODO: decorrelate reducer_override (which is tied to CPython's
# implementation - would it make sense to backport it to pypy?) and
# pickle's protocol 5, which is implementation agnostic. Currently, the
# availability of both notions coincide on CPython's pickle and the
# pickle5 backport, but it may not be the case anymore when pypy
# implements protocol 5
def reducer_override(self, obj):
"""Type-agnostic reducing callback for function and classes.
For performance reasons, subclasses of the C _pickle.Pickler class
cannot register custom reducers for functions and classes in the
dispatch_table. Reducers for such types must instead be implemented in
the special reducer_override method.
Note that this method will be called for any object except a few
builtin-types (int, lists, dicts etc.), which differs from reducers
in the Pickler's dispatch_table, each of them being invoked for
objects of a specific type only.
This property comes in handy for classes: although most classes are
instances of the ``type`` metaclass, some of them can be instances
of other custom metaclasses (such as enum.EnumMeta for example). In
particular, the metaclass will likely not be known in advance, and
thus cannot be special-cased using an entry in the dispatch_table.
reducer_override, among other things, allows us to register a
reducer that will be called for any class, independently of its
type.
Notes:
* reducer_override has the priority over dispatch_table-registered
reducers.
* reducer_override can be used to fix other limitations of
cloudpickle for other types that suffered from type-specific
reducers, such as Exceptions. See
https://github.com/cloudpipe/cloudpickle/issues/248
"""
if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch
return (
_create_parametrized_type_hint,
parametrized_type_hint_getinitargs(obj)
)
t = type(obj)
try:
is_anyclass = issubclass(t, type)
except TypeError: # t is not a class (old Boost; see SF #502085)
is_anyclass = False
if is_anyclass:
return _class_reduce(obj)
elif isinstance(obj, types.FunctionType):
return self._function_reduce(obj)
else:
# fallback to save_global, including the Pickler's
# dispatch_table
return NotImplemented
else:
# When reducer_override is not available, hack the pure-Python
# Pickler's types.FunctionType and type savers. Note: the type saver
# must override Pickler.save_global, because pickle.py contains a
# hard-coded call to save_global when pickling meta-classes.
dispatch = Pickler.dispatch.copy()
def _save_reduce_pickle5(self, func, args, state=None, listitems=None,
dictitems=None, state_setter=None, obj=None):
save = self.save
write = self.write
self.save_reduce(
func, args, state=None, listitems=listitems,
dictitems=dictitems, obj=obj
)
# backport of the Python 3.8 state_setter pickle operations
save(state_setter)
save(obj) # simple BINGET opcode as obj is already memoized.
save(state)
write(pickle.TUPLE2)
# Trigger a state_setter(obj, state) function call.
write(pickle.REDUCE)
# The purpose of state_setter is to carry-out an
# inplace modification of obj. We do not care about what the
# method might return, so its output is eventually removed from
# the stack.
write(pickle.POP)
def save_global(self, obj, name=None, pack=struct.pack):
"""
Save a "global".
The name of this method is somewhat misleading: all types get
dispatched here.
"""
if obj is type(None): # noqa
return self.save_reduce(type, (None,), obj=obj)
elif obj is type(Ellipsis):
return self.save_reduce(type, (Ellipsis,), obj=obj)
elif obj is type(NotImplemented):
return self.save_reduce(type, (NotImplemented,), obj=obj)
elif obj in _BUILTIN_TYPE_NAMES:
return self.save_reduce(
_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch
# Parametrized typing constructs in Python < 3.7 are not
# compatible with type checks and ``isinstance`` semantics. For
# this reason, it is easier to detect them using a
# duck-typing-based check (``_is_parametrized_type_hint``) than
# to populate the Pickler's dispatch with type-specific savers.
self.save_reduce(
_create_parametrized_type_hint,
parametrized_type_hint_getinitargs(obj),
obj=obj
)
elif name is not None:
Pickler.save_global(self, obj, name=name)
elif not _should_pickle_by_reference(obj, name=name):
self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
else:
Pickler.save_global(self, obj, name=name)
dispatch[type] = save_global
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
if _should_pickle_by_reference(obj, name=name):
return Pickler.save_global(self, obj, name=name)
elif PYPY and isinstance(obj.__code__, builtin_code_type):
return self.save_pypy_builtin_func(obj)
else:
return self._save_reduce_pickle5(
*self._dynamic_function_reduce(obj), obj=obj
)
def save_pypy_builtin_func(self, obj):
"""Save pypy equivalent of builtin functions.
PyPy does not have the concept of builtin-functions. Instead,
builtin-functions are simple function instances, but with a
builtin-code attribute.
Most of the time, builtin functions should be pickled by attribute.
But PyPy has flaky support for __qualname__, so some builtin
functions such as float.__new__ will be classified as dynamic. For
this reason only, we created this special routine. Because
builtin-functions are not expected to have closure or globals,
there is no additional hack (compared to the one already implemented
in pickle) to protect ourselves from reference cycles. A simple
(reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
also that PyPy improved their support for __qualname__ in v3.6, so
this routine should be removed when cloudpickle supports only PyPy
3.6 and later.
"""
rv = (types.FunctionType, (obj.__code__, {}, obj.__name__,
obj.__defaults__, obj.__closure__),
obj.__dict__)
self.save_reduce(*rv, obj=obj)
dispatch[types.FunctionType] = save_function
The provided code snippet includes necessary dependencies for implementing the `dumps` function. Write a Python function `def dumps(obj, protocol=None, buffer_callback=None)` to solve the following problem:
Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python.
Here is the function:
def dumps(obj, protocol=None, buffer_callback=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
with io.BytesIO() as file:
cp = CloudPickler(
file, protocol=protocol, buffer_callback=buffer_callback
)
cp.dump(obj)
return file.getvalue() | Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python. |
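A usage sketch, assuming the cloudpickle package is importable under that name: dumps can serialize an interactively defined closure that the standard pickle module rejects, and plain pickle can load the resulting bytes back.
```
import pickle
import cloudpickle

def make_adder(n):
    return lambda x: x + n

payload = cloudpickle.dumps(make_adder(3))  # DEFAULT_PROTOCOL by default
restored = pickle.loads(payload)            # the byte stream is ordinary pickle
assert restored(4) == 7
```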
172,315 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
if pickle.HIGHEST_PROTOCOL >= 5:
# Shorthands similar to pickle.dump/pickle.dumps
def dump(obj, file, protocol=None, buffer_callback=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(
file, protocol=protocol, buffer_callback=buffer_callback
).dump(obj)
else:
# Shorthands similar to pickle.dump/pickle.dumps
def dump(obj, file, protocol=None):
"""Serialize obj as bytes streamed into file
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
CloudPickler(file, protocol=protocol).dump(obj)
class CloudPickler(Pickler):
# set of reducers defined and used by cloudpickle (private)
_dispatch_table = {}
_dispatch_table[classmethod] = _classmethod_reduce
_dispatch_table[io.TextIOWrapper] = _file_reduce
_dispatch_table[logging.Logger] = _logger_reduce
_dispatch_table[logging.RootLogger] = _root_logger_reduce
_dispatch_table[memoryview] = _memoryview_reduce
_dispatch_table[property] = _property_reduce
_dispatch_table[staticmethod] = _classmethod_reduce
_dispatch_table[CellType] = _cell_reduce
_dispatch_table[types.CodeType] = _code_reduce
_dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
_dispatch_table[types.ModuleType] = _module_reduce
_dispatch_table[types.MethodType] = _method_reduce
_dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
_dispatch_table[weakref.WeakSet] = _weakset_reduce
_dispatch_table[typing.TypeVar] = _typevar_reduce
_dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
_dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
_dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
_dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
_dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
_dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
_dispatch_table[abc.abstractmethod] = _classmethod_reduce
_dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
_dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
_dispatch_table[abc.abstractproperty] = _property_reduce
dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)
# function reducers are defined as instance methods of CloudPickler
# objects, as they rely on a CloudPickler attribute (globals_ref)
def _dynamic_function_reduce(self, func):
"""Reduce a function that is not pickleable via attribute lookup."""
newargs = self._function_getnewargs(func)
state = _function_getstate(func)
return (_make_function, newargs, state, None, None,
_function_setstate)
def _function_reduce(self, obj):
"""Reducer for function objects.
If obj is a top-level attribute of a file-backed module, this
reducer returns NotImplemented, making the CloudPickler fallback to
traditional _pickle.Pickler routines to save obj. Otherwise, it reduces
obj using a custom cloudpickle reducer designed specifically to handle
dynamic functions.
As opposed to cloudpickle.py, there is no special handling for builtin
pypy functions because cloudpickle_fast is CPython-specific.
"""
if _should_pickle_by_reference(obj):
return NotImplemented
else:
return self._dynamic_function_reduce(obj)
def _function_getnewargs(self, func):
code = func.__code__
# base_globals represents the future global namespace of func at
# unpickling time. Looking it up and storing it in
# CloudPickler.globals_ref allows functions sharing the same globals
# at pickling time to also share them once unpickled, on one condition:
# since globals_ref is an attribute of a CloudPickler instance, and
# that a new CloudPickler is created each time pickle.dump or
# pickle.dumps is called, functions also need to be saved within the
# same invocation of cloudpickle.dump/cloudpickle.dumps (for example:
# cloudpickle.dumps([f1, f2])). There is no such limitation when using
# CloudPickler.dump, as long as the multiple invocations are bound to
# the same CloudPickler.
base_globals = self.globals_ref.setdefault(id(func.__globals__), {})
if base_globals == {}:
# Add module attributes used to resolve relative imports
# instructions inside func.
for k in ["__package__", "__name__", "__path__", "__file__"]:
if k in func.__globals__:
base_globals[k] = func.__globals__[k]
# Do not bind the free variables before the function is created to
# avoid infinite recursion.
if func.__closure__ is None:
closure = None
else:
closure = tuple(
_make_empty_cell() for _ in range(len(code.co_freevars)))
return code, base_globals, None, None, closure
def dump(self, obj):
try:
return Pickler.dump(self, obj)
except RuntimeError as e:
if "recursion" in e.args[0]:
msg = (
"Could not pickle object as excessively deep recursion "
"required."
)
raise pickle.PicklingError(msg) from e
else:
raise
if pickle.HIGHEST_PROTOCOL >= 5:
def __init__(self, file, protocol=None, buffer_callback=None):
if protocol is None:
protocol = DEFAULT_PROTOCOL
Pickler.__init__(
self, file, protocol=protocol, buffer_callback=buffer_callback
)
# map functions __globals__ attribute ids, to ensure that functions
# sharing the same global namespace at pickling time also share
# their global namespace at unpickling time.
self.globals_ref = {}
self.proto = int(protocol)
else:
def __init__(self, file, protocol=None):
if protocol is None:
protocol = DEFAULT_PROTOCOL
Pickler.__init__(self, file, protocol=protocol)
# map functions __globals__ attribute ids, to ensure that functions
# sharing the same global namespace at pickling time also share
# their global namespace at unpickling time.
self.globals_ref = {}
assert hasattr(self, 'proto')
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
# Pickler is the C implementation of the CPython pickler and therefore
# we rely on the reducer_override method to customize the pickler behavior.
# `CloudPickler.dispatch` is only left for backward compatibility - note
# that when using protocol 5, `CloudPickler.dispatch` is not an
# extension of `Pickler.dispatch` dictionary, because CloudPickler
# subclasses the C-implemented Pickler, which does not expose a
# `dispatch` attribute. Earlier versions of the protocol 5 CloudPickler
# used `CloudPickler.dispatch` as a class-level attribute storing all
# reducers implemented by cloudpickle, but the attribute name was not a
# great choice given the meaning of `CloudPickler.dispatch` when
# `CloudPickler` extends the pure-python pickler.
dispatch = dispatch_table
# Implementation of the reducer_override callback, in order to
# efficiently serialize dynamic functions and classes by subclassing
# the C-implemented Pickler.
# TODO: decorrelate reducer_override (which is tied to CPython's
# implementation - would it make sense to backport it to pypy?) and
# pickle's protocol 5, which is implementation agnostic. Currently, the
# availability of both notions coincide on CPython's pickle and the
# pickle5 backport, but it may not be the case anymore when pypy
# implements protocol 5
def reducer_override(self, obj):
"""Type-agnostic reducing callback for function and classes.
For performance reasons, subclasses of the C _pickle.Pickler class
cannot register custom reducers for functions and classes in the
dispatch_table. Reducers for such types must instead be implemented in
the special reducer_override method.
Note that this method will be called for any object except a few
builtin-types (int, lists, dicts etc.), which differs from reducers
in the Pickler's dispatch_table, each of them being invoked for
objects of a specific type only.
This property comes in handy for classes: although most classes are
instances of the ``type`` metaclass, some of them can be instances
of other custom metaclasses (such as enum.EnumMeta for example). In
particular, the metaclass will likely not be known in advance, and
thus cannot be special-cased using an entry in the dispatch_table.
reducer_override, among other things, allows us to register a
reducer that will be called for any class, independently of its
type.
Notes:
* reducer_override has the priority over dispatch_table-registered
reducers.
* reducer_override can be used to fix other limitations of
cloudpickle for other types that suffered from type-specific
reducers, such as Exceptions. See
https://github.com/cloudpipe/cloudpickle/issues/248
"""
if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch
return (
_create_parametrized_type_hint,
parametrized_type_hint_getinitargs(obj)
)
t = type(obj)
try:
is_anyclass = issubclass(t, type)
except TypeError: # t is not a class (old Boost; see SF #502085)
is_anyclass = False
if is_anyclass:
return _class_reduce(obj)
elif isinstance(obj, types.FunctionType):
return self._function_reduce(obj)
else:
# fallback to save_global, including the Pickler's
# dispatch_table
return NotImplemented
else:
# When reducer_override is not available, hack the pure-Python
# Pickler's types.FunctionType and type savers. Note: the type saver
# must override Pickler.save_global, because pickle.py contains a
# hard-coded call to save_global when pickling meta-classes.
dispatch = Pickler.dispatch.copy()
def _save_reduce_pickle5(self, func, args, state=None, listitems=None,
dictitems=None, state_setter=None, obj=None):
save = self.save
write = self.write
self.save_reduce(
func, args, state=None, listitems=listitems,
dictitems=dictitems, obj=obj
)
# backport of the Python 3.8 state_setter pickle operations
save(state_setter)
save(obj) # simple BINGET opcode as obj is already memoized.
save(state)
write(pickle.TUPLE2)
# Trigger a state_setter(obj, state) function call.
write(pickle.REDUCE)
# The purpose of state_setter is to carry-out an
# inplace modification of obj. We do not care about what the
# method might return, so its output is eventually removed from
# the stack.
write(pickle.POP)
def save_global(self, obj, name=None, pack=struct.pack):
"""
Save a "global".
The name of this method is somewhat misleading: all types get
dispatched here.
"""
if obj is type(None): # noqa
return self.save_reduce(type, (None,), obj=obj)
elif obj is type(Ellipsis):
return self.save_reduce(type, (Ellipsis,), obj=obj)
elif obj is type(NotImplemented):
return self.save_reduce(type, (NotImplemented,), obj=obj)
elif obj in _BUILTIN_TYPE_NAMES:
return self.save_reduce(
_builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj): # noqa # pragma: no branch
# Parametrized typing constructs in Python < 3.7 are not
# compatible with type checks and ``isinstance`` semantics. For
# this reason, it is easier to detect them using a
# duck-typing-based check (``_is_parametrized_type_hint``) than
# to populate the Pickler's dispatch with type-specific savers.
self.save_reduce(
_create_parametrized_type_hint,
parametrized_type_hint_getinitargs(obj),
obj=obj
)
elif name is not None:
Pickler.save_global(self, obj, name=name)
elif not _should_pickle_by_reference(obj, name=name):
self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
else:
Pickler.save_global(self, obj, name=name)
dispatch[type] = save_global
def save_function(self, obj, name=None):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
if _should_pickle_by_reference(obj, name=name):
return Pickler.save_global(self, obj, name=name)
elif PYPY and isinstance(obj.__code__, builtin_code_type):
return self.save_pypy_builtin_func(obj)
else:
return self._save_reduce_pickle5(
*self._dynamic_function_reduce(obj), obj=obj
)
def save_pypy_builtin_func(self, obj):
"""Save pypy equivalent of builtin functions.
PyPy does not have the concept of builtin-functions. Instead,
builtin-functions are simple function instances, but with a
builtin-code attribute.
Most of the time, builtin functions should be pickled by attribute.
But PyPy has flaky support for __qualname__, so some builtin
functions such as float.__new__ will be classified as dynamic. For
this reason only, we created this special routine. Because
builtin-functions are not expected to have closure or globals,
there is no additional hack (compared to the one already implemented
in pickle) to protect ourselves from reference cycles. A simple
(reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
also that PyPy improved their support for __qualname__ in v3.6, so
this routine should be removed when cloudpickle supports only PyPy
3.6 and later.
"""
rv = (types.FunctionType, (obj.__code__, {}, obj.__name__,
obj.__defaults__, obj.__closure__),
obj.__dict__)
self.save_reduce(*rv, obj=obj)
dispatch[types.FunctionType] = save_function
The provided code snippet includes necessary dependencies for implementing the `dumps` function. Write a Python function `def dumps(obj, protocol=None)` to solve the following problem:
Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python.
Here is the function:
def dumps(obj, protocol=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
speed between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
compatibility with older versions of Python.
"""
with io.BytesIO() as file:
cp = CloudPickler(file, protocol=protocol)
cp.dump(obj)
return file.getvalue() | Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python. |
172,316 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _extract_code_globals(co):
"""
Find all globals names read or written to by codeblock co
"""
out_names = _extract_code_globals_cache.get(co)
if out_names is None:
# We use a dict with None values instead of a set to get a
# deterministic order (assuming Python 3.6+) and avoid introducing
# non-deterministic pickle bytes as a results.
out_names = {name: None for name in _walk_global_ops(co)}
# Declaring a function inside another one using the "def ..."
# syntax generates a constant code object corresponding to the one
# of the nested function. As the nested function may itself need
# global variables, we need to introspect its code, extract its
# globals (look for code objects in its co_consts attribute) and
# add the result to code_globals
if co.co_consts:
for const in co.co_consts:
if isinstance(const, types.CodeType):
out_names.update(_extract_code_globals(const))
_extract_code_globals_cache[co] = out_names
return out_names
def _find_imported_submodules(code, top_level_dependencies):
"""
Find currently imported submodules used by a function.
Submodules used by a function need to be detected and referenced for the
function to work correctly at depickling time. Because submodules can be
referenced as attribute of their parent package (``package.submodule``), we
need a special introspection technique that does not rely on GLOBAL-related
opcodes to find references of them in a code object.
Example:
```
import concurrent.futures
import cloudpickle
def func():
x = concurrent.futures.ThreadPoolExecutor
if __name__ == '__main__':
cloudpickle.dumps(func)
```
The globals extracted by cloudpickle in the function's state include the
concurrent package, but not its submodule (here, concurrent.futures), which
is the module used by func. _find_imported_submodules will detect the usage
of concurrent.futures. Saving this module alongside func will ensure
that calling func once depickled does not fail due to concurrent.futures
not being imported
"""
subimports = []
# check if any known dependency is an imported package
for x in top_level_dependencies:
if (isinstance(x, types.ModuleType) and
hasattr(x, '__package__') and x.__package__):
# check if the package has any currently loaded sub-imports
prefix = x.__name__ + '.'
# A concurrent thread could mutate sys.modules,
# make sure we iterate over a copy to avoid exceptions
for name in list(sys.modules):
# Older versions of pytest will add a "None" module to
# sys.modules.
if name is not None and name.startswith(prefix):
# check whether the function can address the sub-module
tokens = set(name[len(prefix):].split('.'))
if not tokens - set(code.co_names):
subimports.append(sys.modules[name])
return subimports
def _get_cell_contents(cell):
try:
return cell.cell_contents
except ValueError:
# sentinel used by ``_fill_function`` which will leave the cell empty
return _empty_cell_value
def _function_getstate(func):
# - Put func's dynamic attributes (stored in func.__dict__) in state. These
# attributes will be restored at unpickling time using
# f.__dict__.update(state)
# - Put func's members into slotstate. Such attributes will be restored at
# unpickling time by iterating over slotstate and calling setattr(func,
# slotname, slotvalue)
slotstate = {
"__name__": func.__name__,
"__qualname__": func.__qualname__,
"__annotations__": func.__annotations__,
"__kwdefaults__": func.__kwdefaults__,
"__defaults__": func.__defaults__,
"__module__": func.__module__,
"__doc__": func.__doc__,
"__closure__": func.__closure__,
}
f_globals_ref = _extract_code_globals(func.__code__)
f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in
func.__globals__}
closure_values = (
list(map(_get_cell_contents, func.__closure__))
if func.__closure__ is not None else ()
)
# Extract currently-imported submodules used by func. Storing these modules
# in a dedicated _cloudpickle_submodules attribute of the object's state will
# trigger the side effect of importing these modules at unpickling time
# (which is necessary for func to work correctly once depickled)
slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
func.__code__, itertools.chain(f_globals.values(), closure_values))
slotstate["__globals__"] = f_globals
state = func.__dict__
return state, slotstate | null |
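An illustrative look (hypothetical function names; co_names is used here as a rough stand-in for the bytecode walk done by _extract_code_globals) at the two pieces of state _function_getstate captures: referenced globals and closure cell values.
```
SCALE = 10

def make_fn(offset):
    def fn(x):
        return SCALE * x + offset
    return fn

fn = make_fn(2)
referenced_globals = {k: fn.__globals__[k]
                      for k in fn.__code__.co_names if k in fn.__globals__}
closure_values = [cell.cell_contents for cell in fn.__closure__]
assert referenced_globals == {"SCALE": 10} and closure_values == [2]
```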
172,317 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
The provided code snippet includes necessary dependencies for implementing the `_code_reduce` function. Write a Python function `def _code_reduce(obj)` to solve the following problem:
codeobject reducer
Here is the function:
def _code_reduce(obj):
"""codeobject reducer"""
# If you are not sure about the order of arguments, take a look at help
# of the specific type from types, for example:
# >>> from types import CodeType
# >>> help(CodeType)
if hasattr(obj, "co_exceptiontable"): # pragma: no branch
# Python 3.11 and later: there are some new attributes
# related to the enhanced exceptions.
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name, obj.co_qualname,
obj.co_firstlineno, obj.co_linetable, obj.co_exceptiontable,
obj.co_freevars, obj.co_cellvars,
)
elif hasattr(obj, "co_linetable"): # pragma: no branch
# Python 3.10 and later: obj.co_lnotab is deprecated and constructor
# expects obj.co_linetable instead.
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_linetable, obj.co_freevars,
obj.co_cellvars
)
elif hasattr(obj, "co_nmeta"): # pragma: no cover
# "nogil" Python: modified attributes from 3.9
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_framesize,
obj.co_ndefaultargs, obj.co_nmeta,
obj.co_flags, obj.co_code, obj.co_consts,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_exc_handlers,
obj.co_jump_table, obj.co_freevars, obj.co_cellvars,
obj.co_free2reg, obj.co_cell2reg
)
elif hasattr(obj, "co_posonlyargcount"):
# Backward compat for 3.9 and older
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else:
# Backward compat for even older versions of Python
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
obj.co_names, obj.co_varnames, obj.co_filename,
obj.co_name, obj.co_firstlineno, obj.co_lnotab,
obj.co_freevars, obj.co_cellvars
)
return types.CodeType, args | codeobject reducer |
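A version-sensitive sketch mirroring the hasattr probes in _code_reduce, plus a structural copy via CodeType.replace on interpreters that have it (Python 3.8+); the layout labels are informal.
```
def f(a, b):
    return a + b

code = f.__code__
if hasattr(code, "co_exceptiontable"):
    layout = "3.11+"
elif hasattr(code, "co_linetable"):
    layout = "3.10"
elif hasattr(code, "co_posonlyargcount"):
    layout = "3.8/3.9"
else:
    layout = "older"

if hasattr(code, "replace"):            # Python 3.8+
    renamed = code.replace(co_name="f_copy")
    assert renamed.co_name == "f_copy"
print(layout)
```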
172,318 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _make_empty_cell():
if False:
# trick the compiler into creating an empty cell in our lambda
cell = None
raise AssertionError('this route should not be executed')
return (lambda: cell).__closure__[0]
def _make_cell(value=_empty_cell_value):
cell = _make_empty_cell()
if value is not _empty_cell_value:
cell_set(cell, value)
return cell
The provided code snippet includes necessary dependencies for implementing the `_cell_reduce` function. Write a Python function `def _cell_reduce(obj)` to solve the following problem:
Cell (containing values of a function's free variables) reducer
Here is the function:
def _cell_reduce(obj):
"""Cell (containing values of a function's free variables) reducer"""
try:
obj.cell_contents
except ValueError: # cell is empty
return _make_empty_cell, ()
else:
return _make_cell, (obj.cell_contents, ) | Cell (containing values of a function's free variables) reducer |
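A minimal sketch of the branch _cell_reduce takes: cell_contents raises ValueError on an empty cell, so empty and filled cells reduce differently (the helper names here are made up).
```
def _empty_cell():
    if False:
        v = None  # never assigned, so the cell stays empty
    return (lambda: v).__closure__[0]

def _filled_cell(value):
    return (lambda: value).__closure__[0]

empty, filled = _empty_cell(), _filled_cell("payload")
try:
    empty.cell_contents
except ValueError:
    print("empty cell -> (_make_empty_cell, ())")
print("filled cell ->", filled.cell_contents)
```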
172,319 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _classmethod_reduce(obj):
orig_func = obj.__func__
return type(obj), (orig_func,) | null |
172,320 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
if pickle.HIGHEST_PROTOCOL >= 5:
# Shorthands similar to pickle.dump/pickle.dumps
else:
def _file_reconstructor(retval):
return retval
The provided code snippet includes necessary dependencies for implementing the `_file_reduce` function. Write a Python function `def _file_reduce(obj)` to solve the following problem:
Save a file
Here is the function:
def _file_reduce(obj):
"""Save a file"""
import io
if not hasattr(obj, "name") or not hasattr(obj, "mode"):
raise pickle.PicklingError(
"Cannot pickle files that do not map to an actual file"
)
if obj is sys.stdout:
return getattr, (sys, "stdout")
if obj is sys.stderr:
return getattr, (sys, "stderr")
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if obj.closed:
raise pickle.PicklingError("Cannot pickle closed files")
if hasattr(obj, "isatty") and obj.isatty():
raise pickle.PicklingError(
"Cannot pickle files that map to tty objects"
)
if "r" not in obj.mode and "+" not in obj.mode:
raise pickle.PicklingError(
"Cannot pickle files that are not opened for reading: %s"
% obj.mode
)
name = obj.name
retval = io.StringIO()
try:
# Read the whole file
curloc = obj.tell()
obj.seek(0)
contents = obj.read()
obj.seek(curloc)
except IOError as e:
raise pickle.PicklingError(
"Cannot pickle file %s as it cannot be read" % name
) from e
retval.write(contents)
retval.seek(curloc)
retval.name = name
return _file_reconstructor, (retval,) | Save a file |
172,321 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _getset_descriptor_reduce(obj):
return getattr, (obj.__objclass__, obj.__name__) | null |
172,322 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _mappingproxy_reduce(obj):
return types.MappingProxyType, (dict(obj),) | null |
172,323 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _memoryview_reduce(obj):
return bytes, (obj.tobytes(),) | null |
172,324 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _should_pickle_by_reference(obj, name=None):
"""Test whether an function or a class should be pickled by reference
Pickling by reference means by that the object (typically a function or a
class) is an attribute of a module that is assumed to be importable in the
target Python environment. Loading will therefore rely on importing the
module and then calling `getattr` on it to access the function or class.
Pickling by reference is the only option to pickle functions and classes
in the standard library. In cloudpickle the alternative option is to
pickle by value (for instance for interactively or locally defined
functions and classes or for attributes of modules that have been
explicitly registered to be pickled by value).
"""
if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
module_and_name = _lookup_module_and_qualname(obj, name=name)
if module_and_name is None:
return False
module, name = module_and_name
return not _is_registered_pickle_by_value(module)
elif isinstance(obj, types.ModuleType):
# We assume that sys.modules is primarily used as a cache mechanism for
# the Python import machinery. Checking if a module has been added to
# sys.modules is therefore a cheap and simple heuristic to tell us
# whether we can assume that a given module could be imported by name
# in another Python process.
if _is_registered_pickle_by_value(obj):
return False
return obj.__name__ in sys.modules
else:
raise TypeError(
"cannot check importability of {} instances".format(
type(obj).__name__)
)
def subimport(name):
# We cannot do simply: `return __import__(name)`: Indeed, if ``name`` is
# the name of a submodule, __import__ will return the top-level root module
# of this submodule. For instance, __import__('os.path') returns the `os`
# module.
__import__(name)
return sys.modules[name]
def dynamic_subimport(name, vars):
mod = types.ModuleType(name)
mod.__dict__.update(vars)
mod.__dict__['__builtins__'] = builtins.__dict__
return mod
def _module_reduce(obj):
if _should_pickle_by_reference(obj):
return subimport, (obj.__name__,)
else:
# Some external libraries can populate the "__builtins__" entry of a
# module's `__dict__` with unpicklable objects (see #316). For that
# reason, we do not attempt to pickle the "__builtins__" entry, and
# restore a default value for it at unpickling time.
state = obj.__dict__.copy()
state.pop('__builtins__', None)
return dynamic_subimport, (obj.__name__, state) | null |
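A quick sketch of the quirk subimport works around: __import__ on a dotted name returns the top-level package, so the actual submodule has to be fetched from sys.modules afterwards.
```
import sys

top = __import__("os.path")
assert top.__name__ == "os"                      # not os.path
sub = sys.modules["os.path"]
assert sub.__name__ in ("posixpath", "ntpath")   # the real submodule object
```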
172,325 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _method_reduce(obj):
return (types.MethodType, (obj.__func__, obj.__self__)) | null |
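A minimal sketch: a bound method splits into its underlying function and instance and can be re-bound with types.MethodType, which is exactly the pair _method_reduce stores.
```
import types

class Counter:
    def __init__(self):
        self.n = 0
    def bump(self):
        self.n += 1
        return self.n

c = Counter()
rebound = types.MethodType(c.bump.__func__, c.bump.__self__)
assert rebound() == 1 and c.n == 1
```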
172,326 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _logger_reduce(obj):
return logging.getLogger, (obj.name,) | null |
172,327 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _root_logger_reduce(obj):
return logging.getLogger, () | null |
172,328 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _property_reduce(obj):
return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__) | null |
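A minimal sketch of the round trip performed by `_property_reduce`: the property is rebuilt from its `fget`, `fset`, `fdel` accessors and its docstring.

p = property(lambda self: 42, None, None, "the answer")
rebuilt = property(p.fget, p.fset, p.fdel, p.__doc__)
print(rebuilt.__doc__)     # 'the answer'
print(rebuilt.fget(None))  # 42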
172,329 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _weakset_reduce(obj):
return weakref.WeakSet, (list(obj),) | null |
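A hedged illustration of the `_weakset_reduce` reconstruction path: the set is rebuilt from a plain list of its still-alive members and remains a weak set afterwards (the `Node` class is only for illustration; immediate collection after `del` is a CPython detail).

import weakref

class Node:
    pass

n = Node()
ws = weakref.WeakSet([n])
rebuilt = weakref.WeakSet(list(ws))  # what the reducer reconstructs
print(n in rebuilt)                  # True
del n                                # once the referent dies it drops out (CPython)
print(len(rebuilt))                  # 0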
172,330 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _dynamic_class_reduce(obj):
"""
Save a class that can't be stored as module global.
This method is used to serialize classes that are defined inside
functions, or that otherwise can't be serialized as attribute lookups
from global modules.
"""
if Enum is not None and issubclass(obj, Enum):
return (
_make_skeleton_enum, _enum_getnewargs(obj), _enum_getstate(obj),
None, None, _class_setstate
)
else:
return (
_make_skeleton_class, _class_getnewargs(obj), _class_getstate(obj),
None, None, _class_setstate
)
def _should_pickle_by_reference(obj, name=None):
"""Test whether an function or a class should be pickled by reference
Pickling by reference means by that the object (typically a function or a
class) is an attribute of a module that is assumed to be importable in the
target Python environment. Loading will therefore rely on importing the
module and then calling `getattr` on it to access the function or class.
Pickling by reference is the only option to pickle functions and classes
in the standard library. In cloudpickle the alternative option is to
pickle by value (for instance for interactively or locally defined
functions and classes or for attributes of modules that have been
    explicitly registered to be pickled by value).
"""
if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
module_and_name = _lookup_module_and_qualname(obj, name=name)
if module_and_name is None:
return False
module, name = module_and_name
return not _is_registered_pickle_by_value(module)
elif isinstance(obj, types.ModuleType):
# We assume that sys.modules is primarily used as a cache mechanism for
        # the Python import machinery. Checking if a module has been added to
        # sys.modules is therefore a cheap and simple heuristic to tell us
# whether we can assume that a given module could be imported by name
# in another Python process.
if _is_registered_pickle_by_value(obj):
return False
return obj.__name__ in sys.modules
else:
raise TypeError(
"cannot check importability of {} instances".format(
type(obj).__name__)
)
_BUILTIN_TYPE_NAMES = {}
def _builtin_type(name):
if name == "ClassType": # pragma: no cover
# Backward compat to load pickle files generated with cloudpickle
# < 1.3 even if loading pickle files from older versions is not
# officially supported.
return type
return getattr(types, name)
The provided code snippet includes necessary dependencies for implementing the `_class_reduce` function. Write a Python function `def _class_reduce(obj)` to solve the following problem:
Select the reducer depending on the dynamic nature of the class obj
Here is the function:
def _class_reduce(obj):
"""Select the reducer depending on the dynamic nature of the class obj"""
if obj is type(None): # noqa
return type, (None,)
elif obj is type(Ellipsis):
return type, (Ellipsis,)
elif obj is type(NotImplemented):
return type, (NotImplemented,)
elif obj in _BUILTIN_TYPE_NAMES:
return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
elif not _should_pickle_by_reference(obj):
return _dynamic_class_reduce(obj)
return NotImplemented | Select the reducer depending on the dynamic nature of the class obj |
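To make the by-reference / by-value distinction concrete, here is a small, hedged sketch using the public cloudpickle API (it assumes cloudpickle is installed): a class importable from a module is stored as a name lookup, while a class defined in a local scope has to be serialized by value.

import collections
import cloudpickle

# Importable attribute of a module: pickled by reference (module path + name only).
by_ref = cloudpickle.dumps(collections.OrderedDict)

# Defined in a local scope: not importable, so pickled by value.
def make_local_class():
    class Local:
        pass
    return Local

by_value = cloudpickle.dumps(make_local_class())
print(len(by_ref) < len(by_value))  # typically True: the by-value payload carries the class body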
172,331 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _make_dict_keys(obj, is_ordered=False):
    if is_ordered:
        return OrderedDict.fromkeys(obj).keys()
    else:
        return dict.fromkeys(obj).keys()
def _dict_keys_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_keys, (list(obj), ) | null |
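A small, self-contained sketch of the round trip this reducer implements: only the keys are shipped, and an equivalent `dict_keys` view is rebuilt on the receiving side without the potentially sensitive values.

sensitive = {"user": "alice", "token": "s3cr3t"}
shipped = list(sensitive.keys())          # what the reducer serializes
rebuilt = dict.fromkeys(shipped).keys()   # the unordered reconstruction path
print(list(rebuilt))   # ['user', 'token']
print(type(rebuilt))   # <class 'dict_keys'> -- the values were never serialized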
172,332 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _make_dict_values(obj, is_ordered=False):
if is_ordered:
return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
else:
return {i: _ for i, _ in enumerate(obj)}.values()
def _dict_values_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_values, (list(obj), ) | null |
172,333 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _make_dict_items(obj, is_ordered=False):
    if is_ordered:
        return OrderedDict(obj).items()
    else:
        return obj.items()
def _dict_items_reduce(obj):
return _make_dict_items, (dict(obj), ) | null |
172,334 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _make_dict_keys(obj, is_ordered=False):
if is_ordered:
return OrderedDict.fromkeys(obj).keys()
else:
return dict.fromkeys(obj).keys()
def _odict_keys_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_keys, (list(obj), True) | null |
172,335 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _make_dict_values(obj, is_ordered=False):
if is_ordered:
return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
else:
return {i: _ for i, _ in enumerate(obj)}.values()
def _odict_values_reduce(obj):
# Safer not to ship the full dict as sending the rest might
# be unintended and could potentially cause leaking of
# sensitive information
return _make_dict_values, (list(obj), True) | null |
172,336 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def _make_dict_items(obj, is_ordered=False):
if is_ordered:
return OrderedDict(obj).items()
else:
return obj.items()
def _odict_items_reduce(obj):
return _make_dict_items, (dict(obj), True) | null |
172,337 | import _collections_abc
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap, OrderedDict
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _should_pickle_by_reference,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type,
_make_dict_keys, _make_dict_values, _make_dict_items, _make_function,
)
def cell_set(cell, value):
"""Set the value of a closure cell.
The point of this function is to set the cell_contents attribute of a cell
after its creation. This operation is necessary in case the cell contains a
reference to the function the cell belongs to, as when calling the
function's constructor
``f = types.FunctionType(code, globals, name, argdefs, closure)``,
closure will not be able to contain the yet-to-be-created f.
In Python3.7, cell_contents is writeable, so setting the contents of a cell
can be done simply using
>>> cell.cell_contents = value
In earlier Python3 versions, the cell_contents attribute of a cell is read
only, but this limitation can be worked around by leveraging the Python 3
``nonlocal`` keyword.
In Python2 however, this attribute is read only, and there is no
``nonlocal`` keyword. For this reason, we need to come up with more
complicated hacks to set this attribute.
The chosen approach is to create a function with a STORE_DEREF opcode,
which sets the content of a closure variable. Typically:
>>> def inner(value):
... lambda: cell # the lambda makes cell a closure
... cell = value # cell is a closure, so this triggers a STORE_DEREF
(Note that in Python2, A STORE_DEREF can never be triggered from an inner
function. The function g for example here
>>> def f(var):
... def g():
... var += 1
... return g
    will not modify the closure variable ``var`` in place, but instead try to
load a local variable var and increment it. As g does not assign the local
variable ``var`` any initial value, calling f(1)() will fail at runtime.)
Our objective is to set the value of a given cell ``cell``. So we need to
somewhat reference our ``cell`` object into the ``inner`` function so that
this object (and not the smoke cell of the lambda function) gets affected
by the STORE_DEREF operation.
In inner, ``cell`` is referenced as a cell variable (an enclosing variable
that is referenced by the inner function). If we create a new function
cell_set with the exact same code as ``inner``, but with ``cell`` marked as
a free variable instead, the STORE_DEREF will be applied on its closure -
``cell``, which we can specify explicitly during construction! The new
cell_set variable thus actually sets the contents of a specified cell!
Note: we do not make use of the ``nonlocal`` keyword to set the contents of
a cell in early python3 versions to limit possible syntax errors in case
test and checker libraries decide to parse the whole file.
"""
if sys.version_info[:2] >= (3, 7): # pragma: no branch
cell.cell_contents = value
else:
_cell_set = types.FunctionType(
_cell_set_template_code, {}, '_cell_set', (), (cell,),)
_cell_set(value)
The provided code snippet includes necessary dependencies for implementing the `_function_setstate` function. Write a Python function `def _function_setstate(obj, state)` to solve the following problem:
Update the state of a dynamic function. As __closure__ and __globals__ are readonly attributes of a function, we cannot rely on the native setstate routine of pickle.load_build, that calls setattr on items of the slotstate. Instead, we have to modify them inplace.
Here is the function:
def _function_setstate(obj, state):
"""Update the state of a dynamic function.
As __closure__ and __globals__ are readonly attributes of a function, we
cannot rely on the native setstate routine of pickle.load_build, that calls
setattr on items of the slotstate. Instead, we have to modify them inplace.
"""
state, slotstate = state
obj.__dict__.update(state)
obj_globals = slotstate.pop("__globals__")
obj_closure = slotstate.pop("__closure__")
# _cloudpickle_subimports is a set of submodules that must be loaded for
# the pickled function to work correctly at unpickling time. Now that these
# submodules are depickled (hence imported), they can be removed from the
# object's state (the object state only served as a reference holder to
# these submodules)
slotstate.pop("_cloudpickle_submodules")
obj.__globals__.update(obj_globals)
obj.__globals__["__builtins__"] = __builtins__
if obj_closure is not None:
for i, cell in enumerate(obj_closure):
try:
value = cell.cell_contents
except ValueError: # cell is empty
continue
cell_set(obj.__closure__[i], value)
for k, v in slotstate.items():
setattr(obj, k, v) | Update the state of a dynamic function. As __closure__ and __globals__ are readonly attributes of a function, we cannot rely on the native setstate routine of pickle.load_build, that calls setattr on items of the slotstate. Instead, we have to modify them inplace. |
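A self-contained illustration of what `cell_set` boils down to on Python 3.7 and later, where `cell_contents` is simply writable:

def make_closure():
    value = 0
    def read():
        return value
    return read

read = make_closure()
cell = read.__closure__[0]
print(read())            # 0
cell.cell_contents = 41  # equivalent to cell_set(cell, 41) on Python >= 3.7
print(read())            # 41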
172,338 | import inspect
from functools import partial
from joblib.externals.cloudpickle import dumps, loads
def _wrap_non_picklable_objects(obj, keep_wrapper):
if callable(obj):
return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper)
return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper)
def _reconstruct_wrapper(_pickled_object, keep_wrapper):
obj = loads(_pickled_object)
return _wrap_non_picklable_objects(obj, keep_wrapper) | null |
172,339 | import inspect
from functools import partial
from joblib.externals.cloudpickle import dumps, loads
WRAP_CACHE = {}
def _wrap_non_picklable_objects(obj, keep_wrapper):
if callable(obj):
return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper)
return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper)
class partial(Generic[_T]):
func: Callable[..., _T]
args: Tuple[Any, ...]
keywords: Dict[str, Any]
def __init__(self, func: Callable[..., _T], *args: Any, **kwargs: Any) -> None: ...
def __call__(self, *args: Any, **kwargs: Any) -> _T: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
def _wrap_objects_when_needed(obj):
# Function to introspect an object and decide if it should be wrapped or
# not.
need_wrap = "__main__" in getattr(obj, "__module__", "")
if isinstance(obj, partial):
return partial(
_wrap_objects_when_needed(obj.func),
*[_wrap_objects_when_needed(a) for a in obj.args],
**{k: _wrap_objects_when_needed(v)
for k, v in obj.keywords.items()}
)
if callable(obj):
# Need wrap if the object is a function defined in a local scope of
# another function.
func_code = getattr(obj, "__code__", "")
need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED
# Need wrap if the obj is a lambda expression
func_name = getattr(obj, "__name__", "")
need_wrap |= "<lambda>" in func_name
if not need_wrap:
return obj
wrapped_obj = WRAP_CACHE.get(obj)
if wrapped_obj is None:
wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False)
WRAP_CACHE[obj] = wrapped_obj
return wrapped_obj | null |
172,340 | import inspect
from functools import partial
from joblib.externals.cloudpickle import dumps, loads
class CloudpickledObjectWrapper:
def __init__(self, obj, keep_wrapper=False):
self._obj = obj
self._keep_wrapper = keep_wrapper
def __reduce__(self):
_pickled_object = dumps(self._obj)
if not self._keep_wrapper:
return loads, (_pickled_object,)
return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)
def __getattr__(self, attr):
        # Ensure that the wrapped object can be used seamlessly as the
# previous object.
if attr not in ['_obj', '_keep_wrapper']:
return getattr(self._obj, attr)
return getattr(self, attr)
def _wrap_non_picklable_objects(obj, keep_wrapper):
if callable(obj):
return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper)
return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper)
The provided code snippet includes necessary dependencies for implementing the `wrap_non_picklable_objects` function. Write a Python function `def wrap_non_picklable_objects(obj, keep_wrapper=True)` to solve the following problem:
Wrapper for non-picklable object to use cloudpickle to serialize them. Note that this wrapper tends to slow down the serialization process as it is done with cloudpickle which is typically slower compared to pickle. The proper way to solve serialization issues is to avoid defining functions and objects in the main scripts and to implement __reduce__ functions for complex classes.
Here is the function:
def wrap_non_picklable_objects(obj, keep_wrapper=True):
"""Wrapper for non-picklable object to use cloudpickle to serialize them.
Note that this wrapper tends to slow down the serialization process as it
is done with cloudpickle which is typically slower compared to pickle. The
proper way to solve serialization issues is to avoid defining functions and
objects in the main scripts and to implement __reduce__ functions for
complex classes.
"""
# If obj is a class, create a CloudpickledClassWrapper which instantiates
# the object internally and wrap it directly in a CloudpickledObjectWrapper
if inspect.isclass(obj):
class CloudpickledClassWrapper(CloudpickledObjectWrapper):
def __init__(self, *args, **kwargs):
self._obj = obj(*args, **kwargs)
self._keep_wrapper = keep_wrapper
CloudpickledClassWrapper.__name__ = obj.__name__
return CloudpickledClassWrapper
# If obj is an instance of a class, just wrap it in a regular
# CloudpickledObjectWrapper
return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper) | Wrapper for non-picklable object to use cloudpickle to serialize them. Note that this wrapper tends to slow down the serialization process as it is done with cloudpickle which is typically slower compared to pickle. The proper way to solve serialization issues is to avoid defining functions and objects in the main scripts and to implement __reduce__ functions for complex classes. |
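A hedged usage sketch: it assumes the decorator is exposed at the joblib top level (as in recent joblib releases) and uses a throwaway `add_one` helper for illustration.

import pickle
from joblib import wrap_non_picklable_objects

@wrap_non_picklable_objects
def add_one(x):
    return x + 1

# The wrapper makes the object picklable with plain pickle by delegating the
# serialization of the wrapped function to cloudpickle.
restored = pickle.loads(pickle.dumps(add_one))
print(restored(41))  # 42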
172,341 | import time
import warnings
import threading
import multiprocessing as mp
from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
from .backend.context import cpu_count
from .backend import get_context
_executor_lock = threading.RLock()
_next_executor_id = 0
The provided code snippet includes necessary dependencies for implementing the `_get_next_executor_id` function. Write a Python function `def _get_next_executor_id()` to solve the following problem:
Ensure that each successive executor instance has a unique, monotonic id. The purpose of this monotonic id is to help debug and test automated instance creation.
Here is the function:
def _get_next_executor_id():
"""Ensure that each successive executor instance has a unique, monotonic id.
The purpose of this monotonic id is to help debug and test automated
instance creation.
"""
global _next_executor_id
with _executor_lock:
executor_id = _next_executor_id
_next_executor_id += 1
return executor_id | Ensure that each successive executor instance has a unique, monotonic id. The purpose of this monotonic id is to help debug and test automated instance creation. |
172,342 | import time
import warnings
import threading
import multiprocessing as mp
from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS
from .backend.context import cpu_count
from .backend import get_context
_executor = None
class _ReusablePoolExecutor(ProcessPoolExecutor):
def __init__(self, submit_resize_lock, max_workers=None, context=None,
timeout=None, executor_id=0, job_reducers=None,
result_reducers=None, initializer=None, initargs=(),
env=None):
super().__init__(
max_workers=max_workers, context=context, timeout=timeout,
job_reducers=job_reducers, result_reducers=result_reducers,
initializer=initializer, initargs=initargs, env=env)
self.executor_id = executor_id
self._submit_resize_lock = submit_resize_lock
def get_reusable_executor(cls, max_workers=None, context=None, timeout=10,
kill_workers=False, reuse="auto",
job_reducers=None, result_reducers=None,
initializer=None, initargs=(), env=None):
with _executor_lock:
global _executor, _executor_kwargs
executor = _executor
if max_workers is None:
if reuse is True and executor is not None:
max_workers = executor._max_workers
else:
max_workers = cpu_count()
elif max_workers <= 0:
raise ValueError(
f"max_workers must be greater than 0, got {max_workers}."
)
if isinstance(context, str):
context = get_context(context)
if context is not None and context.get_start_method() == "fork":
raise ValueError(
"Cannot use reusable executor with the 'fork' context"
)
kwargs = dict(context=context, timeout=timeout,
job_reducers=job_reducers,
result_reducers=result_reducers,
initializer=initializer, initargs=initargs,
env=env)
if executor is None:
is_reused = False
mp.util.debug(
f"Create a executor with max_workers={max_workers}."
)
executor_id = _get_next_executor_id()
_executor_kwargs = kwargs
_executor = executor = cls(
_executor_lock, max_workers=max_workers,
executor_id=executor_id, **kwargs)
else:
if reuse == 'auto':
reuse = kwargs == _executor_kwargs
if (executor._flags.broken or executor._flags.shutdown
or not reuse):
if executor._flags.broken:
reason = "broken"
elif executor._flags.shutdown:
reason = "shutdown"
else:
reason = "arguments have changed"
mp.util.debug(
"Creating a new executor with max_workers= "
f"{max_workers} as the previous instance cannot be "
f"reused ({reason})."
)
executor.shutdown(wait=True, kill_workers=kill_workers)
_executor = executor = _executor_kwargs = None
# Recursive call to build a new instance
return cls.get_reusable_executor(max_workers=max_workers,
**kwargs)
else:
mp.util.debug(
"Reusing existing executor with "
f"max_workers={executor._max_workers}."
)
is_reused = True
executor._resize(max_workers)
return executor, is_reused
def submit(self, fn, *args, **kwargs):
with self._submit_resize_lock:
return super().submit(fn, *args, **kwargs)
def _resize(self, max_workers):
with self._submit_resize_lock:
if max_workers is None:
raise ValueError("Trying to resize with max_workers=None")
elif max_workers == self._max_workers:
return
if self._executor_manager_thread is None:
# If the executor_manager_thread has not been started
# then no processes have been spawned and we can just
# update _max_workers and return
self._max_workers = max_workers
return
self._wait_job_completion()
# Some process might have returned due to timeout so check how many
# children are still alive. Use the _process_management_lock to
# ensure that no process are spawned or timeout during the resize.
with self._processes_management_lock:
processes = list(self._processes.values())
nb_children_alive = sum(p.is_alive() for p in processes)
self._max_workers = max_workers
for _ in range(max_workers, nb_children_alive):
self._call_queue.put(None)
while (len(self._processes) > max_workers
and not self._flags.broken):
time.sleep(1e-3)
self._adjust_process_count()
processes = list(self._processes.values())
while not all(p.is_alive() for p in processes):
time.sleep(1e-3)
def _wait_job_completion(self):
"""Wait for the cache to be empty before resizing the pool."""
# Issue a warning to the user about the bad effect of this usage.
if self._pending_work_items:
warnings.warn("Trying to resize an executor with running jobs: "
"waiting for jobs completion before resizing.",
UserWarning)
mp.util.debug(
f"Executor {self.executor_id} waiting for jobs completion "
"before resizing"
)
# Wait for the completion of the jobs
while self._pending_work_items:
time.sleep(1e-3)
def _setup_queues(self, job_reducers, result_reducers):
# As this executor can be resized, use a large queue size to avoid
# underestimating capacity and introducing overhead
queue_size = 2 * cpu_count() + EXTRA_QUEUED_CALLS
super()._setup_queues(
job_reducers, result_reducers, queue_size=queue_size
)
The provided code snippet includes necessary dependencies for implementing the `get_reusable_executor` function. Write a Python function `def get_reusable_executor(max_workers=None, context=None, timeout=10, kill_workers=False, reuse="auto", job_reducers=None, result_reducers=None, initializer=None, initargs=(), env=None)` to solve the following problem:
Return the current ReusableExecutor instance. Start a new instance if it has not been started already or if the previous instance was left in a broken state. If the previous instance does not have the requested number of workers, the executor is dynamically resized to adjust the number of workers prior to returning. Reusing a singleton instance spares the overhead of starting new worker processes and importing common python packages each time. ``max_workers`` controls the maximum number of tasks that can be running in parallel in worker processes. By default this is set to the number of CPUs on the host. Setting ``timeout`` (in seconds) makes idle workers automatically shutdown so as to release system resources. New workers are respawned upon submission of new tasks so that ``max_workers`` are available to accept the newly submitted tasks. Setting ``timeout`` to around 100 times the time required to spawn new processes and import packages in them (on the order of 100ms) ensures that the overhead of spawning workers is negligible. Setting ``kill_workers=True`` makes it possible to forcibly interrupt previously spawned jobs to get a new instance of the reusable executor with new constructor argument values. The ``job_reducers`` and ``result_reducers`` are used to customize the pickling of tasks and results sent to the executor. When provided, the ``initializer`` is run first in newly spawned processes with argument ``initargs``. The environment variables in the child process are a copy of the values in the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and ``VAL`` are string literals to overwrite the environment variable ``ENV`` in the child processes to value ``VAL``. The environment variables are set in the children before any module is loaded. This only works with the ``loky`` context.
Here is the function:
def get_reusable_executor(max_workers=None, context=None, timeout=10,
kill_workers=False, reuse="auto",
job_reducers=None, result_reducers=None,
initializer=None, initargs=(), env=None):
"""Return the current ReusableExectutor instance.
Start a new instance if it has not been started already or if the previous
instance was left in a broken state.
If the previous instance does not have the requested number of workers, the
executor is dynamically resized to adjust the number of workers prior to
returning.
Reusing a singleton instance spares the overhead of starting new worker
processes and importing common python packages each time.
``max_workers`` controls the maximum number of tasks that can be running in
parallel in worker processes. By default this is set to the number of
CPUs on the host.
Setting ``timeout`` (in seconds) makes idle workers automatically shutdown
    so as to release system resources. New workers are respawned upon submission
of new tasks so that ``max_workers`` are available to accept the newly
submitted tasks. Setting ``timeout`` to around 100 times the time required
to spawn new processes and import packages in them (on the order of 100ms)
ensures that the overhead of spawning workers is negligible.
Setting ``kill_workers=True`` makes it possible to forcibly interrupt
previously spawned jobs to get a new instance of the reusable executor
with new constructor argument values.
The ``job_reducers`` and ``result_reducers`` are used to customize the
    pickling of tasks and results sent to the executor.
When provided, the ``initializer`` is run first in newly spawned
processes with argument ``initargs``.
    The environment variables in the child process are a copy of the values in
the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and
    ``VAL`` are string literals to overwrite the environment variable ``ENV``
in the child processes to value ``VAL``. The environment variables are set
    in the children before any module is loaded. This only works with the
``loky`` context.
"""
_executor, _ = _ReusablePoolExecutor.get_reusable_executor(
max_workers=max_workers, context=context, timeout=timeout,
kill_workers=kill_workers, reuse=reuse, job_reducers=job_reducers,
result_reducers=result_reducers, initializer=initializer,
initargs=initargs, env=env
)
    return _executor | Return the current ReusableExecutor instance. Start a new instance if it has not been started already or if the previous instance was left in a broken state. If the previous instance does not have the requested number of workers, the executor is dynamically resized to adjust the number of workers prior to returning. Reusing a singleton instance spares the overhead of starting new worker processes and importing common python packages each time. ``max_workers`` controls the maximum number of tasks that can be running in parallel in worker processes. By default this is set to the number of CPUs on the host. Setting ``timeout`` (in seconds) makes idle workers automatically shutdown so as to release system resources. New workers are respawned upon submission of new tasks so that ``max_workers`` are available to accept the newly submitted tasks. Setting ``timeout`` to around 100 times the time required to spawn new processes and import packages in them (on the order of 100ms) ensures that the overhead of spawning workers is negligible. Setting ``kill_workers=True`` makes it possible to forcibly interrupt previously spawned jobs to get a new instance of the reusable executor with new constructor argument values. The ``job_reducers`` and ``result_reducers`` are used to customize the pickling of tasks and results sent to the executor. When provided, the ``initializer`` is run first in newly spawned processes with argument ``initargs``. The environment variables in the child process are a copy of the values in the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and ``VAL`` are string literals to overwrite the environment variable ``ENV`` in the child processes to value ``VAL``. The environment variables are set in the children before any module is loaded. This only works with the ``loky`` context.
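A hedged usage sketch; the import path assumes the loky vendored inside joblib (the standalone `loky` package exposes the same function).

from joblib.externals.loky import get_reusable_executor

if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=2)
    print(list(executor.map(pow, [2, 3, 4], [10, 10, 10])))  # [1024, 59049, 1048576]

    # Calling again with the same arguments returns the same warm instance.
    print(get_reusable_executor(max_workers=2) is executor)  # True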
172,343 | import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait
from ._base import Future
from .backend import get_context
from .backend.context import cpu_count
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer
class _RemoteTraceback(Exception):
"""Embed stringification of remote traceback in local traceback
"""
def __init__(self, tb=None):
self.tb = f'\n"""\n{tb}"""'
def __str__(self):
return self.tb
def _rebuild_exc(exc, tb):
exc.__cause__ = _RemoteTraceback(tb)
return exc | null |
172,344 | import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait
from ._base import Future
from .backend import get_context
from .backend.context import cpu_count
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer
The provided code snippet includes necessary dependencies for implementing the `_get_chunks` function. Write a Python function `def _get_chunks(chunksize, *iterables)` to solve the following problem:
Iterates over zip()ed iterables in chunks.
Here is the function:
def _get_chunks(chunksize, *iterables):
"""Iterates over zip()ed iterables in chunks. """
it = zip(*iterables)
while True:
chunk = tuple(itertools.islice(it, chunksize))
if not chunk:
return
yield chunk | Iterates over zip()ed iterables in chunks. |
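A quick, self-contained check of the chunking behaviour described above (the helper is repeated verbatim so the snippet runs on its own):

import itertools

def _get_chunks(chunksize, *iterables):
    it = zip(*iterables)
    while True:
        chunk = tuple(itertools.islice(it, chunksize))
        if not chunk:
            return
        yield chunk

print(list(_get_chunks(2, [1, 2, 3], "abc")))
# [((1, 'a'), (2, 'b')), ((3, 'c'),)]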
172,345 | import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait
from ._base import Future
from .backend import get_context
from .backend.context import cpu_count
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer
The provided code snippet includes necessary dependencies for implementing the `_process_chunk` function. Write a Python function `def _process_chunk(fn, chunk)` to solve the following problem:
Processes a chunk of an iterable passed to map. Runs the function passed to map() on a chunk of the iterable passed to map. This function is run in a separate process.
Here is the function:
def _process_chunk(fn, chunk):
"""Processes a chunk of an iterable passed to map.
Runs the function passed to map() on a chunk of the
iterable passed to map.
This function is run in a separate process.
"""
return [fn(*args) for args in chunk] | Processes a chunk of an iterable passed to map. Runs the function passed to map() on a chunk of the iterable passed to map. This function is run in a separate process. |
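Together with `_get_chunks`, this is how a `map(fn, *iterables)` call is evaluated on the worker side; a minimal check of the behaviour:

def _process_chunk(fn, chunk):
    return [fn(*args) for args in chunk]

print(_process_chunk(pow, ((2, 3), (4, 2), (10, 2))))  # [8, 16, 100]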
172,346 | import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait
from ._base import Future
from .backend import get_context
from .backend.context import cpu_count
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer
_CURRENT_DEPTH = 0
_MEMORY_LEAK_CHECK_DELAY = 1.
_MAX_MEMORY_LEAK_SIZE = int(3e8)
try:
from psutil import Process
_USE_PSUTIL = True
def _get_memory_usage(pid, force_gc=False):
if force_gc:
gc.collect()
mem_size = Process(pid).memory_info().rss
mp.util.debug(f'psutil return memory size: {mem_size}')
return mem_size
except ImportError:
_USE_PSUTIL = False
def _python_exit():
global _global_shutdown
_global_shutdown = True
items = list(_threads_wakeups.items())
if len(items) > 0:
mp.util.debug("Interpreter shutting down. Waking up "
f"executor_manager_thread {items}")
for _, (shutdown_lock, thread_wakeup) in items:
with shutdown_lock:
thread_wakeup.wakeup()
for thread, _ in items:
thread.join()
mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear())
class _RemoteTraceback(Exception):
"""Embed stringification of remote traceback in local traceback
"""
def __init__(self, tb=None):
self.tb = f'\n"""\n{tb}"""'
def __str__(self):
return self.tb
class _ExceptionWithTraceback(BaseException):
def __init__(self, exc):
tb = getattr(exc, "__traceback__", None)
if tb is None:
_, _, tb = sys.exc_info()
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = tb
def __reduce__(self):
return _rebuild_exc, (self.exc, self.tb)
class _ResultItem:
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
def _sendback_result(result_queue, work_id, result=None, exception=None):
"""Safely send back the given result or exception"""
try:
result_queue.put(_ResultItem(work_id, result=result,
exception=exception))
except BaseException as e:
exc = _ExceptionWithTraceback(e)
result_queue.put(_ResultItem(work_id, exception=exc))
def time() -> float: ...
The provided code snippet includes necessary dependencies for implementing the `_process_worker` function. Write a Python function `def _process_worker(call_queue, result_queue, initializer, initargs, processes_management_lock, timeout, worker_exit_lock, current_depth)` to solve the following problem:
Evaluates calls from call_queue and places the results in result_queue. This worker is run in a separate process. Args: call_queue: A ctx.Queue of _CallItems that will be read and evaluated by the worker. result_queue: A ctx.Queue of _ResultItems that will be written to by the worker. initializer: A callable initializer, or None initargs: A tuple of args for the initializer processes_management_lock: A ctx.Lock avoiding worker timeout while some workers are being spawned. timeout: maximum time to wait for a new item in the call_queue. If that time is expired, the worker will shutdown. worker_exit_lock: Lock to avoid flagging the executor as broken on workers timeout. current_depth: Nested parallelism level, to avoid infinite spawning.
Here is the function:
def _process_worker(call_queue, result_queue, initializer, initargs,
processes_management_lock, timeout, worker_exit_lock,
current_depth):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A ctx.Queue of _CallItems that will be read and
evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will be written
to by the worker.
initializer: A callable initializer, or None
initargs: A tuple of args for the initializer
processes_management_lock: A ctx.Lock avoiding worker timeout while
some workers are being spawned.
timeout: maximum time to wait for a new item in the call_queue. If that
time is expired, the worker will shutdown.
worker_exit_lock: Lock to avoid flagging the executor as broken on
workers timeout.
current_depth: Nested parallelism level, to avoid infinite spawning.
"""
if initializer is not None:
try:
initializer(*initargs)
except BaseException:
LOGGER.critical('Exception in initializer:', exc_info=True)
# The parent will notice that the process stopped and
# mark the pool broken
return
# set the global _CURRENT_DEPTH mechanism to limit recursive call
global _CURRENT_DEPTH
_CURRENT_DEPTH = current_depth
_process_reference_size = None
_last_memory_leak_check = None
pid = os.getpid()
mp.util.debug(f'Worker started with timeout={timeout}')
while True:
try:
call_item = call_queue.get(block=True, timeout=timeout)
if call_item is None:
mp.util.info("Shutting down worker on sentinel")
except queue.Empty:
mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s")
if processes_management_lock.acquire(block=False):
processes_management_lock.release()
call_item = None
else:
mp.util.info("Could not acquire processes_management_lock")
continue
except BaseException:
previous_tb = traceback.format_exc()
try:
result_queue.put(_RemoteTraceback(previous_tb))
except BaseException:
# If we cannot format correctly the exception, at least print
# the traceback.
print(previous_tb)
mp.util.debug('Exiting with code 1')
sys.exit(1)
if call_item is None:
# Notify queue management thread about worker shutdown
result_queue.put(pid)
is_clean = worker_exit_lock.acquire(True, timeout=30)
# Early notify any loky executor running in this worker process
            # (nested parallelism) that this process is about to shut down, to
            # avoid a deadlock waiting indefinitely for the worker to finish.
_python_exit()
if is_clean:
mp.util.debug('Exited cleanly')
else:
mp.util.info('Main process did not release worker_exit')
return
try:
r = call_item()
except BaseException as e:
exc = _ExceptionWithTraceback(e)
result_queue.put(_ResultItem(call_item.work_id, exception=exc))
else:
_sendback_result(result_queue, call_item.work_id, result=r)
del r
# Free the resource as soon as possible, to avoid holding onto
# open files or shared memory that is not needed anymore
del call_item
if _USE_PSUTIL:
if _process_reference_size is None:
# Make reference measurement after the first call
_process_reference_size = _get_memory_usage(pid, force_gc=True)
_last_memory_leak_check = time()
continue
if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
mem_usage = _get_memory_usage(pid)
_last_memory_leak_check = time()
if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
# Memory usage stays within bounds: everything is fine.
continue
# Check again memory usage; this time take the measurement
# after a forced garbage collection to break any reference
# cycles.
mem_usage = _get_memory_usage(pid, force_gc=True)
_last_memory_leak_check = time()
if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
# The GC managed to free the memory: everything is fine.
continue
# The process is leaking memory: let the master process
# know that we need to start a new worker.
mp.util.info("Memory leak detected: shutting down worker")
result_queue.put(pid)
with worker_exit_lock:
mp.util.debug('Exit due to memory leak')
return
else:
# if psutil is not installed, trigger gc.collect events
# regularly to limit potential memory leaks due to reference cycles
if (_last_memory_leak_check is None or
(time() - _last_memory_leak_check >
_MEMORY_LEAK_CHECK_DELAY)):
gc.collect()
                _last_memory_leak_check = time() | Evaluates calls from call_queue and places the results in result_queue. This worker is run in a separate process. Args: call_queue: A ctx.Queue of _CallItems that will be read and evaluated by the worker. result_queue: A ctx.Queue of _ResultItems that will be written to by the worker. initializer: A callable initializer, or None initargs: A tuple of args for the initializer processes_management_lock: A ctx.Lock avoiding worker timeout while some workers are being spawned. timeout: maximum time to wait for a new item in the call_queue. If that time is expired, the worker will shutdown. worker_exit_lock: Lock to avoid flagging the executor as broken on workers timeout. current_depth: Nested parallelism level, to avoid infinite spawning.
172,347 | import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait
from ._base import Future
from .backend import get_context
from .backend.context import cpu_count
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked and _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# undetermined limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = (
f"system provides too few semaphores ({nsems_max} available, "
"256 necessary)"
)
raise NotImplementedError(_system_limited) | null |
172,348 | import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait
from ._base import Future
from .backend import get_context
from .backend.context import cpu_count
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer
The provided code snippet includes necessary dependencies for implementing the `_chain_from_iterable_of_lists` function. Write a Python function `def _chain_from_iterable_of_lists(iterable)` to solve the following problem:
Specialized implementation of itertools.chain.from_iterable. Each item in *iterable* should be a list. This function is careful not to keep references to yielded objects.
Here is the function:
def _chain_from_iterable_of_lists(iterable):
"""
Specialized implementation of itertools.chain.from_iterable.
Each item in *iterable* should be a list. This function is
careful not to keep references to yielded objects.
"""
for element in iterable:
element.reverse()
while element:
yield element.pop() | Specialized implementation of itertools.chain.from_iterable. Each item in *iterable* should be a list. This function is careful not to keep references to yielded objects. |
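A self-contained check of the flattening behaviour (each input list is consumed in place, so no reference to already-yielded items is kept):

def _chain_from_iterable_of_lists(iterable):
    for element in iterable:
        element.reverse()
        while element:
            yield element.pop()

print(list(_chain_from_iterable_of_lists([[1, 2], [3], [4, 5, 6]])))
# [1, 2, 3, 4, 5, 6]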
172,349 | import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait
from ._base import Future
from .backend import get_context
from .backend.context import cpu_count
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer
MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10))
_CURRENT_DEPTH = 0
class LokyRecursionError(RuntimeError):
"""Raised when a process try to spawn too many levels of nested processes.
"""
def _check_max_depth(context):
    # Limit the maximal recursion level
global _CURRENT_DEPTH
if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0:
raise LokyRecursionError(
"Could not spawn extra nested processes at depth superior to "
"MAX_DEPTH=1. It is not possible to increase this limit when "
"using the 'fork' start method.")
if 0 < MAX_DEPTH and _CURRENT_DEPTH + 1 > MAX_DEPTH:
raise LokyRecursionError(
"Could not spawn extra nested processes at depth superior to "
f"MAX_DEPTH={MAX_DEPTH}. If this is intendend, you can change "
"this limit with the LOKY_MAX_DEPTH environment variable.") | null |
172,350 | import os
import sys
import time
import errno
import signal
import warnings
import subprocess
import traceback
def kill_process_tree(process, use_psutil=True):
"""Terminate process and its descendants with SIGKILL"""
if use_psutil and psutil is not None:
_kill_process_tree_with_psutil(process)
else:
_kill_process_tree_without_psutil(process)
def recursive_terminate(process, use_psutil=True):
warnings.warn(
"recursive_terminate is deprecated in loky 3.2, use kill_process_tree"
"instead",
DeprecationWarning,
)
kill_process_tree(process, use_psutil=use_psutil) | null |
172,351 | import os
import sys
import time
import errno
import signal
import warnings
import subprocess
import traceback
def _format_exitcodes(exitcodes):
"""Format a list of exit code with names of the signals if possible"""
str_exitcodes = [f"{_get_exitcode_name(e)}({e})"
for e in exitcodes if e is not None]
return "{" + ", ".join(str_exitcodes) + "}"
The provided code snippet includes necessary dependencies for implementing the `get_exitcodes_terminated_worker` function. Write a Python function `def get_exitcodes_terminated_worker(processes)` to solve the following problem:
Return a formatted string with the exitcodes of terminated workers. If necessary, wait (up to .25s) for the system to correctly set the exitcode of one terminated worker.
Here is the function:
def get_exitcodes_terminated_worker(processes):
"""Return a formated string with the exitcodes of terminated workers.
If necessary, wait (up to .25s) for the system to correctly set the
exitcode of one terminated worker.
"""
patience = 5
# Catch the exitcode of the terminated workers. There should at least be
# one. If not, wait a bit for the system to correctly set the exitcode of
# the terminated worker.
exitcodes = [p.exitcode for p in list(processes.values())
if p.exitcode is not None]
while not exitcodes and patience > 0:
patience -= 1
exitcodes = [p.exitcode for p in list(processes.values())
if p.exitcode is not None]
time.sleep(.05)
    return _format_exitcodes(exitcodes) | Return a formatted string with the exitcodes of terminated workers. If necessary, wait (up to .25s) for the system to correctly set the exitcode of one terminated worker.
172,352 | import os
import socket
import _socket
from multiprocessing.connection import Connection
from multiprocessing.context import get_spawning_popen
from .reduction import register
import os
if 'PYZMQ_BACKEND' in os.environ:
backend = os.environ['PYZMQ_BACKEND']
if backend in ('cython', 'cffi'):
backend = 'zmq.backend.%s' % backend
_ns = select_backend(backend)
else:
# default to cython, fallback to cffi
# (reverse on PyPy)
if platform.python_implementation() == 'PyPy':
first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
else:
first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
try:
_ns = select_backend(first)
except Exception as original_error:
try:
_ns = select_backend(second)
except ImportError:
raise original_error from None
def _mk_inheritable(fd):
os.set_inheritable(fd, True)
return fd | null |
172,353 | import os
import socket
import _socket
from multiprocessing.connection import Connection
from multiprocessing.context import get_spawning_popen
from .reduction import register
def DupFd(fd):
'''Return a wrapper for an fd.'''
popen_obj = get_spawning_popen()
if popen_obj is not None:
return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
elif HAVE_SEND_HANDLE:
from multiprocessing import resource_sharer
return resource_sharer.DupFd(fd)
else:
raise TypeError(
'Cannot pickle connection object. This object can only be '
'passed when spawning a new process'
)
def _rebuild_socket(df, family, type, proto):
fd = df.detach()
return socket.fromfd(fd, family, type, proto)
def _reduce_socket(s):
df = DupFd(s.fileno())
return _rebuild_socket, (df, s.family, s.type, s.proto) | null |
172,354 | import os
import socket
import _socket
from multiprocessing.connection import Connection
from multiprocessing.context import get_spawning_popen
from .reduction import register
def DupFd(fd):
'''Return a wrapper for an fd.'''
popen_obj = get_spawning_popen()
if popen_obj is not None:
return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
elif HAVE_SEND_HANDLE:
from multiprocessing import resource_sharer
return resource_sharer.DupFd(fd)
else:
raise TypeError(
'Cannot pickle connection object. This object can only be '
'passed when spawning a new process'
)
def rebuild_connection(df, readable, writable):
fd = df.detach()
return Connection(fd, readable, writable)
def reduce_connection(conn):
df = DupFd(conn.fileno())
return rebuild_connection, (df, conn.readable, conn.writable) | null |
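How this reducer gets installed is not shown in the row; a minimal illustrative sketch, assuming the `register` helper imported above associates a type with its reduce function for loky's pickler:
# Illustrative only: make Connection objects picklable when sent to children.
register(Connection, reduce_connection)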
172,355 | import os
import shutil
import sys
import signal
import warnings
import threading
from _multiprocessing import sem_unlink
from multiprocessing import util
from . import spawn
if sys.platform == "win32":
import _winapi
import msvcrt
from multiprocessing.reduction import duplicate
if os.name == "posix":
_CLEANUP_FUNCS['semlock'] = sem_unlink
import os
if 'PYZMQ_BACKEND' in os.environ:
backend = os.environ['PYZMQ_BACKEND']
if backend in ('cython', 'cffi'):
backend = 'zmq.backend.%s' % backend
_ns = select_backend(backend)
else:
# default to cython, fallback to cffi
# (reverse on PyPy)
if platform.python_implementation() == 'PyPy':
first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
else:
first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
try:
_ns = select_backend(first)
except Exception as original_error:
try:
_ns = select_backend(second)
except ImportError:
raise original_error from None
def fork_exec(cmd, keep_fds, env=None):
def spawnv_passfds(path, args, passfds):
passfds = sorted(passfds)
if sys.platform != "win32":
errpipe_read, errpipe_write = os.pipe()
try:
from .reduction import _mk_inheritable
from .fork_exec import fork_exec
_pass = [_mk_inheritable(fd) for fd in passfds]
return fork_exec(args, _pass)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
else:
cmd = ' '.join(f'"{x}"' for x in args)
try:
_, ht, pid, _ = _winapi.CreateProcess(
path, cmd, None, None, True, 0, None, None, None)
_winapi.CloseHandle(ht)
except BaseException:
pass
return pid | null |
172,356 | import os
import sys
import runpy
import types
from multiprocessing import process, util
if sys.platform != 'win32':
WINEXE = False
WINSERVICE = False
else:
import msvcrt
from multiprocessing.reduction import duplicate
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def _check_not_importing_main():
if getattr(process.current_process(), '_inheriting', False):
raise RuntimeError('''
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.''')
import os
if 'PYZMQ_BACKEND' in os.environ:
backend = os.environ['PYZMQ_BACKEND']
if backend in ('cython', 'cffi'):
backend = 'zmq.backend.%s' % backend
_ns = select_backend(backend)
else:
# default to cython, fallback to cffi
# (reverse on PyPy)
if platform.python_implementation() == 'PyPy':
first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
else:
first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
try:
_ns = select_backend(first)
except Exception as original_error:
try:
_ns = select_backend(second)
except ImportError:
raise original_error from None
_resource_tracker = ResourceTracker()
The provided code snippet includes necessary dependencies for implementing the `get_preparation_data` function. Write a Python function `def get_preparation_data(name, init_main_module=True)` to solve the following problem:
Return info about parent needed by child to unpickle process object
Here is the function:
def get_preparation_data(name, init_main_module=True):
'''
Return info about parent needed by child to unpickle process object
'''
_check_not_importing_main()
d = dict(
log_to_stderr=util._log_to_stderr,
authkey=bytes(process.current_process().authkey),
name=name,
sys_argv=sys.argv,
orig_dir=process.ORIGINAL_DIR,
dir=os.getcwd()
)
# Send sys_path and make sure the current directory will not be changed
d['sys_path'] = [p if p != '' else process.ORIGINAL_DIR for p in sys.path]
# Make sure to pass the information if the multiprocessing logger is active
if util._logger is not None:
d['log_level'] = util._logger.getEffectiveLevel()
if util._logger.handlers:
h = util._logger.handlers[0]
d['log_fmt'] = h.formatter._fmt
# Tell the child how to communicate with the resource_tracker
from .resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
d["tracker_args"] = {"pid": _resource_tracker._pid}
if sys.platform == "win32":
child_w = duplicate(
msvcrt.get_osfhandle(_resource_tracker._fd), inheritable=True)
d["tracker_args"]["fh"] = child_w
else:
d["tracker_args"]["fd"] = _resource_tracker._fd
if sys.version_info >= (3, 8) and os.name == 'posix':
# joblib/loky#242: allow loky processes to retrieve the resource
# tracker of their parent in case the child processes unpickle
# shared_memory objects that are still tracked by multiprocessing's
# resource_tracker by default.
# XXX: this is a workaround that may be error prone: in the future, it
# would be better to have loky subclass multiprocessing's shared_memory
# to force registration of shared_memory segments via loky's
# resource_tracker.
from multiprocessing.resource_tracker import (
_resource_tracker as mp_resource_tracker
)
# multiprocessing's resource_tracker must be running before the loky
# process is created (otherwise the child won't be able to use it if it
# is created later on)
mp_resource_tracker.ensure_running()
d["mp_tracker_args"] = {
'fd': mp_resource_tracker._fd, 'pid': mp_resource_tracker._pid
}
# Figure out whether to initialise main in the subprocess as a module
# or through direct execution (or to leave it alone entirely)
if init_main_module:
main_module = sys.modules['__main__']
try:
main_mod_name = getattr(main_module.__spec__, "name", None)
except BaseException:
main_mod_name = None
if main_mod_name is not None:
d['init_main_from_name'] = main_mod_name
elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
main_path = getattr(main_module, '__file__', None)
if main_path is not None:
if (not os.path.isabs(main_path) and
process.ORIGINAL_DIR is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['init_main_from_path'] = os.path.normpath(main_path)
return d | Return info about parent needed by child to unpickle process object |
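A hedged usage sketch from the parent side, assuming the surrounding loky backend package (its `resource_tracker` and `spawn` modules) is importable; the call must run under the main guard, otherwise `_check_not_importing_main` raises:
if __name__ == "__main__":
    # Collect everything a spawned child needs to rebuild the parent's state.
    prep_data = get_preparation_data("LokyProcess-1")
    print(sorted(prep_data))  # e.g. ['authkey', 'dir', 'name', 'sys_argv', ...]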
172,357 | import os
import sys
import runpy
import types
from multiprocessing import process, util
if sys.platform != 'win32':
WINEXE = False
WINSERVICE = False
else:
import msvcrt
from multiprocessing.reduction import duplicate
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
def _fixup_main_from_name(mod_name):
# __main__.py files for packages, directories, zip archives, etc, run
# their "main only" code unconditionally, so we don't even try to
# populate anything in __main__, nor do we make any changes to
# __main__ attributes
current_main = sys.modules['__main__']
if mod_name == "__main__" or mod_name.endswith(".__main__"):
return
# If this process was forked, __main__ may already be populated
if getattr(current_main.__spec__, "name", None) == mod_name:
return
# Otherwise, __main__ may contain some non-main code where we need to
# support unpickling it properly. We rerun it as __mp_main__ and make
# the normal __main__ an alias to that
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_module(mod_name,
run_name="__mp_main__",
alter_sys=True)
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def _fixup_main_from_path(main_path):
# If this process was forked, __main__ may already be populated
current_main = sys.modules['__main__']
# Unfortunately, the main ipython launch script historically had no
# "if __name__ == '__main__'" guard, so we work around that
# by treating it like a __main__.py file
# See https://github.com/ipython/ipython/issues/4698
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == 'ipython':
return
# Otherwise, if __file__ already has the setting we expect,
# there's nothing more to do
if getattr(current_main, '__file__', None) == main_path:
return
# If the parent process has sent a path through rather than a module
# name we assume it is an executable script that may contain
# non-main code that needs to be executed
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_path(main_path,
run_name="__mp_main__")
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
import os
if 'PYZMQ_BACKEND' in os.environ:
backend = os.environ['PYZMQ_BACKEND']
if backend in ('cython', 'cffi'):
backend = 'zmq.backend.%s' % backend
_ns = select_backend(backend)
else:
# default to cython, fallback to cffi
# (reverse on PyPy)
if platform.python_implementation() == 'PyPy':
first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
else:
first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
try:
_ns = select_backend(first)
except Exception as original_error:
try:
_ns = select_backend(second)
except ImportError:
raise original_error from None
_resource_tracker = ResourceTracker()
The provided code snippet includes necessary dependencies for implementing the `prepare` function. Write a Python function `def prepare(data)` to solve the following problem:
Try to get current process ready to unpickle process object
Here is the function:
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process().authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'log_fmt' in data:
import logging
util.get_logger().handlers[0].setFormatter(
logging.Formatter(data['log_fmt'])
)
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'mp_tracker_args' in data:
from multiprocessing.resource_tracker import (
_resource_tracker as mp_resource_tracker
)
mp_resource_tracker._fd = data['mp_tracker_args']['fd']
mp_resource_tracker._pid = data['mp_tracker_args']['pid']
if 'tracker_args' in data:
from .resource_tracker import _resource_tracker
_resource_tracker._pid = data["tracker_args"]['pid']
if sys.platform == 'win32':
handle = data["tracker_args"]["fh"]
_resource_tracker._fd = msvcrt.open_osfhandle(handle, 0)
else:
_resource_tracker._fd = data["tracker_args"]["fd"]
if 'init_main_from_name' in data:
_fixup_main_from_name(data['init_main_from_name'])
elif 'init_main_from_path' in data:
_fixup_main_from_path(data['init_main_from_path']) | Try to get current process ready to unpickle process object |
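A small usage sketch for the child side: `prepare` applies whichever keys are present and skips the rest, so a reduced dict with hypothetical values is enough to illustrate it:
prepare({
    "name": "LokyProcess-1",       # renames the current process
    "sys_argv": ["worker.py"],     # restores the parent's argv
    "orig_dir": os.getcwd(),       # remembered as process.ORIGINAL_DIR
})
assert process.current_process().name == "LokyProcess-1"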
172,358 | import os
import socket
import _winapi
from multiprocessing.connection import PipeConnection
from multiprocessing.reduction import _reduce_socket
from .reduction import register
class DupHandle:
def __init__(self, handle, access, pid=None):
# duplicate handle for process with given pid
if pid is None:
pid = os.getpid()
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
try:
self._handle = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(),
handle, proc, access, False, 0)
finally:
_winapi.CloseHandle(proc)
self._access = access
self._pid = pid
def detach(self):
# retrieve handle from process which currently owns it
if self._pid == os.getpid():
return self._handle
proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
self._pid)
try:
return _winapi.DuplicateHandle(
proc, self._handle, _winapi.GetCurrentProcess(),
self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
finally:
_winapi.CloseHandle(proc)
def rebuild_pipe_connection(dh, readable, writable):
handle = dh.detach()
return PipeConnection(handle, readable, writable)
def reduce_pipe_connection(conn):
access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
(_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
dh = DupHandle(conn.fileno(), access)
return rebuild_pipe_connection, (dh, conn.readable, conn.writable) | null |
172,359 | import os
import sys
import math
import subprocess
import traceback
import warnings
import multiprocessing as mp
from multiprocessing import get_context as mp_get_context
from multiprocessing.context import BaseContext
from .process import LokyProcess, LokyInitMainProcess
START_METHODS = ['loky', 'loky_init_main', 'spawn']
_DEFAULT_START_METHOD = None
def get_context(method=None):
# Try to overload the default context
method = method or _DEFAULT_START_METHOD or "loky"
if method == "fork":
# If 'fork' is explicitly requested, warn user about potential issues.
warnings.warn("`fork` start method should not be used with "
"`loky` as it does not respect POSIX. Try using "
"`spawn` or `loky` instead.", UserWarning)
try:
return mp_get_context(method)
except ValueError:
raise ValueError(
f"Unknown context '{method}'. Value should be in "
f"{START_METHODS}."
) | null |
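A brief usage sketch, assuming the 'loky' start method has already been registered with multiprocessing (the real context module does this at import time):
ctx = get_context("loky")
p = ctx.Process(target=print, args=("hello from a loky worker",))
p.start()
p.join()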
172,360 | import os
import sys
import math
import subprocess
import traceback
import warnings
import multiprocessing as mp
from multiprocessing import get_context as mp_get_context
from multiprocessing.context import BaseContext
from .process import LokyProcess, LokyInitMainProcess
START_METHODS = ['loky', 'loky_init_main', 'spawn']
_DEFAULT_START_METHOD = None
def set_start_method(method, force=False):
global _DEFAULT_START_METHOD
if _DEFAULT_START_METHOD is not None and not force:
raise RuntimeError('context has already been set')
assert method is None or method in START_METHODS, (
f"'{method}' is not a valid start_method. It should be in "
f"{START_METHODS}"
)
_DEFAULT_START_METHOD = method | null |
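Usage sketch: pin the process-wide default so that later `get_context()` calls pick it up; overriding an already-set default requires `force=True`:
set_start_method("loky_init_main")
ctx = get_context()                    # now resolves to 'loky_init_main'
set_start_method("spawn", force=True)  # without force=True this would raise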
172,361 | import os
import sys
import math
import subprocess
import traceback
import warnings
import multiprocessing as mp
from multiprocessing import get_context as mp_get_context
from multiprocessing.context import BaseContext
from .process import LokyProcess, LokyInitMainProcess
def _cpu_count_user(os_cpu_count):
"""Number of user defined available CPUs"""
# Number of available CPUs given affinity settings
cpu_count_affinity = os_cpu_count
if hasattr(os, 'sched_getaffinity'):
try:
cpu_count_affinity = len(os.sched_getaffinity(0))
except NotImplementedError:
pass
cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)
# User defined soft-limit passed as a loky specific environment variable.
cpu_count_loky = int(os.environ.get('LOKY_MAX_CPU_COUNT', os_cpu_count))
return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)
def _count_physical_cores():
"""Return a tuple (number of physical cores, exception)
If the number of physical cores is found, exception is set to None.
If it has not been found, return ("not found", exception).
The number of physical cores is cached to avoid repeating subprocess calls.
"""
exception = None
# First check if the value is cached
global physical_cores_cache
if physical_cores_cache is not None:
return physical_cores_cache, exception
# Not cached yet, find it
try:
if sys.platform == "linux":
cpu_info = subprocess.run(
"lscpu --parse=core".split(), capture_output=True, text=True)
cpu_info = cpu_info.stdout.splitlines()
cpu_info = {line for line in cpu_info if not line.startswith("#")}
cpu_count_physical = len(cpu_info)
elif sys.platform == "win32":
cpu_info = subprocess.run(
"wmic CPU Get NumberOfCores /Format:csv".split(),
capture_output=True, text=True)
cpu_info = cpu_info.stdout.splitlines()
cpu_info = [l.split(",")[1] for l in cpu_info
if (l and l != "Node,NumberOfCores")]
cpu_count_physical = sum(map(int, cpu_info))
elif sys.platform == "darwin":
cpu_info = subprocess.run(
"sysctl -n hw.physicalcpu".split(),
capture_output=True, text=True)
cpu_info = cpu_info.stdout
cpu_count_physical = int(cpu_info)
else:
raise NotImplementedError(f"unsupported platform: {sys.platform}")
# if cpu_count_physical < 1, we did not find a valid value
if cpu_count_physical < 1:
raise ValueError(
f"found {cpu_count_physical} physical cores < 1")
except Exception as e:
exception = e
cpu_count_physical = "not found"
# Put the result in cache
physical_cores_cache = cpu_count_physical
return cpu_count_physical, exception
import os
if 'PYZMQ_BACKEND' in os.environ:
backend = os.environ['PYZMQ_BACKEND']
if backend in ('cython', 'cffi'):
backend = 'zmq.backend.%s' % backend
_ns = select_backend(backend)
else:
# default to cython, fallback to cffi
# (reverse on PyPy)
if platform.python_implementation() == 'PyPy':
first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
else:
first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
try:
_ns = select_backend(first)
except Exception as original_error:
try:
_ns = select_backend(second)
except ImportError:
raise original_error from None
The provided code snippet includes necessary dependencies for implementing the `cpu_count` function. Write a Python function `def cpu_count(only_physical_cores=False)` to solve the following problem:
Return the number of CPUs the current process can use. The returned number of CPUs accounts for: * the number of CPUs in the system, as given by ``multiprocessing.cpu_count``; * the CPU affinity settings of the current process (available on some Unix systems); * Cgroup CPU bandwidth limit (available on Linux only, typically set by docker and similar container orchestration systems); * the value of the LOKY_MAX_CPU_COUNT environment variable if defined. and is given as the minimum of these constraints. If ``only_physical_cores`` is True, return the number of physical cores instead of the number of logical cores (hyperthreading / SMT). Note that this option is not enforced if the number of usable cores is controlled in any other way such as: process affinity, Cgroup restricted CPU bandwidth or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical cores is not found, return the number of logical cores. It is also always larger or equal to 1.
Here is the function:
def cpu_count(only_physical_cores=False):
"""Return the number of CPUs the current process can use.
The returned number of CPUs accounts for:
* the number of CPUs in the system, as given by
``multiprocessing.cpu_count``;
* the CPU affinity settings of the current process
(available on some Unix systems);
* Cgroup CPU bandwidth limit (available on Linux only, typically
set by docker and similar container orchestration systems);
* the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
and is given as the minimum of these constraints.
If ``only_physical_cores`` is True, return the number of physical cores
instead of the number of logical cores (hyperthreading / SMT). Note that
this option is not enforced if the number of usable cores is controlled in
any other way such as: process affinity, Cgroup restricted CPU bandwidth
or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
cores is not found, return the number of logical cores.
It is also always larger or equal to 1.
"""
# Note: os.cpu_count() is allowed to return None in its docstring
os_cpu_count = os.cpu_count() or 1
cpu_count_user = _cpu_count_user(os_cpu_count)
aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)
if not only_physical_cores:
return aggregate_cpu_count
if cpu_count_user < os_cpu_count:
# Respect user setting
return max(cpu_count_user, 1)
cpu_count_physical, exception = _count_physical_cores()
if cpu_count_physical != "not found":
return cpu_count_physical
# Fallback to default behavior
if exception is not None:
# warns only the first time
warnings.warn(
"Could not find the number of physical cores for the "
f"following reason:\n{exception}\n"
"Returning the number of logical cores instead. You can "
"silence this warning by setting LOKY_MAX_CPU_COUNT to "
"the number of cores you want to use.")
traceback.print_tb(exception.__traceback__)
return aggregate_cpu_count | Return the number of CPUs the current process can use. The returned number of CPUs accounts for: * the number of CPUs in the system, as given by ``multiprocessing.cpu_count``; * the CPU affinity settings of the current process (available on some Unix systems); * Cgroup CPU bandwidth limit (available on Linux only, typically set by docker and similar container orchestration systems); * the value of the LOKY_MAX_CPU_COUNT environment variable if defined. and is given as the minimum of these constraints. If ``only_physical_cores`` is True, return the number of physical cores instead of the number of logical cores (hyperthreading / SMT). Note that this option is not enforced if the number of usable cores is controlled in any other way such as: process affinity, Cgroup restricted CPU bandwidth or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical cores is not found, return the number of logical cores. It is also always larger or equal to 1. |
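A usage sketch, assuming the module-level pieces not repeated in this row (`_cpu_count_cgroup` and the `physical_cores_cache` global) are present as they are in loky's context module:
# Size a worker pool from the effective CPU budget rather than the raw count.
n_workers = cpu_count()
n_physical = cpu_count(only_physical_cores=True)
print(f"usable logical CPUs: {n_workers}, physical cores: {n_physical}")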
172,362 | import os
import sys
import msvcrt
import _winapi
from pickle import load
from multiprocessing import process, util
from multiprocessing.context import get_spawning_popen, set_spawning_popen
from multiprocessing.popen_spawn_win32 import Popen as _Popen
from multiprocessing.reduction import duplicate
from . import reduction, spawn
import os
if 'PYZMQ_BACKEND' in os.environ:
backend = os.environ['PYZMQ_BACKEND']
if backend in ('cython', 'cffi'):
backend = 'zmq.backend.%s' % backend
_ns = select_backend(backend)
else:
# default to cython, fallback to cffi
# (reverse on PyPy)
if platform.python_implementation() == 'PyPy':
first, second = ('zmq.backend.cffi', 'zmq.backend.cython')
else:
first, second = ('zmq.backend.cython', 'zmq.backend.cffi')
try:
_ns = select_backend(first)
except Exception as original_error:
try:
_ns = select_backend(second)
except ImportError:
raise original_error from None
def _path_eq(p1, p2):
return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) | null |
172,363 | import os
import sys
import msvcrt
import _winapi
from pickle import load
from multiprocessing import process, util
from multiprocessing.context import get_spawning_popen, set_spawning_popen
from multiprocessing.popen_spawn_win32 import Popen as _Popen
from multiprocessing.reduction import duplicate
from . import reduction, spawn
The provided code snippet includes necessary dependencies for implementing the `get_command_line` function. Write a Python function `def get_command_line(pipe_handle, **kwds)` to solve the following problem:
Returns prefix of command line used for spawning a child process
Here is the function:
def get_command_line(pipe_handle, **kwds):
'''
Returns prefix of command line used for spawning a child process
'''
if getattr(sys, 'frozen', False):
return [sys.executable, '--multiprocessing-fork', pipe_handle]
else:
prog = 'from joblib.externals.loky.backend.popen_loky_win32 import main; main()'
opts = util._args_from_interpreter_flags()
return [spawn.get_executable(), *opts,
'-c', prog, '--multiprocessing-fork', pipe_handle] | Returns prefix of command line used for spawning a child process |
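An illustrative result for a regular (non-frozen) interpreter on Windows; the exact executable path and interpreter flags depend on the environment, so treat the output below as hypothetical:
cmd = get_command_line(pipe_handle=1234)
# e.g. ['C:\\Python310\\python.exe', '-c',
#       'from joblib.externals.loky.backend.popen_loky_win32 import main; main()',
#       '--multiprocessing-fork', 1234]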
172,364 | import os
import sys
import msvcrt
import _winapi
from pickle import load
from multiprocessing import process, util
from multiprocessing.context import get_spawning_popen, set_spawning_popen
from multiprocessing.popen_spawn_win32 import Popen as _Popen
from multiprocessing.reduction import duplicate
from . import reduction, spawn
The provided code snippet includes necessary dependencies for implementing the `is_forking` function. Write a Python function `def is_forking(argv)` to solve the following problem:
Return whether commandline indicates we are forking
Here is the function:
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
assert len(argv) == 3
return True
else:
return False | Return whether commandline indicates we are forking |
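A quick self-contained check of the command-line convention this relies on:
print(is_forking(["python", "--multiprocessing-fork", "1234"]))  # True
print(is_forking(["python", "worker_script.py"]))                # False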
172,365 | import copyreg
import io
import functools
import types
import sys
import os
from multiprocessing import util
from pickle import loads, HIGHEST_PROTOCOL
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__) | null |
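Illustrative wiring only, assuming the reducer is installed through `copyreg` in the way loky's pickler dispatch is typically set up:
# Teach pickle to serialize bound methods as (getattr, (instance, method_name)).
copyreg.pickle(types.MethodType, _reduce_method)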
172,366 | import copyreg
import io
import functools
import types
import sys
import os
from multiprocessing import util
from pickle import loads, HIGHEST_PROTOCOL
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__) | null |
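The same kind of wiring, hypothetically, for built-in method descriptors such as `list.append` and slot wrappers such as `list.__add__`:
copyreg.pickle(type(list.append), _reduce_method_descriptor)
copyreg.pickle(type(list.__add__), _reduce_method_descriptor)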