index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
|---|---|---|---|---|---|
708,340
|
qpd.qpd_engine
|
logical_not
| null |
def logical_not(self, col: Column) -> Column:
    """Return the SQL ``NOT`` of ``col``; for series inputs, nulls stay null."""
    values = self._safe_bool(col.native)
    if not self.is_series(values):
        # Scalar path: _safe_bool yields a numeric 0/1, so 1.0 - x negates it.
        return Column(1.0 - values)
    # Series path: compute the null mask first, negate, then restore nulls.
    null_mask = values.isnull()
    negated = (values == 0).mask(null_mask, None)
    return Column(negated)
|
(self, col: qpd.dataframe.Column) -> qpd.dataframe.Column
|
708,341
|
qpd.qpd_engine
|
order_by_limit
| null |
@abstractmethod
def order_by_limit(
    self, df: DataFrame, order_by: OrderBySpec, limit: int
) -> DataFrame: # pragma: no cover
    """Abstract: presumably returns ``df`` sorted per ``order_by`` and
    truncated to ``limit`` rows — exact semantics defined by engines.
    """
    raise NotImplementedError
|
(self, df: qpd.dataframe.DataFrame, order_by: qpd.specs.OrderBySpec, limit: int) -> qpd.dataframe.DataFrame
|
708,342
|
qpd.qpd_engine
|
rename
| null |
def rename(self, col: Column, name: str) -> Column:
    """Return ``col`` renamed to ``name`` (thin wrapper over ``Column.rename``)."""
    return col.rename(name)
|
(self, col: qpd.dataframe.Column, name: str) -> qpd.dataframe.Column
|
708,343
|
qpd.qpd_engine
|
to_col
| null |
@abstractmethod
def to_col(self, value: Any, name: str = "") -> Column: # pragma: no cover
    """Abstract: convert an arbitrary ``value`` into a ``Column``;
    ``name`` presumably becomes the column name — defined by engines.
    """
    raise NotImplementedError
|
(self, value: Any, name: str = '') -> qpd.dataframe.Column
|
708,344
|
qpd.qpd_engine
|
to_df
| null |
@abstractmethod
def to_df(self, obj: Any) -> DataFrame: # pragma: no cover
    """Abstract: wrap a native object ``obj`` into an engine ``DataFrame``."""
    raise NotImplementedError
|
(self, obj: Any) -> qpd.dataframe.DataFrame
|
708,345
|
qpd.qpd_engine
|
to_native
| null |
@abstractmethod
def to_native(self, df: DataFrame) -> Any: # pragma: no cover
    """Abstract: unwrap ``df`` back into the engine's native frame object."""
    raise NotImplementedError
|
(self, df: qpd.dataframe.DataFrame) -> Any
|
708,346
|
qpd.qpd_engine
|
union
| null |
def union(self, df1: DataFrame, df2: DataFrame, unique: bool) -> DataFrame:
    """Union ``df1`` and ``df2`` (dedup presumably controlled by ``unique``;
    actual semantics live in ``pl_utils.union``)."""
    left = self.to_native(df1)
    right = self.to_native(df2)
    combined = self.pl_utils.union(left, right, unique)
    return self.to_df(combined)
|
(self, df1: qpd.dataframe.DataFrame, df2: qpd.dataframe.DataFrame, unique: bool) -> qpd.dataframe.DataFrame
|
708,347
|
qpd.qpd_engine
|
window
| null |
@abstractmethod
def window( # noqa: C901
    self,
    df: DataFrame,
    func: WindowFunctionSpec,
    args: List[ArgumentSpec],
    dest_col_name: str,
) -> DataFrame: # pragma: no cover
    """Abstract: apply window function ``func`` with ``args`` over ``df``,
    writing the result into a column named ``dest_col_name``.
    """
    raise NotImplementedError
|
(self, df: qpd.dataframe.DataFrame, func: qpd.specs.WindowFunctionSpec, args: List[qpd.specs.ArgumentSpec], dest_col_name: str) -> qpd.dataframe.DataFrame
|
708,353
|
qpd.run
|
run_sql
| null |
def run_sql(
    engine: QPDEngine, sql: str, dfs: Dict[str, Any], ignore_case: bool = False
) -> Any:
    """Parse ``sql`` as a single statement and execute it with ``engine``
    against the named dataframes in ``dfs``; return the workflow result.

    ``ignore_case`` is forwarded to the SQL parser.
    """
    parsed = QPDSql(sql, "singleStatement", ignore_case=ignore_case)
    context = QPDWorkflowContext(engine, dfs)
    workflow = QPDWorkflow(context)
    visitor = StatementVisitor(
        VisitorContext(sql=parsed, workflow=workflow, dfs=workflow.dfs)
    )
    workflow.assemble_output(visitor.visit(parsed.tree))
    workflow.run()
    return context.result
|
(engine: qpd.qpd_engine.QPDEngine, sql: str, dfs: Dict[str, Any], ignore_case: bool = False) -> Any
|
708,356
|
ipyniivue._widget
|
AnyNiivue
|
Represents a Niivue instance.
|
class AnyNiivue(OptionsMixin, anywidget.AnyWidget):
    """Represents a Niivue instance."""

    _esm = pathlib.Path(__file__).parent / "static" / "widget.js"
    _opts = t.Dict({}).tag(sync=True, to_json=serialize_options)
    _volumes = t.List(t.Instance(Volume), default_value=[]).tag(
        sync=True, **ipywidgets.widget_serialization
    )

    def __init__(self, **opts):
        """Create the widget, translating snake_case option names to the
        camelCase names expected by the JS side."""
        camel_opts = {}
        for key, value in opts.items():
            camel_opts[_SNAKE_TO_CAMEL_OVERRIDES.get(key, snake_to_camel(key))] = value
        super().__init__(_opts=camel_opts, _volumes=[])

    def load_volumes(self, volumes: list):
        """Replace the widget's volumes.

        Parameters
        ----------
        volumes : list
            Dictionaries of volume information, one per volume.
        """
        self._volumes = [Volume(**spec) for spec in volumes]

    def add_volume(self, volume: dict):
        """Append a single volume to the widget.

        Parameters
        ----------
        volume : dict
            A dictionary containing the volume information.
        """
        self._volumes = list(self._volumes) + [Volume(**volume)]

    @property
    def volumes(self):
        """The current list of volumes."""
        return self._volumes
|
(**opts)
|
708,361
|
ipyniivue._widget
|
__init__
| null |
def __init__(self, **opts):
    """Initialize the widget, converting snake_case option names to the
    JS camelCase names (overrides consulted first)."""
    converted = {
        _SNAKE_TO_CAMEL_OVERRIDES.get(key, snake_to_camel(key)): val
        for key, val in opts.items()
    }
    super().__init__(_opts=converted, _volumes=[])
|
(self, **opts)
|
708,387
|
ipyniivue._widget
|
add_volume
|
Add a single volume to the widget.
Parameters
----------
volume : dict
A dictionary containing the volume information.
|
def add_volume(self, volume: dict):
    """Add a single volume to the widget.

    Parameters
    ----------
    volume : dict
        A dictionary containing the volume information.
    """
    # Reassign (rather than mutate) so the traitlets change is observed.
    updated = list(self._volumes)
    updated.append(Volume(**volume))
    self._volumes = updated
|
(self, volume: dict)
|
708,398
|
ipyniivue._widget
|
load_volumes
|
Load a list of volumes into the widget.
Parameters
----------
volumes : list
A list of dictionaries containing the volume information.
|
def load_volumes(self, volumes: list):
    """Load a list of volumes into the widget, replacing any existing ones.

    Parameters
    ----------
    volumes : list
        Dictionaries containing the volume information.
    """
    self._volumes = [Volume(**spec) for spec in volumes]
|
(self, volumes: list)
|
708,419
|
ipyniivue._constants
|
DragMode
|
An enumeration.
|
class DragMode(enum.Enum):
    """Mouse-drag interaction modes (names suggest Niivue canvas behavior)."""
    CONTRAST = 1
    MEASUREMENT = 2
    PAN = 3
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
708,420
|
ipyniivue._constants
|
MuliplanarType
|
An enumeration.
|
class MuliplanarType(enum.Enum):
    """Multiplanar layout options.

    NOTE(review): the class name misspells "Multiplanar"; it is kept as-is
    for backward compatibility with existing callers.
    """
    AUTO = 0
    COLUMN = 1
    GRID = 2
    ROW = 3
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
708,421
|
ipyniivue._constants
|
SliceType
|
An enumeration.
|
class SliceType(enum.Enum):
    """Available slice/view types for display."""
    AXIAL = 1
    CORONAL = 2
    SAGITTAL = 3
    MULTIPLANAR = 4
    RENDER = 5
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
708,427
|
more_executors._impl.asyncio
|
AsyncioExecutor
|
An executor which delegates to another executor while converting
returned futures into instances of :class:`asyncio.Future`.
Note that since this class produces :mod:`asyncio` rather than :mod:`concurrent.futures`
future objects, AsyncioExecutor instances themselves cannot be used
as a delegate executor of other executor instances within this library.
.. versionadded:: 1.7.0
|
class AsyncioExecutor(Executor):
    """Wraps another executor, converting its returned futures into
    :class:`asyncio.Future` instances.

    Because the futures produced here are :mod:`asyncio` futures rather than
    :mod:`concurrent.futures` futures, an ``AsyncioExecutor`` cannot itself
    be used as the delegate executor of other executors in this library.

    .. versionadded:: 1.7.0
    """

    def __init__(self, delegate, loop=None, logger=None, name="default"):
        """
        Parameters:
            delegate (~concurrent.futures.Executor):
                executor to which callables will be submitted
            loop (~asyncio.AbstractEventLoop):
                asyncio event loop used to wrap futures; if omitted,
                :meth:`asyncio.get_event_loop` is consulted at submit time.

                .. note::
                    From Python 3.12, :meth:`asyncio.get_event_loop` raises
                    if there is no current event loop, so either pass a loop
                    explicitly or ensure one is running before constructing
                    this executor.
            logger (~logging.Logger):
                accepted for API compatibility; not used by this class
            name (str):
                a name for this executor

        .. versionchanged:: 2.7.0
            Introduced ``name``.
        """
        self._delegate = delegate
        self._loop = loop
        self._name = name
        self._shutdown = ShutdownHelper()
        labels = {"type": "asyncio", "executor": self._name}
        metrics.EXEC_TOTAL.labels(**labels).inc()
        metrics.EXEC_INPROGRESS.labels(**labels).inc()

    def submit(self, *args, **kwargs): # pylint: disable=arguments-differ
        # Delegate to submit_with_loop using the loop captured at construction.
        return self.submit_with_loop(self._loop, *args, **kwargs)

    def submit_with_loop(self, loop, fn, *args, **kwargs):
        """Submit a callable, wrapping its future with the given event loop.

        Parameters:
            loop (~asyncio.AbstractEventLoop):
                event loop used to wrap the future; when falsy,
                :meth:`asyncio.get_event_loop` supplies one
            fn (callable):
                callable to be submitted

        Returns:
            asyncio.Future:
                a future for the given callable
        """
        with self._shutdown.ensure_alive():
            target_loop = loop or asyncio.get_event_loop()
            inner = self._delegate.submit(fn, *args, **kwargs)
            return asyncio.wrap_future(inner, loop=target_loop)

    def shutdown(self, wait=True, **_kwargs):
        # Decrement the in-progress gauge only on the first shutdown call.
        if self._shutdown():
            metrics.EXEC_INPROGRESS.labels(type="asyncio", executor=self._name).dec()
        self._delegate.shutdown(wait, **_kwargs)
|
(delegate, loop=None, logger=None, name='default')
|
708,430
|
more_executors._impl.asyncio
|
__init__
|
Parameters:
delegate (~concurrent.futures.Executor):
executor to which callables will be submitted
loop (~asyncio.AbstractEventLoop):
asyncio event loop used to wrap futures; if omitted, the return
value of :meth:`asyncio.get_event_loop` is used.
.. note::
Starting from Python 3.12, :meth:`asyncio.get_event_loop` raises
an exception if there is no current event loop, so it is
necessary to either pass a loop explicitly or ensure there is a
running loop prior to constructing this executor.
logger (~logging.Logger):
a logger used for messages from this executor
name (str):
a name for this executor
.. versionchanged:: 2.7.0
Introduced ``name``.
|
def __init__(self, delegate, loop=None, logger=None, name="default"):
    """
    Parameters:
        delegate (~concurrent.futures.Executor):
            executor to which callables will be submitted
        loop (~asyncio.AbstractEventLoop):
            asyncio event loop used to wrap futures; if omitted, the return
            value of :meth:`asyncio.get_event_loop` is used.

            .. note::
                Starting from Python 3.12, :meth:`asyncio.get_event_loop` raises
                an exception if there is no current event loop, so it is
                necessary to either pass a loop explicitly or ensure there is a
                running loop prior to constructing this executor.
        logger (~logging.Logger):
            a logger used for messages from this executor
        name (str):
            a name for this executor

    .. versionchanged:: 2.7.0
        Introduced ``name``.
    """
    self._delegate = delegate
    # May be None; resolved lazily at submit time.
    self._loop = loop
    self._name = name
    self._shutdown = ShutdownHelper()
    # NOTE(review): ``logger`` is accepted but never stored or used in this block.
    metrics.EXEC_TOTAL.labels(type="asyncio", executor=self._name).inc()
    metrics.EXEC_INPROGRESS.labels(type="asyncio", executor=self._name).inc()
|
(self, delegate, loop=None, logger=None, name='default')
|
708,432
|
more_executors._impl.asyncio
|
shutdown
| null |
def shutdown(self, wait=True, **_kwargs):
    """Shut down this executor and its delegate.

    The in-progress metrics gauge is decremented only on the first call.
    """
    first_call = self._shutdown()
    if first_call:
        metrics.EXEC_INPROGRESS.labels(type="asyncio", executor=self._name).dec()
    self._delegate.shutdown(wait, **_kwargs)
|
(self, wait=True, **_kwargs)
|
708,433
|
more_executors._impl.asyncio
|
submit
| null |
def submit(self, *args, **kwargs): # pylint: disable=arguments-differ
    """Submit a callable, wrapping its future with the loop set at construction."""
    return self.submit_with_loop(self._loop, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,434
|
more_executors._impl.asyncio
|
submit_with_loop
|
Submit a callable with the specified event loop.
Parameters:
loop (~asyncio.AbstractEventLoop):
asyncio event loop used to wrap futures
fn (callable):
callable to be submitted
Returns:
asyncio.Future:
a future for the given callable
|
def submit_with_loop(self, loop, fn, *args, **kwargs):
    """Submit ``fn`` to the delegate and wrap its future for ``loop``.

    Parameters:
        loop (~asyncio.AbstractEventLoop):
            event loop used to wrap the future; when falsy,
            :meth:`asyncio.get_event_loop` supplies one
        fn (callable):
            callable to be submitted

    Returns:
        asyncio.Future:
            a future for the given callable
    """
    with self._shutdown.ensure_alive():
        target_loop = loop or asyncio.get_event_loop()
        inner = self._delegate.submit(fn, *args, **kwargs)
        return asyncio.wrap_future(inner, loop=target_loop)
|
(self, loop, fn, *args, **kwargs)
|
708,435
|
more_executors._impl.cancel_on_shutdown
|
CancelOnShutdownExecutor
|
An executor which delegates to another executor and cancels all
futures when the executor is shut down.
This class is useful in conjunction with executors having custom cancel
behavior, such as :class:`~more_executors.PollExecutor`.
.. note::
From Python 3.9 onwards, the standard
:meth:`~concurrent.futures.Executor.shutdown` method includes a
``cancel_futures`` parameter which can be used to cancel futures
on shutdown.
These two approaches of cancelling futures on shutdown have the
following differences:
- ``cancel_futures=True`` will only cancel futures which have not
yet started running.
- ``CancelOnShutdownExecutor`` will attempt to cancel *all* incomplete
futures, including those which are running.
If you're using other executors from this library and you want to
ensure futures are cancelled on shutdown, ``CancelOnShutdownExecutor``
should be preferred because several of the executor classes in this
library do support cancellation of running futures (unlike the executors
in the standard library).
|
class CancelOnShutdownExecutor(CanCustomizeBind, Executor):
    """Delegates to another executor and cancels all outstanding futures
    when the executor is shut down.

    Useful together with executors implementing custom cancel behavior,
    such as :class:`~more_executors.PollExecutor`.

    .. note::
        From Python 3.9 onwards, the standard
        :meth:`~concurrent.futures.Executor.shutdown` method includes a
        ``cancel_futures`` parameter which can be used to cancel futures
        on shutdown. The two approaches differ:

        - ``cancel_futures=True`` only cancels futures which have not
          yet started running.
        - ``CancelOnShutdownExecutor`` attempts to cancel *all* incomplete
          futures, including running ones.

        When using other executors from this library, prefer
        ``CancelOnShutdownExecutor``: several executor classes here do
        support cancellation of running futures (unlike the executors in
        the standard library).
    """

    def __init__(self, delegate, logger=None, name="default"):
        """
        Parameters:
            delegate (~concurrent.futures.Executor):
                executor to which callables will be submitted
            logger (~logging.Logger):
                a logger used for messages from this executor
            name (str):
                a name for this executor

        .. versionchanged:: 2.7.0
            Introduced ``name``.
        """
        self._log = LogWrapper(
            logger if logger else logging.getLogger("CancelOnShutdownExecutor")
        )
        self._name = name
        self._delegate = delegate
        # Incomplete futures, tracked so they can be cancelled at shutdown.
        self._futures = set()
        self._lock = RLock()
        self._shutdown = ShutdownHelper()
        labels = {"type": "cancel_on_shutdown", "executor": self._name}
        metrics.EXEC_TOTAL.labels(**labels).inc()
        metrics.EXEC_INPROGRESS.labels(**labels).inc()

    def shutdown(self, wait=True, **_kwargs):
        """Shut down the executor.

        Each incomplete future created by this executor receives a single
        :meth:`~concurrent.futures.Future.cancel` attempt; there is no
        guarantee any cancel succeeds.
        """
        with self._lock:
            if not self._shutdown():
                return
            metrics.EXEC_INPROGRESS.labels(
                type="cancel_on_shutdown", executor=self._name
            ).dec()
            # Snapshot under the lock; done-callbacks may mutate the set.
            pending = list(self._futures)
        for fut in pending:
            was_cancelled = fut.cancel()
            self._log.debug("Cancel %s: %s", fut, was_cancelled)
            if was_cancelled:
                metrics.SHUTDOWN_CANCEL.labels(executor=self._name).inc()
        self._delegate.shutdown(wait, **_kwargs)

    def submit(self, *args, **kwargs): # pylint: disable=arguments-differ
        with self._shutdown.ensure_alive():
            with self._lock:
                fut = self._delegate.submit(*args, **kwargs)
                self._futures.add(fut)
                # Completed futures remove themselves from the tracking set.
                fut.add_done_callback(self._futures.discard)
                return fut
|
(delegate, logger=None, name='default')
|
708,436
|
more_executors._impl.wrap
|
__propagate_name
| null |
def __propagate_name(self, kwargs):
    """Copy this executor's name into ``kwargs['name']`` unless one is set.

    Checks both the plain attribute and the thread-pool's name-mangled one.
    """
    if "name" in kwargs:
        return
    for attr in ("_name", "_CustomizableThreadPoolExecutor__name"):
        if hasattr(self, attr):
            kwargs["name"] = getattr(self, attr)
            return
|
(self, kwargs)
|
708,439
|
more_executors._impl.cancel_on_shutdown
|
__init__
|
Parameters:
delegate (~concurrent.futures.Executor):
executor to which callables will be submitted
logger (~logging.Logger):
a logger used for messages from this executor
name (str):
a name for this executor
.. versionchanged:: 2.7.0
Introduced ``name``.
|
def __init__(self, delegate, logger=None, name="default"):
    """
    Parameters:
        delegate (~concurrent.futures.Executor):
            executor to which callables will be submitted
        logger (~logging.Logger):
            a logger used for messages from this executor
        name (str):
            a name for this executor

    .. versionchanged:: 2.7.0
        Introduced ``name``.
    """
    self._log = LogWrapper(
        logger if logger else logging.getLogger("CancelOnShutdownExecutor")
    )
    self._name = name
    self._delegate = delegate
    # Incomplete futures, tracked so they can be cancelled at shutdown.
    self._futures = set()
    self._lock = RLock()
    self._shutdown = ShutdownHelper()
    metrics.EXEC_TOTAL.labels(type="cancel_on_shutdown", executor=self._name).inc()
    metrics.EXEC_INPROGRESS.labels(
        type="cancel_on_shutdown", executor=self._name
    ).inc()
|
(self, delegate, logger=None, name='default')
|
708,440
|
more_executors._impl.wrap
|
bind
| null |
def bind(self, *args, **kwargs):
    """Bind this executor to a callable; see :meth:`Executors.bind`."""
    from .executors import Executors
    return Executors.bind(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,441
|
more_executors._impl.wrap
|
flat_bind
| null |
def flat_bind(self, *args, **kwargs):
    """Bind this executor to a future-returning callable; see
    :meth:`Executors.flat_bind`."""
    from .executors import Executors
    return Executors.flat_bind(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,443
|
more_executors._impl.cancel_on_shutdown
|
shutdown
|
Shut down the executor.
All futures created by this executor which have not yet been completed
will have :meth:`~concurrent.futures.Future.cancel` invoked.
Note that there is no guarantee that the cancel will succeed, and only a single
attempt is made to cancel any future.
|
def shutdown(self, wait=True, **_kwargs):
    """Shut down the executor.

    Each incomplete future created by this executor receives a single
    :meth:`~concurrent.futures.Future.cancel` attempt; there is no
    guarantee any cancel succeeds.
    """
    with self._lock:
        if not self._shutdown():
            return
        metrics.EXEC_INPROGRESS.labels(
            type="cancel_on_shutdown", executor=self._name
        ).dec()
        # Snapshot under the lock; done-callbacks may mutate the set.
        pending = list(self._futures)
    for fut in pending:
        was_cancelled = fut.cancel()
        self._log.debug("Cancel %s: %s", fut, was_cancelled)
        if was_cancelled:
            metrics.SHUTDOWN_CANCEL.labels(executor=self._name).inc()
    self._delegate.shutdown(wait, **_kwargs)
|
(self, wait=True, **_kwargs)
|
708,444
|
more_executors._impl.cancel_on_shutdown
|
submit
| null |
def submit(self, *args, **kwargs): # pylint: disable=arguments-differ
    """Submit to the delegate, tracking the future for shutdown-time cancel."""
    with self._shutdown.ensure_alive():
        with self._lock:
            fut = self._delegate.submit(*args, **kwargs)
            self._futures.add(fut)
            # Completed futures remove themselves from the tracking set.
            fut.add_done_callback(self._futures.discard)
            return fut
|
(self, *args, **kwargs)
|
708,445
|
more_executors._impl.wrap
|
with_asyncio
| null |
def with_asyncio(self, *args, **kwargs):
    """Wrap via :meth:`Executors.with_asyncio`, propagating this
    executor's name unless one is given."""
    from .executors import Executors
    self.__propagate_name(kwargs)
    return Executors.with_asyncio(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,446
|
more_executors._impl.wrap
|
with_cancel_on_shutdown
| null |
def with_cancel_on_shutdown(self, *args, **kwargs):
    """Wrap via :meth:`Executors.with_cancel_on_shutdown`, propagating this
    executor's name unless one is given."""
    from .executors import Executors
    self.__propagate_name(kwargs)
    return Executors.with_cancel_on_shutdown(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,447
|
more_executors._impl.wrap
|
with_flat_map
| null |
def with_flat_map(self, *args, **kwargs):
    """Wrap via :meth:`Executors.with_flat_map`, propagating this
    executor's name unless one is given."""
    from .executors import Executors
    self.__propagate_name(kwargs)
    return Executors.with_flat_map(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,448
|
more_executors._impl.wrap
|
with_map
| null |
def with_map(self, *args, **kwargs):
    """Wrap via :meth:`Executors.with_map`, propagating this
    executor's name unless one is given."""
    from .executors import Executors
    self.__propagate_name(kwargs)
    return Executors.with_map(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,449
|
more_executors._impl.wrap
|
with_poll
| null |
def with_poll(self, *args, **kwargs):
    """Wrap via :meth:`Executors.with_poll`, propagating this
    executor's name unless one is given."""
    from .executors import Executors
    self.__propagate_name(kwargs)
    return Executors.with_poll(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,450
|
more_executors._impl.wrap
|
with_retry
| null |
def with_retry(self, *args, **kwargs):
    """Wrap via :meth:`Executors.with_retry`, propagating this
    executor's name unless one is given."""
    from .executors import Executors
    self.__propagate_name(kwargs)
    return Executors.with_retry(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,451
|
more_executors._impl.wrap
|
with_throttle
| null |
def with_throttle(self, *args, **kwargs):
    """Wrap via :meth:`Executors.with_throttle`, propagating this
    executor's name unless one is given."""
    from .executors import Executors
    self.__propagate_name(kwargs)
    return Executors.with_throttle(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,452
|
more_executors._impl.wrap
|
with_timeout
| null |
def with_timeout(self, *args, **kwargs):
    """Wrap via :meth:`Executors.with_timeout`, propagating this
    executor's name unless one is given."""
    from .executors import Executors
    self.__propagate_name(kwargs)
    return Executors.with_timeout(self, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,453
|
more_executors._impl.retry
|
ExceptionRetryPolicy
|
Retries on any exceptions under the given base class(es),
up to a fixed number of attempts, with an exponential
backoff between each attempt.
|
class ExceptionRetryPolicy(RetryPolicy):
    """Retry policy which triggers on exceptions of the configured base
    class(es), up to a fixed number of attempts, with a capped exponential
    backoff between attempts."""

    def __init__(self, **kwargs):
        """
        Parameters:
            max_attempts (int): maximum number of times a callable should be attempted
            exponent (float): exponent used for backoff, e.g. 2.0 will result in a delay
                which doubles between each attempt
            sleep (float): base value for delay between attempts in seconds, e.g. 1.0
                will delay by one second between first two attempts
            max_sleep (float): maximum delay between attempts, in seconds
            exception_base (type, list(type)): future will be retried if (and only if)
                it raises an exception inheriting from one of these classes

        All parameters are optional; reasonable defaults apply when omitted.
        """
        self._max_attempts = kwargs.get("max_attempts", 3)
        self._exponent = kwargs.get("exponent", 2.0)
        self._sleep = kwargs.get("sleep", 1.0)
        self._max_sleep = kwargs.get("max_sleep", 120)
        base = kwargs.get("exception_base", Exception)
        # Normalize a single class into a list for uniform handling below.
        self._exception_base = [base] if isinstance(base, type) else base

    def should_retry(self, attempt, future):
        """True when ``future`` failed with a retryable exception and the
        attempt budget is not exhausted."""
        exc = future.exception()
        if not exc or attempt >= self._max_attempts:
            return False
        return any(isinstance(exc, klass) for klass in self._exception_base)

    def sleep_time(self, attempt, future):
        """Delay before the next attempt: sleep * exponent**(attempt-1),
        capped at max_sleep."""
        delay = self._sleep * self._exponent ** (attempt - 1)
        return min(delay, self._max_sleep)
|
(**kwargs)
|
708,454
|
more_executors._impl.retry
|
__init__
|
Parameters:
max_attempts (int): maximum number of times a callable should be attempted
exponent (float): exponent used for backoff, e.g. 2.0 will result in a delay
which doubles between each attempt
sleep (float): base value for delay between attempts in seconds, e.g. 1.0
will delay by one second between first two attempts
max_sleep (float): maximum delay between attempts, in seconds
exception_base (type, list(type)): future will be retried if (and only if)
it raises an exception inheriting from one of these classes
All parameters are optional; reasonable defaults apply when omitted.
|
def __init__(self, **kwargs):
    """
    Parameters:
        max_attempts (int): maximum number of times a callable should be attempted
        exponent (float): exponent used for backoff, e.g. 2.0 will result in a delay
            which doubles between each attempt
        sleep (float): base value for delay between attempts in seconds, e.g. 1.0
            will delay by one second between first two attempts
        max_sleep (float): maximum delay between attempts, in seconds
        exception_base (type, list(type)): future will be retried if (and only if)
            it raises an exception inheriting from one of these classes

    All parameters are optional; reasonable defaults apply when omitted.
    """
    self._max_attempts = kwargs.get("max_attempts", 3)
    self._exponent = kwargs.get("exponent", 2.0)
    self._sleep = kwargs.get("sleep", 1.0)
    self._max_sleep = kwargs.get("max_sleep", 120)
    base = kwargs.get("exception_base", Exception)
    # Normalize a single class into a list for uniform handling.
    self._exception_base = [base] if isinstance(base, type) else base
|
(self, **kwargs)
|
708,455
|
more_executors._impl.retry
|
should_retry
| null |
def should_retry(self, attempt, future):
    """Return True if ``future`` failed with a retryable exception and the
    attempt budget (``_max_attempts``) is not exhausted."""
    exc = future.exception()
    if not exc:
        return False
    if attempt >= self._max_attempts:
        return False
    return any(isinstance(exc, base) for base in self._exception_base)
|
(self, attempt, future)
|
708,456
|
more_executors._impl.retry
|
sleep_time
| null |
def sleep_time(self, attempt, future):
    """Return the capped exponential-backoff delay before the next attempt."""
    uncapped = self._sleep * (self._exponent ** (attempt - 1))
    return uncapped if uncapped < self._max_sleep else self._max_sleep
|
(self, attempt, future)
|
708,457
|
more_executors._impl.executors
|
Executors
|
Convenience methods for creating executors.
This class produces wrapped executors which may be customized
by use of the `with_*` methods, as in the following example:
>>> Executors.thread_pool(max_workers=4).with_retry().with_map(lambda x: x*2)
Produces a thread pool executor which will retry on failure
and multiply all output values by 2.
|
class Executors(object):
    """Convenience factory methods for creating executors.

    The executors produced here are wrapped so they may be further
    customized by chaining `with_*` methods, as in:

    >>> Executors.thread_pool(max_workers=4).with_retry().with_map(lambda x: x*2)

    This builds a thread pool executor which retries on failure and
    multiplies every output value by 2."""

    @classmethod
    def bind(cls, executor, fn):
        """Bind a synchronous callable to an executor.

        If the callable returns a future, consider :meth:`flat_bind` instead.

        Arguments:
            executor (~concurrent.futures.Executor): an executor
            fn (callable): any function or callable

        Returns:
            callable:
                A callable which, when invoked, submits `fn` to `executor`
                and returns the resulting future. It also provides the
                `Executors.with_*` methods, which may be chained to further
                customize its behavior.

        .. versionadded:: 1.13.0
        """
        return BoundCallable(executor, fn)

    @classmethod
    def flat_bind(cls, executor, fn):
        """Bind an asynchronous (future-returning) callable to an executor.

        Prefer this over :meth:`bind` when the bound callable returns a
        future, to avoid a nested future in the returned value. Equivalent to:

        >>> bind(fn).with_flat_map(lambda future: future)

        Arguments:
            executor (~concurrent.futures.Executor): an executor
            fn (callable): any function or callable which returns a future

        Returns:
            callable:
                A callable which, when invoked, submits `fn` to `executor`
                and returns the resulting (flattened) future. It also
                provides the chainable `Executors.with_*` methods.

        .. versionadded:: 1.16.0
        """
        return cls.bind(executor, fn).with_flat_map(lambda future: future)

    @classmethod
    def thread_pool(cls, *args, **kwargs):
        """Create a thread pool executor.

        Returns:
            ~concurrent.futures.ThreadPoolExecutor:
                a new executor, initialized with the given arguments
        """
        return CustomizableThreadPoolExecutor(*args, **kwargs)

    @classmethod
    def process_pool(cls, *args, **kwargs):
        """Create a process pool executor.

        Returns:
            ~concurrent.futures.ProcessPoolExecutor:
                a new executor, initialized with the given arguments
        """
        return CustomizableProcessPoolExecutor(*args, **kwargs)

    @classmethod
    def sync(cls, *args, **kwargs):
        """Create a synchronous executor.

        Returns:
            ~more_executors.SyncExecutor:
                a new synchronous executor

        Submitted functions are invoked immediately on the calling thread."""
        return SyncExecutor(*args, **kwargs)

    @classmethod
    def _customize(cls, delegate, executor_class, *args, **kwargs):
        # Plain executors are wrapped directly; a BoundCallable is unwrapped,
        # its executor wrapped, and the callable re-bound to the result.
        if not isinstance(delegate, BoundCallable):
            return executor_class(delegate, *args, **kwargs)
        inner_executor = delegate._BoundCallable__executor
        inner_fn = delegate._BoundCallable__fn
        wrapped = executor_class(inner_executor, *args, **kwargs)
        return cls.bind(wrapped, inner_fn)

    @classmethod
    def with_retry(cls, executor, *args, **kwargs):
        """
        Returns:
            ~more_executors.RetryExecutor:
                a new executor retrying callables on failure
        """
        return cls._customize(executor, RetryExecutor, *args, **kwargs)

    @classmethod
    def with_map(cls, executor, *args, **kwargs):
        """
        Returns:
            ~more_executors.MapExecutor:
                a new executor transforming results through the given function
        """
        return cls._customize(executor, MapExecutor, *args, **kwargs)

    @classmethod
    def with_flat_map(cls, executor, *args, **kwargs):
        """
        Returns:
            ~more_executors.FlatMapExecutor:
                a new executor transforming results through the given
                :class:`~concurrent.futures.Future`-providing function

        .. versionadded:: 1.12.0
        """
        return cls._customize(executor, FlatMapExecutor, *args, **kwargs)

    @classmethod
    def with_poll(cls, executor, *args, **kwargs):
        """
        Returns:
            ~more_executors.PollExecutor:
                a new executor producing polled futures; outputs of submitted
                callables are passed into a poll function.
        """
        return cls._customize(executor, PollExecutor, *args, **kwargs)

    @classmethod
    def with_timeout(cls, executor, *args, **kwargs):
        """
        Returns:
            ~more_executors.TimeoutExecutor:
                a new executor attempting to cancel futures not completed
                within the given timeout.

        .. versionadded:: 1.7.0
        """
        return cls._customize(executor, TimeoutExecutor, *args, **kwargs)

    @classmethod
    def with_throttle(cls, executor, *args, **kwargs):
        """
        Returns:
            ~more_executors.ThrottleExecutor:
                a new executor limiting the number of concurrently pending
                futures.

        .. versionadded:: 1.9.0
        """
        return cls._customize(executor, ThrottleExecutor, *args, **kwargs)

    @classmethod
    def with_cancel_on_shutdown(cls, executor, *args, **kwargs):
        """
        Returns:
            ~more_executors.CancelOnShutdownExecutor:
                a new executor attempting to cancel any pending futures when
                shut down.
        """
        return cls._customize(executor, CancelOnShutdownExecutor, *args, **kwargs)

    @classmethod
    def with_asyncio(cls, executor, *args, **kwargs):
        """
        Returns:
            ~more_executors.AsyncioExecutor:
                a new executor returning :class:`asyncio.Future` instances
                rather than :class:`concurrent.futures.Future` instances,
                i.e. usable with the `await` keyword and coroutines.

        .. note::
            The other executors in :class:`Executors` are designed for
            :class:`concurrent.futures.Future`; when chaining `with_*`
            methods, this must be the last one called.

        .. note::
            Only usable for Python >= 3.5.

        .. versionadded:: 1.7.0
        """
        return cls._customize(executor, AsyncioExecutor, *args, **kwargs)
|
()
|
708,458
|
more_executors._impl.flat_map
|
FlatMapExecutor
|
An executor which delegates to another executor while mapping
output values through given future-producing functions.
This executor behaves like :class:`~more_executors.MapExecutor`,
except that the given mapping/error functions must return instances of
:class:`~concurrent.futures.Future`, and the mapped future is
flattened into the future returned from this executor.
This allows chaining multiple future-producing functions into a single
future.
- If the map/error function returns a :class:`~concurrent.futures.Future`, the
result/exception of that future will be propagated to the future returned
by this executor.
- If the map/error function returns any other type, the returned future will fail
with a :class:`TypeError`.
- If the map/error function raises an exception, the returned future will fail
with that exception.
.. versionadded: 1.12.0
|
class FlatMapExecutor(MapExecutor):
    """An executor which delegates to another executor while mapping
    output values through given future-producing functions.

    This executor behaves like :class:`~more_executors.MapExecutor`,
    except that the given mapping/error functions must return instances of
    :class:`~concurrent.futures.Future`, and the mapped future is
    flattened into the future returned from this executor.
    This allows chaining multiple future-producing functions into a single
    future.

    - If the map/error function returns a :class:`~concurrent.futures.Future`, the
      result/exception of that future will be propagated to the future returned
      by this executor.
    - If the map/error function returns any other type, the returned future will fail
      with a :class:`TypeError`.
    - If the map/error function raises an exception, the returned future will fail
      with that exception.

    .. versionadded:: 1.12.0
    """

    # Future wrapper that flattens the future returned by the map function
    # into the outer future (inherited submit() instantiates this class).
    _FUTURE_CLASS = FlatMapFuture
    # Label used for metrics and future tracking of this executor type.
    _TYPE = "flat_map"
|
(delegate, fn=None, logger=None, name='default', **kwargs)
|
708,462
|
more_executors._impl.map
|
__init__
|
Arguments:
delegate (~concurrent.futures.Executor):
an executor to which callables will be submitted
fn (callable):
a callable applied to transform returned values
of successful futures.
This callable will be invoked with a single argument:
the ``result()`` returned from a successful future.
If omitted, no transformation occurs on ``result()``.
error_fn (callable):
a callable applied to transform returned values
of unsuccessful futures.
This callable will be invoked with a single argument:
the ``exception()`` returned from a failed future.
If omitted, no transformation occurs on ``exception()``.
logger (~logging.Logger):
a logger used for messages from this executor
name (str):
a name for this executor
.. versionchanged:: 2.2.0
Introduced ``error_fn``.
.. versionchanged:: 2.7.0
Introduced ``name``.
|
def __init__(self, delegate, fn=None, logger=None, name="default", **kwargs):
    """
    Arguments:
        delegate (~concurrent.futures.Executor):
            an executor to which callables will be submitted
        fn (callable):
            a callable applied to transform returned values of successful
            futures; invoked with the ``result()`` of the inner future.
            If omitted, results pass through unchanged.
        error_fn (callable):
            a callable applied to transform returned values of unsuccessful
            futures; invoked with the ``exception()`` of the inner future.
            If omitted, exceptions pass through unchanged.
        logger (~logging.Logger):
            a logger used for messages from this executor
        name (str):
            a name for this executor

    .. versionchanged:: 2.2.0
        Introduced ``error_fn``.

    .. versionchanged:: 2.7.0
        Introduced ``name``.
    """
    # ``_name`` must be assigned before the metric properties are touched,
    # since they label metrics by executor name.
    self._name = name
    self._delegate = delegate
    self._fn = fn
    self._error_fn = kwargs.get("error_fn")
    self._shutdown = ShutdownHelper()
    self._metric_exec_total.inc()
    self._metric_exec_inprogress.inc()
|
(self, delegate, fn=None, logger=None, name='default', **kwargs)
|
708,466
|
more_executors._impl.map
|
shutdown
| null |
def shutdown(self, wait=True, **_kwargs):
    """Shut down this executor and its delegate (idempotent)."""
    first_call = self._shutdown()
    if not first_call:
        # Already shut down; nothing further to do.
        return
    self._metric_exec_inprogress.dec()
    self._delegate.shutdown(wait, **_kwargs)
|
(self, wait=True, **_kwargs)
|
708,467
|
more_executors._impl.map
|
submit
| null |
def submit(self, *args, **kwargs):  # pylint: disable=arguments-differ
    """Submit to the delegate and wrap the returned future for mapping."""
    with self._shutdown.ensure_alive():
        delegate_future = self._delegate.submit(*args, **kwargs)
        wrapped = self._FUTURE_CLASS(delegate_future, self._fn, self._error_fn)
        return track_future(wrapped, type=self._TYPE, executor=self._name)
|
(self, *args, **kwargs)
|
708,476
|
more_executors._impl.map
|
MapExecutor
|
An executor which delegates to another executor while mapping
output values/exceptions through given functions.
|
class MapExecutor(CanCustomizeBind, Executor):
    """An executor which delegates to another executor while mapping
    output values/exceptions through given functions.
    """

    # Future wrapper type used by submit(); subclasses override this to
    # change how mapped results are delivered.
    _FUTURE_CLASS = MapFuture
    # Label identifying this executor type in metrics and future tracking.
    _TYPE = "map"

    @property
    def _metric_exec_total(self):
        # Counter of executors of this type ever created, labelled by name.
        return metrics.EXEC_TOTAL.labels(type=self._TYPE, executor=self._name)

    @property
    def _metric_exec_inprogress(self):
        # Gauge of executors of this type currently alive, labelled by name.
        return metrics.EXEC_INPROGRESS.labels(type=self._TYPE, executor=self._name)

    def __init__(self, delegate, fn=None, logger=None, name="default", **kwargs):
        """
        Arguments:
            delegate (~concurrent.futures.Executor):
                an executor to which callables will be submitted
            fn (callable):
                a callable applied to transform returned values of successful
                futures; invoked with the ``result()`` of the inner future.
                If omitted, results pass through unchanged.
            error_fn (callable):
                a callable applied to transform returned values of unsuccessful
                futures; invoked with the ``exception()`` of the inner future.
                If omitted, exceptions pass through unchanged.
            logger (~logging.Logger):
                a logger used for messages from this executor
            name (str):
                a name for this executor

        .. versionchanged:: 2.2.0
            Introduced ``error_fn``.

        .. versionchanged:: 2.7.0
            Introduced ``name``.
        """
        # ``_name`` must be set before the metric properties below are used.
        self._name = name
        self._delegate = delegate
        self._fn = fn
        self._error_fn = kwargs.get("error_fn")
        self._shutdown = ShutdownHelper()
        self._metric_exec_total.inc()
        self._metric_exec_inprogress.inc()

    def shutdown(self, wait=True, **_kwargs):
        """Shut down this executor and its delegate (idempotent)."""
        first_call = self._shutdown()
        if not first_call:
            return
        self._metric_exec_inprogress.dec()
        self._delegate.shutdown(wait, **_kwargs)

    def submit(self, *args, **kwargs):  # pylint: disable=arguments-differ
        """Submit to the delegate and wrap the returned future for mapping."""
        with self._shutdown.ensure_alive():
            delegate_future = self._delegate.submit(*args, **kwargs)
            wrapped = self._FUTURE_CLASS(delegate_future, self._fn, self._error_fn)
            return track_future(wrapped, type=self._TYPE, executor=self._name)
|
(delegate, fn=None, logger=None, name='default', **kwargs)
|
708,494
|
more_executors._impl.poll
|
PollDescriptor
|
Represents an unresolved :class:`~concurrent.futures.Future`.
The poll function used by :class:`PollExecutor` will be
invoked with a list of PollDescriptor objects.
|
class PollDescriptor(object):
    """Represents an unresolved :class:`~concurrent.futures.Future`.

    The poll function used by :class:`PollExecutor` will be
    invoked with a list of PollDescriptor objects.
    """

    def __init__(self, future, result):
        # Class-private storage; the future is resolved only through the
        # yield_result/yield_exception methods below.
        self.__future = future
        self.__result = result

    @property
    def result(self):
        """The result from the delegate executor's future, which should be used to
        drive the poll."""
        return self.__result

    def yield_exception(self, exception, traceback=None):
        """The poll function can call this function to make the future raise the given exception.

        Arguments:
            exception (Exception):
                An exception to be returned or raised by the future associated with this
                descriptor
            traceback (traceback):
                An optional associated traceback. This argument only has an effect on python 2.x,
                where exception objects do not include a traceback.
        """
        copy_exception(self.__future, exception, traceback)

    def yield_result(self, result):
        """The poll function can call this function to make the future yield the given result.

        Arguments:
            result (object):
                a result to be returned by the future associated with this descriptor
        """
        try_set_result(self.__future, result)
|
(future, result)
|
708,495
|
more_executors._impl.poll
|
__init__
| null |
def __init__(self, future, result):
    """Capture the future to resolve and the delegate result driving the poll."""
    # Double-underscore attributes get class-private name mangling when this
    # runs inside the PollDescriptor class body.
    self.__future, self.__result = future, result
|
(self, future, result)
|
708,496
|
more_executors._impl.poll
|
yield_exception
|
The poll function can call this function to make the future raise the given exception.
Arguments:
exception (Exception):
An exception to be returned or raised by the future associated with this
descriptor
traceback (traceback):
An optional associated traceback. This argument only has an effect on python 2.x,
where exception objects do not include a traceback.
|
def yield_exception(self, exception, traceback=None):
    """Resolve the descriptor's future with *exception* (failure path).

    Arguments:
        exception (Exception):
            An exception to be returned or raised by the future associated
            with this descriptor
        traceback (traceback):
            An optional associated traceback. This argument only has an
            effect on python 2.x, where exception objects do not include
            a traceback.
    """
    target = self.__future
    copy_exception(target, exception, traceback)
|
(self, exception, traceback=None)
|
708,497
|
more_executors._impl.poll
|
yield_result
|
The poll function can call this function to make the future yield the given result.
Arguments:
result (object):
a result to be returned by the future associated with this descriptor
|
def yield_result(self, result):
    """Resolve the descriptor's future with *result* (success path).

    Arguments:
        result (object):
            a result to be returned by the future associated with this
            descriptor
    """
    target = self.__future
    try_set_result(target, result)
|
(self, result)
|
708,498
|
more_executors._impl.poll
|
PollExecutor
|
Instances of `PollExecutor` submit callables to a delegate executor
and resolve the returned futures via a provided poll function.
A cancel function may also be provided to perform additional processing
when a returned future is cancelled.
|
class PollExecutor(CanCustomizeBind, Executor):
    """Instances of `PollExecutor` submit callables to a delegate executor
    and resolve the returned futures via a provided poll function.

    A cancel function may also be provided to perform additional processing
    when a returned future is cancelled.
    """

    def __init__(
        self,
        delegate,
        poll_fn,
        cancel_fn=None,
        default_interval=5.0,
        logger=None,
        name="default",
    ):
        """
        Parameters:
            delegate (~concurrent.futures.Executor):
                executor to which callables will be submitted
            poll_fn (callable):
                a `poll function`_ used to decide when futures should be resolved
            cancel_fn (callable):
                a `cancel function`_ invoked when future cancel is required
            default_interval (float):
                default interval between polls (in seconds)
            logger (~logging.Logger):
                a logger used for messages from this executor
            name (str):
                a name for this executor

        .. versionchanged:: 2.7.0
            Introduced ``name``.
        """
        self._log = LogWrapper(logger if logger else logging.getLogger("PollExecutor"))
        self._name = name
        self._delegate = delegate
        self._default_interval = default_interval
        self._poll_fn = poll_fn
        self._cancel_fn = cancel_fn
        # List of (future, PollDescriptor) pairs awaiting resolution.
        self._poll_descriptors = []
        # Event used to wake the poll thread early (new work, notify(), shutdown).
        self._poll_event = get_event()
        poll_event = self._poll_event
        # The poll thread holds only a weak reference to this executor so the
        # executor can be garbage-collected while the thread exists; the
        # weakref callback wakes the thread so it can notice and exit.
        self_ref = weakref.ref(self, lambda _: poll_event.set())
        self._poll_thread = Thread(
            name="PollExecutor-%s" % name, target=_poll_loop, args=(self_ref,)
        )
        self._poll_thread.daemon = True
        self._shutdown = ShutdownHelper()
        self._lock = RLock()
        metrics.EXEC_TOTAL.labels(type="poll", executor=self._name).inc()
        metrics.EXEC_INPROGRESS.labels(type="poll", executor=self._name).inc()
        self._poll_thread.start()

    def submit(self, *args, **kwargs):  # pylint: disable=arguments-differ
        """Submit a callable to the delegate executor; the returned future is
        resolved via the poll function rather than the delegate's future."""
        with self._shutdown.ensure_alive():
            delegate_future = self._delegate.submit(*args, **kwargs)
            out = PollFuture(delegate_future, self)
            track_future(out, type="poll", executor=self._name)
            return out

    def notify(self):
        """Request the executor to re-run the polling function as soon as possible.

        This method may be used to perform the next poll earlier than it would
        otherwise run.

        This is useful for cases where futures may be resolved via a mixture of
        polling and external events.

        .. versionadded:: 2.2.0
        """
        self._poll_event.set()

    def _register_poll(self, future, delegate_future):
        # Start polling for `future`, using the completed delegate future's
        # result as the poll input.
        descriptor = PollDescriptor(future, delegate_future.result())
        with self._lock:
            self._poll_descriptors.append((future, descriptor))
            future._clear_delegate()
        self._poll_event.set()

    def _deregister_poll(self, future):
        # Stop polling for `future`; no-op if it was never registered.
        with self._lock:
            self._poll_descriptors = [
                (f, d) for (f, d) in self._poll_descriptors if f is not future
            ]

    def _run_cancel_fn(self, future):
        # Ask the user-supplied cancel function whether cancelling `future`
        # may proceed; True means "no veto".
        if not self._cancel_fn:
            # no cancel function => no veto of cancel
            return True
        descriptor = [d for (f, d) in self._poll_descriptors if f is future]
        if not descriptor:
            # no record of this future => no veto of cancel.
            # we can get here if the future is already done
            # or if polling hasn't started yet
            return True
        assert len(descriptor) == 1, "Too many poll descriptors for %s" % future
        descriptor = descriptor[0]
        try:
            return self._cancel_fn(descriptor.result)
        except Exception:
            # A raising cancel function counts as a veto.
            self._log.exception(
                "Exception during cancel on %s/%s", future, descriptor.result
            )
            return False

    def _run_poll_fn(self):
        # Run one iteration of the poll function over a snapshot of the
        # registered descriptors (the lock is released while polling).
        with self._lock:
            descriptors = [d for (_, d) in self._poll_descriptors]
        try:
            now = monotonic()
            return self._poll_fn(descriptors)
        except Exception as e:
            self._log.debug("Poll function failed", exc_info=True)
            metrics.POLL_ERROR.labels(executor=self._name).inc()
            # If poll function fails, then every future
            # depending on the poll also immediately fails.
            [d.yield_exception(e) for d in descriptors]
        finally:
            metrics.POLL_TOTAL.labels(executor=self._name).inc()
            metrics.POLL_TIME.labels(executor=self._name).inc(monotonic() - now)

    def shutdown(self, wait=True, **_kwargs):
        """Shut down this executor and its delegate; when *wait* is True,
        also join the poll thread."""
        if self._shutdown():
            metrics.EXEC_INPROGRESS.labels(type="poll", executor=self._name).dec()
            # Wake the poll thread so it can observe shutdown and exit.
            self._poll_event.set()
            self._delegate.shutdown(wait, **_kwargs)
        if wait:
            self._log.debug("Join poll thread...")
            self._poll_thread.join(MAX_TIMEOUT)
            self._log.debug("Joined poll thread.")
|
(delegate, poll_fn, cancel_fn=None, default_interval=5.0, logger=None, name='default')
|
708,502
|
more_executors._impl.poll
|
__init__
|
Parameters:
delegate (~concurrent.futures.Executor):
executor to which callables will be submitted
poll_fn (callable):
a `poll function`_ used to decide when futures should be resolved
cancel_fn (callable):
a `cancel function`_ invoked when future cancel is required
default_interval (float):
default interval between polls (in seconds)
logger (~logging.Logger):
a logger used for messages from this executor
name (str):
a name for this executor
.. versionchanged:: 2.7.0
Introduced ``name``.
|
def __init__(
    self,
    delegate,
    poll_fn,
    cancel_fn=None,
    default_interval=5.0,
    logger=None,
    name="default",
):
    """
    Parameters:
        delegate (~concurrent.futures.Executor):
            executor to which callables will be submitted
        poll_fn (callable):
            a `poll function`_ used to decide when futures should be resolved
        cancel_fn (callable):
            a `cancel function`_ invoked when future cancel is required
        default_interval (float):
            default interval between polls (in seconds)
        logger (~logging.Logger):
            a logger used for messages from this executor
        name (str):
            a name for this executor

    .. versionchanged:: 2.7.0
        Introduced ``name``.
    """
    self._log = LogWrapper(logger if logger else logging.getLogger("PollExecutor"))
    self._name = name
    self._delegate = delegate
    self._default_interval = default_interval
    self._poll_fn = poll_fn
    self._cancel_fn = cancel_fn
    # List of (future, PollDescriptor) pairs awaiting resolution.
    self._poll_descriptors = []
    # Event used to wake the poll thread early (new work, notify(), shutdown).
    self._poll_event = get_event()
    poll_event = self._poll_event
    # The poll thread holds only a weak reference to this executor so the
    # executor can be garbage-collected while the thread exists; the weakref
    # callback wakes the thread so it can notice and exit.
    self_ref = weakref.ref(self, lambda _: poll_event.set())
    self._poll_thread = Thread(
        name="PollExecutor-%s" % name, target=_poll_loop, args=(self_ref,)
    )
    self._poll_thread.daemon = True
    self._shutdown = ShutdownHelper()
    self._lock = RLock()
    metrics.EXEC_TOTAL.labels(type="poll", executor=self._name).inc()
    metrics.EXEC_INPROGRESS.labels(type="poll", executor=self._name).inc()
    self._poll_thread.start()
|
(self, delegate, poll_fn, cancel_fn=None, default_interval=5.0, logger=None, name='default')
|
708,503
|
more_executors._impl.poll
|
_deregister_poll
| null |
def _deregister_poll(self, future):
    """Stop polling for *future*; no-op when it was never registered."""
    with self._lock:
        remaining = [
            (fut, desc) for (fut, desc) in self._poll_descriptors if fut is not future
        ]
        self._poll_descriptors = remaining
|
(self, future)
|
708,504
|
more_executors._impl.poll
|
_register_poll
| null |
def _register_poll(self, future, delegate_future):
    """Begin polling for *future*, seeding the descriptor with the completed
    delegate future's result."""
    descriptor = PollDescriptor(future, delegate_future.result())
    with self._lock:
        self._poll_descriptors.append((future, descriptor))
        future._clear_delegate()
    # Wake the poll loop so the new descriptor is polled promptly.
    self._poll_event.set()
|
(self, future, delegate_future)
|
708,505
|
more_executors._impl.poll
|
_run_cancel_fn
| null |
def _run_cancel_fn(self, future):
    """Consult the user cancel function for *future*; True means the cancel
    may proceed (no veto)."""
    cancel_fn = self._cancel_fn
    if not cancel_fn:
        # No cancel function => no veto of cancel.
        return True
    matches = [desc for (fut, desc) in self._poll_descriptors if fut is future]
    if not matches:
        # No record of this future => no veto of cancel. We can get here if
        # the future is already done or if polling hasn't started yet.
        return True
    assert len(matches) == 1, "Too many poll descriptors for %s" % future
    descriptor = matches[0]
    try:
        return cancel_fn(descriptor.result)
    except Exception:
        # A raising cancel function counts as a veto.
        self._log.exception(
            "Exception during cancel on %s/%s", future, descriptor.result
        )
        return False
|
(self, future)
|
708,506
|
more_executors._impl.poll
|
_run_poll_fn
| null |
def _run_poll_fn(self):
    """Run one iteration of the poll function over a descriptor snapshot."""
    # Snapshot under the lock so the (possibly slow) poll function runs
    # without holding it.
    with self._lock:
        snapshot = [desc for (_, desc) in self._poll_descriptors]
    try:
        now = monotonic()
        return self._poll_fn(snapshot)
    except Exception as e:
        self._log.debug("Poll function failed", exc_info=True)
        metrics.POLL_ERROR.labels(executor=self._name).inc()
        # A failed poll immediately fails every future depending on it.
        for desc in snapshot:
            desc.yield_exception(e)
    finally:
        metrics.POLL_TOTAL.labels(executor=self._name).inc()
        metrics.POLL_TIME.labels(executor=self._name).inc(monotonic() - now)
|
(self)
|
708,510
|
more_executors._impl.poll
|
notify
|
Request the executor to re-run the polling function as soon as possible.
This method may be used to perform the next poll earlier than it would
otherwise run.
This is useful for cases where futures may be resolved via a mixture of
polling and external events.
.. versionadded:: 2.2.0
|
def notify(self):
    """Request the executor to re-run the polling function as soon as possible.

    This method may be used to perform the next poll earlier than it would
    otherwise run, which is useful when futures may be resolved via a mixture
    of polling and external events.

    .. versionadded:: 2.2.0
    """
    event = self._poll_event
    event.set()
|
(self)
|
708,511
|
more_executors._impl.poll
|
shutdown
| null |
def shutdown(self, wait=True, **_kwargs):
    """Shut down this executor and its delegate; when *wait* is True, also
    join the poll thread."""
    first_call = self._shutdown()
    if first_call:
        metrics.EXEC_INPROGRESS.labels(type="poll", executor=self._name).dec()
        # Wake the poll loop so it can observe shutdown and exit.
        self._poll_event.set()
        self._delegate.shutdown(wait, **_kwargs)
    if not wait:
        return
    self._log.debug("Join poll thread...")
    self._poll_thread.join(MAX_TIMEOUT)
    self._log.debug("Joined poll thread.")
|
(self, wait=True, **_kwargs)
|
708,512
|
more_executors._impl.poll
|
submit
| null |
def submit(self, *args, **kwargs):  # pylint: disable=arguments-differ
    """Submit to the delegate and return a future resolved via polling."""
    with self._shutdown.ensure_alive():
        inner = self._delegate.submit(*args, **kwargs)
        future = PollFuture(inner, self)
        track_future(future, type="poll", executor=self._name)
        return future
|
(self, *args, **kwargs)
|
708,521
|
more_executors._impl.retry
|
RetryExecutor
|
An executor which delegates to another executor while adding
implicit retry behavior.
- Callables are submitted to the delegate executor, and may be
submitted more than once if retries are required.
- The callables may be submitted to that executor from a different
thread than the calling thread.
- Cancelling is supported if the delegate executor allows it.
- Cancelling between retries is always supported.
- Attempting to cancel a future prevents any more retries, regardless
of whether the cancel succeeds.
- The returned futures from this executor are only resolved or
failed once the callable either succeeded, or all retries
were exhausted. This includes activation of the done callback.
- This executor is thread-safe.
|
class RetryExecutor(CanCustomizeBind, Executor):
    """An executor which delegates to another executor while adding
    implicit retry behavior.

    - Callables are submitted to the delegate executor, and may be
      submitted more than once if retries are required.
    - The callables may be submitted to that executor from a different
      thread than the calling thread.
    - Cancelling is supported if the delegate executor allows it.
    - Cancelling between retries is always supported.
    - Attempting to cancel a future prevents any more retries, regardless
      of whether the cancel succeeds.
    - The returned futures from this executor are only resolved or
      failed once the callable either succeeded, or all retries
      were exhausted. This includes activation of the done callback.
    - This executor is thread-safe.
    """

    def __init__(
        self, delegate, retry_policy=None, logger=None, name="default", **kwargs
    ):
        """
        Parameters:
            delegate (~concurrent.futures.Executor):
                executor to which callables will be submitted
            retry_policy (RetryPolicy):
                policy used to determine when futures shall be retried; if omitted,
                an :class:`ExceptionRetryPolicy` is used, supplied with keyword
                arguments to this constructor
            logger (~logging.Logger):
                a logger used for messages from this executor
            name (str):
                a name for this executor

        .. versionchanged:: 2.7.0
            Introduced ``name``.
        """
        self._log = LogWrapper(logger if logger else logging.getLogger("RetryExecutor"))
        self._delegate = delegate
        self._default_retry_policy = retry_policy or ExceptionRetryPolicy(**kwargs)
        # Pending RetryJob records; guarded by self._lock.
        self._jobs = []
        # Event used to wake the submit thread (new jobs, shutdown).
        self._submit_event = get_event()
        self._name = name
        event = self._submit_event
        # The submit thread holds only a weak reference to this executor so it
        # can be garbage-collected; the weakref callback wakes the thread so
        # it can notice and exit.
        self_ref = weakref.ref(self, lambda _: event.set())
        self._submit_thread = Thread(
            name="RetryExecutor-%s" % name, target=_submit_loop, args=(self_ref,)
        )
        self._submit_thread.daemon = True
        self._shutdown = ShutdownHelper()
        self._lock = RLock()
        metrics.EXEC_INPROGRESS.labels(executor=self._name, type="retry").inc()
        metrics.EXEC_TOTAL.labels(executor=self._name, type="retry").inc()
        self._submit_thread.start()

    def shutdown(self, wait=True, **_kwargs):
        """Shut down this executor and its delegate; when *wait* is True,
        also join the submit thread."""
        if self._shutdown():
            self._log.debug("Shutting down.")
            metrics.EXEC_INPROGRESS.labels(executor=self._name, type="retry").dec()
            self._wake_thread()
            self._delegate.shutdown(wait, **_kwargs)
        if wait:
            self._log.debug("Waiting for thread")
            self._submit_thread.join(MAX_TIMEOUT)
            self._log.debug("Shutdown complete")

    def submit(self, *args, **kwargs):  # pylint: disable=arguments-differ
        """Submit a callable using the executor's default retry policy."""
        return self.submit_retry(self._default_retry_policy, *args, **kwargs)

    def submit_retry(self, retry_policy, fn, *args, **kwargs):
        """Submit a callable with a specific retry policy.

        Parameters:
            retry_policy (RetryPolicy): a policy which is used for this call only
        """
        with self._shutdown.ensure_alive():
            future = RetryFuture(self)
            track_future(future, type="retry", executor=self._name)
            # attempt 0, scheduled immediately (when=now), no delegate yet.
            job = RetryJob(retry_policy, None, future, 0, monotonic(), fn, args, kwargs)
            self._append_job(job)
            # Let the submit thread know it should wake up to check for new jobs
            self._wake_thread()
            self._log.debug("Returning future %s", future)
            return future

    def _wake_thread(self):
        # Wake the submit thread so it re-evaluates the job queue.
        self._submit_event.set()

    def _get_next_job(self):
        # Find and return the next job to be handled, if any.
        # This means a job with when < now, or with stop_retry == True,
        # or if there's none, then the job with the minimum value of when.
        min_job = None
        now = monotonic()
        for job in self._jobs:
            if job.delegate_future:
                # It's already running, skip
                continue
            if job.stop_retry:
                # We've been requested to stop retrying this job,
                # and we can handle that immediately
                return job
            if job.when <= now:
                # job is overdue, just do it
                return job
            if not min_job:
                min_job = job
            elif job.when < min_job.when:
                min_job = job
        return min_job

    def _submit_now(self, job):
        # Submit `job` to the delegate immediately, replacing its queue entry
        # with a new record that carries the delegate future.
        # Pop job since we'll replace it.
        # We need to hold the lock for the entire duration so that other
        # threads won't see _jobs between our removal and re-add of the job
        with job.future._me_lock:
            with self._lock:
                self._pop_job(job)
                # We need the future's lock now too, because someone could
                # call cancel after this check and before we submit.
                if job.future.done():
                    self._log.debug(
                        "future done %s - not submitting to delegate", job.future
                    )
                    return
                if job.attempt != 0:
                    metrics.RETRY_TOTAL.labels(executor=self._name).inc()
                delegate_future = self._delegate.submit(job.fn, *job.args, **job.kwargs)
                job.future.delegate_future = delegate_future
                new_job = RetryJob(
                    job.policy,
                    delegate_future,
                    job.future,
                    job.attempt + 1,
                    None,
                    job.fn,
                    job.args,
                    job.kwargs,
                )
                self._append_job(new_job)
        self._log.debug("Submitted: %s", new_job)
        # Attach the callback outside the locks to avoid re-entrancy issues
        # if the delegate future is already done.
        delegate_future.add_done_callback(self._delegate_callback)
        self._wake_thread()

    def _pop_job(self, job):
        # Remove `job` (matched by identity) from the queue, keeping the
        # queue-size gauge consistent; returns the removed job or None.
        with self._lock:
            for idx, pending in enumerate(self._jobs):
                if pending is job:
                    metrics.RETRY_QUEUE.labels(executor=self._name).dec()
                    return self._jobs.pop(idx)

    def _append_job(self, job):
        # Add `job` to the queue, keeping the queue-size gauge consistent.
        with self._lock:
            metrics.RETRY_QUEUE.labels(executor=self._name).inc()
            self._jobs.append(job)

    def _retry(self, job, sleep_time):
        # Re-queue `job` to run again after `sleep_time` seconds.
        self._log.debug("Will retry: %s", job)
        with self._lock:
            self._pop_job(job)
            metrics.RETRY_DELAY.labels(executor=self._name).inc(sleep_time)
            new_job = RetryJob(
                job.policy,
                None,
                job.future,
                job.attempt,
                monotonic() + sleep_time,
                job.fn,
                job.args,
                job.kwargs,
                # The old delegate future is retained in case we need
                # to propagate its exception later
                old_delegate=job.delegate_future,
            )
            new_job.stop_retry = job.stop_retry
            self._append_job(new_job)
        self._wake_thread()

    def _cancel(self, future):
        # Attempt to cancel the job owning `future`; returns True on success.
        found_job = None
        with self._lock:
            for idx, job in enumerate(self._jobs):
                if job.future is future:
                    self._log.debug("Try cancel: %s", job)
                    if not job.delegate_future:
                        self._log.debug("Successful cancel - no delegate: %s", job)
                        self._jobs.pop(idx)
                        return True
                    found_job = job
                    # Whether or not we can successfully cancel, the request to cancel
                    # means that we don't want to retry any more.
                    found_job.stop_retry = True
                    break
        # This shouldn't be possible.
        # - Future holds a lock on itself, and has checked that it's not already done
        # - The only other path for removing a job is in delegate_callback, but the
        #   job is only removed *after* set_result/set_exception which would wait
        #   for the future's lock.
        assert found_job, "Cancel called on orphan %s" % future
        self._log.debug("Try cancel delegate: %s", found_job)
        if found_job.delegate_future.cancel():
            self._log.debug("Successful cancel: %s", found_job)
            future._clear_delegate()
            # Don't remove from _jobs here,
            # the callback attached to delegate_future was expected
            # to take care of that
            return True
        self._log.debug("Could not cancel: %s", found_job)
        # Let the submit thread wake up and find that we've set stop_retry
        self._wake_thread()
        return False

    def _delegate_callback(self, delegate_future):
        # Completion callback for delegate futures: either schedule a retry
        # or finalize the outer future.
        assert delegate_future.done(), "BUG: callback invoked while future not done!"
        self._log.debug("Callback activated for %s", delegate_future)
        found_job = None
        for job in self._jobs[:]:
            if job.delegate_future == delegate_future:
                found_job = job
                break
        # Callbacks are only installed after a job is added, and this is
        # the only place a job with a delegate associated will be removed,
        # thus it should not be possible for a job to be missing.
        assert found_job, "BUG: no job associated with delegate %s" % delegate_future
        if delegate_future.cancelled():
            # nothing to do, retrying on cancel is not allowed
            self._log.debug("Delegate was cancelled: %s", delegate_future)
            return
        (should_retry, sleep_time) = eval_policy(found_job, self._log)
        if should_retry:
            self._retry(found_job, sleep_time)
            return
        self._log.debug("Finalizing %s", found_job)
        # OK, it won't be retried. Resolve the future.
        copy_future(delegate_future, found_job.future)
        self._pop_job(found_job)
        self._log.debug("Finalized %s", found_job)
|
(delegate, retry_policy=None, logger=None, name='default', **kwargs)
|
708,525
|
more_executors._impl.retry
|
__init__
|
Parameters:
delegate (~concurrent.futures.Executor):
executor to which callables will be submitted
retry_policy (RetryPolicy):
policy used to determine when futures shall be retried; if omitted,
an :class:`ExceptionRetryPolicy` is used, supplied with keyword
arguments to this constructor
logger (~logging.Logger):
a logger used for messages from this executor
name (str):
a name for this executor
.. versionchanged:: 2.7.0
Introduced ``name``.
|
def __init__(
    self, delegate, retry_policy=None, logger=None, name="default", **kwargs
):
    """
    Parameters:
        delegate (~concurrent.futures.Executor):
            executor to which callables will be submitted
        retry_policy (RetryPolicy):
            policy used to determine when futures shall be retried; if omitted,
            an :class:`ExceptionRetryPolicy` is used, supplied with keyword
            arguments to this constructor
        logger (~logging.Logger):
            a logger used for messages from this executor
        name (str):
            a name for this executor

    .. versionchanged:: 2.7.0
        Introduced ``name``.
    """
    self._log = LogWrapper(logger if logger else logging.getLogger("RetryExecutor"))
    self._delegate = delegate
    self._default_retry_policy = retry_policy or ExceptionRetryPolicy(**kwargs)
    # Pending RetryJob records; guarded by self._lock.
    self._jobs = []
    # Event used to wake the submit thread (new jobs, shutdown).
    self._submit_event = get_event()
    self._name = name
    event = self._submit_event
    # The submit thread holds only a weak reference to this executor so it can
    # be garbage-collected; the weakref callback wakes the thread so it exits.
    self_ref = weakref.ref(self, lambda _: event.set())
    self._submit_thread = Thread(
        name="RetryExecutor-%s" % name, target=_submit_loop, args=(self_ref,)
    )
    self._submit_thread.daemon = True
    self._shutdown = ShutdownHelper()
    self._lock = RLock()
    metrics.EXEC_INPROGRESS.labels(executor=self._name, type="retry").inc()
    metrics.EXEC_TOTAL.labels(executor=self._name, type="retry").inc()
    self._submit_thread.start()
|
(self, delegate, retry_policy=None, logger=None, name='default', **kwargs)
|
708,526
|
more_executors._impl.retry
|
_append_job
| null |
def _append_job(self, job):
    """Record *job* as pending and bump the retry-queue gauge."""
    with self._lock:
        gauge = metrics.RETRY_QUEUE.labels(executor=self._name)
        gauge.inc()
        self._jobs.append(job)
|
(self, job)
|
708,527
|
more_executors._impl.retry
|
_cancel
| null |
def _cancel(self, future):
    """Attempt to cancel the retry job owning *future*.

    Returns True when the job is removed before any delegate attempt ran,
    or when the in-flight delegate future could itself be cancelled;
    False otherwise. Regardless of the outcome, a cancel request stops
    any further retries of the job.
    """
    found_job = None
    with self._lock:
        for job in self._jobs:
            if job.future is future:
                self._log.debug("Try cancel: %s", job)
                if not job.delegate_future:
                    self._log.debug("Successful cancel - no delegate: %s", job)
                    # Fix: remove via _pop_job rather than a bare list pop so
                    # the RETRY_QUEUE gauge (incremented in _append_job) is
                    # decremented here too; self._lock is an RLock, so the
                    # nested acquisition inside _pop_job is safe.
                    self._pop_job(job)
                    return True
                found_job = job
                # Whether or not we can successfully cancel, the request to cancel
                # means that we don't want to retry any more.
                found_job.stop_retry = True
                break
    # This shouldn't be possible.
    # - Future holds a lock on itself, and has checked that it's not already done
    # - The only other path for removing a job is in delegate_callback, but the
    #   job is only removed *after* set_result/set_exception which would wait
    #   for the future's lock.
    assert found_job, "Cancel called on orphan %s" % future
    self._log.debug("Try cancel delegate: %s", found_job)
    if found_job.delegate_future.cancel():
        self._log.debug("Successful cancel: %s", found_job)
        future._clear_delegate()
        # Don't remove from _jobs here,
        # the callback attached to delegate_future was expected
        # to take care of that
        return True
    self._log.debug("Could not cancel: %s", found_job)
    # Let the submit thread wake up and find that we've set stop_retry
    self._wake_thread()
    return False
|
(self, future)
|
708,528
|
more_executors._impl.retry
|
_delegate_callback
| null |
def _delegate_callback(self, delegate_future):
    """Handle completion of a delegate future: retry it or finalize the
    outer future."""
    assert delegate_future.done(), "BUG: callback invoked while future not done!"
    self._log.debug("Callback activated for %s", delegate_future)
    matched = None
    for candidate in self._jobs[:]:
        if candidate.delegate_future == delegate_future:
            matched = candidate
            break
    # Callbacks are only installed after a job is added, and this is
    # the only place a job with a delegate associated will be removed,
    # thus it should not be possible for a job to be missing.
    assert matched, "BUG: no job associated with delegate %s" % delegate_future
    if delegate_future.cancelled():
        # nothing to do, retrying on cancel is not allowed
        self._log.debug("Delegate was cancelled: %s", delegate_future)
        return
    (should_retry, sleep_time) = eval_policy(matched, self._log)
    if should_retry:
        self._retry(matched, sleep_time)
        return
    self._log.debug("Finalizing %s", matched)
    # OK, it won't be retried. Resolve the future.
    copy_future(delegate_future, matched.future)
    self._pop_job(matched)
    self._log.debug("Finalized %s", matched)
|
(self, delegate_future)
|
708,529
|
more_executors._impl.retry
|
_get_next_job
| null |
def _get_next_job(self):
# Find and return the next job to be handled, if any.
# This means a job with when < now, or with stop_retry == True,
# or if there's none, then the job with the minimum value of when.
min_job = None
now = monotonic()
for job in self._jobs:
if job.delegate_future:
# It's already running, skip
continue
if job.stop_retry:
# We've been requested to stop retrying this job,
# and we can handle that immediately
return job
if job.when <= now:
# job is overdue, just do it
return job
if not min_job:
min_job = job
elif job.when < min_job.when:
min_job = job
return min_job
|
(self)
|
708,530
|
more_executors._impl.retry
|
_pop_job
| null |
def _pop_job(self, job):
with self._lock:
for idx, pending in enumerate(self._jobs):
if pending is job:
metrics.RETRY_QUEUE.labels(executor=self._name).dec()
return self._jobs.pop(idx)
|
(self, job)
|
708,531
|
more_executors._impl.retry
|
_retry
| null |
def _retry(self, job, sleep_time):
    """Re-queue ``job`` for another attempt after ``sleep_time`` seconds.

    The job is replaced by a fresh RetryJob with no delegate future and a
    ``when`` deadline in the future, then the submit thread is woken so it
    can reschedule around the new deadline.
    """
    self._log.debug("Will retry: %s", job)
    with self._lock:
        self._pop_job(job)
        metrics.RETRY_DELAY.labels(executor=self._name).inc(sleep_time)
        new_job = RetryJob(
            job.policy,
            None,
            job.future,
            job.attempt,
            monotonic() + sleep_time,
            job.fn,
            job.args,
            job.kwargs,
            # The old delegate future is retained in case we need
            # to propagate its exception later
            old_delegate=job.delegate_future,
        )
        # Carry over any stop-retry request made against the old job.
        new_job.stop_retry = job.stop_retry
        self._append_job(new_job)
    self._wake_thread()
|
(self, job, sleep_time)
|
708,532
|
more_executors._impl.retry
|
_submit_now
| null |
def _submit_now(self, job):
    """Submit ``job`` to the delegate executor immediately.

    Replaces the job (pop + re-append) with a new RetryJob that tracks
    the delegate future and an incremented attempt count.
    """
    # Pop job since we'll replace it.
    # We need to hold the lock for the entire duration so that other
    # threads won't see _jobs between our removal and re-add of the job
    with job.future._me_lock:
        with self._lock:
            self._pop_job(job)
            # We need the future's lock now too, because someone could
            # call cancel after this check and before we submit.
            if job.future.done():
                self._log.debug(
                    "future done %s - not submitting to delegate", job.future
                )
                return
            if job.attempt != 0:
                # Anything past the first attempt counts as a retry.
                metrics.RETRY_TOTAL.labels(executor=self._name).inc()
            delegate_future = self._delegate.submit(job.fn, *job.args, **job.kwargs)
            job.future.delegate_future = delegate_future
            new_job = RetryJob(
                job.policy,
                delegate_future,
                job.future,
                job.attempt + 1,
                None,
                job.fn,
                job.args,
                job.kwargs,
            )
            self._append_job(new_job)
            self._log.debug("Submitted: %s", new_job)
    # NOTE(review): callback install assumed to happen after the locks are
    # released (flattened source is ambiguous) so a fast-completing delegate
    # doesn't invoke _delegate_callback while they are held — confirm.
    delegate_future.add_done_callback(self._delegate_callback)
    self._wake_thread()
|
(self, job)
|
708,533
|
more_executors._impl.retry
|
_wake_thread
| null |
def _wake_thread(self):
    """Wake the submit thread so it re-evaluates the job queue."""
    self._submit_event.set()
|
(self)
|
708,537
|
more_executors._impl.retry
|
shutdown
| null |
def shutdown(self, wait=True, **_kwargs):
    """Shut down this executor and its delegate.

    Wakes the submit thread so it can observe shutdown and exit; when
    ``wait`` is true, joins the submit thread (bounded by MAX_TIMEOUT).
    """
    if self._shutdown():
        self._log.debug("Shutting down.")
        metrics.EXEC_INPROGRESS.labels(executor=self._name, type="retry").dec()
        self._wake_thread()
        self._delegate.shutdown(wait, **_kwargs)
        if wait:
            self._log.debug("Waiting for thread")
            self._submit_thread.join(MAX_TIMEOUT)
        self._log.debug("Shutdown complete")
|
(self, wait=True, **_kwargs)
|
708,538
|
more_executors._impl.retry
|
submit
| null |
def submit(self, *args, **kwargs):  # pylint: disable=arguments-differ
    """Submit a callable using this executor's default retry policy."""
    return self.submit_retry(self._default_retry_policy, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,539
|
more_executors._impl.retry
|
submit_retry
|
Submit a callable with a specific retry policy.
Parameters:
retry_policy (RetryPolicy): a policy which is used for this call only
|
def submit_retry(self, retry_policy, fn, *args, **kwargs):
    """Submit a callable with a specific retry policy.

    Parameters:
        retry_policy (RetryPolicy): a policy which is used for this call only
    """
    with self._shutdown.ensure_alive():
        future = RetryFuture(self)
        track_future(future, type="retry", executor=self._name)
        # attempt=0 and when=now: eligible for immediate submission.
        job = RetryJob(retry_policy, None, future, 0, monotonic(), fn, args, kwargs)
        self._append_job(job)
        # Let the submit thread know it should wake up to check for new jobs
        self._wake_thread()
        self._log.debug("Returning future %s", future)
        return future
|
(self, retry_policy, fn, *args, **kwargs)
|
708,548
|
more_executors._impl.retry
|
RetryPolicy
|
Instances of this class may be supplied to :class:`RetryExecutor`
to customize the retry behavior.
This base class will never retry. See :class:`ExceptionRetryPolicy` for
a general-purpose implementation.
|
class RetryPolicy(object):
    """Base class for retry policies used with :class:`RetryExecutor`.

    This base implementation never retries; subclass and override
    :meth:`should_retry` / :meth:`sleep_time` to customize behavior
    (see :class:`ExceptionRetryPolicy` for a general-purpose
    implementation).
    """

    def should_retry(self, attempt, future):
        """Decide whether a completed future should be attempted again.

        Parameters:
            attempt (int): number of times the future has been attempted;
                starts counting at 1
            future (~concurrent.futures.Future): a completed future

        Returns:
            bool: True if and only if a future should be retried.
        """
        return False

    def sleep_time(self, attempt, future):
        """Compute the delay before the next attempt.

        Parameters:
            attempt (int): number of times the future has been attempted;
                starts counting at 1
            future (~concurrent.futures.Future): a completed future

        Returns:
            float: the amount of time (in seconds) to delay before the
            next attempt at running a future.
        """
        return 0
|
()
|
708,549
|
more_executors._impl.retry
|
should_retry
|
Parameters:
attempt (int): number of times the future has been attempted; starts counting at 1
future (~concurrent.futures.Future): a completed future
Returns:
bool:
True if and only if a future should be retried.
|
def should_retry(self, attempt, future):
    """Decide whether a completed future should be attempted again.

    Parameters:
        attempt (int): number of times the future has been attempted;
            starts counting at 1
        future (~concurrent.futures.Future): a completed future

    Returns:
        bool: True if and only if a future should be retried.
    """
    return False
|
(self, attempt, future)
|
708,550
|
more_executors._impl.retry
|
sleep_time
|
Parameters:
attempt (int): number of times the future has been attempted; starts counting at 1
future (~concurrent.futures.Future): a completed future
Returns:
float:
The amount of time (in seconds) to delay before the next
attempt at running a future.
|
def sleep_time(self, attempt, future):
    """Compute the delay before the next attempt.

    Parameters:
        attempt (int): number of times the future has been attempted;
            starts counting at 1
        future (~concurrent.futures.Future): a completed future

    Returns:
        float: the amount of time (in seconds) to delay before the next
        attempt at running a future.
    """
    return 0
|
(self, attempt, future)
|
708,551
|
more_executors._impl.sync
|
SyncExecutor
|
An executor which immediately invokes all submitted callables.
|
class SyncExecutor(CanCustomizeBind, Executor):
    """An executor which immediately invokes all submitted callables."""

    def __init__(self, logger=None, name="default"):
        """
        Parameters:
            logger (~logging.Logger):
                a logger used for messages from this executor
            name (str):
                a name for this executor
        .. versionchanged:: 2.7.0
            Introduced ``name``.
        """
        # NOTE(review): ``logger`` is accepted but never stored or used in
        # this class — confirm whether it should be wired up or deprecated.
        super(SyncExecutor, self).__init__()
        self._name = name
        self._shutdown = ShutdownHelper()
        metrics.EXEC_TOTAL.labels(type="sync", executor=self._name).inc()
        metrics.EXEC_INPROGRESS.labels(type="sync", executor=self._name).inc()

    def shutdown(self, wait=True, **_kwargs):
        # Guarded so the gauge is decremented at most once.
        if self._shutdown():
            super(SyncExecutor, self).shutdown(wait, **_kwargs)
            metrics.EXEC_INPROGRESS.labels(type="sync", executor=self._name).dec()

    def submit(self, fn, *args, **kwargs):  # pylint: disable=arguments-differ
        """Immediately invokes `fn(*args, **kwargs)` and returns a future
        with the result (or exception)."""
        with self._shutdown.ensure_alive():
            future = Future()
            track_future(future, type="sync", executor=self._name)
            try:
                result = fn(*args, **kwargs)
                future.set_result(result)
            except Exception:
                # Resolve the future with the raised exception instead of
                # propagating it to the caller.
                copy_exception(future)
            return future
|
(logger=None, name='default')
|
708,555
|
more_executors._impl.sync
|
__init__
|
Parameters:
logger (~logging.Logger):
a logger used for messages from this executor
name (str):
a name for this executor
.. versionchanged:: 2.7.0
Introduced ``name``.
|
def __init__(self, logger=None, name="default"):
    """
    Parameters:
        logger (~logging.Logger):
            a logger used for messages from this executor
        name (str):
            a name for this executor
    .. versionchanged:: 2.7.0
        Introduced ``name``.
    """
    # NOTE(review): ``logger`` is accepted but never stored or used here —
    # confirm whether it should be wired up or deprecated.
    super(SyncExecutor, self).__init__()
    self._name = name
    self._shutdown = ShutdownHelper()
    metrics.EXEC_TOTAL.labels(type="sync", executor=self._name).inc()
    metrics.EXEC_INPROGRESS.labels(type="sync", executor=self._name).inc()
|
(self, logger=None, name='default')
|
708,559
|
more_executors._impl.sync
|
shutdown
| null |
def shutdown(self, wait=True, **_kwargs):
    """Shut down this executor, forwarding to the Executor base class."""
    # Guarded so the gauge is decremented at most once.
    if self._shutdown():
        super(SyncExecutor, self).shutdown(wait, **_kwargs)
        metrics.EXEC_INPROGRESS.labels(type="sync", executor=self._name).dec()
|
(self, wait=True, **_kwargs)
|
708,560
|
more_executors._impl.sync
|
submit
|
Immediately invokes `fn(*args, **kwargs)` and returns a future
with the result (or exception).
|
def submit(self, fn, *args, **kwargs):  # pylint: disable=arguments-differ
    """Invoke ``fn(*args, **kwargs)`` synchronously and return an
    already-completed future holding its result (or exception)."""
    with self._shutdown.ensure_alive():
        out = Future()
        track_future(out, type="sync", executor=self._name)
        try:
            out.set_result(fn(*args, **kwargs))
        except Exception:
            # Resolve the future with the raised exception rather than
            # letting it propagate to the caller.
            copy_exception(out)
        return out
|
(self, fn, *args, **kwargs)
|
708,569
|
more_executors._impl.throttle
|
ThrottleExecutor
|
An executor which delegates to another executor while enforcing
a limit on the number of futures running concurrently.
- Callables are submitted to the delegate executor, from a different
thread than the calling thread.
- Where `count` is used to initialize this executor, if there
are already `count` futures submitted to the delegate executor and not
yet :meth:`~concurrent.futures.Future.done`, additional callables will
either be queued or will block on submit, and will only be submitted
to the delegate executor once there are less than `count` futures in
progress.
.. versionadded:: 1.9.0
|
class ThrottleExecutor(CanCustomizeBind, Executor):
    """An executor which delegates to another executor while enforcing
    a limit on the number of futures running concurrently.

    - Callables are submitted to the delegate executor, from a different
      thread than the calling thread.
    - Where `count` is used to initialize this executor, if there
      are already `count` futures submitted to the delegate executor and not
      yet :meth:`~concurrent.futures.Future.done`, additional callables will
      either be queued or will block on submit, and will only be submitted
      to the delegate executor once there are less than `count` futures in
      progress.

    .. versionadded:: 1.9.0
    """

    def __init__(self, delegate, count, logger=None, name="default", block=False):
        """
        Parameters:
            delegate (~concurrent.futures.Executor):
                an executor to which callables will be submitted
            count (int, callable):
                int:
                    maximum number of concurrently running futures.
                callable:
                    a callable which returns an ``int`` (or ``None``, to indicate
                    no throttling).
                    The callable will be invoked each time this executor needs
                    to decide whether to throttle futures; this may be used
                    to implement dynamic throttling.
                    .. versionadded:: 2.5.0
            block (bool):
                If ``True``, calls to ``submit()`` on this executor may block if
                there are already ``count`` futures in progress.
                Otherwise, calls to ``submit()`` will always return immediately
                and callables will be queued internally.
            logger (~logging.Logger):
                a logger used for messages from this executor
            name (str):
                a name for this executor
        .. versionchanged:: 2.7.0
            Introduced ``name``.
        .. versionchanged:: 2.11.0
            Introduced ``block``.
        """
        self._log = LogWrapper(
            logger if logger else logging.getLogger("ThrottleExecutor")
        )
        self._block = block
        self._name = name
        self._delegate = delegate
        # Jobs waiting to be handed to the delegate; guarded by _lock.
        self._to_submit = deque()
        self._lock = Lock()
        self._event = get_event()
        self._running_count = AtomicInt()
        # Normalize ``count`` to a zero-arg callable so static and dynamic
        # throttles are handled uniformly.
        self._throttle = count if callable(count) else lambda: count
        self._last_throttle = self._throttle()
        self._shutdown = ShutdownHelper()
        # Capture the event (not self) in the weakref callback so garbage
        # collection of this executor can still wake the submit thread.
        event = self._event
        self_ref = weakref.ref(self, lambda _: event.set())
        metrics.EXEC_INPROGRESS.labels(type="throttle", executor=self._name).inc()
        metrics.EXEC_TOTAL.labels(type="throttle", executor=self._name).inc()
        # Daemon thread: must not keep the interpreter alive on exit.
        self._thread = Thread(
            name="ThrottleExecutor-%s" % name, target=_submit_loop, args=(self_ref,)
        )
        self._thread.daemon = True
        self._thread.start()

    def submit(self, fn, *args, **kwargs):  # pylint: disable=arguments-differ
        """Enqueue ``fn(*args, **kwargs)``; the submit thread hands it to
        the delegate once the throttle allows. May block if ``block=True``."""
        with self._shutdown.ensure_alive():
            # In blocking mode, wait until the queue is under the throttle.
            self._block_until_ready(self._eval_throttle())
            out = ThrottleFuture(self)
            track_future(out, type="throttle", executor=self._name)
            job = ThrottleJob(out, fn, args, kwargs)
            with self._lock:
                self._to_submit.append(job)
                metrics.THROTTLE_QUEUE.labels(executor=self._name).inc()
            self._log.debug("Enqueued: %s", job)
            # Wake the submit thread to consider the new job.
            self._event.set()
            return out

    def shutdown(self, wait=True, **_kwargs):
        if self._shutdown():
            self._log.debug("Shutting down")
            metrics.EXEC_INPROGRESS.labels(type="throttle", executor=self._name).dec()
            self._delegate.shutdown(wait, **_kwargs)
            # Wake the submit thread so it can observe shutdown and exit.
            self._event.set()
            if wait:
                self._thread.join(MAX_TIMEOUT)

    def _block_until_ready(self, throttle_val):
        # Block the caller while the pending queue is at/over the throttle;
        # no-op unless blocking mode is enabled.
        while self._block and not self._shutdown.is_shutdown:
            if len(self._to_submit) < throttle_val:
                return
            self._log.debug("%s: throttling on submit", self._name)
            self._event.wait(30.0)

    def _eval_throttle(self):
        # Re-evaluate the (possibly dynamic) throttle; on error, fall back
        # to the most recent successful value.
        try:
            self._last_throttle = self._throttle()
        except Exception:
            self._log.exception(
                "Error evaluating throttle count via %r", self._throttle
            )
        return self._last_throttle

    def _do_submit(self, job):
        # Hand one queued job to the delegate executor.
        delegate_future = self._delegate.submit(job.fn, *job.args, **job.kwargs)
        self._log.debug("Submitted %s yielding %s", job, delegate_future)
        # _delegate_future_done is a classmethod: binding the pieces rather
        # than self avoids holding a strong reference to this executor.
        delegate_future.add_done_callback(
            partial(
                self._delegate_future_done, self._log, self._running_count, self._event
            )
        )
        job.future._set_delegate(delegate_future)

    def _do_cancel(self, future):
        # Remove the queued (not yet submitted) job owning ``future``.
        with self._lock:
            for job in self._to_submit:
                if job.future is future:
                    self._to_submit.remove(job)
                    self._log.debug("Cancelled %s", job)
                    return True
            self._log.debug("Could not find for cancel: %s", future)
            return False

    @classmethod
    def _delegate_future_done(cls, log, running_count, event, future):
        # Whenever an inner future completes, the thread should wake up
        # in case there's something to be submitted
        log.debug("Delegate future done: %s", future)
        running_count.decr()
        event.set()
|
(delegate, count, logger=None, name='default', block=False)
|
708,573
|
more_executors._impl.throttle
|
__init__
|
Parameters:
delegate (~concurrent.futures.Executor):
an executor to which callables will be submitted
count (int, callable):
int:
maximum number of concurrently running futures.
callable:
a callable which returns an ``int`` (or ``None``, to indicate
no throttling).
The callable will be invoked each time this executor needs
to decide whether to throttle futures; this may be used
to implement dynamic throttling.
.. versionadded:: 2.5.0
block (bool):
If ``True``, calls to ``submit()`` on this executor may block if
there are already ``count`` futures in progress.
Otherwise, calls to ``submit()`` will always return immediately
and callables will be queued internally.
logger (~logging.Logger):
a logger used for messages from this executor
name (str):
a name for this executor
.. versionchanged:: 2.7.0
Introduced ``name``.
.. versionchanged:: 2.11.0
Introduced ``block``.
|
def __init__(self, delegate, count, logger=None, name="default", block=False):
    """
    Parameters:
        delegate (~concurrent.futures.Executor):
            an executor to which callables will be submitted
        count (int, callable):
            int:
                maximum number of concurrently running futures.
            callable:
                a callable which returns an ``int`` (or ``None``, to indicate
                no throttling).
                The callable will be invoked each time this executor needs
                to decide whether to throttle futures; this may be used
                to implement dynamic throttling.
                .. versionadded:: 2.5.0
        block (bool):
            If ``True``, calls to ``submit()`` on this executor may block if
            there are already ``count`` futures in progress.
            Otherwise, calls to ``submit()`` will always return immediately
            and callables will be queued internally.
        logger (~logging.Logger):
            a logger used for messages from this executor
        name (str):
            a name for this executor
    .. versionchanged:: 2.7.0
        Introduced ``name``.
    .. versionchanged:: 2.11.0
        Introduced ``block``.
    """
    self._log = LogWrapper(
        logger if logger else logging.getLogger("ThrottleExecutor")
    )
    self._block = block
    self._name = name
    self._delegate = delegate
    # Jobs waiting to be handed to the delegate; guarded by _lock.
    self._to_submit = deque()
    self._lock = Lock()
    self._event = get_event()
    self._running_count = AtomicInt()
    # Normalize ``count`` to a zero-arg callable so static and dynamic
    # throttles are handled uniformly.
    self._throttle = count if callable(count) else lambda: count
    self._last_throttle = self._throttle()
    self._shutdown = ShutdownHelper()
    # Capture the event (not self) in the weakref callback so garbage
    # collection of this executor can still wake the submit thread.
    event = self._event
    self_ref = weakref.ref(self, lambda _: event.set())
    metrics.EXEC_INPROGRESS.labels(type="throttle", executor=self._name).inc()
    metrics.EXEC_TOTAL.labels(type="throttle", executor=self._name).inc()
    # Daemon thread: must not keep the interpreter alive on exit.
    self._thread = Thread(
        name="ThrottleExecutor-%s" % name, target=_submit_loop, args=(self_ref,)
    )
    self._thread.daemon = True
    self._thread.start()
|
(self, delegate, count, logger=None, name='default', block=False)
|
708,574
|
more_executors._impl.throttle
|
_block_until_ready
| null |
def _block_until_ready(self, throttle_val):
while self._block and not self._shutdown.is_shutdown:
if len(self._to_submit) < throttle_val:
return
self._log.debug("%s: throttling on submit", self._name)
self._event.wait(30.0)
|
(self, throttle_val)
|
708,575
|
more_executors._impl.throttle
|
_do_cancel
| null |
def _do_cancel(self, future):
with self._lock:
for job in self._to_submit:
if job.future is future:
self._to_submit.remove(job)
self._log.debug("Cancelled %s", job)
return True
self._log.debug("Could not find for cancel: %s", future)
return False
|
(self, future)
|
708,576
|
more_executors._impl.throttle
|
_do_submit
| null |
def _do_submit(self, job):
    """Hand one queued job to the delegate executor.

    Wires the delegate future's completion back to this executor (to
    decrement the running count and wake the submit loop), then links it
    to the outer ThrottleFuture.
    """
    delegate_future = self._delegate.submit(job.fn, *job.args, **job.kwargs)
    self._log.debug("Submitted %s yielding %s", job, delegate_future)
    # _delegate_future_done is a classmethod: binding log/counter/event
    # rather than self avoids holding a strong reference to this executor.
    delegate_future.add_done_callback(
        partial(
            self._delegate_future_done, self._log, self._running_count, self._event
        )
    )
    job.future._set_delegate(delegate_future)
|
(self, job)
|
708,577
|
more_executors._impl.throttle
|
_eval_throttle
| null |
def _eval_throttle(self):
try:
self._last_throttle = self._throttle()
except Exception:
self._log.exception(
"Error evaluating throttle count via %r", self._throttle
)
return self._last_throttle
|
(self)
|
708,581
|
more_executors._impl.throttle
|
shutdown
| null |
def shutdown(self, wait=True, **_kwargs):
    """Shut down this executor and its delegate; wakes the submit thread
    so it can observe shutdown and exit."""
    if self._shutdown():
        self._log.debug("Shutting down")
        metrics.EXEC_INPROGRESS.labels(type="throttle", executor=self._name).dec()
        self._delegate.shutdown(wait, **_kwargs)
        self._event.set()
        if wait:
            self._thread.join(MAX_TIMEOUT)
|
(self, wait=True, **_kwargs)
|
708,582
|
more_executors._impl.throttle
|
submit
| null |
def submit(self, fn, *args, **kwargs):  # pylint: disable=arguments-differ
    """Enqueue ``fn(*args, **kwargs)``; the submit thread hands it to the
    delegate once the throttle allows. May block when blocking mode is on."""
    with self._shutdown.ensure_alive():
        # In blocking mode, wait here until the queue is under the throttle.
        self._block_until_ready(self._eval_throttle())
        out = ThrottleFuture(self)
        track_future(out, type="throttle", executor=self._name)
        job = ThrottleJob(out, fn, args, kwargs)
        with self._lock:
            self._to_submit.append(job)
            metrics.THROTTLE_QUEUE.labels(executor=self._name).inc()
        self._log.debug("Enqueued: %s", job)
        # Wake the submit thread to consider the new job.
        self._event.set()
        return out
|
(self, fn, *args, **kwargs)
|
708,591
|
more_executors._impl.timeout
|
TimeoutExecutor
|
An executor which delegates to another executor while applying
a timeout to each returned future.
For any futures returned by this executor, if the future hasn't
completed approximately within `timeout` seconds of its creation,
an attempt will be made to cancel the future.
Note that only a single attempt is made to cancel any future, and there
is no guarantee that this will succeed.
.. versionadded:: 1.7.0
|
class TimeoutExecutor(CanCustomizeBind, Executor):
    """An executor which delegates to another executor while applying
    a timeout to each returned future.

    For any futures returned by this executor, if the future hasn't
    completed approximately within `timeout` seconds of its creation,
    an attempt will be made to cancel the future.

    Note that only a single attempt is made to cancel any future, and there
    is no guarantee that this will succeed.

    .. versionadded:: 1.7.0
    """

    def __init__(self, delegate, timeout, logger=None, name="default"):
        """
        Parameters:
            delegate (~concurrent.futures.Executor):
                an executor to which callables are submitted
            timeout (float):
                timeout (in seconds) after which :meth:`concurrent.futures.Future.cancel()`
                will be invoked on any generated future which has not completed
            logger (~logging.Logger):
                a logger used for messages from this executor
            name (str):
                a name for this executor
        .. versionchanged:: 2.7.0
            Introduced ``name``.
        """
        self._log = logger if logger else LOG
        self._name = name
        self._delegate = delegate
        self._timeout = timeout
        self._shutdown = ShutdownHelper()
        # Jobs with pending deadlines; guarded by _jobs_lock.
        self._jobs = []
        self._jobs_lock = Lock()
        self._jobs_write = get_event()
        # Capture the event (not self) so the weakref callback can wake the
        # job thread when this executor is garbage-collected.
        event = self._jobs_write
        self_ref = weakref.ref(self, lambda _: event.set())
        self._job_thread = Thread(
            name="TimeoutExecutor-%s" % name, target=self._job_loop, args=(self_ref,)
        )
        self._job_thread.daemon = True
        self._job_thread.start()
        metrics.EXEC_TOTAL.labels(type="timeout", executor=self._name).inc()
        metrics.EXEC_INPROGRESS.labels(type="timeout", executor=self._name).inc()

    def submit(self, *args, **kwargs):  # pylint: disable=arguments-differ
        # Delegates to submit_timeout with the executor-wide default timeout.
        return self.submit_timeout(self._timeout, *args, **kwargs)

    def submit_timeout(self, timeout, fn, *args, **kwargs):
        """Like :code:`submit(fn, *args, **kwargs)`, but uses the specified
        timeout rather than this executor's default.

        .. versionadded:: 1.19.0
        """
        with self._shutdown.ensure_alive():
            delegate_future = self._delegate.submit(fn, *args, **kwargs)
            future = MapFuture(delegate_future)
            track_future(future, type="timeout", executor=self._name)
            # Completion wakes the job thread so the finished job is pruned.
            future.add_done_callback(self._on_future_done)
            job = Job(future, delegate_future, monotonic() + timeout)
            with self._jobs_lock:
                self._jobs.append(job)
            self._jobs_write.set()
            return future

    def shutdown(self, wait=True, **_kwargs):
        if self._shutdown():
            self._log.debug("shutdown")
            metrics.EXEC_INPROGRESS.labels(type="timeout", executor=self._name).dec()
            # Wake the job thread so it can observe shutdown and exit.
            self._jobs_write.set()
            self._delegate.shutdown(wait, **_kwargs)
            if wait:
                self._job_thread.join(MAX_TIMEOUT)

    def _partition_jobs(self):
        # Split jobs into (pending, overdue), discarding completed ones.
        pending = []
        overdue = []
        now = monotonic()
        for job in self._jobs:
            if job.future.done():
                self._log.debug("Discarding job for completed future: %s", job)
            elif job.deadline < now:
                overdue.append(job)
            else:
                pending.append(job)
        return (pending, overdue)

    def _on_future_done(self, future):
        # Any completing future may allow a job to be pruned; wake the thread.
        self._log.debug("Waking thread for %s", future)
        self._jobs_write.set()

    def _do_cancel(self, job):
        # Single cancel attempt for an overdue job; success is metered.
        self._log.debug("Attempting cancel: %s", job)
        cancel_result = job.future.cancel()
        if cancel_result:
            metrics.TIMEOUT.labels(executor=self._name).inc()
        self._log.debug("Cancel of %s resulted in %s", job, cancel_result)

    @classmethod
    @executor_loop
    def _job_loop(cls, executor_ref):
        # Runs on the daemon thread; holds only a weakref to the executor
        # so the loop can exit once the executor is collected.
        while True:
            (event, wait_time) = cls._job_loop_iter(executor_ref())
            if not event:
                break
            event.wait(wait_time)
            event.clear()

    @classmethod
    def _job_loop_iter(cls, executor):
        # One pass of the job loop; returns (event, wait_time), where a
        # None event tells the caller to stop looping.
        if not executor:
            LOG.debug("Executor was collected")
            return (None, None)
        if executor._shutdown.is_shutdown or is_shutdown():
            executor._log.debug("Executor was shut down")
            return (None, None)
        executor._log.debug("job loop")
        with executor._jobs_lock:
            (pending, overdue) = executor._partition_jobs()
            executor._jobs = pending
        executor._log.debug("jobs: %s overdue, %s pending", len(overdue), len(pending))
        for job in overdue:
            executor._do_cancel(job)
        wait_time = None
        if pending:
            # Sleep only until the earliest pending deadline.
            earliest = min([job.deadline for job in pending])
            wait_time = max(earliest - monotonic(), 0)
            executor._log.debug("Wait until %s", wait_time)
        return (executor._jobs_write, wait_time)
|
(delegate, timeout, logger=None, name='default')
|
708,595
|
more_executors._impl.timeout
|
__init__
|
Parameters:
delegate (~concurrent.futures.Executor):
an executor to which callables are submitted
timeout (float):
timeout (in seconds) after which :meth:`concurrent.futures.Future.cancel()`
will be invoked on any generated future which has not completed
logger (~logging.Logger):
a logger used for messages from this executor
name (str):
a name for this executor
.. versionchanged:: 2.7.0
Introduced ``name``.
|
def __init__(self, delegate, timeout, logger=None, name="default"):
    """
    Parameters:
        delegate (~concurrent.futures.Executor):
            an executor to which callables are submitted
        timeout (float):
            timeout (in seconds) after which :meth:`concurrent.futures.Future.cancel()`
            will be invoked on any generated future which has not completed
        logger (~logging.Logger):
            a logger used for messages from this executor
        name (str):
            a name for this executor
    .. versionchanged:: 2.7.0
        Introduced ``name``.
    """
    self._log = logger if logger else LOG
    self._name = name
    self._delegate = delegate
    self._timeout = timeout
    self._shutdown = ShutdownHelper()
    # Jobs with pending deadlines; guarded by _jobs_lock.
    self._jobs = []
    self._jobs_lock = Lock()
    self._jobs_write = get_event()
    # Capture the event (not self) so the weakref callback can wake the
    # job thread when this executor is garbage-collected.
    event = self._jobs_write
    self_ref = weakref.ref(self, lambda _: event.set())
    self._job_thread = Thread(
        name="TimeoutExecutor-%s" % name, target=self._job_loop, args=(self_ref,)
    )
    self._job_thread.daemon = True
    self._job_thread.start()
    metrics.EXEC_TOTAL.labels(type="timeout", executor=self._name).inc()
    metrics.EXEC_INPROGRESS.labels(type="timeout", executor=self._name).inc()
|
(self, delegate, timeout, logger=None, name='default')
|
708,596
|
more_executors._impl.timeout
|
_do_cancel
| null |
def _do_cancel(self, job):
self._log.debug("Attempting cancel: %s", job)
cancel_result = job.future.cancel()
if cancel_result:
metrics.TIMEOUT.labels(executor=self._name).inc()
self._log.debug("Cancel of %s resulted in %s", job, cancel_result)
|
(self, job)
|
708,597
|
more_executors._impl.timeout
|
_on_future_done
| null |
def _on_future_done(self, future):
    """Wake the job thread whenever one of our futures completes, so the
    corresponding job can be pruned."""
    self._log.debug("Waking thread for %s", future)
    self._jobs_write.set()
|
(self, future)
|
708,598
|
more_executors._impl.timeout
|
_partition_jobs
| null |
def _partition_jobs(self):
pending = []
overdue = []
now = monotonic()
for job in self._jobs:
if job.future.done():
self._log.debug("Discarding job for completed future: %s", job)
elif job.deadline < now:
overdue.append(job)
else:
pending.append(job)
return (pending, overdue)
|
(self)
|
708,602
|
more_executors._impl.timeout
|
shutdown
| null |
def shutdown(self, wait=True, **_kwargs):
    """Shut down this executor and its delegate; wakes the job thread so
    it can observe shutdown and exit."""
    if self._shutdown():
        self._log.debug("shutdown")
        metrics.EXEC_INPROGRESS.labels(type="timeout", executor=self._name).dec()
        self._jobs_write.set()
        self._delegate.shutdown(wait, **_kwargs)
        if wait:
            self._job_thread.join(MAX_TIMEOUT)
|
(self, wait=True, **_kwargs)
|
708,603
|
more_executors._impl.timeout
|
submit
| null |
def submit(self, *args, **kwargs):  # pylint: disable=arguments-differ
    """Submit a callable using this executor's default timeout."""
    return self.submit_timeout(self._timeout, *args, **kwargs)
|
(self, *args, **kwargs)
|
708,604
|
more_executors._impl.timeout
|
submit_timeout
|
Like :code:`submit(fn, *args, **kwargs)`, but uses the specified
timeout rather than this executor's default.
.. versionadded:: 1.19.0
|
def submit_timeout(self, timeout, fn, *args, **kwargs):
    """Like :code:`submit(fn, *args, **kwargs)`, but uses the specified
    timeout rather than this executor's default.

    .. versionadded:: 1.19.0
    """
    with self._shutdown.ensure_alive():
        delegate_future = self._delegate.submit(fn, *args, **kwargs)
        future = MapFuture(delegate_future)
        track_future(future, type="timeout", executor=self._name)
        # Completion wakes the job thread so the finished job is pruned.
        future.add_done_callback(self._on_future_done)
        job = Job(future, delegate_future, monotonic() + timeout)
        with self._jobs_lock:
            self._jobs.append(job)
        self._jobs_write.set()
        return future
|
(self, timeout, fn, *args, **kwargs)
|
708,616
|
more_executors._impl.futures.bool
|
f_and
|
Boolean ``AND`` over a number of futures.
Signature: :code:`Future<A>[, Future<B>[, ...]] ⟶ Future<A|B|...>`
Arguments:
f (~concurrent.futures.Future)
Any future
fs (~concurrent.futures.Future)
Any futures
Returns:
:class:`~concurrent.futures.Future`
A future resolved from the inputs using ``AND`` semantics:
- Resolved with the latest value returned by an input
future, if all futures are resolved with true values.
- Otherwise, resolved with the earliest false value or exception
returned by the input futures.
.. note::
This function is tested with up to 100,000 input futures.
Exceeding this limit may result in performance issues.
.. versionadded:: 1.19.0
|
# -*- coding: utf-8 -*-
from threading import Lock
import logging
from concurrent.futures import Future
from .base import chain_cancel, weak_callback
from ..common import copy_future_exception, try_set_result
from .check import ensure_futures
from ..logwrap import LogWrapper
from ..metrics import track_future
LOG = LogWrapper(logging.getLogger("more_executors.futures"))
class BoolOperation(object):
    """Base for boolean combinations (AND/OR-style) of futures.

    Tracks a set of input futures and resolves ``self.out`` once the
    subclass-defined state machine (:meth:`get_state_update`) decides
    the outcome.
    """

    def __init__(self, fs):
        # Remaining (not yet handled) input futures; a dict is used as an
        # insertion-ordered set.
        self.fs = {}
        for f in fs:
            self.fs[f] = True
        self.done = False
        self.lock = Lock()
        # The combined output future.
        self.out = Future()
        for f in fs:
            # Cancelling the output propagates cancellation to each input.
            chain_cancel(self.out, f)
            f.add_done_callback(weak_callback(self.handle_done))

    def get_state_update(self, f):
        """Return ``(set_result, set_exception, cancel_futures)`` for a
        newly completed input future ``f``; implemented by subclasses."""
        raise NotImplementedError()  # pragma: no cover

    def handle_done(self, f):
        # Callback invoked for each completed input future.
        set_result = False
        set_exception = False
        cancel_futures = set()
        with self.lock:
            if self.done:
                # Outcome already decided; ignore stragglers.
                # NOTE(review): self.done is never assigned True in this
                # class — presumably subclasses set it inside
                # get_state_update; confirm.
                return
            del self.fs[f]
            (set_result, set_exception, cancel_futures) = self.get_state_update(f)
        # Apply the decided actions outside the lock.
        if set_result:
            try_set_result(self.out, f.result())
        if set_exception:
            copy_future_exception(f, self.out)
        for to_cancel in cancel_futures:
            to_cancel.cancel()
|
(f, *fs)
|
708,617
|
more_executors._impl.futures.apply
|
f_apply
|
Call a function, where the function, its arguments and return value
are all provided by futures.
Signature: :code:`Future<fn<A[,B[,...]]⟶R>>, Future<A>[, Future<B>[, ...]] ⟶ Future<R>`
Arguments:
future_fn (:class:`~concurrent.futures.Future` of :class:`callable`)
A future returning a function to be applied.
future_args (~concurrent.futures.Future)
Futures holding positional arguments for the function.
future_kwargs (~concurrent.futures.Future)
Futures holding keyword arguments for the function.
Returns:
~concurrent.futures.Future:
A future holding the returned value of the applied function.
.. versionadded:: 1.19.0
|
# -*- coding: utf-8 -*-
from .base import wrap
from .check import ensure_futures
from ..metrics import track_future
# for wrapping arguments.
# This value means an argument came from *args rather than **kwargs
ARGS = object()
@ensure_futures
def f_apply(future_fn, *future_args, **future_kwargs):
    """Call a function, where the function, its arguments and return value
    are all provided by futures.

    Signature: :code:`Future<fn<A[,B[,...]]⟶R>>, Future<A>[, Future<B>[, ...]] ⟶ Future<R>`

    Arguments:
        future_fn (:class:`~concurrent.futures.Future` of :class:`callable`)
            A future returning a function to be applied.
        future_args (~concurrent.futures.Future)
            Futures holding positional arguments for the function.
        future_kwargs (~concurrent.futures.Future)
            Futures holding keyword arguments for the function.

    Returns:
        ~concurrent.futures.Future:
            A future holding the returned value of the applied function.

    .. versionadded:: 1.19.0
    """
    # Tag each argument future with its position/keyword (via the ARGS
    # sentinel) so the call can be reassembled once all inputs resolve.
    wrapped_args = _wrap_args(*future_args, **future_kwargs)
    return track_future(_wrapped_f_apply(future_fn, wrapped_args), type="apply")
|
(future_fn, *future_args, **future_kwargs)
|
708,618
|
more_executors._impl.futures.map
|
f_flat_map
|
Map the output value of a future using the given future-returning functions.
Like :meth:`f_map`, except that the mapping functions must return a future,
and that future will be flattened into the output value (avoiding a nested future).
Signature: :code:`Future<A>, fn<A⟶Future<B>> ⟶ Future<B>`
Arguments:
future (~concurrent.futures.Future)
Any future.
fn (callable)
Any future-returning callable to be applied on successful futures.
This function is provided the result of the input future.
error_fn (callable)
Any future-returning callable to be applied on unsuccessful futures.
This function is provided the exception of the input future.
Returns:
:class:`~concurrent.futures.Future`
A future which is:
- equivalent to that returned by :obj:`fn` if input future succeeded
- or equivalent to that returned by :obj:`error_fn` if input future failed
- or resolved with an exception if any of :obj:`future`, :obj:`fn` or
:obj:`error_fn` failed
.. versionadded:: 1.19.0
.. versionchanged:: 2.2.0
Introduced ``error_fn``.
|
def f_map(future, fn=None, error_fn=None):
"""Map the output value of a future using the given functions.
Signature: :code:`Future<A>, fn<A⟶B> ⟶ Future<B>`
Arguments:
future (~concurrent.futures.Future)
Any future.
fn (callable)
Any callable to be applied on successful futures.
This function is provided the result of the input future.
error_fn (callable)
Any callable to be applied on unsuccessful futures.
This function is provided the exception of the input future.
Returns:
:class:`~concurrent.futures.Future`
A future resolved with:
- the returned value of :code:`fn(future.result())`
- or the returned value of :code:`error_fn(future.exception())`
- or with the exception raised by :obj:`future`, :obj:`fn` or :obj:`error_fn`.
.. versionadded:: 1.19.0
.. versionchanged:: 2.2.0
Introduced ``error_fn``.
"""
return wrap(future).with_map(fn=fn, error_fn=error_fn)()
|
(future, fn=None, error_fn=None)
|
708,619
|
more_executors._impl.futures.map
|
f_map
|
Map the output value of a future using the given functions.
Signature: :code:`Future<A>, fn<A⟶B> ⟶ Future<B>`
Arguments:
future (~concurrent.futures.Future)
Any future.
fn (callable)
Any callable to be applied on successful futures.
This function is provided the result of the input future.
error_fn (callable)
Any callable to be applied on unsuccessful futures.
This function is provided the exception of the input future.
Returns:
:class:`~concurrent.futures.Future`
A future resolved with:
- the returned value of :code:`fn(future.result())`
- or the returned value of :code:`error_fn(future.exception())`
- or with the exception raised by :obj:`future`, :obj:`fn` or :obj:`error_fn`.
.. versionadded:: 1.19.0
.. versionchanged:: 2.2.0
Introduced ``error_fn``.
|
def f_map(future, fn=None, error_fn=None):
"""Map the output value of a future using the given functions.
Signature: :code:`Future<A>, fn<A⟶B> ⟶ Future<B>`
Arguments:
future (~concurrent.futures.Future)
Any future.
fn (callable)
Any callable to be applied on successful futures.
This function is provided the result of the input future.
error_fn (callable)
Any callable to be applied on unsuccessful futures.
This function is provided the exception of the input future.
Returns:
:class:`~concurrent.futures.Future`
A future resolved with:
- the returned value of :code:`fn(future.result())`
- or the returned value of :code:`error_fn(future.exception())`
- or with the exception raised by :obj:`future`, :obj:`fn` or :obj:`error_fn`.
.. versionadded:: 1.19.0
.. versionchanged:: 2.2.0
Introduced ``error_fn``.
"""
return wrap(future).with_map(fn=fn, error_fn=error_fn)()
|
(future, fn=None, error_fn=None)
|
708,620
|
more_executors._impl.futures.nocancel
|
f_nocancel
|
Wrap a future to block cancellation.
Signature: :code:`Future<X> ⟶ Future<X>`
Arguments:
future (~concurrent.futures.Future)
Any future.
Returns:
:class:`~concurrent.futures.Future`
A wrapped version of :obj:`future` which cannot be cancelled.
.. versionadded:: 1.19.0
|
def f_nocancel(future):
"""Wrap a future to block cancellation.
Signature: :code:`Future<X> ⟶ Future<X>`
Arguments:
future (~concurrent.futures.Future)
Any future.
Returns:
:class:`~concurrent.futures.Future`
A wrapped version of :obj:`future` which cannot be cancelled.
.. versionadded:: 1.19.0
"""
return track_future(NoCancelFuture(future, lambda x: x), type="nocancel")
|
(future)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.