diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..28c2e2c429f34a8d57d02a3c6caf6218f4d9a642 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/__init__.py @@ -0,0 +1,45 @@ +"""The asyncio package, tracking PEP 3156.""" + +# flake8: noqa + +import sys + +# This relies on each of the submodules having an __all__ variable. +from .base_events import * +from .coroutines import * +from .events import * +from .exceptions import * +from .futures import * +from .locks import * +from .protocols import * +from .runners import * +from .queues import * +from .streams import * +from .subprocess import * +from .tasks import * +from .transports import * + +# Exposed for _asynciomodule.c to implement now deprecated +# Task.all_tasks() method. This function will be removed in 3.9. +from .tasks import _all_tasks_compat # NoQA + +__all__ = (base_events.__all__ + + coroutines.__all__ + + events.__all__ + + exceptions.__all__ + + futures.__all__ + + locks.__all__ + + protocols.__all__ + + runners.__all__ + + queues.__all__ + + streams.__all__ + + subprocess.__all__ + + tasks.__all__ + + transports.__all__) + +if sys.platform == 'win32': # pragma: no cover + from .windows_events import * + __all__ += windows_events.__all__ +else: + from .unix_events import * # pragma: no cover + __all__ += unix_events.__all__ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/__main__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..18bb87a5bc4ffd89fb24ef193ee88af64ee5dcbc --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/__main__.py @@ -0,0 +1,125 @@ +import ast +import asyncio +import code +import concurrent.futures +import inspect +import sys +import threading +import types +import warnings + +from . 
import futures + + +class AsyncIOInteractiveConsole(code.InteractiveConsole): + + def __init__(self, locals, loop): + super().__init__(locals) + self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT + + self.loop = loop + + def runcode(self, code): + future = concurrent.futures.Future() + + def callback(): + global repl_future + global repl_future_interrupted + + repl_future = None + repl_future_interrupted = False + + func = types.FunctionType(code, self.locals) + try: + coro = func() + except SystemExit: + raise + except KeyboardInterrupt as ex: + repl_future_interrupted = True + future.set_exception(ex) + return + except BaseException as ex: + future.set_exception(ex) + return + + if not inspect.iscoroutine(coro): + future.set_result(coro) + return + + try: + repl_future = self.loop.create_task(coro) + futures._chain_future(repl_future, future) + except BaseException as exc: + future.set_exception(exc) + + loop.call_soon_threadsafe(callback) + + try: + return future.result() + except SystemExit: + raise + except BaseException: + if repl_future_interrupted: + self.write("\nKeyboardInterrupt\n") + else: + self.showtraceback() + + +class REPLThread(threading.Thread): + + def run(self): + try: + banner = ( + f'asyncio REPL {sys.version} on {sys.platform}\n' + f'Use "await" directly instead of "asyncio.run()".\n' + f'Type "help", "copyright", "credits" or "license" ' + f'for more information.\n' + f'{getattr(sys, "ps1", ">>> ")}import asyncio' + ) + + console.interact( + banner=banner, + exitmsg='exiting asyncio REPL...') + finally: + warnings.filterwarnings( + 'ignore', + message=r'^coroutine .* was never awaited$', + category=RuntimeWarning) + + loop.call_soon_threadsafe(loop.stop) + + +if __name__ == '__main__': + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + repl_locals = {'asyncio': asyncio} + for key in {'__name__', '__package__', + '__loader__', '__spec__', + '__builtins__', '__file__'}: + repl_locals[key] = locals()[key] + + console = AsyncIOInteractiveConsole(repl_locals, loop) + + repl_future = None + repl_future_interrupted = False + + try: + import readline # NoQA + except ImportError: + pass + + repl_thread = REPLThread() + repl_thread.daemon = True + repl_thread.start() + + while True: + try: + loop.run_forever() + except KeyboardInterrupt: + if repl_future and not repl_future.done(): + repl_future.cancel() + repl_future_interrupted = True + continue + else: + break diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_events.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_events.py new file mode 100644 index 0000000000000000000000000000000000000000..799013d5ccccb593201b8ab829ba6bb5ad76add8 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_events.py @@ -0,0 +1,1884 @@ +"""Base implementation of event loop. + +The event loop can be broken up into a multiplexer (the part +responsible for notifying us of I/O events) and the event loop proper, +which wraps a multiplexer with functionality for scheduling callbacks, +immediately or at a given time in the future. + +Whenever a public API takes a callback, subsequent positional +arguments will be passed to the callback if/when it is called. This +avoids the proliferation of trivial lambdas implementing closures. +Keyword arguments for the callback are not supported; this is a +conscious design decision, leaving the door open for keyword arguments +to modify the meaning of the API call itself. 
+""" + +import collections +import collections.abc +import concurrent.futures +import functools +import heapq +import itertools +import os +import socket +import stat +import subprocess +import threading +import time +import traceback +import sys +import warnings +import weakref + +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import constants +from . import coroutines +from . import events +from . import exceptions +from . import futures +from . import protocols +from . import sslproto +from . import staggered +from . import tasks +from . import transports +from . import trsock +from .log import logger + + +__all__ = 'BaseEventLoop', + + +# Minimum number of _scheduled timer handles before cleanup of +# cancelled handles is performed. +_MIN_SCHEDULED_TIMER_HANDLES = 100 + +# Minimum fraction of _scheduled timer handles that are cancelled +# before cleanup of cancelled handles is performed. +_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 + + +_HAS_IPv6 = hasattr(socket, 'AF_INET6') + +# Maximum timeout passed to select to avoid OS limitations +MAXIMUM_SELECT_TIMEOUT = 24 * 3600 + +# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s +# *reuse_address* parameter +_unset = object() + + +def _format_handle(handle): + cb = handle._callback + if isinstance(getattr(cb, '__self__', None), tasks.Task): + # format the task + return repr(cb.__self__) + else: + return str(handle) + + +def _format_pipe(fd): + if fd == subprocess.PIPE: + return '' + elif fd == subprocess.STDOUT: + return '' + else: + return repr(fd) + + +def _set_reuseport(sock): + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError('reuse_port not supported by socket module') + else: + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except OSError: + raise ValueError('reuse_port not supported by socket module, ' + 'SO_REUSEPORT defined but not implemented.') + + +def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0): + # Try to skip getaddrinfo if "host" is already an IP. Users might have + # handled name resolution in their own code and pass in resolved IPs. + if not hasattr(socket, 'inet_pton'): + return + + if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \ + host is None: + return None + + if type == socket.SOCK_STREAM: + proto = socket.IPPROTO_TCP + elif type == socket.SOCK_DGRAM: + proto = socket.IPPROTO_UDP + else: + return None + + if port is None: + port = 0 + elif isinstance(port, bytes) and port == b'': + port = 0 + elif isinstance(port, str) and port == '': + port = 0 + else: + # If port's a service name like "http", don't skip getaddrinfo. + try: + port = int(port) + except (TypeError, ValueError): + return None + + if family == socket.AF_UNSPEC: + afs = [socket.AF_INET] + if _HAS_IPv6: + afs.append(socket.AF_INET6) + else: + afs = [family] + + if isinstance(host, bytes): + host = host.decode('idna') + if '%' in host: + # Linux's inet_pton doesn't accept an IPv6 zone index after host, + # like '::1%lo0'. + return None + + for af in afs: + try: + socket.inet_pton(af, host) + # The host has already been resolved. + if _HAS_IPv6 and af == socket.AF_INET6: + return af, type, proto, '', (host, port, flowinfo, scopeid) + else: + return af, type, proto, '', (host, port) + except OSError: + pass + + # "host" is not an IP address. 
+ return None + + +def _interleave_addrinfos(addrinfos, first_address_family_count=1): + """Interleave list of addrinfo tuples by family.""" + # Group addresses by family + addrinfos_by_family = collections.OrderedDict() + for addr in addrinfos: + family = addr[0] + if family not in addrinfos_by_family: + addrinfos_by_family[family] = [] + addrinfos_by_family[family].append(addr) + addrinfos_lists = list(addrinfos_by_family.values()) + + reordered = [] + if first_address_family_count > 1: + reordered.extend(addrinfos_lists[0][:first_address_family_count - 1]) + del addrinfos_lists[0][:first_address_family_count - 1] + reordered.extend( + a for a in itertools.chain.from_iterable( + itertools.zip_longest(*addrinfos_lists) + ) if a is not None) + return reordered + + +def _run_until_complete_cb(fut): + if not fut.cancelled(): + exc = fut.exception() + if isinstance(exc, (SystemExit, KeyboardInterrupt)): + # Issue #22429: run_forever() already finished, no need to + # stop it. + return + futures._get_loop(fut).stop() + + +if hasattr(socket, 'TCP_NODELAY'): + def _set_nodelay(sock): + if (sock.family in {socket.AF_INET, socket.AF_INET6} and + sock.type == socket.SOCK_STREAM and + sock.proto == socket.IPPROTO_TCP): + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) +else: + def _set_nodelay(sock): + pass + + +class _SendfileFallbackProtocol(protocols.Protocol): + def __init__(self, transp): + if not isinstance(transp, transports._FlowControlMixin): + raise TypeError("transport should be _FlowControlMixin instance") + self._transport = transp + self._proto = transp.get_protocol() + self._should_resume_reading = transp.is_reading() + self._should_resume_writing = transp._protocol_paused + transp.pause_reading() + transp.set_protocol(self) + if self._should_resume_writing: + self._write_ready_fut = self._transport._loop.create_future() + else: + self._write_ready_fut = None + + async def drain(self): + if self._transport.is_closing(): + raise ConnectionError("Connection closed by peer") + fut = self._write_ready_fut + if fut is None: + return + await fut + + def connection_made(self, transport): + raise RuntimeError("Invalid state: " + "connection should have been established already.") + + def connection_lost(self, exc): + if self._write_ready_fut is not None: + # Never happens if peer disconnects after sending the whole content + # Thus disconnection is always an exception from user perspective + if exc is None: + self._write_ready_fut.set_exception( + ConnectionError("Connection is closed by peer")) + else: + self._write_ready_fut.set_exception(exc) + self._proto.connection_lost(exc) + + def pause_writing(self): + if self._write_ready_fut is not None: + return + self._write_ready_fut = self._transport._loop.create_future() + + def resume_writing(self): + if self._write_ready_fut is None: + return + self._write_ready_fut.set_result(False) + self._write_ready_fut = None + + def data_received(self, data): + raise RuntimeError("Invalid state: reading should be paused") + + def eof_received(self): + raise RuntimeError("Invalid state: reading should be paused") + + async def restore(self): + self._transport.set_protocol(self._proto) + if self._should_resume_reading: + self._transport.resume_reading() + if self._write_ready_fut is not None: + # Cancel the future. + # Basically it has no effect because protocol is switched back, + # no code should wait for it anymore. 
+ self._write_ready_fut.cancel() + if self._should_resume_writing: + self._proto.resume_writing() + + +class Server(events.AbstractServer): + + def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog, + ssl_handshake_timeout): + self._loop = loop + self._sockets = sockets + self._active_count = 0 + self._waiters = [] + self._protocol_factory = protocol_factory + self._backlog = backlog + self._ssl_context = ssl_context + self._ssl_handshake_timeout = ssl_handshake_timeout + self._serving = False + self._serving_forever_fut = None + + def __repr__(self): + return f'<{self.__class__.__name__} sockets={self.sockets!r}>' + + def _attach(self): + assert self._sockets is not None + self._active_count += 1 + + def _detach(self): + assert self._active_count > 0 + self._active_count -= 1 + if self._active_count == 0 and self._sockets is None: + self._wakeup() + + def _wakeup(self): + waiters = self._waiters + self._waiters = None + for waiter in waiters: + if not waiter.done(): + waiter.set_result(waiter) + + def _start_serving(self): + if self._serving: + return + self._serving = True + for sock in self._sockets: + sock.listen(self._backlog) + self._loop._start_serving( + self._protocol_factory, sock, self._ssl_context, + self, self._backlog, self._ssl_handshake_timeout) + + def get_loop(self): + return self._loop + + def is_serving(self): + return self._serving + + @property + def sockets(self): + if self._sockets is None: + return () + return tuple(trsock.TransportSocket(s) for s in self._sockets) + + def close(self): + sockets = self._sockets + if sockets is None: + return + self._sockets = None + + for sock in sockets: + self._loop._stop_serving(sock) + + self._serving = False + + if (self._serving_forever_fut is not None and + not self._serving_forever_fut.done()): + self._serving_forever_fut.cancel() + self._serving_forever_fut = None + + if self._active_count == 0: + self._wakeup() + + async def start_serving(self): + self._start_serving() + # Skip one loop iteration so that all 'loop.add_reader' + # go through. + await tasks.sleep(0, loop=self._loop) + + async def serve_forever(self): + if self._serving_forever_fut is not None: + raise RuntimeError( + f'server {self!r} is already being awaited on serve_forever()') + if self._sockets is None: + raise RuntimeError(f'server {self!r} is closed') + + self._start_serving() + self._serving_forever_fut = self._loop.create_future() + + try: + await self._serving_forever_fut + except exceptions.CancelledError: + try: + self.close() + await self.wait_closed() + finally: + raise + finally: + self._serving_forever_fut = None + + async def wait_closed(self): + if self._sockets is None or self._waiters is None: + return + waiter = self._loop.create_future() + self._waiters.append(waiter) + await waiter + + +class BaseEventLoop(events.AbstractEventLoop): + + def __init__(self): + self._timer_cancelled_count = 0 + self._closed = False + self._stopping = False + self._ready = collections.deque() + self._scheduled = [] + self._default_executor = None + self._internal_fds = 0 + # Identifier of the thread running the event loop, or None if the + # event loop is not running + self._thread_id = None + self._clock_resolution = time.get_clock_info('monotonic').resolution + self._exception_handler = None + self.set_debug(coroutines._is_debug_mode()) + # In debug mode, if the execution of a callback or a step of a task + # exceed this duration in seconds, the slow callback/task is logged. 
+ self.slow_callback_duration = 0.1 + self._current_handle = None + self._task_factory = None + self._coroutine_origin_tracking_enabled = False + self._coroutine_origin_tracking_saved_depth = None + + # A weak set of all asynchronous generators that are + # being iterated by the loop. + self._asyncgens = weakref.WeakSet() + # Set to True when `loop.shutdown_asyncgens` is called. + self._asyncgens_shutdown_called = False + + def __repr__(self): + return ( + f'<{self.__class__.__name__} running={self.is_running()} ' + f'closed={self.is_closed()} debug={self.get_debug()}>' + ) + + def create_future(self): + """Create a Future object attached to the loop.""" + return futures.Future(loop=self) + + def create_task(self, coro, *, name=None): + """Schedule a coroutine object. + + Return a task object. + """ + self._check_closed() + if self._task_factory is None: + task = tasks.Task(coro, loop=self, name=name) + if task._source_traceback: + del task._source_traceback[-1] + else: + task = self._task_factory(self, coro) + tasks._set_task_name(task, name) + + return task + + def set_task_factory(self, factory): + """Set a task factory that will be used by loop.create_task(). + + If factory is None the default task factory will be set. + + If factory is a callable, it should have a signature matching + '(loop, coro)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object. The callable + must return a Future. + """ + if factory is not None and not callable(factory): + raise TypeError('task factory must be a callable or None') + self._task_factory = factory + + def get_task_factory(self): + """Return a task factory, or None if the default one is in use.""" + return self._task_factory + + def _make_socket_transport(self, sock, protocol, waiter=None, *, + extra=None, server=None): + """Create socket transport.""" + raise NotImplementedError + + def _make_ssl_transport( + self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None, + ssl_handshake_timeout=None, + call_connection_made=True): + """Create SSL transport.""" + raise NotImplementedError + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + """Create datagram transport.""" + raise NotImplementedError + + def _make_read_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + """Create read pipe transport.""" + raise NotImplementedError + + def _make_write_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + """Create write pipe transport.""" + raise NotImplementedError + + async def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + """Create subprocess transport.""" + raise NotImplementedError + + def _write_to_self(self): + """Write a byte to self-pipe, to wake up the event loop. + + This may be called from a different thread. + + The subclass is responsible for implementing the self-pipe. 
+ """ + raise NotImplementedError + + def _process_events(self, event_list): + """Process selector events.""" + raise NotImplementedError + + def _check_closed(self): + if self._closed: + raise RuntimeError('Event loop is closed') + + def _asyncgen_finalizer_hook(self, agen): + self._asyncgens.discard(agen) + if not self.is_closed(): + self.call_soon_threadsafe(self.create_task, agen.aclose()) + + def _asyncgen_firstiter_hook(self, agen): + if self._asyncgens_shutdown_called: + warnings.warn( + f"asynchronous generator {agen!r} was scheduled after " + f"loop.shutdown_asyncgens() call", + ResourceWarning, source=self) + + self._asyncgens.add(agen) + + async def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + self._asyncgens_shutdown_called = True + + if not len(self._asyncgens): + # If Python version is <3.6 or we don't have any asynchronous + # generators alive. + return + + closing_agens = list(self._asyncgens) + self._asyncgens.clear() + + results = await tasks.gather( + *[ag.aclose() for ag in closing_agens], + return_exceptions=True, + loop=self) + + for result, agen in zip(results, closing_agens): + if isinstance(result, Exception): + self.call_exception_handler({ + 'message': f'an error occurred during closing of ' + f'asynchronous generator {agen!r}', + 'exception': result, + 'asyncgen': agen + }) + + def _check_running(self): + if self.is_running(): + raise RuntimeError('This event loop is already running') + if events._get_running_loop() is not None: + raise RuntimeError( + 'Cannot run the event loop while another loop is running') + + def run_forever(self): + """Run until stop() is called.""" + self._check_closed() + self._check_running() + self._set_coroutine_origin_tracking(self._debug) + self._thread_id = threading.get_ident() + + old_agen_hooks = sys.get_asyncgen_hooks() + sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook, + finalizer=self._asyncgen_finalizer_hook) + try: + events._set_running_loop(self) + while True: + self._run_once() + if self._stopping: + break + finally: + self._stopping = False + self._thread_id = None + events._set_running_loop(None) + self._set_coroutine_origin_tracking(False) + sys.set_asyncgen_hooks(*old_agen_hooks) + + def run_until_complete(self, future): + """Run until the Future is done. + + If the argument is a coroutine, it is wrapped in a Task. + + WARNING: It would be disastrous to call run_until_complete() + with the same coroutine twice -- it would wrap it in two + different Tasks and that can't be good. + + Return the Future's result, or raise its exception. + """ + self._check_closed() + self._check_running() + + new_task = not futures.isfuture(future) + future = tasks.ensure_future(future, loop=self) + if new_task: + # An exception is raised if the future didn't complete, so there + # is no need to log the "destroy pending task" message + future._log_destroy_pending = False + + future.add_done_callback(_run_until_complete_cb) + try: + self.run_forever() + except: + if new_task and future.done() and not future.cancelled(): + # The coroutine raised a BaseException. Consume the exception + # to not log a warning, the caller doesn't have access to the + # local task. + future.exception() + raise + finally: + future.remove_done_callback(_run_until_complete_cb) + if not future.done(): + raise RuntimeError('Event loop stopped before Future completed.') + + return future.result() + + def stop(self): + """Stop running the event loop. + + Every callback already scheduled will still run. 
This simply informs
+        run_forever to stop looping after a complete iteration.
+        """
+        self._stopping = True
+
+    def close(self):
+        """Close the event loop.
+
+        This clears the queues and shuts down the executor,
+        but does not wait for the executor to finish.
+
+        The event loop must not be running.
+        """
+        if self.is_running():
+            raise RuntimeError("Cannot close a running event loop")
+        if self._closed:
+            return
+        if self._debug:
+            logger.debug("Close %r", self)
+        self._closed = True
+        self._ready.clear()
+        self._scheduled.clear()
+        executor = self._default_executor
+        if executor is not None:
+            self._default_executor = None
+            executor.shutdown(wait=False)
+
+    def is_closed(self):
+        """Returns True if the event loop was closed."""
+        return self._closed
+
+    def __del__(self, _warn=warnings.warn):
+        if not self.is_closed():
+            _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
+            if not self.is_running():
+                self.close()
+
+    def is_running(self):
+        """Returns True if the event loop is running."""
+        return (self._thread_id is not None)
+
+    def time(self):
+        """Return the time according to the event loop's clock.
+
+        This is a float expressed in seconds since an epoch, but the
+        epoch, precision, accuracy and drift are unspecified and may
+        differ per event loop.
+        """
+        return time.monotonic()
+
+    def call_later(self, delay, callback, *args, context=None):
+        """Arrange for a callback to be called at a given time.
+
+        Return a Handle: an opaque object with a cancel() method that
+        can be used to cancel the call.
+
+        The delay can be an int or float, expressed in seconds.  It is
+        always relative to the current time.
+
+        Each callback will be called exactly once.  If two callbacks
+        are scheduled for exactly the same time, it is undefined which
+        will be called first.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+        """
+        timer = self.call_at(self.time() + delay, callback, *args,
+                             context=context)
+        if timer._source_traceback:
+            del timer._source_traceback[-1]
+        return timer
+
+    def call_at(self, when, callback, *args, context=None):
+        """Like call_later(), but uses an absolute time.
+
+        Absolute time corresponds to the event loop's time() method.
+        """
+        self._check_closed()
+        if self._debug:
+            self._check_thread()
+            self._check_callback(callback, 'call_at')
+        timer = events.TimerHandle(when, callback, args, self, context)
+        if timer._source_traceback:
+            del timer._source_traceback[-1]
+        heapq.heappush(self._scheduled, timer)
+        timer._scheduled = True
+        return timer
+
+    def call_soon(self, callback, *args, context=None):
+        """Arrange for a callback to be called as soon as possible.
+
+        This operates as a FIFO queue: callbacks are called in the
+        order in which they are registered.  Each callback will be
+        called exactly once.
+
+        Any positional arguments after the callback will be passed to
+        the callback when it is called.
+ """ + self._check_closed() + if self._debug: + self._check_thread() + self._check_callback(callback, 'call_soon') + handle = self._call_soon(callback, args, context) + if handle._source_traceback: + del handle._source_traceback[-1] + return handle + + def _check_callback(self, callback, method): + if (coroutines.iscoroutine(callback) or + coroutines.iscoroutinefunction(callback)): + raise TypeError( + f"coroutines cannot be used with {method}()") + if not callable(callback): + raise TypeError( + f'a callable object was expected by {method}(), ' + f'got {callback!r}') + + def _call_soon(self, callback, args, context): + handle = events.Handle(callback, args, self, context) + if handle._source_traceback: + del handle._source_traceback[-1] + self._ready.append(handle) + return handle + + def _check_thread(self): + """Check that the current thread is the thread running the event loop. + + Non-thread-safe methods of this class make this assumption and will + likely behave incorrectly when the assumption is violated. + + Should only be called when (self._debug == True). The caller is + responsible for checking this condition for performance reasons. + """ + if self._thread_id is None: + return + thread_id = threading.get_ident() + if thread_id != self._thread_id: + raise RuntimeError( + "Non-thread-safe operation invoked on an event loop other " + "than the current one") + + def call_soon_threadsafe(self, callback, *args, context=None): + """Like call_soon(), but thread-safe.""" + self._check_closed() + if self._debug: + self._check_callback(callback, 'call_soon_threadsafe') + handle = self._call_soon(callback, args, context) + if handle._source_traceback: + del handle._source_traceback[-1] + self._write_to_self() + return handle + + def run_in_executor(self, executor, func, *args): + self._check_closed() + if self._debug: + self._check_callback(func, 'run_in_executor') + if executor is None: + executor = self._default_executor + if executor is None: + executor = concurrent.futures.ThreadPoolExecutor() + self._default_executor = executor + return futures.wrap_future( + executor.submit(func, *args), loop=self) + + def set_default_executor(self, executor): + if not isinstance(executor, concurrent.futures.ThreadPoolExecutor): + warnings.warn( + 'Using the default executor that is not an instance of ' + 'ThreadPoolExecutor is deprecated and will be prohibited ' + 'in Python 3.9', + DeprecationWarning, 2) + self._default_executor = executor + + def _getaddrinfo_debug(self, host, port, family, type, proto, flags): + msg = [f"{host}:{port!r}"] + if family: + msg.append(f'family={family!r}') + if type: + msg.append(f'type={type!r}') + if proto: + msg.append(f'proto={proto!r}') + if flags: + msg.append(f'flags={flags!r}') + msg = ', '.join(msg) + logger.debug('Get address info %s', msg) + + t0 = self.time() + addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags) + dt = self.time() - t0 + + msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}' + if dt >= self.slow_callback_duration: + logger.info(msg) + else: + logger.debug(msg) + return addrinfo + + async def getaddrinfo(self, host, port, *, + family=0, type=0, proto=0, flags=0): + if self._debug: + getaddr_func = self._getaddrinfo_debug + else: + getaddr_func = socket.getaddrinfo + + return await self.run_in_executor( + None, getaddr_func, host, port, family, type, proto, flags) + + async def getnameinfo(self, sockaddr, flags=0): + return await self.run_in_executor( + None, socket.getnameinfo, sockaddr, flags) + + 
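# A minimal usage sketch of the executor path implemented above; a hedged
# example, not part of the vendored file. run_in_executor() lazily creates a
# default ThreadPoolExecutor on first use and wraps the resulting
# concurrent.futures.Future in an asyncio future, so a blocking call can be
# awaited without stalling the loop. The name "blocking_io" is illustrative.
import asyncio
import time

def blocking_io(seconds):
    time.sleep(seconds)  # a call that would otherwise block the event loop
    return f'slept {seconds}s'

async def main():
    loop = asyncio.get_running_loop()
    # Passing None selects the loop's default executor (created on demand).
    result = await loop.run_in_executor(None, blocking_io, 0.1)
    print(result)

asyncio.run(main())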
async def sock_sendfile(self, sock, file, offset=0, count=None,
+                            *, fallback=True):
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        self._check_sendfile_params(sock, file, offset, count)
+        try:
+            return await self._sock_sendfile_native(sock, file,
+                                                    offset, count)
+        except exceptions.SendfileNotAvailableError as exc:
+            if not fallback:
+                raise
+        return await self._sock_sendfile_fallback(sock, file,
+                                                  offset, count)
+
+    async def _sock_sendfile_native(self, sock, file, offset, count):
+        # NB: sendfile syscall is not supported for SSL sockets and
+        # non-mmap files even if sendfile is supported by OS
+        raise exceptions.SendfileNotAvailableError(
+            f"syscall sendfile is not available for socket {sock!r} "
+            f"and file {file!r} combination")
+
+    async def _sock_sendfile_fallback(self, sock, file, offset, count):
+        if offset:
+            file.seek(offset)
+        blocksize = (
+            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
+            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
+        )
+        buf = bytearray(blocksize)
+        total_sent = 0
+        try:
+            while True:
+                if count:
+                    blocksize = min(count - total_sent, blocksize)
+                    if blocksize <= 0:
+                        break
+                view = memoryview(buf)[:blocksize]
+                read = await self.run_in_executor(None, file.readinto, view)
+                if not read:
+                    break  # EOF
+                await self.sock_sendall(sock, view[:read])
+                total_sent += read
+            return total_sent
+        finally:
+            if total_sent > 0 and hasattr(file, 'seek'):
+                file.seek(offset + total_sent)
+
+    def _check_sendfile_params(self, sock, file, offset, count):
+        if 'b' not in getattr(file, 'mode', 'b'):
+            raise ValueError("file should be opened in binary mode")
+        if not sock.type == socket.SOCK_STREAM:
+            raise ValueError("only SOCK_STREAM type sockets are supported")
+        if count is not None:
+            if not isinstance(count, int):
+                raise TypeError(
+                    "count must be a positive integer (got {!r})".format(count))
+            if count <= 0:
+                raise ValueError(
+                    "count must be a positive integer (got {!r})".format(count))
+        if not isinstance(offset, int):
+            raise TypeError(
+                "offset must be a non-negative integer (got {!r})".format(
+                    offset))
+        if offset < 0:
+            raise ValueError(
+                "offset must be a non-negative integer (got {!r})".format(
+                    offset))
+
+    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
+        """Create, bind and connect one socket."""
+        my_exceptions = []
+        exceptions.append(my_exceptions)
+        family, type_, proto, _, address = addr_info
+        sock = None
+        try:
+            sock = socket.socket(family=family, type=type_, proto=proto)
+            sock.setblocking(False)
+            if local_addr_infos is not None:
+                for _, _, _, _, laddr in local_addr_infos:
+                    try:
+                        sock.bind(laddr)
+                        break
+                    except OSError as exc:
+                        msg = (
+                            f'error while attempting to bind on '
+                            f'address {laddr!r}: '
+                            f'{exc.strerror.lower()}'
+                        )
+                        exc = OSError(exc.errno, msg)
+                        my_exceptions.append(exc)
+                else:  # all bind attempts failed
+                    raise my_exceptions.pop()
+            await self.sock_connect(sock, address)
+            return sock
+        except OSError as exc:
+            my_exceptions.append(exc)
+            if sock is not None:
+                sock.close()
+            raise
+        except:
+            if sock is not None:
+                sock.close()
+            raise
+
+    async def create_connection(
+            self, protocol_factory, host=None, port=None,
+            *, ssl=None, family=0,
+            proto=0, flags=0, sock=None,
+            local_addr=None, server_hostname=None,
+            ssl_handshake_timeout=None,
+            happy_eyeballs_delay=None, interleave=None):
+        """Connect to a TCP server.
+ + Create a streaming transport connection to a given Internet host and + port: socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_STREAM. protocol_factory must be + a callable returning a protocol instance. + + This method is a coroutine which will try to establish the connection + in the background. When successful, the coroutine returns a + (transport, protocol) pair. + """ + if server_hostname is not None and not ssl: + raise ValueError('server_hostname is only meaningful with ssl') + + if server_hostname is None and ssl: + # Use host as default for server_hostname. It is an error + # if host is empty or not set, e.g. when an + # already-connected socket was passed or when only a port + # is given. To avoid this error, you can pass + # server_hostname='' -- this will bypass the hostname + # check. (This also means that if host is a numeric + # IP/IPv6 address, we will attempt to verify that exact + # address; this will probably fail, but it is possible to + # create a certificate for a specific IP address, so we + # don't judge it here.) + if not host: + raise ValueError('You must set server_hostname ' + 'when using ssl without a host') + server_hostname = host + + if ssl_handshake_timeout is not None and not ssl: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if happy_eyeballs_delay is not None and interleave is None: + # If using happy eyeballs, default to interleave addresses by family + interleave = 1 + + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + infos = await self._ensure_resolved( + (host, port), family=family, + type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self) + if not infos: + raise OSError('getaddrinfo() returned empty list') + + if local_addr is not None: + laddr_infos = await self._ensure_resolved( + local_addr, family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) + if not laddr_infos: + raise OSError('getaddrinfo() returned empty list') + else: + laddr_infos = None + + if interleave: + infos = _interleave_addrinfos(infos, interleave) + + exceptions = [] + if happy_eyeballs_delay is None: + # not using happy eyeballs + for addrinfo in infos: + try: + sock = await self._connect_sock( + exceptions, addrinfo, laddr_infos) + break + except OSError: + continue + else: # using happy eyeballs + sock, _, _ = await staggered.staggered_race( + (functools.partial(self._connect_sock, + exceptions, addrinfo, laddr_infos) + for addrinfo in infos), + happy_eyeballs_delay, loop=self) + + if sock is None: + exceptions = [exc for sub in exceptions for exc in sub] + if len(exceptions) == 1: + raise exceptions[0] + else: + # If they all have the same str(), raise one. + model = str(exceptions[0]) + if all(str(exc) == model for exc in exceptions): + raise exceptions[0] + # Raise a combined exception so the user can see all + # the various error messages. + raise OSError('Multiple exceptions: {}'.format( + ', '.join(str(exc) for exc in exceptions))) + + else: + if sock is None: + raise ValueError( + 'host and port was not specified and no sock specified') + if sock.type != socket.SOCK_STREAM: + # We allow AF_INET, AF_INET6, AF_UNIX as long as they + # are SOCK_STREAM. + # We support passing AF_UNIX sockets even though we have + # a dedicated API for that: create_unix_connection. + # Disallowing AF_UNIX in this method, breaks backwards + # compatibility. 
+            raise ValueError(
+                f'A Stream Socket was expected, got {sock!r}')
+
+        transport, protocol = await self._create_connection_transport(
+            sock, protocol_factory, ssl, server_hostname,
+            ssl_handshake_timeout=ssl_handshake_timeout)
+        if self._debug:
+            # Get the socket from the transport because SSL transport closes
+            # the old socket and creates a new SSL socket
+            sock = transport.get_extra_info('socket')
+            logger.debug("%r connected to %s:%r: (%r, %r)",
+                         sock, host, port, transport, protocol)
+        return transport, protocol
+
+    async def _create_connection_transport(
+            self, sock, protocol_factory, ssl,
+            server_hostname, server_side=False,
+            ssl_handshake_timeout=None):
+
+        sock.setblocking(False)
+
+        protocol = protocol_factory()
+        waiter = self.create_future()
+        if ssl:
+            sslcontext = None if isinstance(ssl, bool) else ssl
+            transport = self._make_ssl_transport(
+                sock, protocol, sslcontext, waiter,
+                server_side=server_side, server_hostname=server_hostname,
+                ssl_handshake_timeout=ssl_handshake_timeout)
+        else:
+            transport = self._make_socket_transport(sock, protocol, waiter)
+
+        try:
+            await waiter
+        except:
+            transport.close()
+            raise
+
+        return transport, protocol
+
+    async def sendfile(self, transport, file, offset=0, count=None,
+                       *, fallback=True):
+        """Send a file to transport.
+
+        Return the total number of bytes which were sent.
+
+        The method uses high-performance os.sendfile if available.
+
+        file must be a regular file object opened in binary mode.
+
+        offset tells from where to start reading the file. If specified,
+        count is the total number of bytes to transmit as opposed to
+        sending the file until EOF is reached. The file position is updated
+        on return, including in case of error, in which case file.tell()
+        can be used to figure out the number of bytes
+        which were sent.
+
+        fallback set to True makes asyncio manually read and send
+        the file when the platform does not support the sendfile syscall
+        (e.g. Windows or SSL socket on Unix).
+
+        Raise SendfileNotAvailableError if the system does not support
+        sendfile syscall and fallback is False.
+ """ + if transport.is_closing(): + raise RuntimeError("Transport is closing") + mode = getattr(transport, '_sendfile_compatible', + constants._SendfileMode.UNSUPPORTED) + if mode is constants._SendfileMode.UNSUPPORTED: + raise RuntimeError( + f"sendfile is not supported for transport {transport!r}") + if mode is constants._SendfileMode.TRY_NATIVE: + try: + return await self._sendfile_native(transport, file, + offset, count) + except exceptions.SendfileNotAvailableError as exc: + if not fallback: + raise + + if not fallback: + raise RuntimeError( + f"fallback is disabled and native sendfile is not " + f"supported for transport {transport!r}") + + return await self._sendfile_fallback(transport, file, + offset, count) + + async def _sendfile_native(self, transp, file, offset, count): + raise exceptions.SendfileNotAvailableError( + "sendfile syscall is not supported") + + async def _sendfile_fallback(self, transp, file, offset, count): + if offset: + file.seek(offset) + blocksize = min(count, 16384) if count else 16384 + buf = bytearray(blocksize) + total_sent = 0 + proto = _SendfileFallbackProtocol(transp) + try: + while True: + if count: + blocksize = min(count - total_sent, blocksize) + if blocksize <= 0: + return total_sent + view = memoryview(buf)[:blocksize] + read = await self.run_in_executor(None, file.readinto, view) + if not read: + return total_sent # EOF + await proto.drain() + transp.write(view[:read]) + total_sent += read + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset + total_sent) + await proto.restore() + + async def start_tls(self, transport, protocol, sslcontext, *, + server_side=False, + server_hostname=None, + ssl_handshake_timeout=None): + """Upgrade transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + """ + if ssl is None: + raise RuntimeError('Python ssl module is not available') + + if not isinstance(sslcontext, ssl.SSLContext): + raise TypeError( + f'sslcontext is expected to be an instance of ssl.SSLContext, ' + f'got {sslcontext!r}') + + if not getattr(transport, '_start_tls_compatible', False): + raise TypeError( + f'transport {transport!r} is not supported by start_tls()') + + waiter = self.create_future() + ssl_protocol = sslproto.SSLProtocol( + self, protocol, sslcontext, waiter, + server_side, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout, + call_connection_made=False) + + # Pause early so that "ssl_protocol.data_received()" doesn't + # have a chance to get called before "ssl_protocol.connection_made()". 
+ transport.pause_reading() + + transport.set_protocol(ssl_protocol) + conmade_cb = self.call_soon(ssl_protocol.connection_made, transport) + resume_cb = self.call_soon(transport.resume_reading) + + try: + await waiter + except BaseException: + transport.close() + conmade_cb.cancel() + resume_cb.cancel() + raise + + return ssl_protocol._app_transport + + async def create_datagram_endpoint(self, protocol_factory, + local_addr=None, remote_addr=None, *, + family=0, proto=0, flags=0, + reuse_address=_unset, reuse_port=None, + allow_broadcast=None, sock=None): + """Create datagram connection.""" + if sock is not None: + if sock.type != socket.SOCK_DGRAM: + raise ValueError( + f'A UDP Socket was expected, got {sock!r}') + if (local_addr or remote_addr or + family or proto or flags or + reuse_port or allow_broadcast): + # show the problematic kwargs in exception msg + opts = dict(local_addr=local_addr, remote_addr=remote_addr, + family=family, proto=proto, flags=flags, + reuse_address=reuse_address, reuse_port=reuse_port, + allow_broadcast=allow_broadcast) + problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v) + raise ValueError( + f'socket modifier keyword arguments can not be used ' + f'when sock is specified. ({problems})') + sock.setblocking(False) + r_addr = None + else: + if not (local_addr or remote_addr): + if family == 0: + raise ValueError('unexpected address family') + addr_pairs_info = (((family, proto), (None, None)),) + elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX: + for addr in (local_addr, remote_addr): + if addr is not None and not isinstance(addr, str): + raise TypeError('string is expected') + + if local_addr and local_addr[0] not in (0, '\x00'): + try: + if stat.S_ISSOCK(os.stat(local_addr).st_mode): + os.remove(local_addr) + except FileNotFoundError: + pass + except OSError as err: + # Directory may have permissions only to create socket. 
+ logger.error('Unable to check or remove stale UNIX ' + 'socket %r: %r', + local_addr, err) + + addr_pairs_info = (((family, proto), + (local_addr, remote_addr)), ) + else: + # join address by (family, protocol) + addr_infos = {} # Using order preserving dict + for idx, addr in ((0, local_addr), (1, remote_addr)): + if addr is not None: + assert isinstance(addr, tuple) and len(addr) == 2, ( + '2-tuple is expected') + + infos = await self._ensure_resolved( + addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags, loop=self) + if not infos: + raise OSError('getaddrinfo() returned empty list') + + for fam, _, pro, _, address in infos: + key = (fam, pro) + if key not in addr_infos: + addr_infos[key] = [None, None] + addr_infos[key][idx] = address + + # each addr has to have info for each (family, proto) pair + addr_pairs_info = [ + (key, addr_pair) for key, addr_pair in addr_infos.items() + if not ((local_addr and addr_pair[0] is None) or + (remote_addr and addr_pair[1] is None))] + + if not addr_pairs_info: + raise ValueError('can not get address information') + + exceptions = [] + + # bpo-37228 + if reuse_address is not _unset: + if reuse_address: + raise ValueError("Passing `reuse_address=True` is no " + "longer supported, as the usage of " + "SO_REUSEPORT in UDP poses a significant " + "security concern.") + else: + warnings.warn("The *reuse_address* parameter has been " + "deprecated as of 3.5.10 and is scheduled " + "for removal in 3.11.", DeprecationWarning, + stacklevel=2) + + for ((family, proto), + (local_address, remote_address)) in addr_pairs_info: + sock = None + r_addr = None + try: + sock = socket.socket( + family=family, type=socket.SOCK_DGRAM, proto=proto) + if reuse_port: + _set_reuseport(sock) + if allow_broadcast: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + sock.setblocking(False) + + if local_addr: + sock.bind(local_address) + if remote_addr: + if not allow_broadcast: + await self.sock_connect(sock, remote_address) + r_addr = remote_address + except OSError as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break + else: + raise exceptions[0] + + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_datagram_transport( + sock, protocol, r_addr, waiter) + if self._debug: + if local_addr: + logger.info("Datagram endpoint local_addr=%r remote_addr=%r " + "created: (%r, %r)", + local_addr, remote_addr, transport, protocol) + else: + logger.debug("Datagram endpoint remote_addr=%r created: " + "(%r, %r)", + remote_addr, transport, protocol) + + try: + await waiter + except: + transport.close() + raise + + return transport, protocol + + async def _ensure_resolved(self, address, *, + family=0, type=socket.SOCK_STREAM, + proto=0, flags=0, loop): + host, port = address[:2] + info = _ipaddr_info(host, port, family, type, proto, *address[2:]) + if info is not None: + # "host" is already a resolved IP. 
+ return [info] + else: + return await loop.getaddrinfo(host, port, family=family, type=type, + proto=proto, flags=flags) + + async def _create_server_getaddrinfo(self, host, port, family, flags): + infos = await self._ensure_resolved((host, port), family=family, + type=socket.SOCK_STREAM, + flags=flags, loop=self) + if not infos: + raise OSError(f'getaddrinfo({host!r}) returned empty list') + return infos + + async def create_server( + self, protocol_factory, host=None, port=None, + *, + family=socket.AF_UNSPEC, + flags=socket.AI_PASSIVE, + sock=None, + backlog=100, + ssl=None, + reuse_address=None, + reuse_port=None, + ssl_handshake_timeout=None, + start_serving=True): + """Create a TCP server. + + The host parameter can be a string, in that case the TCP server is + bound to host and port. + + The host parameter can also be a sequence of strings and in that case + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. + + Return a Server object which can be used to stop the service. + + This method is a coroutine. + """ + if isinstance(ssl, bool): + raise TypeError('ssl argument must be an SSLContext or None') + + if ssl_handshake_timeout is not None and ssl is None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + if reuse_address is None: + reuse_address = os.name == 'posix' and sys.platform != 'cygwin' + sockets = [] + if host == '': + hosts = [None] + elif (isinstance(host, str) or + not isinstance(host, collections.abc.Iterable)): + hosts = [host] + else: + hosts = host + + fs = [self._create_server_getaddrinfo(host, port, family=family, + flags=flags) + for host in hosts] + infos = await tasks.gather(*fs, loop=self) + infos = set(itertools.chain.from_iterable(infos)) + + completed = False + try: + for res in infos: + af, socktype, proto, canonname, sa = res + try: + sock = socket.socket(af, socktype, proto) + except socket.error: + # Assume it's a bad family/type/protocol combination. + if self._debug: + logger.warning('create_server() failed to create ' + 'socket.socket(%r, %r, %r)', + af, socktype, proto, exc_info=True) + continue + sockets.append(sock) + if reuse_address: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + if reuse_port: + _set_reuseport(sock) + # Disable IPv4/IPv6 dual stack support (enabled by + # default on Linux) which makes a single socket + # listen on both address families. 
+ if (_HAS_IPv6 and + af == socket.AF_INET6 and + hasattr(socket, 'IPPROTO_IPV6')): + sock.setsockopt(socket.IPPROTO_IPV6, + socket.IPV6_V6ONLY, + True) + try: + sock.bind(sa) + except OSError as err: + raise OSError(err.errno, 'error while attempting ' + 'to bind on address %r: %s' + % (sa, err.strerror.lower())) from None + completed = True + finally: + if not completed: + for sock in sockets: + sock.close() + else: + if sock is None: + raise ValueError('Neither host/port nor sock were specified') + if sock.type != socket.SOCK_STREAM: + raise ValueError(f'A Stream Socket was expected, got {sock!r}') + sockets = [sock] + + for sock in sockets: + sock.setblocking(False) + + server = Server(self, sockets, protocol_factory, + ssl, backlog, ssl_handshake_timeout) + if start_serving: + server._start_serving() + # Skip one loop iteration so that all 'loop.add_reader' + # go through. + await tasks.sleep(0, loop=self) + + if self._debug: + logger.info("%r is serving", server) + return server + + async def connect_accepted_socket( + self, protocol_factory, sock, + *, ssl=None, + ssl_handshake_timeout=None): + """Handle an accepted connection. + + This is used by servers that accept connections outside of + asyncio but that use asyncio to handle connections. + + This method is a coroutine. When completed, the coroutine + returns a (transport, protocol) pair. + """ + if sock.type != socket.SOCK_STREAM: + raise ValueError(f'A Stream Socket was expected, got {sock!r}') + + if ssl_handshake_timeout is not None and not ssl: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + transport, protocol = await self._create_connection_transport( + sock, protocol_factory, ssl, '', server_side=True, + ssl_handshake_timeout=ssl_handshake_timeout) + if self._debug: + # Get the socket from the transport because SSL transport closes + # the old socket and creates a new SSL socket + sock = transport.get_extra_info('socket') + logger.debug("%r handled: (%r, %r)", sock, transport, protocol) + return transport, protocol + + async def connect_read_pipe(self, protocol_factory, pipe): + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_read_pipe_transport(pipe, protocol, waiter) + + try: + await waiter + except: + transport.close() + raise + + if self._debug: + logger.debug('Read pipe %r connected: (%r, %r)', + pipe.fileno(), transport, protocol) + return transport, protocol + + async def connect_write_pipe(self, protocol_factory, pipe): + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_write_pipe_transport(pipe, protocol, waiter) + + try: + await waiter + except: + transport.close() + raise + + if self._debug: + logger.debug('Write pipe %r connected: (%r, %r)', + pipe.fileno(), transport, protocol) + return transport, protocol + + def _log_subprocess(self, msg, stdin, stdout, stderr): + info = [msg] + if stdin is not None: + info.append(f'stdin={_format_pipe(stdin)}') + if stdout is not None and stderr == subprocess.STDOUT: + info.append(f'stdout=stderr={_format_pipe(stdout)}') + else: + if stdout is not None: + info.append(f'stdout={_format_pipe(stdout)}') + if stderr is not None: + info.append(f'stderr={_format_pipe(stderr)}') + logger.debug(' '.join(info)) + + async def subprocess_shell(self, protocol_factory, cmd, *, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=False, + shell=True, bufsize=0, + encoding=None, errors=None, text=None, + **kwargs): + if not 
isinstance(cmd, (bytes, str)): + raise ValueError("cmd must be a string") + if universal_newlines: + raise ValueError("universal_newlines must be False") + if not shell: + raise ValueError("shell must be True") + if bufsize != 0: + raise ValueError("bufsize must be 0") + if text: + raise ValueError("text must be False") + if encoding is not None: + raise ValueError("encoding must be None") + if errors is not None: + raise ValueError("errors must be None") + + protocol = protocol_factory() + debug_log = None + if self._debug: + # don't log parameters: they may contain sensitive information + # (password) and may be too long + debug_log = 'run shell command %r' % cmd + self._log_subprocess(debug_log, stdin, stdout, stderr) + transport = await self._make_subprocess_transport( + protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs) + if self._debug and debug_log is not None: + logger.info('%s: %r', debug_log, transport) + return transport, protocol + + async def subprocess_exec(self, protocol_factory, program, *args, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=False, + shell=False, bufsize=0, + encoding=None, errors=None, text=None, + **kwargs): + if universal_newlines: + raise ValueError("universal_newlines must be False") + if shell: + raise ValueError("shell must be False") + if bufsize != 0: + raise ValueError("bufsize must be 0") + if text: + raise ValueError("text must be False") + if encoding is not None: + raise ValueError("encoding must be None") + if errors is not None: + raise ValueError("errors must be None") + + popen_args = (program,) + args + protocol = protocol_factory() + debug_log = None + if self._debug: + # don't log parameters: they may contain sensitive information + # (password) and may be too long + debug_log = f'execute program {program!r}' + self._log_subprocess(debug_log, stdin, stdout, stderr) + transport = await self._make_subprocess_transport( + protocol, popen_args, False, stdin, stdout, stderr, + bufsize, **kwargs) + if self._debug and debug_log is not None: + logger.info('%s: %r', debug_log, transport) + return transport, protocol + + def get_exception_handler(self): + """Return an exception handler, or None if the default one is in use. + """ + return self._exception_handler + + def set_exception_handler(self, handler): + """Set handler as the new event loop exception handler. + + If handler is None, the default exception handler will + be set. + + If handler is a callable object, it should have a + signature matching '(loop, context)', where 'loop' + will be a reference to the active event loop, 'context' + will be a dict object (see `call_exception_handler()` + documentation for details about context). + """ + if handler is not None and not callable(handler): + raise TypeError(f'A callable object or None is expected, ' + f'got {handler!r}') + self._exception_handler = handler + + def default_exception_handler(self, context): + """Default exception handler. + + This is called when an exception occurs and no exception + handler is set, and can be called by a custom exception + handler that wants to defer to the default behavior. + + This default handler logs the error message and other + context-dependent information. In debug mode, a truncated + stack trace is also appended showing where the given object + (e.g. a handle or future or task) was created, if any. + + The context parameter has the same meaning as in + `call_exception_handler()`. 
+        """
+        message = context.get('message')
+        if not message:
+            message = 'Unhandled exception in event loop'
+
+        exception = context.get('exception')
+        if exception is not None:
+            exc_info = (type(exception), exception, exception.__traceback__)
+        else:
+            exc_info = False
+
+        if ('source_traceback' not in context and
+                self._current_handle is not None and
+                self._current_handle._source_traceback):
+            context['handle_traceback'] = \
+                self._current_handle._source_traceback
+
+        log_lines = [message]
+        for key in sorted(context):
+            if key in {'message', 'exception'}:
+                continue
+            value = context[key]
+            if key == 'source_traceback':
+                tb = ''.join(traceback.format_list(value))
+                value = 'Object created at (most recent call last):\n'
+                value += tb.rstrip()
+            elif key == 'handle_traceback':
+                tb = ''.join(traceback.format_list(value))
+                value = 'Handle created at (most recent call last):\n'
+                value += tb.rstrip()
+            else:
+                value = repr(value)
+            log_lines.append(f'{key}: {value}')
+
+        logger.error('\n'.join(log_lines), exc_info=exc_info)
+
+    def call_exception_handler(self, context):
+        """Call the current event loop's exception handler.
+
+        The context argument is a dict containing the following keys:
+
+        - 'message': Error message;
+        - 'exception' (optional): Exception object;
+        - 'future' (optional): Future instance;
+        - 'task' (optional): Task instance;
+        - 'handle' (optional): Handle instance;
+        - 'protocol' (optional): Protocol instance;
+        - 'transport' (optional): Transport instance;
+        - 'socket' (optional): Socket instance;
+        - 'asyncgen' (optional): Asynchronous generator that caused
+              the exception.
+
+        New keys may be introduced in the future.
+
+        Note: do not overload this method in an event loop subclass.
+        For custom exception handling, use the
+        `set_exception_handler()` method.
+        """
+        if self._exception_handler is None:
+            try:
+                self.default_exception_handler(context)
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException:
+                # Second protection layer for unexpected errors
+                # in the default implementation, as well as for subclassed
+                # event loops with overloaded "default_exception_handler".
+                logger.error('Exception in default exception handler',
+                             exc_info=True)
+        else:
+            try:
+                self._exception_handler(self, context)
+            except (SystemExit, KeyboardInterrupt):
+                raise
+            except BaseException as exc:
+                # Exception in the user set custom exception handler.
+                try:
+                    # Let's try default handler.
+                    self.default_exception_handler({
+                        'message': 'Unhandled error in exception handler',
+                        'exception': exc,
+                        'context': context,
+                    })
+                except (SystemExit, KeyboardInterrupt):
+                    raise
+                except BaseException:
+                    # Guard 'default_exception_handler' in case it is
+                    # overloaded.
+ logger.error('Exception in default exception handler ' + 'while handling an unexpected error ' + 'in custom exception handler', + exc_info=True) + + def _add_callback(self, handle): + """Add a Handle to _scheduled (TimerHandle) or _ready.""" + assert isinstance(handle, events.Handle), 'A Handle is required here' + if handle._cancelled: + return + assert not isinstance(handle, events.TimerHandle) + self._ready.append(handle) + + def _add_callback_signalsafe(self, handle): + """Like _add_callback() but called from a signal handler.""" + self._add_callback(handle) + self._write_to_self() + + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + if handle._scheduled: + self._timer_cancelled_count += 1 + + def _run_once(self): + """Run one full iteration of the event loop. + + This calls all currently ready callbacks, polls for I/O, + schedules the resulting callbacks, and finally schedules + 'call_later' callbacks. + """ + + sched_count = len(self._scheduled) + if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and + self._timer_cancelled_count / sched_count > + _MIN_CANCELLED_TIMER_HANDLES_FRACTION): + # Remove delayed calls that were cancelled if their number + # is too high + new_scheduled = [] + for handle in self._scheduled: + if handle._cancelled: + handle._scheduled = False + else: + new_scheduled.append(handle) + + heapq.heapify(new_scheduled) + self._scheduled = new_scheduled + self._timer_cancelled_count = 0 + else: + # Remove delayed calls that were cancelled from head of queue. + while self._scheduled and self._scheduled[0]._cancelled: + self._timer_cancelled_count -= 1 + handle = heapq.heappop(self._scheduled) + handle._scheduled = False + + timeout = None + if self._ready or self._stopping: + timeout = 0 + elif self._scheduled: + # Compute the desired timeout. + when = self._scheduled[0]._when + timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT) + + event_list = self._selector.select(timeout) + self._process_events(event_list) + + # Handle 'later' callbacks that are ready. + end_time = self.time() + self._clock_resolution + while self._scheduled: + handle = self._scheduled[0] + if handle._when >= end_time: + break + handle = heapq.heappop(self._scheduled) + handle._scheduled = False + self._ready.append(handle) + + # This is the only place where callbacks are actually *called*. + # All other places just add them to ready. + # Note: We run all currently scheduled callbacks, but not any + # callbacks scheduled by callbacks run this time around -- + # they will be run the next time (after another I/O poll). + # Use an idiom that is thread-safe without using locks. + ntodo = len(self._ready) + for i in range(ntodo): + handle = self._ready.popleft() + if handle._cancelled: + continue + if self._debug: + try: + self._current_handle = handle + t0 = self.time() + handle._run() + dt = self.time() - t0 + if dt >= self.slow_callback_duration: + logger.warning('Executing %s took %.3f seconds', + _format_handle(handle), dt) + finally: + self._current_handle = None + else: + handle._run() + handle = None # Needed to break cycles when an exception occurs. 
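The exception-handler plumbing above is easiest to see end to end with a small sketch (the handler and callback names here are illustrative, not part of the patch): a callback that raises is caught by Handle._run(), which builds the context dict and hands it to call_exception_handler().

import asyncio

def my_handler(loop, context):
    # 'message' is always present; 'exception', 'handle' and
    # 'source_traceback' are optional, as documented above.
    print('handled:', context['message'], repr(context.get('exception')))

def bad_callback():
    raise RuntimeError('oops')

async def main():
    loop = asyncio.get_running_loop()
    loop.set_exception_handler(my_handler)
    loop.call_soon(bad_callback)   # error is routed to my_handler, not raised
    await asyncio.sleep(0)         # give the callback a chance to run

asyncio.run(main())

Passing None to set_exception_handler() restores default_exception_handler().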
+ + def _set_coroutine_origin_tracking(self, enabled): + if bool(enabled) == bool(self._coroutine_origin_tracking_enabled): + return + + if enabled: + self._coroutine_origin_tracking_saved_depth = ( + sys.get_coroutine_origin_tracking_depth()) + sys.set_coroutine_origin_tracking_depth( + constants.DEBUG_STACK_DEPTH) + else: + sys.set_coroutine_origin_tracking_depth( + self._coroutine_origin_tracking_saved_depth) + + self._coroutine_origin_tracking_enabled = enabled + + def get_debug(self): + return self._debug + + def set_debug(self, enabled): + self._debug = enabled + + if self.is_running(): + self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_futures.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_futures.py new file mode 100644 index 0000000000000000000000000000000000000000..2c01ac98e10fcaa4ac0b88a2f196dc6450769bf7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_futures.py @@ -0,0 +1,80 @@ +__all__ = () + +import reprlib +from _thread import get_ident + +from . import format_helpers + +# States for Future. +_PENDING = 'PENDING' +_CANCELLED = 'CANCELLED' +_FINISHED = 'FINISHED' + + +def isfuture(obj): + """Check for a Future. + + This returns True when obj is a Future instance or is advertising + itself as duck-type compatible by setting _asyncio_future_blocking. + See comment in Future for more details. + """ + return (hasattr(obj.__class__, '_asyncio_future_blocking') and + obj._asyncio_future_blocking is not None) + + +def _format_callbacks(cb): + """helper function for Future.__repr__""" + size = len(cb) + if not size: + cb = '' + + def format_cb(callback): + return format_helpers._format_callback_source(callback, ()) + + if size == 1: + cb = format_cb(cb[0][0]) + elif size == 2: + cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0])) + elif size > 2: + cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]), + size - 2, + format_cb(cb[-1][0])) + return f'cb=[{cb}]' + + +# bpo-42183: _repr_running is needed for repr protection +# when a Future or Task result contains itself directly or indirectly. +# The logic is borrowed from @reprlib.recursive_repr decorator. +# Unfortunately, the direct decorator usage is impossible because of +# AttributeError: '_asyncio.Task' object has no attribute '__module__' error. +# +# After fixing this thing we can return to the decorator based approach. +_repr_running = set() + + +def _future_repr_info(future): + # (Future) -> str + """helper function for Future.__repr__""" + info = [future._state.lower()] + if future._state == _FINISHED: + if future._exception is not None: + info.append(f'exception={future._exception!r}') + else: + key = id(future), get_ident() + if key in _repr_running: + result = '...' 
+ else: + _repr_running.add(key) + try: + # use reprlib to limit the length of the output, especially + # for very long strings + result = reprlib.repr(future._result) + finally: + _repr_running.discard(key) + info.append(f'result={result}') + if future._callbacks: + info.append(_format_callbacks(future._callbacks)) + if future._source_traceback: + frame = future._source_traceback[-1] + info.append(f'created at {frame[0]}:{frame[1]}') + return info diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_subprocess.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_subprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..14d505192288142616cb28e865dd53850df2d7d5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_subprocess.py @@ -0,0 +1,285 @@ +import collections +import subprocess +import warnings + +from . import protocols +from . import transports +from .log import logger + + +class BaseSubprocessTransport(transports.SubprocessTransport): + + def __init__(self, loop, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=None, extra=None, **kwargs): + super().__init__(extra) + self._closed = False + self._protocol = protocol + self._loop = loop + self._proc = None + self._pid = None + self._returncode = None + self._exit_waiters = [] + self._pending_calls = collections.deque() + self._pipes = {} + self._finished = False + + if stdin == subprocess.PIPE: + self._pipes[0] = None + if stdout == subprocess.PIPE: + self._pipes[1] = None + if stderr == subprocess.PIPE: + self._pipes[2] = None + + # Create the child process: set the _proc attribute + try: + self._start(args=args, shell=shell, stdin=stdin, stdout=stdout, + stderr=stderr, bufsize=bufsize, **kwargs) + except: + self.close() + raise + + self._pid = self._proc.pid + self._extra['subprocess'] = self._proc + + if self._loop.get_debug(): + if isinstance(args, (bytes, str)): + program = args + else: + program = args[0] + logger.debug('process %r created: pid %s', + program, self._pid) + + self._loop.create_task(self._connect_pipes(waiter)) + + def __repr__(self): + info = [self.__class__.__name__] + if self._closed: + info.append('closed') + if self._pid is not None: + info.append(f'pid={self._pid}') + if self._returncode is not None: + info.append(f'returncode={self._returncode}') + elif self._pid is not None: + info.append('running') + else: + info.append('not started') + + stdin = self._pipes.get(0) + if stdin is not None: + info.append(f'stdin={stdin.pipe}') + + stdout = self._pipes.get(1) + stderr = self._pipes.get(2) + if stdout is not None and stderr is stdout: + info.append(f'stdout=stderr={stdout.pipe}') + else: + if stdout is not None: + info.append(f'stdout={stdout.pipe}') + if stderr is not None: + info.append(f'stderr={stderr.pipe}') + + return '<{}>'.format(' '.join(info)) + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + raise NotImplementedError + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closed + + def close(self): + if self._closed: + return + self._closed = True + + for proto in self._pipes.values(): + if proto is None: + continue + proto.pipe.close() + + if (self._proc is not None and + # has the child process finished? + self._returncode is None and + # the child process has finished, but the + # transport hasn't been notified yet? 
+ self._proc.poll() is None): + + if self._loop.get_debug(): + logger.warning('Close running child process: kill %r', self) + + try: + self._proc.kill() + except ProcessLookupError: + pass + + # Don't clear the _proc reference yet: _post_init() may still run + + def __del__(self, _warn=warnings.warn): + if not self._closed: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self.close() + + def get_pid(self): + return self._pid + + def get_returncode(self): + return self._returncode + + def get_pipe_transport(self, fd): + if fd in self._pipes: + return self._pipes[fd].pipe + else: + return None + + def _check_proc(self): + if self._proc is None: + raise ProcessLookupError() + + def send_signal(self, signal): + self._check_proc() + self._proc.send_signal(signal) + + def terminate(self): + self._check_proc() + self._proc.terminate() + + def kill(self): + self._check_proc() + self._proc.kill() + + async def _connect_pipes(self, waiter): + try: + proc = self._proc + loop = self._loop + + if proc.stdin is not None: + _, pipe = await loop.connect_write_pipe( + lambda: WriteSubprocessPipeProto(self, 0), + proc.stdin) + self._pipes[0] = pipe + + if proc.stdout is not None: + _, pipe = await loop.connect_read_pipe( + lambda: ReadSubprocessPipeProto(self, 1), + proc.stdout) + self._pipes[1] = pipe + + if proc.stderr is not None: + _, pipe = await loop.connect_read_pipe( + lambda: ReadSubprocessPipeProto(self, 2), + proc.stderr) + self._pipes[2] = pipe + + assert self._pending_calls is not None + + loop.call_soon(self._protocol.connection_made, self) + for callback, data in self._pending_calls: + loop.call_soon(callback, *data) + self._pending_calls = None + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if waiter is not None and not waiter.cancelled(): + waiter.set_exception(exc) + else: + if waiter is not None and not waiter.cancelled(): + waiter.set_result(None) + + def _call(self, cb, *data): + if self._pending_calls is not None: + self._pending_calls.append((cb, data)) + else: + self._loop.call_soon(cb, *data) + + def _pipe_connection_lost(self, fd, exc): + self._call(self._protocol.pipe_connection_lost, fd, exc) + self._try_finish() + + def _pipe_data_received(self, fd, data): + self._call(self._protocol.pipe_data_received, fd, data) + + def _process_exited(self, returncode): + assert returncode is not None, returncode + assert self._returncode is None, self._returncode + if self._loop.get_debug(): + logger.info('%r exited with return code %r', self, returncode) + self._returncode = returncode + if self._proc.returncode is None: + # asyncio uses a child watcher: copy the status into the Popen + # object. On Python 3.6, it is required to avoid a ResourceWarning. + self._proc.returncode = returncode + self._call(self._protocol.process_exited) + self._try_finish() + + # wake up futures waiting for wait() + for waiter in self._exit_waiters: + if not waiter.cancelled(): + waiter.set_result(returncode) + self._exit_waiters = None + + async def _wait(self): + """Wait until the process exit and return the process return code. 
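As an aside, the terminate()/kill()/_wait() machinery above is what the high-level subprocess API drives. A minimal sketch (POSIX-only, since it assumes a 'sleep' binary on PATH):

import asyncio

async def main():
    proc = await asyncio.create_subprocess_exec(
        'sleep', '10', stdout=asyncio.subprocess.DEVNULL)
    proc.terminate()                # BaseSubprocessTransport.terminate()
    returncode = await proc.wait()  # resolved by the exit waiters in _wait()
    print('exited with', returncode)

asyncio.run(main())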
+ + This method is a coroutine.""" + if self._returncode is not None: + return self._returncode + + waiter = self._loop.create_future() + self._exit_waiters.append(waiter) + return await waiter + + def _try_finish(self): + assert not self._finished + if self._returncode is None: + return + if all(p is not None and p.disconnected + for p in self._pipes.values()): + self._finished = True + self._call(self._call_connection_lost, None) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._loop = None + self._proc = None + self._protocol = None + + +class WriteSubprocessPipeProto(protocols.BaseProtocol): + + def __init__(self, proc, fd): + self.proc = proc + self.fd = fd + self.pipe = None + self.disconnected = False + + def connection_made(self, transport): + self.pipe = transport + + def __repr__(self): + return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>' + + def connection_lost(self, exc): + self.disconnected = True + self.proc._pipe_connection_lost(self.fd, exc) + self.proc = None + + def pause_writing(self): + self.proc._protocol.pause_writing() + + def resume_writing(self): + self.proc._protocol.resume_writing() + + +class ReadSubprocessPipeProto(WriteSubprocessPipeProto, + protocols.Protocol): + + def data_received(self, data): + self.proc._pipe_data_received(self.fd, data) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_tasks.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..09bb171a2ce750a06a39e978bea0d3a7accf25f4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/base_tasks.py @@ -0,0 +1,85 @@ +import linecache +import traceback + +from . import base_futures +from . 
import coroutines + + +def _task_repr_info(task): + info = base_futures._future_repr_info(task) + + if task._must_cancel: + # replace status + info[0] = 'cancelling' + + info.insert(1, 'name=%r' % task.get_name()) + + coro = coroutines._format_coroutine(task._coro) + info.insert(2, f'coro=<{coro}>') + + if task._fut_waiter is not None: + info.insert(3, f'wait_for={task._fut_waiter!r}') + return info + + +def _task_get_stack(task, limit): + frames = [] + if hasattr(task._coro, 'cr_frame'): + # case 1: 'async def' coroutines + f = task._coro.cr_frame + elif hasattr(task._coro, 'gi_frame'): + # case 2: legacy coroutines + f = task._coro.gi_frame + elif hasattr(task._coro, 'ag_frame'): + # case 3: async generators + f = task._coro.ag_frame + else: + # case 4: unknown objects + f = None + if f is not None: + while f is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(f) + f = f.f_back + frames.reverse() + elif task._exception is not None: + tb = task._exception.__traceback__ + while tb is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(tb.tb_frame) + tb = tb.tb_next + return frames + + +def _task_print_stack(task, limit, file): + extracted_list = [] + checked = set() + for f in task.get_stack(limit=limit): + lineno = f.f_lineno + co = f.f_code + filename = co.co_filename + name = co.co_name + if filename not in checked: + checked.add(filename) + linecache.checkcache(filename) + line = linecache.getline(filename, lineno, f.f_globals) + extracted_list.append((filename, lineno, name, line)) + + exc = task._exception + if not extracted_list: + print(f'No stack for {task!r}', file=file) + elif exc is not None: + print(f'Traceback for {task!r} (most recent call last):', file=file) + else: + print(f'Stack for {task!r} (most recent call last):', file=file) + + traceback.print_list(extracted_list, file=file) + if exc is not None: + for line in traceback.format_exception_only(exc.__class__, exc): + print(line, file=file, end='') diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/constants.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..33feed60e55b008104ec01f0ce6cbb9d573a8eb3 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/constants.py @@ -0,0 +1,27 @@ +import enum + +# After the connection is lost, log warnings after this many write()s. +LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5 + +# Seconds to wait before retrying accept(). +ACCEPT_RETRY_DELAY = 1 + +# Number of stack entries to capture in debug mode. +# The larger the number, the slower the operation in debug mode +# (see extract_stack() in format_helpers.py). +DEBUG_STACK_DEPTH = 10 + +# Number of seconds to wait for SSL handshake to complete +# The default timeout matches that of Nginx. +SSL_HANDSHAKE_TIMEOUT = 60.0 + +# Used in sendfile fallback code. We use fallback for platforms +# that don't support sendfile, or for TLS connections. 
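Looking back at base_tasks.py above: _task_get_stack() and _task_print_stack() back the public Task.get_stack() and Task.print_stack() methods. A small sketch of what they render:

import asyncio

async def inner():
    await asyncio.sleep(10)

async def main():
    task = asyncio.create_task(inner())
    await asyncio.sleep(0)   # let inner() start and suspend
    task.print_stack()       # rendered by _task_print_stack() above
    task.cancel()

asyncio.run(main())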
+SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256 + +# The enum should be here to break circular dependencies between +# base_events and sslproto +class _SendfileMode(enum.Enum): + UNSUPPORTED = enum.auto() + TRY_NATIVE = enum.auto() + FALLBACK = enum.auto() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/coroutines.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/coroutines.py new file mode 100644 index 0000000000000000000000000000000000000000..9664ea74d7514775fac6622d25d1d9dc175b92a9 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/coroutines.py @@ -0,0 +1,269 @@ +__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine' + +import collections.abc +import functools +import inspect +import os +import sys +import traceback +import types +import warnings + +from . import base_futures +from . import constants +from . import format_helpers +from .log import logger + + +def _is_debug_mode(): + # If you set _DEBUG to true, @coroutine will wrap the resulting + # generator objects in a CoroWrapper instance (defined below). That + # instance will log a message when the generator is never iterated + # over, which may happen when you forget to use "await" or "yield from" + # with a coroutine call. + # Note that the value of the _DEBUG flag is taken + # when the decorator is used, so to be of any use it must be set + # before you define your coroutines. A downside of using this feature + # is that tracebacks show entries for the CoroWrapper.__next__ method + # when _DEBUG is true. + return sys.flags.dev_mode or (not sys.flags.ignore_environment and + bool(os.environ.get('PYTHONASYNCIODEBUG'))) + + +_DEBUG = _is_debug_mode() + + +class CoroWrapper: + # Wrapper for coroutine object in _DEBUG mode. + + def __init__(self, gen, func=None): + assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen + self.gen = gen + self.func = func # Used to unwrap @coroutine decorator + self._source_traceback = format_helpers.extract_stack(sys._getframe(1)) + self.__name__ = getattr(gen, '__name__', None) + self.__qualname__ = getattr(gen, '__qualname__', None) + + def __repr__(self): + coro_repr = _format_coroutine(self) + if self._source_traceback: + frame = self._source_traceback[-1] + coro_repr += f', created at {frame[0]}:{frame[1]}' + + return f'<{self.__class__.__name__} {coro_repr}>' + + def __iter__(self): + return self + + def __next__(self): + return self.gen.send(None) + + def send(self, value): + return self.gen.send(value) + + def throw(self, type, value=None, traceback=None): + return self.gen.throw(type, value, traceback) + + def close(self): + return self.gen.close() + + @property + def gi_frame(self): + return self.gen.gi_frame + + @property + def gi_running(self): + return self.gen.gi_running + + @property + def gi_code(self): + return self.gen.gi_code + + def __await__(self): + return self + + @property + def gi_yieldfrom(self): + return self.gen.gi_yieldfrom + + def __del__(self): + # Be careful accessing self.gen.frame -- self.gen might not exist. 
+ gen = getattr(self, 'gen', None) + frame = getattr(gen, 'gi_frame', None) + if frame is not None and frame.f_lasti == -1: + msg = f'{self!r} was never yielded from' + tb = getattr(self, '_source_traceback', ()) + if tb: + tb = ''.join(traceback.format_list(tb)) + msg += (f'\nCoroutine object created at ' + f'(most recent call last, truncated to ' + f'{constants.DEBUG_STACK_DEPTH} last lines):\n') + msg += tb.rstrip() + logger.error(msg) + + +def coroutine(func): + """Decorator to mark coroutines. + + If the coroutine is not yielded from before it is destroyed, + an error message is logged. + """ + warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead', + DeprecationWarning, + stacklevel=2) + if inspect.iscoroutinefunction(func): + # In Python 3.5 that's all we need to do for coroutines + # defined with "async def". + return func + + if inspect.isgeneratorfunction(func): + coro = func + else: + @functools.wraps(func) + def coro(*args, **kw): + res = func(*args, **kw) + if (base_futures.isfuture(res) or inspect.isgenerator(res) or + isinstance(res, CoroWrapper)): + res = yield from res + else: + # If 'res' is an awaitable, run it. + try: + await_meth = res.__await__ + except AttributeError: + pass + else: + if isinstance(res, collections.abc.Awaitable): + res = yield from await_meth() + return res + + coro = types.coroutine(coro) + if not _DEBUG: + wrapper = coro + else: + @functools.wraps(func) + def wrapper(*args, **kwds): + w = CoroWrapper(coro(*args, **kwds), func=func) + if w._source_traceback: + del w._source_traceback[-1] + # Python < 3.5 does not implement __qualname__ + # on generator objects, so we set it manually. + # We use getattr as some callables (such as + # functools.partial may lack __qualname__). + w.__name__ = getattr(func, '__name__', None) + w.__qualname__ = getattr(func, '__qualname__', None) + return w + + wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction(). + return wrapper + + +# A marker for iscoroutinefunction. +_is_coroutine = object() + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function.""" + return (inspect.iscoroutinefunction(func) or + getattr(func, '_is_coroutine', None) is _is_coroutine) + + +# Prioritize native coroutine check to speed-up +# asyncio.iscoroutine. +_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType, + collections.abc.Coroutine, CoroWrapper) +_iscoroutine_typecache = set() + + +def iscoroutine(obj): + """Return True if obj is a coroutine object.""" + if type(obj) in _iscoroutine_typecache: + return True + + if isinstance(obj, _COROUTINE_TYPES): + # Just in case we don't want to cache more than 100 + # positive types. That shouldn't ever happen, unless + # someone stressing the system on purpose. + if len(_iscoroutine_typecache) < 100: + _iscoroutine_typecache.add(type(obj)) + return True + else: + return False + + +def _format_coroutine(coro): + assert iscoroutine(coro) + + is_corowrapper = isinstance(coro, CoroWrapper) + + def get_name(coro): + # Coroutines compiled with Cython sometimes don't have + # proper __qualname__ or __name__. While that is a bug + # in Cython, asyncio shouldn't crash with an AttributeError + # in its __repr__ functions. 
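Note that the type checks above accept more than native coroutines: plain generator objects and CoroWrapper instances pass too, per _COROUTINE_TYPES. An illustrative check:

import asyncio
import types

async def native():
    return 42

@types.coroutine
def generator_based():
    yield

print(asyncio.iscoroutinefunction(native))      # True
coro = native()
print(asyncio.iscoroutine(coro))                # True (types.CoroutineType)
print(asyncio.iscoroutine(generator_based()))   # True (types.GeneratorType)
coro.close()   # avoid a "never awaited" RuntimeWarning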
+ if is_corowrapper: + return format_helpers._format_callback(coro.func, (), {}) + + if hasattr(coro, '__qualname__') and coro.__qualname__: + coro_name = coro.__qualname__ + elif hasattr(coro, '__name__') and coro.__name__: + coro_name = coro.__name__ + else: + # Stop masking Cython bugs, expose them in a friendly way. + coro_name = f'<{type(coro).__name__} without __name__>' + return f'{coro_name}()' + + def is_running(coro): + try: + return coro.cr_running + except AttributeError: + try: + return coro.gi_running + except AttributeError: + return False + + coro_code = None + if hasattr(coro, 'cr_code') and coro.cr_code: + coro_code = coro.cr_code + elif hasattr(coro, 'gi_code') and coro.gi_code: + coro_code = coro.gi_code + + coro_name = get_name(coro) + + if not coro_code: + # Built-in types might not have __qualname__ or __name__. + if is_running(coro): + return f'{coro_name} running' + else: + return coro_name + + coro_frame = None + if hasattr(coro, 'gi_frame') and coro.gi_frame: + coro_frame = coro.gi_frame + elif hasattr(coro, 'cr_frame') and coro.cr_frame: + coro_frame = coro.cr_frame + + # If Cython's coroutine has a fake code object without proper + # co_filename -- expose that. + filename = coro_code.co_filename or '' + + lineno = 0 + if (is_corowrapper and + coro.func is not None and + not inspect.isgeneratorfunction(coro.func)): + source = format_helpers._get_function_source(coro.func) + if source is not None: + filename, lineno = source + if coro_frame is None: + coro_repr = f'{coro_name} done, defined at {filename}:{lineno}' + else: + coro_repr = f'{coro_name} running, defined at {filename}:{lineno}' + + elif coro_frame is not None: + lineno = coro_frame.f_lineno + coro_repr = f'{coro_name} running at {filename}:{lineno}' + + else: + lineno = coro_code.co_firstlineno + coro_repr = f'{coro_name} done, defined at {filename}:{lineno}' + + return coro_repr diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/events.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/events.py new file mode 100644 index 0000000000000000000000000000000000000000..353a9f5af0c49b30d9476dec3041a362c61cfcf4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/events.py @@ -0,0 +1,792 @@ +"""Event loop and event loop policy.""" + +__all__ = ( + 'AbstractEventLoopPolicy', + 'AbstractEventLoop', 'AbstractServer', + 'Handle', 'TimerHandle', + 'get_event_loop_policy', 'set_event_loop_policy', + 'get_event_loop', 'set_event_loop', 'new_event_loop', + 'get_child_watcher', 'set_child_watcher', + '_set_running_loop', 'get_running_loop', + '_get_running_loop', +) + +import contextvars +import os +import socket +import subprocess +import sys +import threading + +from . import format_helpers +from . 
import exceptions + + +class Handle: + """Object returned by callback registration methods.""" + + __slots__ = ('_callback', '_args', '_cancelled', '_loop', + '_source_traceback', '_repr', '__weakref__', + '_context') + + def __init__(self, callback, args, loop, context=None): + if context is None: + context = contextvars.copy_context() + self._context = context + self._loop = loop + self._callback = callback + self._args = args + self._cancelled = False + self._repr = None + if self._loop.get_debug(): + self._source_traceback = format_helpers.extract_stack( + sys._getframe(1)) + else: + self._source_traceback = None + + def _repr_info(self): + info = [self.__class__.__name__] + if self._cancelled: + info.append('cancelled') + if self._callback is not None: + info.append(format_helpers._format_callback_source( + self._callback, self._args)) + if self._source_traceback: + frame = self._source_traceback[-1] + info.append(f'created at {frame[0]}:{frame[1]}') + return info + + def __repr__(self): + if self._repr is not None: + return self._repr + info = self._repr_info() + return '<{}>'.format(' '.join(info)) + + def cancel(self): + if not self._cancelled: + self._cancelled = True + if self._loop.get_debug(): + # Keep a representation in debug mode to keep callback and + # parameters. For example, to log the warning + # "Executing took 2.5 second" + self._repr = repr(self) + self._callback = None + self._args = None + + def cancelled(self): + return self._cancelled + + def _run(self): + try: + self._context.run(self._callback, *self._args) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + cb = format_helpers._format_callback_source( + self._callback, self._args) + msg = f'Exception in callback {cb}' + context = { + 'message': msg, + 'exception': exc, + 'handle': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + self = None # Needed to break cycles when an exception occurs. + + +class TimerHandle(Handle): + """Object returned by timed callback registration methods.""" + + __slots__ = ['_scheduled', '_when'] + + def __init__(self, when, callback, args, loop, context=None): + assert when is not None + super().__init__(callback, args, loop, context) + if self._source_traceback: + del self._source_traceback[-1] + self._when = when + self._scheduled = False + + def _repr_info(self): + info = super()._repr_info() + pos = 2 if self._cancelled else 1 + info.insert(pos, f'when={self._when}') + return info + + def __hash__(self): + return hash(self._when) + + def __lt__(self, other): + return self._when < other._when + + def __le__(self, other): + if self._when < other._when: + return True + return self.__eq__(other) + + def __gt__(self, other): + return self._when > other._when + + def __ge__(self, other): + if self._when > other._when: + return True + return self.__eq__(other) + + def __eq__(self, other): + if isinstance(other, TimerHandle): + return (self._when == other._when and + self._callback == other._callback and + self._args == other._args and + self._cancelled == other._cancelled) + return NotImplemented + + def __ne__(self, other): + equal = self.__eq__(other) + return NotImplemented if equal is NotImplemented else not equal + + def cancel(self): + if not self._cancelled: + self._loop._timer_handle_cancelled(self) + super().cancel() + + def when(self): + """Return a scheduled callback time. 
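call_later() and call_at() return the TimerHandle defined above; when() and cancel() are its public surface, and when() reports an absolute time on the loop's clock, as the docstring continues below. A short sketch:

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    handle = loop.call_later(60, print, 'fired')
    print(round(handle.when() - loop.time()))  # ~60, on the loop's clock
    handle.cancel()    # feeds _timer_handle_cancelled() above

asyncio.run(main())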
+ + The time is an absolute timestamp, using the same time + reference as loop.time(). + """ + return self._when + + +class AbstractServer: + """Abstract server returned by create_server().""" + + def close(self): + """Stop serving. This leaves existing connections open.""" + raise NotImplementedError + + def get_loop(self): + """Get the event loop the Server object is attached to.""" + raise NotImplementedError + + def is_serving(self): + """Return True if the server is accepting connections.""" + raise NotImplementedError + + async def start_serving(self): + """Start accepting connections. + + This method is idempotent, so it can be called when + the server is already being serving. + """ + raise NotImplementedError + + async def serve_forever(self): + """Start accepting connections until the coroutine is cancelled. + + The server is closed when the coroutine is cancelled. + """ + raise NotImplementedError + + async def wait_closed(self): + """Coroutine to wait until service is closed.""" + raise NotImplementedError + + async def __aenter__(self): + return self + + async def __aexit__(self, *exc): + self.close() + await self.wait_closed() + + +class AbstractEventLoop: + """Abstract event loop.""" + + # Running and stopping the event loop. + + def run_forever(self): + """Run the event loop until stop() is called.""" + raise NotImplementedError + + def run_until_complete(self, future): + """Run the event loop until a Future is done. + + Return the Future's result, or raise its exception. + """ + raise NotImplementedError + + def stop(self): + """Stop the event loop as soon as reasonable. + + Exactly how soon that is may depend on the implementation, but + no more I/O callbacks should be scheduled. + """ + raise NotImplementedError + + def is_running(self): + """Return whether the event loop is currently running.""" + raise NotImplementedError + + def is_closed(self): + """Returns True if the event loop was closed.""" + raise NotImplementedError + + def close(self): + """Close the loop. + + The loop should not be running. + + This is idempotent and irreversible. + + No other methods should be called after this one. + """ + raise NotImplementedError + + async def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + raise NotImplementedError + + # Methods scheduling callbacks. All these return Handles. + + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + raise NotImplementedError + + def call_soon(self, callback, *args): + return self.call_later(0, callback, *args) + + def call_later(self, delay, callback, *args): + raise NotImplementedError + + def call_at(self, when, callback, *args): + raise NotImplementedError + + def time(self): + raise NotImplementedError + + def create_future(self): + raise NotImplementedError + + # Method scheduling a coroutine object: create a task. + + def create_task(self, coro, *, name=None): + raise NotImplementedError + + # Methods for interacting with threads. + + def call_soon_threadsafe(self, callback, *args): + raise NotImplementedError + + def run_in_executor(self, executor, func, *args): + raise NotImplementedError + + def set_default_executor(self, executor): + raise NotImplementedError + + # Network I/O methods returning Futures. 
+ + async def getaddrinfo(self, host, port, *, + family=0, type=0, proto=0, flags=0): + raise NotImplementedError + + async def getnameinfo(self, sockaddr, flags=0): + raise NotImplementedError + + async def create_connection( + self, protocol_factory, host=None, port=None, + *, ssl=None, family=0, proto=0, + flags=0, sock=None, local_addr=None, + server_hostname=None, + ssl_handshake_timeout=None, + happy_eyeballs_delay=None, interleave=None): + raise NotImplementedError + + async def create_server( + self, protocol_factory, host=None, port=None, + *, family=socket.AF_UNSPEC, + flags=socket.AI_PASSIVE, sock=None, backlog=100, + ssl=None, reuse_address=None, reuse_port=None, + ssl_handshake_timeout=None, + start_serving=True): + """A coroutine which creates a TCP server bound to host and port. + + The return value is a Server object which can be used to stop + the service. + + If host is an empty string or None all interfaces are assumed + and a list of multiple sockets will be returned (most likely + one for IPv4 and another one for IPv6). The host parameter can also be + a sequence (e.g. list) of hosts to bind to. + + family can be set to either AF_INET or AF_INET6 to force the + socket to use IPv4 or IPv6. If not set it will be determined + from host (defaults to AF_UNSPEC). + + flags is a bitmask for getaddrinfo(). + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for completion of the SSL handshake before aborting the + connection. Default is 60s. + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + """ + raise NotImplementedError + + async def sendfile(self, transport, file, offset=0, count=None, + *, fallback=True): + """Send a file through a transport. + + Return an amount of sent bytes. + """ + raise NotImplementedError + + async def start_tls(self, transport, protocol, sslcontext, *, + server_side=False, + server_hostname=None, + ssl_handshake_timeout=None): + """Upgrade a transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + """ + raise NotImplementedError + + async def create_unix_connection( + self, protocol_factory, path=None, *, + ssl=None, sock=None, + server_hostname=None, + ssl_handshake_timeout=None): + raise NotImplementedError + + async def create_unix_server( + self, protocol_factory, path=None, *, + sock=None, backlog=100, ssl=None, + ssl_handshake_timeout=None, + start_serving=True): + """A coroutine which creates a UNIX Domain Socket server. + + The return value is a Server object, which can be used to stop + the service. + + path is a str, representing a file systsem path to bind the + server socket to. 
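A minimal sketch of the TCP server behavior documented in create_server() above, driven through the streams-level wrapper (the host and port here are arbitrary):

import asyncio

async def handle_echo(reader, writer):
    data = await reader.read(100)
    writer.write(data)
    await writer.drain()
    writer.close()

async def main():
    # asyncio.start_server() is a thin wrapper over loop.create_server()
    server = await asyncio.start_server(handle_echo, '127.0.0.1', 8888)
    async with server:              # AbstractServer.__aenter__/__aexit__
        await server.serve_forever()

asyncio.run(main())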
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+
+        backlog is the maximum number of queued connections passed to
+        listen() (defaults to 100).
+
+        ssl can be set to an SSLContext to enable SSL over the
+        accepted connections.
+
+        ssl_handshake_timeout is the time in seconds that an SSL server
+        will wait for the SSL handshake to complete (defaults to 60s).
+
+        start_serving set to True (default) causes the created server
+        to start accepting connections immediately.  When set to False,
+        the user should await Server.start_serving() or Server.serve_forever()
+        to make the server start accepting connections.
+        """
+        raise NotImplementedError
+
+    async def create_datagram_endpoint(self, protocol_factory,
+                                       local_addr=None, remote_addr=None, *,
+                                       family=0, proto=0, flags=0,
+                                       reuse_address=None, reuse_port=None,
+                                       allow_broadcast=None, sock=None):
+        """A coroutine which creates a datagram endpoint.
+
+        This method will try to establish the endpoint in the background.
+        When successful, the coroutine returns a (transport, protocol) pair.
+
+        protocol_factory must be a callable returning a protocol instance.
+
+        socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on
+        host (or family if specified), socket type SOCK_DGRAM.
+
+        reuse_address tells the kernel to reuse a local socket in
+        TIME_WAIT state, without waiting for its natural timeout to
+        expire. If not specified it will automatically be set to True on
+        UNIX.
+
+        reuse_port tells the kernel to allow this endpoint to be bound to
+        the same port as other existing endpoints are bound to, so long as
+        they all set this flag when being created. This option is not
+        supported on Windows and some UNIXes. If the
+        :py:data:`~socket.SO_REUSEPORT` constant is not defined then this
+        capability is unsupported.
+
+        allow_broadcast tells the kernel to allow this endpoint to send
+        messages to the broadcast address.
+
+        sock can optionally be specified in order to use a preexisting
+        socket object.
+        """
+        raise NotImplementedError
+
+    # Pipes and subprocesses.
+
+    async def connect_read_pipe(self, protocol_factory, pipe):
+        """Register read pipe in event loop. Set the pipe to non-blocking mode.
+
+        protocol_factory should instantiate an object with the Protocol
+        interface.  pipe is a file-like object.
+        Return pair (transport, protocol), where transport supports the
+        ReadTransport interface."""
+        # The reason to accept a file-like object instead of just a file
+        # descriptor is that we need to own the pipe and close it when the
+        # transport is finished.  Complicated errors can occur if f.fileno()
+        # is passed instead: the fd may be closed by the pipe transport and
+        # then f is closed as well, or vice versa.
+        raise NotImplementedError
+
+    async def connect_write_pipe(self, protocol_factory, pipe):
+        """Register write pipe in event loop.
+
+        protocol_factory should instantiate an object with the BaseProtocol
+        interface.  pipe is a file-like object already switched to
+        non-blocking mode.
+        Return pair (transport, protocol), where transport supports the
+        WriteTransport interface."""
+        # The reason to accept a file-like object instead of just a file
+        # descriptor is that we need to own the pipe and close it when the
+        # transport is finished.  Complicated errors can occur if f.fileno()
+        # is passed instead: the fd may be closed by the pipe transport and
+        # then f is closed as well, or vice versa.
+ raise NotImplementedError + + async def subprocess_shell(self, protocol_factory, cmd, *, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + **kwargs): + raise NotImplementedError + + async def subprocess_exec(self, protocol_factory, *args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + **kwargs): + raise NotImplementedError + + # Ready-based callback registration methods. + # The add_*() methods return None. + # The remove_*() methods return True if something was removed, + # False if there was nothing to delete. + + def add_reader(self, fd, callback, *args): + raise NotImplementedError + + def remove_reader(self, fd): + raise NotImplementedError + + def add_writer(self, fd, callback, *args): + raise NotImplementedError + + def remove_writer(self, fd): + raise NotImplementedError + + # Completion based I/O methods returning Futures. + + async def sock_recv(self, sock, nbytes): + raise NotImplementedError + + async def sock_recv_into(self, sock, buf): + raise NotImplementedError + + async def sock_sendall(self, sock, data): + raise NotImplementedError + + async def sock_connect(self, sock, address): + raise NotImplementedError + + async def sock_accept(self, sock): + raise NotImplementedError + + async def sock_sendfile(self, sock, file, offset=0, count=None, + *, fallback=None): + raise NotImplementedError + + # Signal handling. + + def add_signal_handler(self, sig, callback, *args): + raise NotImplementedError + + def remove_signal_handler(self, sig): + raise NotImplementedError + + # Task factory. + + def set_task_factory(self, factory): + raise NotImplementedError + + def get_task_factory(self): + raise NotImplementedError + + # Error handlers. + + def get_exception_handler(self): + raise NotImplementedError + + def set_exception_handler(self, handler): + raise NotImplementedError + + def default_exception_handler(self, context): + raise NotImplementedError + + def call_exception_handler(self, context): + raise NotImplementedError + + # Debug flag management. + + def get_debug(self): + raise NotImplementedError + + def set_debug(self, enabled): + raise NotImplementedError + + +class AbstractEventLoopPolicy: + """Abstract policy for accessing the event loop.""" + + def get_event_loop(self): + """Get the event loop for the current context. + + Returns an event loop object implementing the BaseEventLoop interface, + or raises an exception in case no event loop has been set for the + current context and the current policy does not specify to create one. + + It should never return None.""" + raise NotImplementedError + + def set_event_loop(self, loop): + """Set the event loop for the current context to loop.""" + raise NotImplementedError + + def new_event_loop(self): + """Create and return a new event loop object according to this + policy's rules. If there's need to set this loop as the event loop for + the current context, set_event_loop must be called explicitly.""" + raise NotImplementedError + + # Child processes handling (Unix only). + + def get_child_watcher(self): + "Get the watcher for child processes." + raise NotImplementedError + + def set_child_watcher(self, watcher): + """Set the watcher for child processes.""" + raise NotImplementedError + + +class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy): + """Default policy implementation for accessing the event loop. + + In this policy, each thread has its own event loop. 
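A sketch of that thread-local behavior, run as a script:

import asyncio
import threading

def worker():
    try:
        asyncio.get_event_loop()   # no loop is auto-created here
    except RuntimeError as exc:
        print('worker thread:', exc)

loop = asyncio.get_event_loop()    # main thread: created on first use
t = threading.Thread(target=worker)
t.start()
t.join()
loop.close()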
However, we + only automatically create an event loop by default for the main + thread; other threads by default have no event loop. + + Other policies may have different rules (e.g. a single global + event loop, or automatically creating an event loop per thread, or + using some other notion of context to which an event loop is + associated). + """ + + _loop_factory = None + + class _Local(threading.local): + _loop = None + _set_called = False + + def __init__(self): + self._local = self._Local() + + def get_event_loop(self): + """Get the event loop for the current context. + + Returns an instance of EventLoop or raises an exception. + """ + if (self._local._loop is None and + not self._local._set_called and + isinstance(threading.current_thread(), threading._MainThread)): + self.set_event_loop(self.new_event_loop()) + + if self._local._loop is None: + raise RuntimeError('There is no current event loop in thread %r.' + % threading.current_thread().name) + + return self._local._loop + + def set_event_loop(self, loop): + """Set the event loop.""" + self._local._set_called = True + assert loop is None or isinstance(loop, AbstractEventLoop) + self._local._loop = loop + + def new_event_loop(self): + """Create a new event loop. + + You must call set_event_loop() to make this the current event + loop. + """ + return self._loop_factory() + + +# Event loop policy. The policy itself is always global, even if the +# policy's rules say that there is an event loop per thread (or other +# notion of context). The default policy is installed by the first +# call to get_event_loop_policy(). +_event_loop_policy = None + +# Lock for protecting the on-the-fly creation of the event loop policy. +_lock = threading.Lock() + + +# A TLS for the running event loop, used by _get_running_loop. +class _RunningLoop(threading.local): + loop_pid = (None, None) + + +_running_loop = _RunningLoop() + + +def get_running_loop(): + """Return the running event loop. Raise a RuntimeError if there is none. + + This function is thread-specific. + """ + # NOTE: this function is implemented in C (see _asynciomodule.c) + loop = _get_running_loop() + if loop is None: + raise RuntimeError('no running event loop') + return loop + + +def _get_running_loop(): + """Return the running event loop or None. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + """ + # NOTE: this function is implemented in C (see _asynciomodule.c) + running_loop, pid = _running_loop.loop_pid + if running_loop is not None and pid == os.getpid(): + return running_loop + + +def _set_running_loop(loop): + """Set the running event loop. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + """ + # NOTE: this function is implemented in C (see _asynciomodule.c) + _running_loop.loop_pid = (loop, os.getpid()) + + +def _init_event_loop_policy(): + global _event_loop_policy + with _lock: + if _event_loop_policy is None: # pragma: no branch + from . import DefaultEventLoopPolicy + _event_loop_policy = DefaultEventLoopPolicy() + + +def get_event_loop_policy(): + """Get the current event loop policy.""" + if _event_loop_policy is None: + _init_event_loop_policy() + return _event_loop_policy + + +def set_event_loop_policy(policy): + """Set the current event loop policy. 
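The running-loop TLS above is what distinguishes get_running_loop() from the policy lookup; a quick illustration:

import asyncio

async def main():
    loop = asyncio.get_running_loop()        # reads the TLS set by the loop
    assert loop is asyncio.get_event_loop()  # same answer inside a coroutine
    print(loop.time())

asyncio.run(main())
# Outside a running loop, get_running_loop() raises RuntimeError,
# while get_event_loop() falls back to the policy.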
+ + If policy is None, the default policy is restored.""" + global _event_loop_policy + assert policy is None or isinstance(policy, AbstractEventLoopPolicy) + _event_loop_policy = policy + + +def get_event_loop(): + """Return an asyncio event loop. + + When called from a coroutine or a callback (e.g. scheduled with call_soon + or similar API), this function will always return the running event loop. + + If there is no running event loop set, the function will return + the result of `get_event_loop_policy().get_event_loop()` call. + """ + # NOTE: this function is implemented in C (see _asynciomodule.c) + current_loop = _get_running_loop() + if current_loop is not None: + return current_loop + return get_event_loop_policy().get_event_loop() + + +def set_event_loop(loop): + """Equivalent to calling get_event_loop_policy().set_event_loop(loop).""" + get_event_loop_policy().set_event_loop(loop) + + +def new_event_loop(): + """Equivalent to calling get_event_loop_policy().new_event_loop().""" + return get_event_loop_policy().new_event_loop() + + +def get_child_watcher(): + """Equivalent to calling get_event_loop_policy().get_child_watcher().""" + return get_event_loop_policy().get_child_watcher() + + +def set_child_watcher(watcher): + """Equivalent to calling + get_event_loop_policy().set_child_watcher(watcher).""" + return get_event_loop_policy().set_child_watcher(watcher) + + +# Alias pure-Python implementations for testing purposes. +_py__get_running_loop = _get_running_loop +_py__set_running_loop = _set_running_loop +_py_get_running_loop = get_running_loop +_py_get_event_loop = get_event_loop + + +try: + # get_event_loop() is one of the most frequently called + # functions in asyncio. Pure Python implementation is + # about 4 times slower than C-accelerated. + from _asyncio import (_get_running_loop, _set_running_loop, + get_running_loop, get_event_loop) +except ImportError: + pass +else: + # Alias C implementations for testing purposes. + _c__get_running_loop = _get_running_loop + _c__set_running_loop = _set_running_loop + _c_get_running_loop = get_running_loop + _c_get_event_loop = get_event_loop diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/exceptions.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..f07e4486577381616e7e2f7f05747ed7374316a1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/exceptions.py @@ -0,0 +1,58 @@ +"""asyncio exceptions.""" + + +__all__ = ('CancelledError', 'InvalidStateError', 'TimeoutError', + 'IncompleteReadError', 'LimitOverrunError', + 'SendfileNotAvailableError') + + +class CancelledError(BaseException): + """The Future or Task was cancelled.""" + + +class TimeoutError(Exception): + """The operation exceeded the given deadline.""" + + +class InvalidStateError(Exception): + """The operation is not allowed in this state.""" + + +class SendfileNotAvailableError(RuntimeError): + """Sendfile syscall is not available. + + Raised if OS does not support sendfile syscall for given socket or + file type. + """ + + +class IncompleteReadError(EOFError): + """ + Incomplete read error. 
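For instance, the streams layer raises it from readexactly() when EOF arrives before enough bytes are buffered (illustrative sketch):

import asyncio

async def main():
    reader = asyncio.StreamReader()
    reader.feed_data(b'abc')
    reader.feed_eof()
    try:
        await reader.readexactly(10)
    except asyncio.IncompleteReadError as exc:
        print(exc.partial, exc.expected)   # b'abc' 10

asyncio.run(main())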
Attributes: + + - partial: read bytes string before the end of stream was reached + - expected: total number of expected bytes (or None if unknown) + """ + def __init__(self, partial, expected): + r_expected = 'undefined' if expected is None else repr(expected) + super().__init__(f'{len(partial)} bytes read on a total of ' + f'{r_expected} expected bytes') + self.partial = partial + self.expected = expected + + def __reduce__(self): + return type(self), (self.partial, self.expected) + + +class LimitOverrunError(Exception): + """Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + """ + def __init__(self, message, consumed): + super().__init__(message) + self.consumed = consumed + + def __reduce__(self): + return type(self), (self.args[0], self.consumed) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/format_helpers.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/format_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..27d11fd4fa9553e21077bd4a93c523dbcb1d32cb --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/format_helpers.py @@ -0,0 +1,76 @@ +import functools +import inspect +import reprlib +import sys +import traceback + +from . import constants + + +def _get_function_source(func): + func = inspect.unwrap(func) + if inspect.isfunction(func): + code = func.__code__ + return (code.co_filename, code.co_firstlineno) + if isinstance(func, functools.partial): + return _get_function_source(func.func) + if isinstance(func, functools.partialmethod): + return _get_function_source(func.func) + return None + + +def _format_callback_source(func, args): + func_repr = _format_callback(func, args, None) + source = _get_function_source(func) + if source: + func_repr += f' at {source[0]}:{source[1]}' + return func_repr + + +def _format_args_and_kwargs(args, kwargs): + """Format function arguments and keyword arguments. + + Special case for a single parameter: ('hello',) is formatted as ('hello'). + """ + # use reprlib to limit the length of the output + items = [] + if args: + items.extend(reprlib.repr(arg) for arg in args) + if kwargs: + items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items()) + return '({})'.format(', '.join(items)) + + +def _format_callback(func, args, kwargs, suffix=''): + if isinstance(func, functools.partial): + suffix = _format_args_and_kwargs(args, kwargs) + suffix + return _format_callback(func.func, func.args, func.keywords, suffix) + + if hasattr(func, '__qualname__') and func.__qualname__: + func_repr = func.__qualname__ + elif hasattr(func, '__name__') and func.__name__: + func_repr = func.__name__ + else: + func_repr = repr(func) + + func_repr += _format_args_and_kwargs(args, kwargs) + if suffix: + func_repr += suffix + return func_repr + + +def extract_stack(f=None, limit=None): + """Replacement for traceback.extract_stack() that only does the + necessary work for asyncio debug mode. + """ + if f is None: + f = sys._getframe().f_back + if limit is None: + # Limit the amount of work to a reasonable amount, as extract_stack() + # can be called for each coroutine and future in debug mode. 
+ limit = constants.DEBUG_STACK_DEPTH + stack = traceback.StackSummary.extract(traceback.walk_stack(f), + limit=limit, + lookup_lines=False) + stack.reverse() + return stack diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/futures.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/futures.py new file mode 100644 index 0000000000000000000000000000000000000000..9afda220bd78f24f514f016d5721b27698a81809 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/futures.py @@ -0,0 +1,399 @@ +"""A Future class similar to the one in PEP 3148.""" + +__all__ = ( + 'Future', 'wrap_future', 'isfuture', +) + +import concurrent.futures +import contextvars +import logging +import sys + +from . import base_futures +from . import events +from . import exceptions +from . import format_helpers + + +isfuture = base_futures.isfuture + + +_PENDING = base_futures._PENDING +_CANCELLED = base_futures._CANCELLED +_FINISHED = base_futures._FINISHED + + +STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging + + +class Future: + """This class is *almost* compatible with concurrent.futures.Future. + + Differences: + + - This class is not thread-safe. + + - result() and exception() do not take a timeout argument and + raise an exception when the future isn't done yet. + + - Callbacks registered with add_done_callback() are always called + via the event loop's call_soon(). + + - This class is not compatible with the wait() and as_completed() + methods in the concurrent.futures package. + + (In Python 3.4 or later we may be able to unify the implementations.) + """ + + # Class variables serving as defaults for instance variables. + _state = _PENDING + _result = None + _exception = None + _loop = None + _source_traceback = None + + # This field is used for a dual purpose: + # - Its presence is a marker to declare that a class implements + # the Future protocol (i.e. is intended to be duck-type compatible). + # The value must also be not-None, to enable a subclass to declare + # that it is not compatible by setting this to None. + # - It is set by __iter__() below so that Task._step() can tell + # the difference between + # `await Future()` or`yield from Future()` (correct) vs. + # `yield Future()` (incorrect). + _asyncio_future_blocking = False + + __log_traceback = False + + def __init__(self, *, loop=None): + """Initialize the future. + + The optional event_loop argument allows explicitly setting the event + loop object used by the future. If it's not provided, the future uses + the default event loop. 
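A sketch of the usual construction path. loop.create_future() is preferred over instantiating Future directly, since it lets alternative event loops supply their own future type:

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    loop.call_later(0.1, fut.set_result, 'done')
    print(await fut)   # 'done'

asyncio.run(main())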
+ """ + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + self._callbacks = [] + if self._loop.get_debug(): + self._source_traceback = format_helpers.extract_stack( + sys._getframe(1)) + + _repr_info = base_futures._future_repr_info + + def __repr__(self): + return '<{} {}>'.format(self.__class__.__name__, + ' '.join(self._repr_info())) + + def __del__(self): + if not self.__log_traceback: + # set_exception() was not called, or result() or exception() + # has consumed the exception + return + exc = self._exception + context = { + 'message': + f'{self.__class__.__name__} exception was never retrieved', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + + @property + def _log_traceback(self): + return self.__log_traceback + + @_log_traceback.setter + def _log_traceback(self, val): + if bool(val): + raise ValueError('_log_traceback can only be set to False') + self.__log_traceback = False + + def get_loop(self): + """Return the event loop the Future is bound to.""" + loop = self._loop + if loop is None: + raise RuntimeError("Future object is not initialized.") + return loop + + def cancel(self): + """Cancel the future and schedule callbacks. + + If the future is already done or cancelled, return False. Otherwise, + change the future's state to cancelled, schedule the callbacks and + return True. + """ + self.__log_traceback = False + if self._state != _PENDING: + return False + self._state = _CANCELLED + self.__schedule_callbacks() + return True + + def __schedule_callbacks(self): + """Internal: Ask the event loop to call all callbacks. + + The callbacks are scheduled to be called as soon as possible. Also + clears the callback list. + """ + callbacks = self._callbacks[:] + if not callbacks: + return + + self._callbacks[:] = [] + for callback, ctx in callbacks: + self._loop.call_soon(callback, self, context=ctx) + + def cancelled(self): + """Return True if the future was cancelled.""" + return self._state == _CANCELLED + + # Don't implement running(); see http://bugs.python.org/issue18699 + + def done(self): + """Return True if the future is done. + + Done means either that a result / exception are available, or that the + future was cancelled. + """ + return self._state != _PENDING + + def result(self): + """Return the result this future represents. + + If the future has been cancelled, raises CancelledError. If the + future's result isn't yet available, raises InvalidStateError. If + the future is done and has an exception set, this exception is raised. + """ + if self._state == _CANCELLED: + raise exceptions.CancelledError + if self._state != _FINISHED: + raise exceptions.InvalidStateError('Result is not ready.') + self.__log_traceback = False + if self._exception is not None: + raise self._exception + return self._result + + def exception(self): + """Return the exception that was set on this future. + + The exception (or None if no exception was set) is returned only if + the future is done. If the future has been cancelled, raises + CancelledError. If the future isn't done yet, raises + InvalidStateError. 
+ """ + if self._state == _CANCELLED: + raise exceptions.CancelledError + if self._state != _FINISHED: + raise exceptions.InvalidStateError('Exception is not set.') + self.__log_traceback = False + return self._exception + + def add_done_callback(self, fn, *, context=None): + """Add a callback to be run when the future becomes done. + + The callback is called with a single argument - the future object. If + the future is already done when this is called, the callback is + scheduled with call_soon. + """ + if self._state != _PENDING: + self._loop.call_soon(fn, self, context=context) + else: + if context is None: + context = contextvars.copy_context() + self._callbacks.append((fn, context)) + + # New method not in PEP 3148. + + def remove_done_callback(self, fn): + """Remove all instances of a callback from the "call when done" list. + + Returns the number of callbacks removed. + """ + filtered_callbacks = [(f, ctx) + for (f, ctx) in self._callbacks + if f != fn] + removed_count = len(self._callbacks) - len(filtered_callbacks) + if removed_count: + self._callbacks[:] = filtered_callbacks + return removed_count + + # So-called internal methods (note: no set_running_or_notify_cancel()). + + def set_result(self, result): + """Mark the future done and set its result. + + If the future is already done when this method is called, raises + InvalidStateError. + """ + if self._state != _PENDING: + raise exceptions.InvalidStateError(f'{self._state}: {self!r}') + self._result = result + self._state = _FINISHED + self.__schedule_callbacks() + + def set_exception(self, exception): + """Mark the future done and set an exception. + + If the future is already done when this method is called, raises + InvalidStateError. + """ + if self._state != _PENDING: + raise exceptions.InvalidStateError(f'{self._state}: {self!r}') + if isinstance(exception, type): + exception = exception() + if type(exception) is StopIteration: + raise TypeError("StopIteration interacts badly with generators " + "and cannot be raised into a Future") + self._exception = exception + self._state = _FINISHED + self.__schedule_callbacks() + self.__log_traceback = True + + def __await__(self): + if not self.done(): + self._asyncio_future_blocking = True + yield self # This tells Task to wait for completion. + if not self.done(): + raise RuntimeError("await wasn't used with future") + return self.result() # May raise too. + + __iter__ = __await__ # make compatible with 'yield from'. + + +# Needed for testing purposes. +_PyFuture = Future + + +def _get_loop(fut): + # Tries to call Future.get_loop() if it's available. + # Otherwise fallbacks to using the old '_loop' property. 
+ try: + get_loop = fut.get_loop + except AttributeError: + pass + else: + return get_loop() + return fut._loop + + +def _set_result_unless_cancelled(fut, result): + """Helper setting the result only if the future was not cancelled.""" + if fut.cancelled(): + return + fut.set_result(result) + + +def _convert_future_exc(exc): + exc_class = type(exc) + if exc_class is concurrent.futures.CancelledError: + return exceptions.CancelledError(*exc.args) + elif exc_class is concurrent.futures.TimeoutError: + return exceptions.TimeoutError(*exc.args) + elif exc_class is concurrent.futures.InvalidStateError: + return exceptions.InvalidStateError(*exc.args) + else: + return exc + + +def _set_concurrent_future_state(concurrent, source): + """Copy state from a future to a concurrent.futures.Future.""" + assert source.done() + if source.cancelled(): + concurrent.cancel() + if not concurrent.set_running_or_notify_cancel(): + return + exception = source.exception() + if exception is not None: + concurrent.set_exception(_convert_future_exc(exception)) + else: + result = source.result() + concurrent.set_result(result) + + +def _copy_future_state(source, dest): + """Internal helper to copy state from another Future. + + The other Future may be a concurrent.futures.Future. + """ + assert source.done() + if dest.cancelled(): + return + assert not dest.done() + if source.cancelled(): + dest.cancel() + else: + exception = source.exception() + if exception is not None: + dest.set_exception(_convert_future_exc(exception)) + else: + result = source.result() + dest.set_result(result) + + +def _chain_future(source, destination): + """Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. 
+ """ + if not isfuture(source) and not isinstance(source, + concurrent.futures.Future): + raise TypeError('A future is required for source argument') + if not isfuture(destination) and not isinstance(destination, + concurrent.futures.Future): + raise TypeError('A future is required for destination argument') + source_loop = _get_loop(source) if isfuture(source) else None + dest_loop = _get_loop(destination) if isfuture(destination) else None + + def _set_state(future, other): + if isfuture(future): + _copy_future_state(other, future) + else: + _set_concurrent_future_state(future, other) + + def _call_check_cancel(destination): + if destination.cancelled(): + if source_loop is None or source_loop is dest_loop: + source.cancel() + else: + source_loop.call_soon_threadsafe(source.cancel) + + def _call_set_state(source): + if (destination.cancelled() and + dest_loop is not None and dest_loop.is_closed()): + return + if dest_loop is None or dest_loop is source_loop: + _set_state(destination, source) + else: + dest_loop.call_soon_threadsafe(_set_state, destination, source) + + destination.add_done_callback(_call_check_cancel) + source.add_done_callback(_call_set_state) + + +def wrap_future(future, *, loop=None): + """Wrap concurrent.futures.Future object.""" + if isfuture(future): + return future + assert isinstance(future, concurrent.futures.Future), \ + f'concurrent.futures.Future is expected, got {future!r}' + if loop is None: + loop = events.get_event_loop() + new_future = loop.create_future() + _chain_future(future, new_future) + return new_future + + +try: + import _asyncio +except ImportError: + pass +else: + # _CFuture is needed for tests. + Future = _CFuture = _asyncio.Future diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/locks.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/locks.py new file mode 100644 index 0000000000000000000000000000000000000000..d94daeb5a173f574f8509a366f1bcb9337e487ba --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/locks.py @@ -0,0 +1,534 @@ +"""Synchronization primitives.""" + +__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore') + +import collections +import types +import warnings + +from . import events +from . import futures +from . import exceptions +from .import coroutines + + +class _ContextManager: + """Context manager. + + This enables the following idiom for acquiring and releasing a + lock around a block: + + with (yield from lock): + + + while failing loudly when accidentally using: + + with lock: + + + Deprecated, use 'async with' statement: + async with lock: + + """ + + def __init__(self, lock): + self._lock = lock + + def __enter__(self): + # We have no use for the "as ..." clause in the with + # statement for locks. + return None + + def __exit__(self, *args): + try: + self._lock.release() + finally: + self._lock = None # Crudely prevent reuse. + + +class _ContextManagerMixin: + def __enter__(self): + raise RuntimeError( + '"yield from" should be used as context manager expression') + + def __exit__(self, *args): + # This must exist because __enter__ exists, even though that + # always raises; that's how the with-statement works. + pass + + @types.coroutine + def __iter__(self): + # This is not a coroutine. 
It is meant to enable the idiom: + # + # with (yield from lock): + # + # + # as an alternative to: + # + # yield from lock.acquire() + # try: + # + # finally: + # lock.release() + # Deprecated, use 'async with' statement: + # async with lock: + # + warnings.warn("'with (yield from lock)' is deprecated " + "use 'async with lock' instead", + DeprecationWarning, stacklevel=2) + yield from self.acquire() + return _ContextManager(self) + + # The flag is needed for legacy asyncio.iscoroutine() + __iter__._is_coroutine = coroutines._is_coroutine + + async def __acquire_ctx(self): + await self.acquire() + return _ContextManager(self) + + def __await__(self): + warnings.warn("'with await lock' is deprecated " + "use 'async with lock' instead", + DeprecationWarning, stacklevel=2) + # To make "with await lock" work. + return self.__acquire_ctx().__await__() + + async def __aenter__(self): + await self.acquire() + # We have no use for the "as ..." clause in the with + # statement for locks. + return None + + async def __aexit__(self, exc_type, exc, tb): + self.release() + + +class Lock(_ContextManagerMixin): + """Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular coroutine when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another coroutine changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one coroutine is blocked in acquire() waiting for + the state to turn to unlocked, only one coroutine proceeds when a + release() call resets the state to unlocked; first coroutine which + is blocked in acquire() is being processed. + + acquire() is a coroutine and should be called with 'await'. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... + + """ + + def __init__(self, *, loop=None): + self._waiters = None + self._locked = False + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self._locked else 'unlocked' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def locked(self): + """Return True if lock is acquired.""" + return self._locked + + async def acquire(self): + """Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. 
+ """ + if (not self._locked and (self._waiters is None or + all(w.cancelled() for w in self._waiters))): + self._locked = True + return True + + if self._waiters is None: + self._waiters = collections.deque() + fut = self._loop.create_future() + self._waiters.append(fut) + + # Finally block should be called before the CancelledError + # handling as we don't want CancelledError to call + # _wake_up_first() and attempt to wake up itself. + try: + try: + await fut + finally: + self._waiters.remove(fut) + except exceptions.CancelledError: + if not self._locked: + self._wake_up_first() + raise + + self._locked = True + return True + + def release(self): + """Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other coroutines are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + """ + if self._locked: + self._locked = False + self._wake_up_first() + else: + raise RuntimeError('Lock is not acquired.') + + def _wake_up_first(self): + """Wake up the first waiter if it isn't done.""" + if not self._waiters: + return + try: + fut = next(iter(self._waiters)) + except StopIteration: + return + + # .done() necessarily means that a waiter will wake up later on and + # either take the lock, or, if it was cancelled and lock wasn't + # taken already, will hit this again and wake up a new waiter. + if not fut.done(): + fut.set_result(True) + + +class Event: + """Asynchronous equivalent to threading.Event. + + Class implementing event objects. An event manages a flag that can be set + to true with the set() method and reset to false with the clear() method. + The wait() method blocks until the flag is true. The flag is initially + false. + """ + + def __init__(self, *, loop=None): + self._waiters = collections.deque() + self._value = False + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + def __repr__(self): + res = super().__repr__() + extra = 'set' if self._value else 'unset' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def is_set(self): + """Return True if and only if the internal flag is true.""" + return self._value + + def set(self): + """Set the internal flag to true. All coroutines waiting for it to + become true are awakened. Coroutine that call wait() once the flag is + true will not block at all. + """ + if not self._value: + self._value = True + + for fut in self._waiters: + if not fut.done(): + fut.set_result(True) + + def clear(self): + """Reset the internal flag to false. Subsequently, coroutines calling + wait() will block until set() is called to set the internal flag + to true again.""" + self._value = False + + async def wait(self): + """Block until the internal flag is true. + + If the internal flag is true on entry, return True + immediately. Otherwise, block until another coroutine calls + set() to set the flag to true, then return True. + """ + if self._value: + return True + + fut = self._loop.create_future() + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + +class Condition(_ContextManagerMixin): + """Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. 
A condition variable + allows one or more coroutines to wait until they are notified by another + coroutine. + + A new Lock object is created and used as the underlying lock. + """ + + def __init__(self, lock=None, *, loop=None): + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + if lock is None: + lock = Lock(loop=loop) + elif lock._loop is not self._loop: + raise ValueError("loop argument must agree with lock") + + self._lock = lock + # Export the lock's locked(), acquire() and release() methods. + self.locked = lock.locked + self.acquire = lock.acquire + self.release = lock.release + + self._waiters = collections.deque() + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self.locked() else 'unlocked' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + async def wait(self): + """Wait until notified. + + If the calling coroutine has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another coroutine. Once + awakened, it re-acquires the lock and returns True. + """ + if not self.locked(): + raise RuntimeError('cannot wait on un-acquired lock') + + self.release() + try: + fut = self._loop.create_future() + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + finally: + # Must reacquire lock even if wait is cancelled + cancelled = False + while True: + try: + await self.acquire() + break + except exceptions.CancelledError: + cancelled = True + + if cancelled: + raise exceptions.CancelledError + + async def wait_for(self, predicate): + """Wait until a predicate becomes true. + + The predicate should be a callable which result will be + interpreted as a boolean value. The final predicate value is + the return value. + """ + result = predicate() + while not result: + await self.wait() + result = predicate() + return result + + def notify(self, n=1): + """By default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + """ + if not self.locked(): + raise RuntimeError('cannot notify on un-acquired lock') + + idx = 0 + for fut in self._waiters: + if idx >= n: + break + + if not fut.done(): + idx += 1 + fut.set_result(False) + + def notify_all(self): + """Wake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) + + +class Semaphore(_ContextManagerMixin): + """A Semaphore implementation. + + A semaphore manages an internal counter which is decremented by each + acquire() call and incremented by each release() call. 
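# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the Condition protocol
# documented above. The underlying lock must be held both to wait and to
# notify; wait_for() re-checks its predicate each time the waiter wakes.

import asyncio


async def _demo_condition():
    cond = asyncio.Condition()
    items = []

    async def consumer():
        async with cond:
            await cond.wait_for(lambda: items)   # lock released while waiting
            return items.pop()

    task = asyncio.create_task(consumer())
    await asyncio.sleep(0)                       # let the consumer block
    async with cond:
        items.append('ready')
        cond.notify()                            # wake at most one waiter
    print(await task)

asyncio.run(_demo_condition())
# ---------------------------------------------------------------------------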
The counter + can never go below zero; when acquire() finds that it is zero, it blocks, + waiting until some other thread calls release(). + + Semaphores also support the context management protocol. + + The optional argument gives the initial value for the internal + counter; it defaults to 1. If the value given is less than 0, + ValueError is raised. + """ + + def __init__(self, value=1, *, loop=None): + if value < 0: + raise ValueError("Semaphore initial value must be >= 0") + self._value = value + self._waiters = collections.deque() + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self.locked() else f'unlocked, value:{self._value}' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def _wake_up_next(self): + while self._waiters: + waiter = self._waiters.popleft() + if not waiter.done(): + waiter.set_result(None) + return + + def locked(self): + """Returns True if semaphore can not be acquired immediately.""" + return self._value == 0 + + async def acquire(self): + """Acquire a semaphore. + + If the internal counter is larger than zero on entry, + decrement it by one and return True immediately. If it is + zero on entry, block, waiting until some other coroutine has + called release() to make it larger than 0, and then return + True. + """ + while self._value <= 0: + fut = self._loop.create_future() + self._waiters.append(fut) + try: + await fut + except: + # See the similar code in Queue.get. + fut.cancel() + if self._value > 0 and not fut.cancelled(): + self._wake_up_next() + raise + self._value -= 1 + return True + + def release(self): + """Release a semaphore, incrementing the internal counter by one. + When it was zero on entry and another coroutine is waiting for it to + become larger than zero again, wake up that coroutine. + """ + self._value += 1 + self._wake_up_next() + + +class BoundedSemaphore(Semaphore): + """A bounded semaphore implementation. + + This raises ValueError in release() if it would increase the value + above the initial value. + """ + + def __init__(self, value=1, *, loop=None): + if loop: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + self._bound_value = value + super().__init__(value, loop=loop) + + def release(self): + if self._value >= self._bound_value: + raise ValueError('BoundedSemaphore released too many times') + super().release() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/log.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/log.py new file mode 100644 index 0000000000000000000000000000000000000000..23a7074afb0a53aa114341bdbf060ea68f543603 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/log.py @@ -0,0 +1,7 @@ +"""Logging configuration.""" + +import logging + + +# Name the logger after the package. 
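# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the Semaphore defined above,
# capping concurrency at two coroutines inside the guarded section.

import asyncio


async def _demo_semaphore():
    sem = asyncio.Semaphore(2)

    async def job(i):
        async with sem:              # blocks once two jobs hold the semaphore
            await asyncio.sleep(0.01)
            return i

    print(await asyncio.gather(*(job(i) for i in range(5))))

asyncio.run(_demo_semaphore())
# ---------------------------------------------------------------------------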
+logger = logging.getLogger(__package__) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/proactor_events.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/proactor_events.py new file mode 100644 index 0000000000000000000000000000000000000000..3e0a14f6f4224ab8833674449c082bb46e436959 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/proactor_events.py @@ -0,0 +1,868 @@ +"""Event loop using a proactor and related classes. + +A proactor is a "notify-on-completion" multiplexer. Currently a +proactor is only implemented on Windows with IOCP. +""" + +__all__ = 'BaseProactorEventLoop', + +import io +import os +import socket +import warnings +import signal +import threading +import collections + +from . import base_events +from . import constants +from . import futures +from . import exceptions +from . import protocols +from . import sslproto +from . import transports +from . import trsock +from .log import logger + + +def _set_socket_extra(transport, sock): + transport._extra['socket'] = trsock.TransportSocket(sock) + + try: + transport._extra['sockname'] = sock.getsockname() + except socket.error: + if transport._loop.get_debug(): + logger.warning( + "getsockname() failed on %r", sock, exc_info=True) + + if 'peername' not in transport._extra: + try: + transport._extra['peername'] = sock.getpeername() + except socket.error: + # UDP sockets may not have a peer name + transport._extra['peername'] = None + + +class _ProactorBasePipeTransport(transports._FlowControlMixin, + transports.BaseTransport): + """Base class for pipe and socket transports.""" + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super().__init__(extra, loop) + self._set_extra(sock) + self._sock = sock + self.set_protocol(protocol) + self._server = server + self._buffer = None # None or bytearray. + self._read_fut = None + self._write_fut = None + self._pending_write = 0 + self._conn_lost = 0 + self._closing = False # Set when close() called. 
+ self._eof_written = False + if self._server is not None: + self._server._attach() + self._loop.call_soon(self._protocol.connection_made, self) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._sock is None: + info.append('closed') + elif self._closing: + info.append('closing') + if self._sock is not None: + info.append(f'fd={self._sock.fileno()}') + if self._read_fut is not None: + info.append(f'read={self._read_fut!r}') + if self._write_fut is not None: + info.append(f'write={self._write_fut!r}') + if self._buffer: + info.append(f'write_bufsize={len(self._buffer)}') + if self._eof_written: + info.append('EOF written') + return '<{}>'.format(' '.join(info)) + + def _set_extra(self, sock): + self._extra['pipe'] = sock + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._closing: + return + self._closing = True + self._conn_lost += 1 + if not self._buffer and self._write_fut is None: + self._loop.call_soon(self._call_connection_lost, None) + if self._read_fut is not None: + self._read_fut.cancel() + self._read_fut = None + + def __del__(self, _warn=warnings.warn): + if self._sock is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self.close() + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + try: + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + finally: + self._force_close(exc) + + def _force_close(self, exc): + if self._empty_waiter is not None and not self._empty_waiter.done(): + if exc is None: + self._empty_waiter.set_result(None) + else: + self._empty_waiter.set_exception(exc) + if self._closing: + return + self._closing = True + self._conn_lost += 1 + if self._write_fut: + self._write_fut.cancel() + self._write_fut = None + if self._read_fut: + self._read_fut.cancel() + self._read_fut = None + self._pending_write = 0 + self._buffer = None + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + # XXX If there is a pending overlapped read on the other + # end then it may fail with ERROR_NETNAME_DELETED if we + # just close our end. First calling shutdown() seems to + # cure it, but maybe using DisconnectEx() would be better. 
+        if hasattr(self._sock, 'shutdown'):
+            self._sock.shutdown(socket.SHUT_RDWR)
+        self._sock.close()
+        self._sock = None
+        server = self._server
+        if server is not None:
+            server._detach()
+            self._server = None
+
+    def get_write_buffer_size(self):
+        size = self._pending_write
+        if self._buffer is not None:
+            size += len(self._buffer)
+        return size
+
+
+class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
+                                 transports.ReadTransport):
+    """Transport for read pipes."""
+
+    def __init__(self, loop, sock, protocol, waiter=None,
+                 extra=None, server=None):
+        self._pending_data = None
+        self._paused = True
+        super().__init__(loop, sock, protocol, waiter, extra, server)
+
+        self._loop.call_soon(self._loop_reading)
+        self._paused = False
+
+    def is_reading(self):
+        return not self._paused and not self._closing
+
+    def pause_reading(self):
+        if self._closing or self._paused:
+            return
+        self._paused = True
+
+        # bpo-33694: Don't cancel self._read_fut because cancelling an
+        # overlapped WSASend() silently loses data with the current proactor
+        # implementation.
+        #
+        # If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
+        # completed (even if HasOverlappedIoCompleted() returns 0), but
+        # Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
+        # error. Once the overlapped is ignored, the IOCP loop will ignore
+        # the completion I/O event and so will not read the result of the
+        # overlapped WSARecv().
+
+        if self._loop.get_debug():
+            logger.debug("%r pauses reading", self)
+
+    def resume_reading(self):
+        if self._closing or not self._paused:
+            return
+
+        self._paused = False
+        if self._read_fut is None:
+            self._loop.call_soon(self._loop_reading, None)
+
+        data = self._pending_data
+        self._pending_data = None
+        if data is not None:
+            # Call the protocol method after calling _loop_reading(),
+            # since the protocol can decide to pause reading again.
+            self._loop.call_soon(self._data_received, data)
+
+        if self._loop.get_debug():
+            logger.debug("%r resumes reading", self)
+
+    def _eof_received(self):
+        if self._loop.get_debug():
+            logger.debug("%r received EOF", self)
+
+        try:
+            keep_open = self._protocol.eof_received()
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            self._fatal_error(
+                exc, 'Fatal error: protocol.eof_received() call failed.')
+            return
+
+        if not keep_open:
+            self.close()
+
+    def _data_received(self, data):
+        if self._paused:
+            # Don't call any protocol method while reading is paused.
+            # The protocol will be called on resume_reading().
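# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: driving the pause_reading()/
# resume_reading() machinery above from a protocol. The host and port are
# hypothetical; this assumes some server is listening there.

import asyncio


class ThrottledReader(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.loop = asyncio.get_event_loop()

    def data_received(self, data):
        # No further data_received() calls arrive until resume_reading()
        # runs; the transport parks at most one pending chunk meanwhile.
        self.transport.pause_reading()
        self.loop.call_later(0.1, self.transport.resume_reading)


async def _connect(host='127.0.0.1', port=8888):   # hypothetical endpoint
    loop = asyncio.get_running_loop()
    await loop.create_connection(ThrottledReader, host, port)
# ---------------------------------------------------------------------------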
+ assert self._pending_data is None + self._pending_data = data + return + + if not data: + self._eof_received() + return + + if isinstance(self._protocol, protocols.BufferedProtocol): + try: + protocols._feed_data_to_buffered_proto(self._protocol, data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, + 'Fatal error: protocol.buffer_updated() ' + 'call failed.') + return + else: + self._protocol.data_received(data) + + def _loop_reading(self, fut=None): + data = None + try: + if fut is not None: + assert self._read_fut is fut or (self._read_fut is None and + self._closing) + self._read_fut = None + if fut.done(): + # deliver data later in "finally" clause + data = fut.result() + else: + # the future will be replaced by next proactor.recv call + fut.cancel() + + if self._closing: + # since close() has been called we ignore any read data + data = None + return + + if data == b'': + # we got end-of-file so no need to reschedule a new read + return + + # bpo-33694: buffer_updated() has currently no fast path because of + # a data loss issue caused by overlapped WSASend() cancellation. + + if not self._paused: + # reschedule a new read + self._read_fut = self._loop._proactor.recv(self._sock, 32768) + except ConnectionAbortedError as exc: + if not self._closing: + self._fatal_error(exc, 'Fatal read error on pipe transport') + elif self._loop.get_debug(): + logger.debug("Read error on pipe transport while closing", + exc_info=True) + except ConnectionResetError as exc: + self._force_close(exc) + except OSError as exc: + self._fatal_error(exc, 'Fatal read error on pipe transport') + except exceptions.CancelledError: + if not self._closing: + raise + else: + if not self._paused: + self._read_fut.add_done_callback(self._loop_reading) + finally: + if data is not None: + self._data_received(data) + + +class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport, + transports.WriteTransport): + """Transport for write pipes.""" + + _start_tls_compatible = True + + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + self._empty_waiter = None + + def write(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError( + f"data argument must be a bytes-like object, " + f"not {type(data).__name__}") + if self._eof_written: + raise RuntimeError('write_eof() already called') + if self._empty_waiter is not None: + raise RuntimeError('unable to write; sendfile is in progress') + + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + # Observable states: + # 1. IDLE: _write_fut and _buffer both None + # 2. WRITING: _write_fut set; _buffer None + # 3. BACKED UP: _write_fut set; _buffer a bytearray + # We always copy the data, so the caller can't modify it + # while we're still waiting for the I/O to happen. + if self._write_fut is None: # IDLE -> WRITING + assert self._buffer is None + # Pass a copy, except if it's already immutable. + self._loop_writing(data=bytes(data)) + elif not self._buffer: # WRITING -> BACKED UP + # Make a mutable copy which we can extend. + self._buffer = bytearray(data) + self._maybe_pause_protocol() + else: # BACKED UP + # Append to buffer (also copies). 
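# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the consumer side of the
# IDLE/WRITING/BACKED UP machinery above. A protocol that implements
# pause_writing()/resume_writing() is told when the transport's buffer
# crosses the high- and low-water marks, so it can stop producing.

import asyncio


class BackpressureAware(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.can_send = True

    def pause_writing(self):
        self.can_send = False    # buffer went over the high-water mark

    def resume_writing(self):
        self.can_send = True     # buffer drained to the low-water mark
# ---------------------------------------------------------------------------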
+ self._buffer.extend(data) + self._maybe_pause_protocol() + + def _loop_writing(self, f=None, data=None): + try: + if f is not None and self._write_fut is None and self._closing: + # XXX most likely self._force_close() has been called, and + # it has set self._write_fut to None. + return + assert f is self._write_fut + self._write_fut = None + self._pending_write = 0 + if f: + f.result() + if data is None: + data = self._buffer + self._buffer = None + if not data: + if self._closing: + self._loop.call_soon(self._call_connection_lost, None) + if self._eof_written: + self._sock.shutdown(socket.SHUT_WR) + # Now that we've reduced the buffer size, tell the + # protocol to resume writing if it was paused. Note that + # we do this last since the callback is called immediately + # and it may add more data to the buffer (even causing the + # protocol to be paused again). + self._maybe_resume_protocol() + else: + self._write_fut = self._loop._proactor.send(self._sock, data) + if not self._write_fut.done(): + assert self._pending_write == 0 + self._pending_write = len(data) + self._write_fut.add_done_callback(self._loop_writing) + self._maybe_pause_protocol() + else: + self._write_fut.add_done_callback(self._loop_writing) + if self._empty_waiter is not None and self._write_fut is None: + self._empty_waiter.set_result(None) + except ConnectionResetError as exc: + self._force_close(exc) + except OSError as exc: + self._fatal_error(exc, 'Fatal write error on pipe transport') + + def can_write_eof(self): + return True + + def write_eof(self): + self.close() + + def abort(self): + self._force_close(None) + + def _make_empty_waiter(self): + if self._empty_waiter is not None: + raise RuntimeError("Empty waiter is already set") + self._empty_waiter = self._loop.create_future() + if self._write_fut is None: + self._empty_waiter.set_result(None) + return self._empty_waiter + + def _reset_empty_waiter(self): + self._empty_waiter = None + + +class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport): + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + self._read_fut = self._loop._proactor.recv(self._sock, 16) + self._read_fut.add_done_callback(self._pipe_closed) + + def _pipe_closed(self, fut): + if fut.cancelled(): + # the transport has been closed + return + assert fut.result() == b'' + if self._closing: + assert self._read_fut is None + return + assert fut is self._read_fut, (fut, self._read_fut) + self._read_fut = None + if self._write_fut is not None: + self._force_close(BrokenPipeError()) + else: + self.close() + + +class _ProactorDatagramTransport(_ProactorBasePipeTransport): + max_size = 256 * 1024 + def __init__(self, loop, sock, protocol, address=None, + waiter=None, extra=None): + self._address = address + self._empty_waiter = None + # We don't need to call _protocol.connection_made() since our base + # constructor does it for us. 
+ super().__init__(loop, sock, protocol, waiter=waiter, extra=extra) + + # The base constructor sets _buffer = None, so we set it here + self._buffer = collections.deque() + self._loop.call_soon(self._loop_reading) + + def _set_extra(self, sock): + _set_socket_extra(self, sock) + + def get_write_buffer_size(self): + return sum(len(data) for data, _ in self._buffer) + + def abort(self): + self._force_close(None) + + def sendto(self, data, addr=None): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError('data argument must be bytes-like object (%r)', + type(data)) + + if not data: + return + + if self._address is not None and addr not in (None, self._address): + raise ValueError( + f'Invalid address: must be None or {self._address}') + + if self._conn_lost and self._address: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.sendto() raised exception.') + self._conn_lost += 1 + return + + # Ensure that what we buffer is immutable. + self._buffer.append((bytes(data), addr)) + + if self._write_fut is None: + # No current write operations are active, kick one off + self._loop_writing() + # else: A write operation is already kicked off + + self._maybe_pause_protocol() + + def _loop_writing(self, fut=None): + try: + if self._conn_lost: + return + + assert fut is self._write_fut + self._write_fut = None + if fut: + # We are in a _loop_writing() done callback, get the result + fut.result() + + if not self._buffer or (self._conn_lost and self._address): + # The connection has been closed + if self._closing: + self._loop.call_soon(self._call_connection_lost, None) + return + + data, addr = self._buffer.popleft() + if self._address is not None: + self._write_fut = self._loop._proactor.send(self._sock, + data) + else: + self._write_fut = self._loop._proactor.sendto(self._sock, + data, + addr=addr) + except OSError as exc: + self._protocol.error_received(exc) + except Exception as exc: + self._fatal_error(exc, 'Fatal write error on datagram transport') + else: + self._write_fut.add_done_callback(self._loop_writing) + self._maybe_resume_protocol() + + def _loop_reading(self, fut=None): + data = None + try: + if self._conn_lost: + return + + assert self._read_fut is fut or (self._read_fut is None and + self._closing) + + self._read_fut = None + if fut is not None: + res = fut.result() + + if self._closing: + # since close() has been called we ignore any read data + data = None + return + + if self._address is not None: + data, addr = res, self._address + else: + data, addr = res + + if self._conn_lost: + return + if self._address is not None: + self._read_fut = self._loop._proactor.recv(self._sock, + self.max_size) + else: + self._read_fut = self._loop._proactor.recvfrom(self._sock, + self.max_size) + except OSError as exc: + self._protocol.error_received(exc) + except exceptions.CancelledError: + if not self._closing: + raise + else: + if self._read_fut is not None: + self._read_fut.add_done_callback(self._loop_reading) + finally: + if data: + self._protocol.datagram_received(data, addr) + + +class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport, + _ProactorBaseWritePipeTransport, + transports.Transport): + """Transport for duplex pipes.""" + + def can_write_eof(self): + return False + + def write_eof(self): + raise NotImplementedError + + +class _ProactorSocketTransport(_ProactorReadPipeTransport, + _ProactorBaseWritePipeTransport, + transports.Transport): + """Transport for connected sockets.""" + + _sendfile_compatible = 
constants._SendfileMode.TRY_NATIVE + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super().__init__(loop, sock, protocol, waiter, extra, server) + base_events._set_nodelay(sock) + + def _set_extra(self, sock): + _set_socket_extra(self, sock) + + def can_write_eof(self): + return True + + def write_eof(self): + if self._closing or self._eof_written: + return + self._eof_written = True + if self._write_fut is None: + self._sock.shutdown(socket.SHUT_WR) + + +class BaseProactorEventLoop(base_events.BaseEventLoop): + + def __init__(self, proactor): + super().__init__() + logger.debug('Using proactor: %s', proactor.__class__.__name__) + self._proactor = proactor + self._selector = proactor # convenient alias + self._self_reading_future = None + self._accept_futures = {} # socket file descriptor => Future + proactor.set_loop(self) + self._make_self_pipe() + if threading.current_thread() is threading.main_thread(): + # wakeup fd can only be installed to a file descriptor from the main thread + signal.set_wakeup_fd(self._csock.fileno()) + + def _make_socket_transport(self, sock, protocol, waiter=None, + extra=None, server=None): + return _ProactorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport( + self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None, + ssl_handshake_timeout=None): + ssl_protocol = sslproto.SSLProtocol( + self, protocol, sslcontext, waiter, + server_side, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + _ProactorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + return _ProactorDatagramTransport(self, sock, protocol, address, + waiter, extra) + + def _make_duplex_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + return _ProactorDuplexPipeTransport(self, + sock, protocol, waiter, extra) + + def _make_read_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra) + + def _make_write_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + # We want connection_lost() to be called when other end closes + return _ProactorWritePipeTransport(self, + sock, protocol, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + + if threading.current_thread() is threading.main_thread(): + signal.set_wakeup_fd(-1) + # Call these methods before closing the event loop (before calling + # BaseEventLoop.close), because they can schedule callbacks with + # call_soon(), which is forbidden when the event loop is closed. 
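# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: the rule stated above from the
# caller's side. After close(), scheduling a callback raises RuntimeError.

import asyncio

_loop = asyncio.new_event_loop()
_loop.run_until_complete(asyncio.sleep(0))
_loop.close()
try:
    _loop.call_soon(print, 'too late')
except RuntimeError as exc:
    print(exc)                     # "Event loop is closed"
# ---------------------------------------------------------------------------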
+ self._stop_accept_futures() + self._close_self_pipe() + self._proactor.close() + self._proactor = None + self._selector = None + + # Close the event loop + super().close() + + async def sock_recv(self, sock, n): + return await self._proactor.recv(sock, n) + + async def sock_recv_into(self, sock, buf): + return await self._proactor.recv_into(sock, buf) + + async def sock_sendall(self, sock, data): + return await self._proactor.send(sock, data) + + async def sock_connect(self, sock, address): + return await self._proactor.connect(sock, address) + + async def sock_accept(self, sock): + return await self._proactor.accept(sock) + + async def _sock_sendfile_native(self, sock, file, offset, count): + try: + fileno = file.fileno() + except (AttributeError, io.UnsupportedOperation) as err: + raise exceptions.SendfileNotAvailableError("not a regular file") + try: + fsize = os.fstat(fileno).st_size + except OSError as err: + raise exceptions.SendfileNotAvailableError("not a regular file") + blocksize = count if count else fsize + if not blocksize: + return 0 # empty file + + blocksize = min(blocksize, 0xffff_ffff) + end_pos = min(offset + count, fsize) if count else fsize + offset = min(offset, fsize) + total_sent = 0 + try: + while True: + blocksize = min(end_pos - offset, blocksize) + if blocksize <= 0: + return total_sent + await self._proactor.sendfile(sock, file, offset, blocksize) + offset += blocksize + total_sent += blocksize + finally: + if total_sent > 0: + file.seek(offset) + + async def _sendfile_native(self, transp, file, offset, count): + resume_reading = transp.is_reading() + transp.pause_reading() + await transp._make_empty_waiter() + try: + return await self.sock_sendfile(transp._sock, file, offset, count, + fallback=False) + finally: + transp._reset_empty_waiter() + if resume_reading: + transp.resume_reading() + + def _close_self_pipe(self): + if self._self_reading_future is not None: + self._self_reading_future.cancel() + self._self_reading_future = None + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. :-) + self._ssock, self._csock = socket.socketpair() + self._ssock.setblocking(False) + self._csock.setblocking(False) + self._internal_fds += 1 + + def _loop_self_reading(self, f=None): + try: + if f is not None: + f.result() # may raise + if self._self_reading_future is not f: + # When we scheduled this Future, we assigned it to + # _self_reading_future. If it's not there now, something has + # tried to cancel the loop while this callback was still in the + # queue (see windows_events.ProactorEventLoop.run_forever). In + # that case stop here instead of continuing to schedule a new + # iteration. + return + f = self._proactor.recv(self._ssock, 4096) + except exceptions.CancelledError: + # _close_self_pipe() has been called, stop waiting for data + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self.call_exception_handler({ + 'message': 'Error on reading from the event loop self pipe', + 'exception': exc, + 'loop': self, + }) + else: + self._self_reading_future = f + f.add_done_callback(self._loop_self_reading) + + def _write_to_self(self): + # This may be called from a different thread, possibly after + # _close_self_pipe() has been called or even while it is + # running. Guard for self._csock being None or closed. 
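# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the patch: what the self-pipe above
# enables. call_soon_threadsafe() writes a byte to the self-pipe so a loop
# blocked waiting for I/O notices the callback queued from another thread.

import asyncio
import threading


async def _demo_threadsafe():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()

    def from_other_thread():
        loop.call_soon_threadsafe(fut.set_result, 'woken from thread')

    threading.Thread(target=from_other_thread).start()
    print(await fut)

asyncio.run(_demo_threadsafe())
# ---------------------------------------------------------------------------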
When + # a socket is closed, send() raises OSError (with errno set to + # EBADF, but let's not rely on the exact error code). + csock = self._csock + if csock is None: + return + + try: + csock.send(b'\0') + except OSError: + if self._debug: + logger.debug("Fail to write a null byte into the " + "self-pipe socket", + exc_info=True) + + def _start_serving(self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100, + ssl_handshake_timeout=None): + + def loop(f=None): + try: + if f is not None: + conn, addr = f.result() + if self._debug: + logger.debug("%r got a new connection from %r: %r", + server, addr, conn) + protocol = protocol_factory() + if sslcontext is not None: + self._make_ssl_transport( + conn, protocol, sslcontext, server_side=True, + extra={'peername': addr}, server=server, + ssl_handshake_timeout=ssl_handshake_timeout) + else: + self._make_socket_transport( + conn, protocol, + extra={'peername': addr}, server=server) + if self.is_closed(): + return + f = self._proactor.accept(sock) + except OSError as exc: + if sock.fileno() != -1: + self.call_exception_handler({ + 'message': 'Accept failed on a socket', + 'exception': exc, + 'socket': trsock.TransportSocket(sock), + }) + sock.close() + elif self._debug: + logger.debug("Accept failed on socket %r", + sock, exc_info=True) + except exceptions.CancelledError: + sock.close() + else: + self._accept_futures[sock.fileno()] = f + f.add_done_callback(loop) + + self.call_soon(loop) + + def _process_events(self, event_list): + # Events are processed in the IocpProactor._poll() method + pass + + def _stop_accept_futures(self): + for future in self._accept_futures.values(): + future.cancel() + self._accept_futures.clear() + + def _stop_serving(self, sock): + future = self._accept_futures.pop(sock.fileno(), None) + if future: + future.cancel() + self._proactor._stop_serving(sock) + sock.close() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/protocols.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/protocols.py new file mode 100644 index 0000000000000000000000000000000000000000..69fa43e8b6511a9599f467eb522e03e6a72e4862 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/protocols.py @@ -0,0 +1,220 @@ +"""Abstract Protocol base classes.""" + +__all__ = ( + 'BaseProtocol', 'Protocol', 'DatagramProtocol', + 'SubprocessProtocol', 'BufferedProtocol', +) + + +class BaseProtocol: + """Common base class for protocol interfaces. + + Usually user implements protocols that derived from BaseProtocol + like Protocol or ProcessProtocol. + + The only case when BaseProtocol should be implemented directly is + write-only transport like write pipe + """ + + __slots__ = () + + def connection_made(self, transport): + """Called when a connection is made. + + The argument is the transport representing the pipe connection. + To receive data, wait for data_received() calls. + When the connection is closed, connection_lost() is called. + """ + + def connection_lost(self, exc): + """Called when the connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + + def pause_writing(self): + """Called when the transport's buffer goes over the high-water mark. 
+
+        Pause and resume calls are paired -- pause_writing() is called
+        once when the buffer goes strictly over the high-water mark
+        (even if subsequent writes increase the buffer size even
+        more), and eventually resume_writing() is called once when the
+        buffer size reaches the low-water mark.
+
+        Note that if the buffer size equals the high-water mark,
+        pause_writing() is not called -- it must go strictly over.
+        Conversely, resume_writing() is called when the buffer size is
+        equal to or lower than the low-water mark. These end conditions
+        are important to ensure that things go as expected when either
+        mark is zero.
+
+        NOTE: This is the only Protocol callback that is not called
+        through EventLoop.call_soon() -- if it were, it would have no
+        effect when it's most needed (when the app keeps writing
+        without yielding until pause_writing() is called).
+        """
+
+    def resume_writing(self):
+        """Called when the transport's buffer drains below the low-water mark.
+
+        See pause_writing() for details.
+        """
+
+
+class Protocol(BaseProtocol):
+    """Interface for stream protocol.
+
+    The user should implement this interface. They can inherit from
+    this class but don't need to. The implementations here do
+    nothing (they don't raise exceptions).
+
+    When the user wants to request a transport, they pass a protocol
+    factory to a utility function (e.g., EventLoop.create_connection()).
+
+    When the connection is made successfully, connection_made() is
+    called with a suitable transport object. Then data_received()
+    will be called 0 or more times with data (bytes) received from the
+    transport; finally, connection_lost() will be called exactly once
+    with either an exception object or None as an argument.
+
+    State machine of calls:
+
+      start -> CM [-> DR*] [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * DR: data_received()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    __slots__ = ()
+
+    def data_received(self, data):
+        """Called when some data is received.
+
+        The argument is a bytes object.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+
+        If this returns a false value (including None), the transport
+        will close itself. If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+
+
+class BufferedProtocol(BaseProtocol):
+    """Interface for stream protocol with manual buffer control.
+
+    Important: this has been added to asyncio in Python 3.7
+    *on a provisional basis*! Consider it as an experimental API that
+    might be changed or removed in Python 3.8.
+
+    Event methods, such as `create_server` and `create_connection`,
+    accept factories that return protocols that implement this interface.
+
+    The idea of BufferedProtocol is that it allows the protocol to
+    manually allocate and control the receive buffer. Event loops can
+    then use the buffer provided by the protocol to avoid unnecessary
+    data copies. This can result in noticeable performance improvement
+    for protocols that receive large amounts of data. Sophisticated
+    protocols can allocate the buffer only once at creation time.
+
+    State machine of calls:
+
+      start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * GB: get_buffer()
+    * BU: buffer_updated()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    __slots__ = ()
+
+    def get_buffer(self, sizehint):
+        """Called to allocate a new receive buffer.
+
+        *sizehint* is a recommended minimal size for the returned
+        buffer. When set to -1, the buffer size can be arbitrary.
+
+        Must return an object that implements the
+        :ref:`buffer protocol <bufferobjects>`.
+        It is an error to return a zero-sized buffer.
+        """
+
+    def buffer_updated(self, nbytes):
+        """Called when the buffer was updated with the received data.
+
+        *nbytes* is the total number of bytes that were written to
+        the buffer.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+
+        If this returns a false value (including None), the transport
+        will close itself. If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+
+
+class DatagramProtocol(BaseProtocol):
+    """Interface for datagram protocol."""
+
+    __slots__ = ()
+
+    def datagram_received(self, data, addr):
+        """Called when some datagram is received."""
+
+    def error_received(self, exc):
+        """Called when a send or receive operation raises an OSError.
+
+        (Other than BlockingIOError or InterruptedError.)
+        """
+
+
+class SubprocessProtocol(BaseProtocol):
+    """Interface for protocol for subprocess calls."""
+
+    __slots__ = ()
+
+    def pipe_data_received(self, fd, data):
+        """Called when the subprocess writes data into stdout/stderr pipe.
+
+        fd is the integer file descriptor.
+        data is a bytes object.
+        """
+
+    def pipe_connection_lost(self, fd, exc):
+        """Called when a file descriptor associated with the child process is
+        closed.
+
+        fd is the integer file descriptor that was closed.
+        """
+
+    def process_exited(self):
+        """Called when subprocess has exited."""
+
+
+def _feed_data_to_buffered_proto(proto, data):
+    data_len = len(data)
+    while data_len:
+        buf = proto.get_buffer(data_len)
+        buf_len = len(buf)
+        if not buf_len:
+            raise RuntimeError('get_buffer() returned an empty buffer')
+
+        if buf_len >= data_len:
+            buf[:data_len] = data
+            proto.buffer_updated(data_len)
+            return
+        else:
+            buf[:buf_len] = data[:buf_len]
+            proto.buffer_updated(buf_len)
+            data = data[buf_len:]
+            data_len = len(data)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/queues.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/queues.py
new file mode 100644
index 0000000000000000000000000000000000000000..390ae9a6821c4d4e3790458c1d5615f396b4fa14
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/queues.py
@@ -0,0 +1,249 @@
+__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')
+
+import collections
+import heapq
+import warnings
+
+from . import events
+from . import locks
+
+
+class QueueEmpty(Exception):
+    """Raised when Queue.get_nowait() is called on an empty Queue."""
+    pass
+
+
+class QueueFull(Exception):
+    """Raised when the Queue.put_nowait() method is called on a full Queue."""
+    pass
+
+
+class Queue:
+    """A queue, useful for coordinating producer and consumer coroutines.
+
+    If maxsize is less than or equal to zero, the queue size is infinite. If
+    it is an integer greater than 0, then "await put()" will block when the
+    queue reaches maxsize, until an item is removed by get().
+
+    Unlike the standard library Queue, you can reliably know this Queue's size
+    with qsize(), since your single-threaded asyncio application won't be
+    interrupted between calling qsize() and doing an operation on the Queue.
+ """ + + def __init__(self, maxsize=0, *, loop=None): + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + self._maxsize = maxsize + + # Futures. + self._getters = collections.deque() + # Futures. + self._putters = collections.deque() + self._unfinished_tasks = 0 + self._finished = locks.Event(loop=loop) + self._finished.set() + self._init(maxsize) + + # These three are overridable in subclasses. + + def _init(self, maxsize): + self._queue = collections.deque() + + def _get(self): + return self._queue.popleft() + + def _put(self, item): + self._queue.append(item) + + # End of the overridable methods. + + def _wakeup_next(self, waiters): + # Wake up the next waiter (if any) that isn't cancelled. + while waiters: + waiter = waiters.popleft() + if not waiter.done(): + waiter.set_result(None) + break + + def __repr__(self): + return f'<{type(self).__name__} at {id(self):#x} {self._format()}>' + + def __str__(self): + return f'<{type(self).__name__} {self._format()}>' + + def _format(self): + result = f'maxsize={self._maxsize!r}' + if getattr(self, '_queue', None): + result += f' _queue={list(self._queue)!r}' + if self._getters: + result += f' _getters[{len(self._getters)}]' + if self._putters: + result += f' _putters[{len(self._putters)}]' + if self._unfinished_tasks: + result += f' tasks={self._unfinished_tasks}' + return result + + def qsize(self): + """Number of items in the queue.""" + return len(self._queue) + + @property + def maxsize(self): + """Number of items allowed in the queue.""" + return self._maxsize + + def empty(self): + """Return True if the queue is empty, False otherwise.""" + return not self._queue + + def full(self): + """Return True if there are maxsize items in the queue. + + Note: if the Queue was initialized with maxsize=0 (the default), + then full() is never True. + """ + if self._maxsize <= 0: + return False + else: + return self.qsize() >= self._maxsize + + async def put(self, item): + """Put an item into the queue. + + Put an item into the queue. If the queue is full, wait until a free + slot is available before adding item. + """ + while self.full(): + putter = self._loop.create_future() + self._putters.append(putter) + try: + await putter + except: + putter.cancel() # Just in case putter is not done yet. + try: + # Clean self._putters from canceled putters. + self._putters.remove(putter) + except ValueError: + # The putter could be removed from self._putters by a + # previous get_nowait call. + pass + if not self.full() and not putter.cancelled(): + # We were woken up by get_nowait(), but can't take + # the call. Wake up the next in line. + self._wakeup_next(self._putters) + raise + return self.put_nowait(item) + + def put_nowait(self, item): + """Put an item into the queue without blocking. + + If no free slot is immediately available, raise QueueFull. + """ + if self.full(): + raise QueueFull + self._put(item) + self._unfinished_tasks += 1 + self._finished.clear() + self._wakeup_next(self._getters) + + async def get(self): + """Remove and return an item from the queue. + + If queue is empty, wait until an item is available. + """ + while self.empty(): + getter = self._loop.create_future() + self._getters.append(getter) + try: + await getter + except: + getter.cancel() # Just in case getter is not done yet. + try: + # Clean self._getters from canceled getters. 
+ self._getters.remove(getter) + except ValueError: + # The getter could be removed from self._getters by a + # previous put_nowait call. + pass + if not self.empty() and not getter.cancelled(): + # We were woken up by put_nowait(), but can't take + # the call. Wake up the next in line. + self._wakeup_next(self._getters) + raise + return self.get_nowait() + + def get_nowait(self): + """Remove and return an item from the queue. + + Return an item if one is immediately available, else raise QueueEmpty. + """ + if self.empty(): + raise QueueEmpty + item = self._get() + self._wakeup_next(self._putters) + return item + + def task_done(self): + """Indicate that a formerly enqueued task is complete. + + Used by queue consumers. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items have + been processed (meaning that a task_done() call was received for every + item that had been put() into the queue). + + Raises ValueError if called more times than there were items placed in + the queue. + """ + if self._unfinished_tasks <= 0: + raise ValueError('task_done() called too many times') + self._unfinished_tasks -= 1 + if self._unfinished_tasks == 0: + self._finished.set() + + async def join(self): + """Block until all items in the queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer calls task_done() to + indicate that the item was retrieved and all work on it is complete. + When the count of unfinished tasks drops to zero, join() unblocks. + """ + if self._unfinished_tasks > 0: + await self._finished.wait() + + +class PriorityQueue(Queue): + """A subclass of Queue; retrieves entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + """ + + def _init(self, maxsize): + self._queue = [] + + def _put(self, item, heappush=heapq.heappush): + heappush(self._queue, item) + + def _get(self, heappop=heapq.heappop): + return heappop(self._queue) + + +class LifoQueue(Queue): + """A subclass of Queue that retrieves most recently added entries first.""" + + def _init(self, maxsize): + self._queue = [] + + def _put(self, item): + self._queue.append(item) + + def _get(self): + return self._queue.pop() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/runners.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/runners.py new file mode 100644 index 0000000000000000000000000000000000000000..d2fa8b7f13861c5eedbb2cdea2845f6246cc2435 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/runners.py @@ -0,0 +1,73 @@ +__all__ = 'run', + +from . import coroutines +from . import events +from . import tasks + + +def run(main, *, debug=None): + """Execute the coroutine and return the result. + + This function runs the passed coroutine, taking care of + managing the asyncio event loop and finalizing asynchronous + generators. + + This function cannot be called when another asyncio event loop is + running in the same thread. + + If debug is True, the event loop will be run in debug mode. + + This function always creates a new event loop and closes it at the end. + It should be used as a main entry point for asyncio programs, and should + ideally only be called once. 
+ + Example: + + async def main(): + await asyncio.sleep(1) + print('hello') + + asyncio.run(main()) + """ + if events._get_running_loop() is not None: + raise RuntimeError( + "asyncio.run() cannot be called from a running event loop") + + if not coroutines.iscoroutine(main): + raise ValueError("a coroutine was expected, got {!r}".format(main)) + + loop = events.new_event_loop() + try: + events.set_event_loop(loop) + if debug is not None: + loop.set_debug(debug) + return loop.run_until_complete(main) + finally: + try: + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + finally: + events.set_event_loop(None) + loop.close() + + +def _cancel_all_tasks(loop): + to_cancel = tasks.all_tasks(loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete( + tasks.gather(*to_cancel, loop=loop, return_exceptions=True)) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler({ + 'message': 'unhandled exception during asyncio.run() shutdown', + 'exception': task.exception(), + 'task': task, + }) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/selector_events.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/selector_events.py new file mode 100644 index 0000000000000000000000000000000000000000..6ba307163e1f7679732b3ba4b965f2f2b36f67a6 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/selector_events.py @@ -0,0 +1,1091 @@ +"""Event loop using a selector and related classes. + +A selector is a "notify-when-ready" multiplexer. For a subclass which +also includes support for signal handling, see the unix_events sub-module. +""" + +__all__ = 'BaseSelectorEventLoop', + +import collections +import errno +import functools +import selectors +import socket +import warnings +import weakref +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import base_events +from . import constants +from . import events +from . import futures +from . import protocols +from . import sslproto +from . import transports +from . import trsock +from .log import logger + + +def _test_selector_event(selector, fd, event): + # Test if the selector is monitoring 'event' events + # for the file descriptor 'fd'. + try: + key = selector.get_key(fd) + except KeyError: + return False + else: + return bool(key.events & event) + + +def _check_ssl_socket(sock): + if ssl is not None and isinstance(sock, ssl.SSLSocket): + raise TypeError("Socket cannot be of type SSLSocket") + + +class BaseSelectorEventLoop(base_events.BaseEventLoop): + """Selector event loop. + + See events.EventLoop for API specification. 
+ """ + + def __init__(self, selector=None): + super().__init__() + + if selector is None: + selector = selectors.DefaultSelector() + logger.debug('Using selector: %s', selector.__class__.__name__) + self._selector = selector + self._make_self_pipe() + self._transports = weakref.WeakValueDictionary() + + def _make_socket_transport(self, sock, protocol, waiter=None, *, + extra=None, server=None): + return _SelectorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport( + self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + ssl_protocol = sslproto.SSLProtocol( + self, protocol, sslcontext, waiter, + server_side, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + _SelectorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + return _SelectorDatagramTransport(self, sock, protocol, + address, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + self._close_self_pipe() + super().close() + if self._selector is not None: + self._selector.close() + self._selector = None + + def _close_self_pipe(self): + self._remove_reader(self._ssock.fileno()) + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. :-) + self._ssock, self._csock = socket.socketpair() + self._ssock.setblocking(False) + self._csock.setblocking(False) + self._internal_fds += 1 + self._add_reader(self._ssock.fileno(), self._read_from_self) + + def _process_self_data(self, data): + pass + + def _read_from_self(self): + while True: + try: + data = self._ssock.recv(4096) + if not data: + break + self._process_self_data(data) + except InterruptedError: + continue + except BlockingIOError: + break + + def _write_to_self(self): + # This may be called from a different thread, possibly after + # _close_self_pipe() has been called or even while it is + # running. Guard for self._csock being None or closed. When + # a socket is closed, send() raises OSError (with errno set to + # EBADF, but let's not rely on the exact error code). + csock = self._csock + if csock is None: + return + + try: + csock.send(b'\0') + except OSError: + if self._debug: + logger.debug("Fail to write a null byte into the " + "self-pipe socket", + exc_info=True) + + def _start_serving(self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + self._add_reader(sock.fileno(), self._accept_connection, + protocol_factory, sock, sslcontext, server, backlog, + ssl_handshake_timeout) + + def _accept_connection( + self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + # This method is only called once for each event loop tick where the + # listening socket has triggered an EVENT_READ. There may be multiple + # connections waiting for an .accept() so it is called in a loop. + # See https://bugs.python.org/issue27906 for more details. 
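+        # A rough sketch of the effect (illustrative, not executed here):
+        # with the default backlog of 100, a single EVENT_READ wake-up can
+        # drain up to 100 queued connections:
+        #
+        #   for _ in range(backlog):
+        #       conn, addr = sock.accept()   # until it would block
+        #
+        # which amortizes selector overhead during an accept() burst
+        # instead of re-entering the event loop for every connection.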
+        for _ in range(backlog):
+            try:
+                conn, addr = sock.accept()
+                if self._debug:
+                    logger.debug("%r got a new connection from %r: %r",
+                                 server, addr, conn)
+                conn.setblocking(False)
+            except (BlockingIOError, InterruptedError,
+                    ConnectionAbortedError):
+                # Early exit because the socket accept buffer is empty.
+                return None
+            except OSError as exc:
+                # There's nowhere to send the error, so just log it.
+                if exc.errno in (errno.EMFILE, errno.ENFILE,
+                                 errno.ENOBUFS, errno.ENOMEM):
+                    # Some platforms (e.g. Linux) keep reporting the FD as
+                    # ready, so we remove the read handler temporarily.
+                    # We'll try again in a while.
+                    self.call_exception_handler({
+                        'message': 'socket.accept() out of system resource',
+                        'exception': exc,
+                        'socket': trsock.TransportSocket(sock),
+                    })
+                    self._remove_reader(sock.fileno())
+                    self.call_later(constants.ACCEPT_RETRY_DELAY,
+                                    self._start_serving,
+                                    protocol_factory, sock, sslcontext, server,
+                                    backlog, ssl_handshake_timeout)
+                else:
+                    raise  # The event loop will catch, log and ignore it.
+            else:
+                extra = {'peername': addr}
+                accept = self._accept_connection2(
+                    protocol_factory, conn, extra, sslcontext, server,
+                    ssl_handshake_timeout)
+                self.create_task(accept)
+
+    async def _accept_connection2(
+            self, protocol_factory, conn, extra,
+            sslcontext=None, server=None,
+            ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
+        protocol = None
+        transport = None
+        try:
+            protocol = protocol_factory()
+            waiter = self.create_future()
+            if sslcontext:
+                transport = self._make_ssl_transport(
+                    conn, protocol, sslcontext, waiter=waiter,
+                    server_side=True, extra=extra, server=server,
+                    ssl_handshake_timeout=ssl_handshake_timeout)
+            else:
+                transport = self._make_socket_transport(
+                    conn, protocol, waiter=waiter, extra=extra,
+                    server=server)
+
+            try:
+                await waiter
+            except BaseException:
+                transport.close()
+                raise
+                # It's now up to the protocol to handle the connection.
+
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as exc:
+            if self._debug:
+                context = {
+                    'message':
+                        'Error on transport creation for incoming connection',
+                    'exception': exc,
+                }
+                if protocol is not None:
+                    context['protocol'] = protocol
+                if transport is not None:
+                    context['transport'] = transport
+                self.call_exception_handler(context)
+
+    def _ensure_fd_no_transport(self, fd):
+        fileno = fd
+        if not isinstance(fileno, int):
+            try:
+                fileno = int(fileno.fileno())
+            except (AttributeError, TypeError, ValueError):
+                # This code matches selectors._fileobj_to_fd function.
+                raise ValueError(f"Invalid file object: {fd!r}") from None
+        try:
+            transport = self._transports[fileno]
+        except KeyError:
+            pass
+        else:
+            if not transport.is_closing():
+                raise RuntimeError(
+                    f'File descriptor {fd!r} is used by transport '
+                    f'{transport!r}')
+
+    def _add_reader(self, fd, callback, *args):
+        self._check_closed()
+        handle = events.Handle(callback, args, self, None)
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, selectors.EVENT_READ,
+                                    (handle, None))
+        else:
+            mask, (reader, writer) = key.events, key.data
+            self._selector.modify(fd, mask | selectors.EVENT_READ,
+                                  (handle, writer))
+            if reader is not None:
+                reader.cancel()
+
+    def _remove_reader(self, fd):
+        if self.is_closed():
+            return False
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+        else:
+            mask, (reader, writer) = key.events, key.data
+            mask &= ~selectors.EVENT_READ
+            if not mask:
+                self._selector.unregister(fd)
+            else:
+                self._selector.modify(fd, mask, (None, writer))
+
+            if reader is not None:
+                reader.cancel()
+                return True
+            else:
+                return False
+
+    def _add_writer(self, fd, callback, *args):
+        self._check_closed()
+        handle = events.Handle(callback, args, self, None)
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            self._selector.register(fd, selectors.EVENT_WRITE,
+                                    (None, handle))
+        else:
+            mask, (reader, writer) = key.events, key.data
+            self._selector.modify(fd, mask | selectors.EVENT_WRITE,
+                                  (reader, handle))
+            if writer is not None:
+                writer.cancel()
+
+    def _remove_writer(self, fd):
+        """Remove a writer callback."""
+        if self.is_closed():
+            return False
+        try:
+            key = self._selector.get_key(fd)
+        except KeyError:
+            return False
+        else:
+            mask, (reader, writer) = key.events, key.data
+            # Remove both writer and connector.
+            mask &= ~selectors.EVENT_WRITE
+            if not mask:
+                self._selector.unregister(fd)
+            else:
+                self._selector.modify(fd, mask, (reader, None))
+
+            if writer is not None:
+                writer.cancel()
+                return True
+            else:
+                return False
+
+    def add_reader(self, fd, callback, *args):
+        """Add a reader callback."""
+        self._ensure_fd_no_transport(fd)
+        return self._add_reader(fd, callback, *args)
+
+    def remove_reader(self, fd):
+        """Remove a reader callback."""
+        self._ensure_fd_no_transport(fd)
+        return self._remove_reader(fd)
+
+    def add_writer(self, fd, callback, *args):
+        """Add a writer callback."""
+        self._ensure_fd_no_transport(fd)
+        return self._add_writer(fd, callback, *args)
+
+    def remove_writer(self, fd):
+        """Remove a writer callback."""
+        self._ensure_fd_no_transport(fd)
+        return self._remove_writer(fd)
+
+    async def sock_recv(self, sock, n):
+        """Receive data from the socket.
+
+        The return value is a bytes object representing the data received.
+        The maximum amount of data to be received at once is specified by
+        *n*.
+        """
+        _check_ssl_socket(sock)
+        if self._debug and sock.gettimeout() != 0:
+            raise ValueError("the socket must be non-blocking")
+        try:
+            return sock.recv(n)
+        except (BlockingIOError, InterruptedError):
+            pass
+        fut = self.create_future()
+        fd = sock.fileno()
+        self.add_reader(fd, self._sock_recv, fut, sock, n)
+        fut.add_done_callback(
+            functools.partial(self._sock_read_done, fd))
+        return await fut
+
+    def _sock_read_done(self, fd, fut):
+        self.remove_reader(fd)
+
+    def _sock_recv(self, fut, sock, n):
+        # _sock_recv() can add itself as an I/O callback if the operation
+        # can't be done immediately. Don't use it directly, call sock_recv().
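+        # Caller-side usage of the public coroutine, for reference
+        # (a sketch; assumes an already-connected non-blocking socket):
+        #
+        #   sock.setblocking(False)
+        #   data = await loop.sock_recv(sock, 4096)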
+ if fut.done(): + return + try: + data = sock.recv(n) + except (BlockingIOError, InterruptedError): + return # try again next time + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(data) + + async def sock_recv_into(self, sock, buf): + """Receive data from the socket. + + The received data is written into *buf* (a writable buffer). + The return value is the number of bytes written. + """ + _check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + try: + return sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + pass + fut = self.create_future() + fd = sock.fileno() + self.add_reader(fd, self._sock_recv_into, fut, sock, buf) + fut.add_done_callback( + functools.partial(self._sock_read_done, fd)) + return await fut + + def _sock_recv_into(self, fut, sock, buf): + # _sock_recv_into() can add itself as an I/O callback if the operation + # can't be done immediately. Don't use it directly, call + # sock_recv_into(). + if fut.done(): + return + try: + nbytes = sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + return # try again next time + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(nbytes) + + async def sock_sendall(self, sock, data): + """Send data to the socket. + + The socket must be connected to a remote socket. This method continues + to send data from data until either all data has been sent or an + error occurs. None is returned on success. On error, an exception is + raised, and there is no way to determine how much data, if any, was + successfully processed by the receiving end of the connection. + """ + _check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + try: + n = sock.send(data) + except (BlockingIOError, InterruptedError): + n = 0 + + if n == len(data): + # all data sent + return + + fut = self.create_future() + fd = sock.fileno() + fut.add_done_callback( + functools.partial(self._sock_write_done, fd)) + # use a trick with a list in closure to store a mutable state + self.add_writer(fd, self._sock_sendall, fut, sock, + memoryview(data), [n]) + return await fut + + def _sock_sendall(self, fut, sock, view, pos): + if fut.done(): + # Future cancellation can be scheduled on previous loop iteration + return + start = pos[0] + try: + n = sock.send(view[start:]) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + return + + start += n + + if start == len(view): + fut.set_result(None) + else: + pos[0] = start + + async def sock_connect(self, sock, address): + """Connect to a remote socket at address. + + This method is a coroutine. 
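+
+        Example (illustrative; the address is a placeholder)::
+
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            sock.setblocking(False)
+            await loop.sock_connect(sock, ('127.0.0.1', 8080))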
+ """ + _check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + + if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX: + resolved = await self._ensure_resolved( + address, family=sock.family, proto=sock.proto, loop=self) + _, _, _, _, address = resolved[0] + + fut = self.create_future() + self._sock_connect(fut, sock, address) + return await fut + + def _sock_connect(self, fut, sock, address): + fd = sock.fileno() + try: + sock.connect(address) + except (BlockingIOError, InterruptedError): + # Issue #23618: When the C function connect() fails with EINTR, the + # connection runs in background. We have to wait until the socket + # becomes writable to be notified when the connection succeed or + # fails. + fut.add_done_callback( + functools.partial(self._sock_write_done, fd)) + self.add_writer(fd, self._sock_connect_cb, fut, sock, address) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(None) + + def _sock_write_done(self, fd, fut): + self.remove_writer(fd) + + def _sock_connect_cb(self, fut, sock, address): + if fut.done(): + return + + try: + err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # Jump to any except clause below. + raise OSError(err, f'Connect call failed {address}') + except (BlockingIOError, InterruptedError): + # socket is still registered, the callback will be retried later + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(None) + + async def sock_accept(self, sock): + """Accept a connection. + + The socket must be bound to an address and listening for connections. + The return value is a pair (conn, address) where conn is a new socket + object usable to send and receive data on the connection, and address + is the address bound to the socket on the other end of the connection. 
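+
+        Example (illustrative accept loop; handle() is a placeholder
+        coroutine)::
+
+            srv = socket.socket()
+            srv.bind(('127.0.0.1', 0))
+            srv.listen()
+            srv.setblocking(False)
+            while True:
+                conn, addr = await loop.sock_accept(srv)
+                loop.create_task(handle(conn))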
+ """ + _check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + fut = self.create_future() + self._sock_accept(fut, False, sock) + return await fut + + def _sock_accept(self, fut, registered, sock): + fd = sock.fileno() + if registered: + self.remove_reader(fd) + if fut.done(): + return + try: + conn, address = sock.accept() + conn.setblocking(False) + except (BlockingIOError, InterruptedError): + self.add_reader(fd, self._sock_accept, fut, True, sock) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result((conn, address)) + + async def _sendfile_native(self, transp, file, offset, count): + del self._transports[transp._sock_fd] + resume_reading = transp.is_reading() + transp.pause_reading() + await transp._make_empty_waiter() + try: + return await self.sock_sendfile(transp._sock, file, offset, count, + fallback=False) + finally: + transp._reset_empty_waiter() + if resume_reading: + transp.resume_reading() + self._transports[transp._sock_fd] = transp + + def _process_events(self, event_list): + for key, mask in event_list: + fileobj, (reader, writer) = key.fileobj, key.data + if mask & selectors.EVENT_READ and reader is not None: + if reader._cancelled: + self._remove_reader(fileobj) + else: + self._add_callback(reader) + if mask & selectors.EVENT_WRITE and writer is not None: + if writer._cancelled: + self._remove_writer(fileobj) + else: + self._add_callback(writer) + + def _stop_serving(self, sock): + self._remove_reader(sock.fileno()) + sock.close() + + +class _SelectorTransport(transports._FlowControlMixin, + transports.Transport): + + max_size = 256 * 1024 # Buffer size passed to recv(). + + _buffer_factory = bytearray # Constructs initial value for self._buffer. + + # Attribute used in the destructor: it must be set even if the constructor + # is not called (see _SelectorSslTransport which may start by raising an + # exception) + _sock = None + + def __init__(self, loop, sock, protocol, extra=None, server=None): + super().__init__(extra, loop) + self._extra['socket'] = trsock.TransportSocket(sock) + try: + self._extra['sockname'] = sock.getsockname() + except OSError: + self._extra['sockname'] = None + if 'peername' not in self._extra: + try: + self._extra['peername'] = sock.getpeername() + except socket.error: + self._extra['peername'] = None + self._sock = sock + self._sock_fd = sock.fileno() + + self._protocol_connected = False + self.set_protocol(protocol) + + self._server = server + self._buffer = self._buffer_factory() + self._conn_lost = 0 # Set when call to connection_lost scheduled. + self._closing = False # Set when close() called. 
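+        # Note: when a Server is passed in, _attach()/_detach() below keep
+        # its count of live transports, so Server.wait_closed() does not
+        # return while connections created by it are still open.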
+ if self._server is not None: + self._server._attach() + loop._transports[self._sock_fd] = self + + def __repr__(self): + info = [self.__class__.__name__] + if self._sock is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append(f'fd={self._sock_fd}') + # test if the transport was closed + if self._loop is not None and not self._loop.is_closed(): + polling = _test_selector_event(self._loop._selector, + self._sock_fd, selectors.EVENT_READ) + if polling: + info.append('read=polling') + else: + info.append('read=idle') + + polling = _test_selector_event(self._loop._selector, + self._sock_fd, + selectors.EVENT_WRITE) + if polling: + state = 'polling' + else: + state = 'idle' + + bufsize = self.get_write_buffer_size() + info.append(f'write=<{state}, bufsize={bufsize}>') + return '<{}>'.format(' '.join(info)) + + def abort(self): + self._force_close(None) + + def set_protocol(self, protocol): + self._protocol = protocol + self._protocol_connected = True + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._closing: + return + self._closing = True + self._loop._remove_reader(self._sock_fd) + if not self._buffer: + self._conn_lost += 1 + self._loop._remove_writer(self._sock_fd) + self._loop.call_soon(self._call_connection_lost, None) + + def __del__(self, _warn=warnings.warn): + if self._sock is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._sock.close() + + def _fatal_error(self, exc, message='Fatal error on transport'): + # Should be called from exception handler only. + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._force_close(exc) + + def _force_close(self, exc): + if self._conn_lost: + return + if self._buffer: + self._buffer.clear() + self._loop._remove_writer(self._sock_fd) + if not self._closing: + self._closing = True + self._loop._remove_reader(self._sock_fd) + self._conn_lost += 1 + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + if self._protocol_connected: + self._protocol.connection_lost(exc) + finally: + self._sock.close() + self._sock = None + self._protocol = None + self._loop = None + server = self._server + if server is not None: + server._detach() + self._server = None + + def get_write_buffer_size(self): + return len(self._buffer) + + def _add_reader(self, fd, callback, *args): + if self._closing: + return + + self._loop._add_reader(fd, callback, *args) + + +class _SelectorSocketTransport(_SelectorTransport): + + _start_tls_compatible = True + _sendfile_compatible = constants._SendfileMode.TRY_NATIVE + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + + self._read_ready_cb = None + super().__init__(loop, sock, protocol, extra, server) + self._eof = False + self._paused = False + self._empty_waiter = None + + # Disable the Nagle algorithm -- small writes will be + # sent without waiting for the TCP ACK. This generally + # decreases the latency (in some cases significantly.) 
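+        # _set_nodelay() is roughly equivalent to this sketch, guarded so
+        # it only applies to TCP stream sockets:
+        #   sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)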
+ base_events._set_nodelay(self._sock) + + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def set_protocol(self, protocol): + if isinstance(protocol, protocols.BufferedProtocol): + self._read_ready_cb = self._read_ready__get_buffer + else: + self._read_ready_cb = self._read_ready__data_received + + super().set_protocol(protocol) + + def is_reading(self): + return not self._paused and not self._closing + + def pause_reading(self): + if self._closing or self._paused: + return + self._paused = True + self._loop._remove_reader(self._sock_fd) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if self._closing or not self._paused: + return + self._paused = False + self._add_reader(self._sock_fd, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _read_ready(self): + self._read_ready_cb() + + def _read_ready__get_buffer(self): + if self._conn_lost: + return + + try: + buf = self._protocol.get_buffer(-1) + if not len(buf): + raise RuntimeError('get_buffer() returned an empty buffer') + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.get_buffer() call failed.') + return + + try: + nbytes = self._sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on socket transport') + return + + if not nbytes: + self._read_ready__on_eof() + return + + try: + self._protocol.buffer_updated(nbytes) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.buffer_updated() call failed.') + + def _read_ready__data_received(self): + if self._conn_lost: + return + try: + data = self._sock.recv(self.max_size) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on socket transport') + return + + if not data: + self._read_ready__on_eof() + return + + try: + self._protocol.data_received(data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.data_received() call failed.') + + def _read_ready__on_eof(self): + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + + try: + keep_open = self._protocol.eof_received() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.eof_received() call failed.') + return + + if keep_open: + # We're keeping the connection open so the + # protocol can write more, but we still can't + # receive more, so remove the reader callback. 
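+            # A protocol opts into this half-closed mode like so (sketch):
+            #
+            #   def eof_received(self):
+            #       return True   # keep the transport open for writes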
+ self._loop._remove_reader(self._sock_fd) + else: + self.close() + + def write(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f'data argument must be a bytes-like object, ' + f'not {type(data).__name__!r}') + if self._eof: + raise RuntimeError('Cannot call write() after write_eof()') + if self._empty_waiter is not None: + raise RuntimeError('unable to write; sendfile is in progress') + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Optimization: try to send now. + try: + n = self._sock.send(data) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal write error on socket transport') + return + else: + data = data[n:] + if not data: + return + # Not all was written; register write handler. + self._loop._add_writer(self._sock_fd, self._write_ready) + + # Add it to the buffer. + self._buffer.extend(data) + self._maybe_pause_protocol() + + def _write_ready(self): + assert self._buffer, 'Data should not be empty' + + if self._conn_lost: + return + try: + n = self._sock.send(self._buffer) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._loop._remove_writer(self._sock_fd) + self._buffer.clear() + self._fatal_error(exc, 'Fatal write error on socket transport') + if self._empty_waiter is not None: + self._empty_waiter.set_exception(exc) + else: + if n: + del self._buffer[:n] + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._empty_waiter is not None: + self._empty_waiter.set_result(None) + if self._closing: + self._call_connection_lost(None) + elif self._eof: + self._sock.shutdown(socket.SHUT_WR) + + def write_eof(self): + if self._closing or self._eof: + return + self._eof = True + if not self._buffer: + self._sock.shutdown(socket.SHUT_WR) + + def can_write_eof(self): + return True + + def _call_connection_lost(self, exc): + super()._call_connection_lost(exc) + if self._empty_waiter is not None: + self._empty_waiter.set_exception( + ConnectionError("Connection is closed by peer")) + + def _make_empty_waiter(self): + if self._empty_waiter is not None: + raise RuntimeError("Empty waiter is already set") + self._empty_waiter = self._loop.create_future() + if not self._buffer: + self._empty_waiter.set_result(None) + return self._empty_waiter + + def _reset_empty_waiter(self): + self._empty_waiter = None + + +class _SelectorDatagramTransport(_SelectorTransport): + + _buffer_factory = collections.deque + + def __init__(self, loop, sock, protocol, address=None, + waiter=None, extra=None): + super().__init__(loop, sock, protocol, extra) + self._address = address + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def get_write_buffer_size(self): + return sum(len(data) for data, _ in self._buffer) + + def _read_ready(self): + if self._conn_lost: + return + try: + data, addr = 
self._sock.recvfrom(self.max_size) + except (BlockingIOError, InterruptedError): + pass + except OSError as exc: + self._protocol.error_received(exc) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on datagram transport') + else: + self._protocol.datagram_received(data, addr) + + def sendto(self, data, addr=None): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f'data argument must be a bytes-like object, ' + f'not {type(data).__name__!r}') + if not data: + return + + if self._address: + if addr not in (None, self._address): + raise ValueError( + f'Invalid address: must be None or {self._address}') + addr = self._address + + if self._conn_lost and self._address: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. + try: + if self._extra['peername']: + self._sock.send(data) + else: + self._sock.sendto(data, addr) + return + except (BlockingIOError, InterruptedError): + self._loop._add_writer(self._sock_fd, self._sendto_ready) + except OSError as exc: + self._protocol.error_received(exc) + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal write error on datagram transport') + return + + # Ensure that what we buffer is immutable. + self._buffer.append((bytes(data), addr)) + self._maybe_pause_protocol() + + def _sendto_ready(self): + while self._buffer: + data, addr = self._buffer.popleft() + try: + if self._extra['peername']: + self._sock.send(data) + else: + self._sock.sendto(data, addr) + except (BlockingIOError, InterruptedError): + self._buffer.appendleft((data, addr)) # Try again later. + break + except OSError as exc: + self._protocol.error_received(exc) + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal write error on datagram transport') + return + + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/sslproto.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/sslproto.py new file mode 100644 index 0000000000000000000000000000000000000000..3eca6b4a39128bdd6c71af96162df632cc1099f3 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/sslproto.py @@ -0,0 +1,734 @@ +import collections +import warnings +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import base_events +from . import constants +from . import protocols +from . import transports +from .log import logger + + +def _create_transport_context(server_side, server_hostname): + if server_side: + raise ValueError('Server side SSL needs a valid SSLContext') + + # Client side may pass ssl=True to use a default + # context; in that case the sslcontext passed is None. + # The default is secure for client connections. + # Python 3.4+: use up-to-date strong settings. + sslcontext = ssl.create_default_context() + if not server_hostname: + sslcontext.check_hostname = False + return sslcontext + + +# States of an _SSLPipe. 
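+#
+# The pipe walks through these states (as implemented below):
+#
+#   _UNWRAPPED --do_handshake()--> _DO_HANDSHAKE --handshake OK--> _WRAPPED
+#   _WRAPPED --shutdown()--> _SHUTDOWN --close_notify unwrapped--> _UNWRAPPED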
+_UNWRAPPED = "UNWRAPPED" +_DO_HANDSHAKE = "DO_HANDSHAKE" +_WRAPPED = "WRAPPED" +_SHUTDOWN = "SHUTDOWN" + + +class _SSLPipe(object): + """An SSL "Pipe". + + An SSL pipe allows you to communicate with an SSL/TLS protocol instance + through memory buffers. It can be used to implement a security layer for an + existing connection where you don't have access to the connection's file + descriptor, or for some reason you don't want to use it. + + An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode, + data is passed through untransformed. In wrapped mode, application level + data is encrypted to SSL record level data and vice versa. The SSL record + level is the lowest level in the SSL protocol suite and is what travels + as-is over the wire. + + An SslPipe initially is in "unwrapped" mode. To start SSL, call + do_handshake(). To shutdown SSL again, call unwrap(). + """ + + max_size = 256 * 1024 # Buffer size passed to read() + + def __init__(self, context, server_side, server_hostname=None): + """ + The *context* argument specifies the ssl.SSLContext to use. + + The *server_side* argument indicates whether this is a server side or + client side transport. + + The optional *server_hostname* argument can be used to specify the + hostname you are connecting to. You may only specify this parameter if + the _ssl module supports Server Name Indication (SNI). + """ + self._context = context + self._server_side = server_side + self._server_hostname = server_hostname + self._state = _UNWRAPPED + self._incoming = ssl.MemoryBIO() + self._outgoing = ssl.MemoryBIO() + self._sslobj = None + self._need_ssldata = False + self._handshake_cb = None + self._shutdown_cb = None + + @property + def context(self): + """The SSL context passed to the constructor.""" + return self._context + + @property + def ssl_object(self): + """The internal ssl.SSLObject instance. + + Return None if the pipe is not wrapped. + """ + return self._sslobj + + @property + def need_ssldata(self): + """Whether more record level data is needed to complete a handshake + that is currently in progress.""" + return self._need_ssldata + + @property + def wrapped(self): + """ + Whether a security layer is currently in effect. + + Return False during handshake. + """ + return self._state == _WRAPPED + + def do_handshake(self, callback=None): + """Start the SSL handshake. + + Return a list of ssldata. A ssldata element is a list of buffers + + The optional *callback* argument can be used to install a callback that + will be called when the handshake is complete. The callback will be + called with None if successful, else an exception instance. + """ + if self._state != _UNWRAPPED: + raise RuntimeError('handshake in progress or completed') + self._sslobj = self._context.wrap_bio( + self._incoming, self._outgoing, + server_side=self._server_side, + server_hostname=self._server_hostname) + self._state = _DO_HANDSHAKE + self._handshake_cb = callback + ssldata, appdata = self.feed_ssldata(b'', only_handshake=True) + assert len(appdata) == 0 + return ssldata + + def shutdown(self, callback=None): + """Start the SSL shutdown sequence. + + Return a list of ssldata. A ssldata element is a list of buffers + + The optional *callback* argument can be used to install a callback that + will be called when the shutdown is complete. The callback will be + called without arguments. 
+ """ + if self._state == _UNWRAPPED: + raise RuntimeError('no security layer present') + if self._state == _SHUTDOWN: + raise RuntimeError('shutdown in progress') + assert self._state in (_WRAPPED, _DO_HANDSHAKE) + self._state = _SHUTDOWN + self._shutdown_cb = callback + ssldata, appdata = self.feed_ssldata(b'') + assert appdata == [] or appdata == [b''] + return ssldata + + def feed_eof(self): + """Send a potentially "ragged" EOF. + + This method will raise an SSL_ERROR_EOF exception if the EOF is + unexpected. + """ + self._incoming.write_eof() + ssldata, appdata = self.feed_ssldata(b'') + assert appdata == [] or appdata == [b''] + + def feed_ssldata(self, data, only_handshake=False): + """Feed SSL record level data into the pipe. + + The data must be a bytes instance. It is OK to send an empty bytes + instance. This can be used to get ssldata for a handshake initiated by + this endpoint. + + Return a (ssldata, appdata) tuple. The ssldata element is a list of + buffers containing SSL data that needs to be sent to the remote SSL. + + The appdata element is a list of buffers containing plaintext data that + needs to be forwarded to the application. The appdata list may contain + an empty buffer indicating an SSL "close_notify" alert. This alert must + be acknowledged by calling shutdown(). + """ + if self._state == _UNWRAPPED: + # If unwrapped, pass plaintext data straight through. + if data: + appdata = [data] + else: + appdata = [] + return ([], appdata) + + self._need_ssldata = False + if data: + self._incoming.write(data) + + ssldata = [] + appdata = [] + try: + if self._state == _DO_HANDSHAKE: + # Call do_handshake() until it doesn't raise anymore. + self._sslobj.do_handshake() + self._state = _WRAPPED + if self._handshake_cb: + self._handshake_cb(None) + if only_handshake: + return (ssldata, appdata) + # Handshake done: execute the wrapped block + + if self._state == _WRAPPED: + # Main state: read data from SSL until close_notify + while True: + chunk = self._sslobj.read(self.max_size) + appdata.append(chunk) + if not chunk: # close_notify + break + + elif self._state == _SHUTDOWN: + # Call shutdown() until it doesn't raise anymore. + self._sslobj.unwrap() + self._sslobj = None + self._state = _UNWRAPPED + if self._shutdown_cb: + self._shutdown_cb() + + elif self._state == _UNWRAPPED: + # Drain possible plaintext data after close_notify. + appdata.append(self._incoming.read()) + except (ssl.SSLError, ssl.CertificateError) as exc: + exc_errno = getattr(exc, 'errno', None) + if exc_errno not in ( + ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE, + ssl.SSL_ERROR_SYSCALL): + if self._state == _DO_HANDSHAKE and self._handshake_cb: + self._handshake_cb(exc) + raise + self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ) + + # Check for record level data that needs to be sent back. + # Happens for the initial handshake and renegotiations. + if self._outgoing.pending: + ssldata.append(self._outgoing.read()) + return (ssldata, appdata) + + def feed_appdata(self, data, offset=0): + """Feed plaintext data into the pipe. + + Return an (ssldata, offset) tuple. The ssldata element is a list of + buffers containing record level data that needs to be sent to the + remote SSL instance. The offset is the number of plaintext bytes that + were processed, which may be less than the length of data. + + NOTE: In case of short writes, this call MUST be retried with the SAME + buffer passed into the *data* argument (i.e. the id() must be the + same). This is an OpenSSL requirement. 
A further particularity is that + a short write will always have offset == 0, because the _ssl module + does not enable partial writes. And even though the offset is zero, + there will still be encrypted data in ssldata. + """ + assert 0 <= offset <= len(data) + if self._state == _UNWRAPPED: + # pass through data in unwrapped mode + if offset < len(data): + ssldata = [data[offset:]] + else: + ssldata = [] + return (ssldata, len(data)) + + ssldata = [] + view = memoryview(data) + while True: + self._need_ssldata = False + try: + if offset < len(view): + offset += self._sslobj.write(view[offset:]) + except ssl.SSLError as exc: + # It is not allowed to call write() after unwrap() until the + # close_notify is acknowledged. We return the condition to the + # caller as a short write. + exc_errno = getattr(exc, 'errno', None) + if exc.reason == 'PROTOCOL_IS_SHUTDOWN': + exc_errno = exc.errno = ssl.SSL_ERROR_WANT_READ + if exc_errno not in (ssl.SSL_ERROR_WANT_READ, + ssl.SSL_ERROR_WANT_WRITE, + ssl.SSL_ERROR_SYSCALL): + raise + self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ) + + # See if there's any record level data back for us. + if self._outgoing.pending: + ssldata.append(self._outgoing.read()) + if offset == len(view) or self._need_ssldata: + break + return (ssldata, offset) + + +class _SSLProtocolTransport(transports._FlowControlMixin, + transports.Transport): + + _sendfile_compatible = constants._SendfileMode.FALLBACK + + def __init__(self, loop, ssl_protocol): + self._loop = loop + # SSLProtocol instance + self._ssl_protocol = ssl_protocol + self._closed = False + + def get_extra_info(self, name, default=None): + """Get optional transport information.""" + return self._ssl_protocol._get_extra_info(name, default) + + def set_protocol(self, protocol): + self._ssl_protocol._set_app_protocol(protocol) + + def get_protocol(self): + return self._ssl_protocol._app_protocol + + def is_closing(self): + return self._closed + + def close(self): + """Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) called + with None as its argument. + """ + self._closed = True + self._ssl_protocol._start_shutdown() + + def __del__(self, _warn=warnings.warn): + if not self._closed: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self.close() + + def is_reading(self): + tr = self._ssl_protocol._transport + if tr is None: + raise RuntimeError('SSL transport has not been initialized yet') + return tr.is_reading() + + def pause_reading(self): + """Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. + """ + self._ssl_protocol._transport.pause_reading() + + def resume_reading(self): + """Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + """ + self._ssl_protocol._transport.resume_reading() + + def set_write_buffer_limits(self, high=None, low=None): + """Set the high- and low-water limits for write flow control. + + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. 
If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + """ + self._ssl_protocol._transport.set_write_buffer_limits(high, low) + + def get_write_buffer_size(self): + """Return the current size of the write buffer.""" + return self._ssl_protocol._transport.get_write_buffer_size() + + @property + def _protocol_paused(self): + # Required for sendfile fallback pause_writing/resume_writing logic + return self._ssl_protocol._transport._protocol_paused + + def write(self, data): + """Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + """ + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f"data: expecting a bytes-like instance, " + f"got {type(data).__name__}") + if not data: + return + self._ssl_protocol._write_appdata(data) + + def can_write_eof(self): + """Return True if this transport supports write_eof(), False if not.""" + return False + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + self._ssl_protocol._abort() + self._closed = True + + +class SSLProtocol(protocols.Protocol): + """SSL protocol. + + Implementation of SSL on top of a socket using incoming and outgoing + buffers which are ssl.MemoryBIO objects. + """ + + def __init__(self, loop, app_protocol, sslcontext, waiter, + server_side=False, server_hostname=None, + call_connection_made=True, + ssl_handshake_timeout=None): + if ssl is None: + raise RuntimeError('stdlib ssl module not available') + + if ssl_handshake_timeout is None: + ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT + elif ssl_handshake_timeout <= 0: + raise ValueError( + f"ssl_handshake_timeout should be a positive number, " + f"got {ssl_handshake_timeout}") + + if not sslcontext: + sslcontext = _create_transport_context( + server_side, server_hostname) + + self._server_side = server_side + if server_hostname and not server_side: + self._server_hostname = server_hostname + else: + self._server_hostname = None + self._sslcontext = sslcontext + # SSL-specific extra info. More info are set when the handshake + # completes. 
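+        # (after the handshake, _on_handshake_complete() also adds
+        # 'peercert', 'cipher', 'compression' and 'ssl_object')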
+ self._extra = dict(sslcontext=sslcontext) + + # App data write buffering + self._write_backlog = collections.deque() + self._write_buffer_size = 0 + + self._waiter = waiter + self._loop = loop + self._set_app_protocol(app_protocol) + self._app_transport = _SSLProtocolTransport(self._loop, self) + # _SSLPipe instance (None until the connection is made) + self._sslpipe = None + self._session_established = False + self._in_handshake = False + self._in_shutdown = False + # transport, ex: SelectorSocketTransport + self._transport = None + self._call_connection_made = call_connection_made + self._ssl_handshake_timeout = ssl_handshake_timeout + + def _set_app_protocol(self, app_protocol): + self._app_protocol = app_protocol + self._app_protocol_is_buffer = \ + isinstance(app_protocol, protocols.BufferedProtocol) + + def _wakeup_waiter(self, exc=None): + if self._waiter is None: + return + if not self._waiter.cancelled(): + if exc is not None: + self._waiter.set_exception(exc) + else: + self._waiter.set_result(None) + self._waiter = None + + def connection_made(self, transport): + """Called when the low-level connection is made. + + Start the SSL handshake. + """ + self._transport = transport + self._sslpipe = _SSLPipe(self._sslcontext, + self._server_side, + self._server_hostname) + self._start_handshake() + + def connection_lost(self, exc): + """Called when the low-level connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + if self._session_established: + self._session_established = False + self._loop.call_soon(self._app_protocol.connection_lost, exc) + else: + # Most likely an exception occurred while in SSL handshake. + # Just mark the app transport as closed so that its __del__ + # doesn't complain. + if self._app_transport is not None: + self._app_transport._closed = True + self._transport = None + self._app_transport = None + if getattr(self, '_handshake_timeout_handle', None): + self._handshake_timeout_handle.cancel() + self._wakeup_waiter(exc) + self._app_protocol = None + self._sslpipe = None + + def pause_writing(self): + """Called when the low-level transport's buffer goes over + the high-water mark. + """ + self._app_protocol.pause_writing() + + def resume_writing(self): + """Called when the low-level transport's buffer drains below + the low-water mark. + """ + self._app_protocol.resume_writing() + + def data_received(self, data): + """Called when some SSL data is received. + + The argument is a bytes object. + """ + if self._sslpipe is None: + # transport closing, sslpipe is destroyed + return + + try: + ssldata, appdata = self._sslpipe.feed_ssldata(data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as e: + self._fatal_error(e, 'SSL error in data received') + return + + for chunk in ssldata: + self._transport.write(chunk) + + for chunk in appdata: + if chunk: + try: + if self._app_protocol_is_buffer: + protocols._feed_data_to_buffered_proto( + self._app_protocol, chunk) + else: + self._app_protocol.data_received(chunk) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as ex: + self._fatal_error( + ex, 'application protocol failed to receive SSL data') + return + else: + self._start_shutdown() + break + + def eof_received(self): + """Called when the other end of the low-level stream + is half-closed. + + If this returns a false value (including None), the transport + will close itself. 
If it returns a true value, closing the + transport is up to the protocol. + """ + try: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + + self._wakeup_waiter(ConnectionResetError) + + if not self._in_handshake: + keep_open = self._app_protocol.eof_received() + if keep_open: + logger.warning('returning true from eof_received() ' + 'has no effect when using ssl') + finally: + self._transport.close() + + def _get_extra_info(self, name, default=None): + if name in self._extra: + return self._extra[name] + elif self._transport is not None: + return self._transport.get_extra_info(name, default) + else: + return default + + def _start_shutdown(self): + if self._in_shutdown: + return + if self._in_handshake: + self._abort() + else: + self._in_shutdown = True + self._write_appdata(b'') + + def _write_appdata(self, data): + self._write_backlog.append((data, 0)) + self._write_buffer_size += len(data) + self._process_write_backlog() + + def _start_handshake(self): + if self._loop.get_debug(): + logger.debug("%r starts SSL handshake", self) + self._handshake_start_time = self._loop.time() + else: + self._handshake_start_time = None + self._in_handshake = True + # (b'', 1) is a special value in _process_write_backlog() to do + # the SSL handshake + self._write_backlog.append((b'', 1)) + self._handshake_timeout_handle = \ + self._loop.call_later(self._ssl_handshake_timeout, + self._check_handshake_timeout) + self._process_write_backlog() + + def _check_handshake_timeout(self): + if self._in_handshake is True: + msg = ( + f"SSL handshake is taking longer than " + f"{self._ssl_handshake_timeout} seconds: " + f"aborting the connection" + ) + self._fatal_error(ConnectionAbortedError(msg)) + + def _on_handshake_complete(self, handshake_exc): + self._in_handshake = False + self._handshake_timeout_handle.cancel() + + sslobj = self._sslpipe.ssl_object + try: + if handshake_exc is not None: + raise handshake_exc + + peercert = sslobj.getpeercert() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if isinstance(exc, ssl.CertificateError): + msg = 'SSL handshake failed on verifying the certificate' + else: + msg = 'SSL handshake failed' + self._fatal_error(exc, msg) + return + + if self._loop.get_debug(): + dt = self._loop.time() - self._handshake_start_time + logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3) + + # Add extra info that becomes available after handshake. + self._extra.update(peercert=peercert, + cipher=sslobj.cipher(), + compression=sslobj.compression(), + ssl_object=sslobj, + ) + if self._call_connection_made: + self._app_protocol.connection_made(self._app_transport) + self._wakeup_waiter() + self._session_established = True + # In case transport.write() was already called. Don't call + # immediately _process_write_backlog(), but schedule it: + # _on_handshake_complete() can be called indirectly from + # _process_write_backlog(), and _process_write_backlog() is not + # reentrant. + self._loop.call_soon(self._process_write_backlog) + + def _process_write_backlog(self): + # Try to make progress on the write backlog. 
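+        # Each backlog entry is a (data, offset) pair; two sentinel
+        # encodings are used (see _start_handshake()/_start_shutdown()):
+        #   (b'', 1) -> perform the SSL handshake
+        #   (b'', 0) -> perform the SSL shutdown sequence
+        # anything else is application data fed through the SSL pipe.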
+ if self._transport is None or self._sslpipe is None: + return + + try: + for i in range(len(self._write_backlog)): + data, offset = self._write_backlog[0] + if data: + ssldata, offset = self._sslpipe.feed_appdata(data, offset) + elif offset: + ssldata = self._sslpipe.do_handshake( + self._on_handshake_complete) + offset = 1 + else: + ssldata = self._sslpipe.shutdown(self._finalize) + offset = 1 + + for chunk in ssldata: + self._transport.write(chunk) + + if offset < len(data): + self._write_backlog[0] = (data, offset) + # A short write means that a write is blocked on a read + # We need to enable reading if it is paused! + assert self._sslpipe.need_ssldata + if self._transport._paused: + self._transport.resume_reading() + break + + # An entire chunk from the backlog was processed. We can + # delete it and reduce the outstanding buffer size. + del self._write_backlog[0] + self._write_buffer_size -= len(data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if self._in_handshake: + # Exceptions will be re-raised in _on_handshake_complete. + self._on_handshake_complete(exc) + else: + self._fatal_error(exc, 'Fatal error on SSL transport') + + def _fatal_error(self, exc, message='Fatal error on transport'): + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self._transport, + 'protocol': self, + }) + if self._transport: + self._transport._force_close(exc) + + def _finalize(self): + self._sslpipe = None + + if self._transport is not None: + self._transport.close() + + def _abort(self): + try: + if self._transport is not None: + self._transport.abort() + finally: + self._finalize() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/staggered.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/staggered.py new file mode 100644 index 0000000000000000000000000000000000000000..451a53a16f3831611b6fc9b55f9eff5468924793 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/staggered.py @@ -0,0 +1,149 @@ +"""Support for running coroutines in parallel with staggered start times.""" + +__all__ = 'staggered_race', + +import contextlib +import typing + +from . import events +from . import exceptions as exceptions_mod +from . import locks +from . import tasks + + +async def staggered_race( + coro_fns: typing.Iterable[typing.Callable[[], typing.Awaitable]], + delay: typing.Optional[float], + *, + loop: events.AbstractEventLoop = None, +) -> typing.Tuple[ + typing.Any, + typing.Optional[int], + typing.List[typing.Optional[Exception]] +]: + """Run coroutines with staggered start times and take the first to finish. + + This method takes an iterable of coroutine functions. The first one is + started immediately. From then on, whenever the immediately preceding one + fails (raises an exception), or when *delay* seconds has passed, the next + coroutine is started. This continues until one of the coroutines complete + successfully, in which case all others are cancelled, or until all + coroutines fail. + + The coroutines provided should be well-behaved in the following way: + + * They should only ``return`` if completed successfully. + + * They should always raise an exception if they did not complete + successfully. 
In particular, if they handle cancellation, they should
+      probably reraise, like this::
+
+        try:
+            # do work
+        except asyncio.CancelledError:
+            # undo partially completed work
+            raise
+
+    Args:
+        coro_fns: an iterable of coroutine functions, i.e. callables that
+            return a coroutine object when called. Use ``functools.partial``
+            or lambdas to pass arguments.
+
+        delay: amount of time, in seconds, between starting coroutines. If
+            ``None``, the coroutines will run sequentially.
+
+        loop: the event loop to use.
+
+    Returns:
+        tuple *(winner_result, winner_index, exceptions)* where
+
+        - *winner_result*: the result of the winning coroutine, or ``None``
+          if no coroutines won.
+
+        - *winner_index*: the index of the winning coroutine in
+          ``coro_fns``, or ``None`` if no coroutines won. Since a winning
+          coroutine may legitimately return ``None`` on success,
+          *winner_index* can be used to definitively determine whether any
+          coroutine won.
+
+        - *exceptions*: list of exceptions raised by the coroutines.
+          ``len(exceptions)`` is equal to the number of coroutines actually
+          started, and the order is the same as in ``coro_fns``. The winning
+          coroutine's entry is ``None``.
+
+    """
+    # TODO: when we have aiter() and anext(), allow async iterables in coro_fns.
+    loop = loop or events.get_running_loop()
+    enum_coro_fns = enumerate(coro_fns)
+    winner_result = None
+    winner_index = None
+    exceptions = []
+    running_tasks = []
+
+    async def run_one_coro(
+            previous_failed: typing.Optional[locks.Event]) -> None:
+        # Wait for the previous task to finish, or for delay seconds
+        if previous_failed is not None:
+            with contextlib.suppress(exceptions_mod.TimeoutError):
+                # Use asyncio.wait_for() instead of asyncio.wait() here, so
+                # that if we get cancelled at this point, Event.wait() is also
+                # cancelled, otherwise there will be a "Task destroyed but it
+                # is pending" warning later.
+                await tasks.wait_for(previous_failed.wait(), delay)
+        # Get the next coroutine to run
+        try:
+            this_index, coro_fn = next(enum_coro_fns)
+        except StopIteration:
+            return
+        # Start task that will run the next coroutine
+        this_failed = locks.Event()
+        next_task = loop.create_task(run_one_coro(this_failed))
+        running_tasks.append(next_task)
+        assert len(running_tasks) == this_index + 2
+        # Prepare place to put this coroutine's exceptions if not won
+        exceptions.append(None)
+        assert len(exceptions) == this_index + 1
+
+        try:
+            result = await coro_fn()
+        except (SystemExit, KeyboardInterrupt):
+            raise
+        except BaseException as e:
+            exceptions[this_index] = e
+            this_failed.set()  # Kickstart the next coroutine
+        else:
+            # Store winner's results
+            nonlocal winner_index, winner_result
+            assert winner_index is None
+            winner_index = this_index
+            winner_result = result
+            # Cancel all other tasks. We take care to not cancel the current
+            # task as well. If we do so, then since there is no `await` after
+            # this point and CancelledError is usually thrown at an `await`,
+            # we will encounter a curious corner case where the current task
+            # will end up as done() == True, cancelled() == False,
+            # exception() == asyncio.CancelledError.
This behavior is specified in + # https://bugs.python.org/issue30048 + for i, t in enumerate(running_tasks): + if i != this_index: + t.cancel() + + first_task = loop.create_task(run_one_coro(None)) + running_tasks.append(first_task) + try: + # Wait for a growing list of tasks to all finish: poor man's version of + # curio's TaskGroup or trio's nursery + done_count = 0 + while done_count != len(running_tasks): + done, _ = await tasks.wait(running_tasks) + done_count = len(done) + # If run_one_coro raises an unhandled exception, it's probably a + # programming error, and I want to see it. + if __debug__: + for d in done: + if d.done() and not d.cancelled() and d.exception(): + raise d.exception() + return winner_result, winner_index, exceptions + finally: + # Make sure no tasks are left running if we leave this function + for t in running_tasks: + t.cancel() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/streams.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..3c80bb8892590550acda19dc54b040093dbe1114 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/streams.py @@ -0,0 +1,741 @@ +__all__ = ( + 'StreamReader', 'StreamWriter', 'StreamReaderProtocol', + 'open_connection', 'start_server') + +import socket +import sys +import warnings +import weakref + +if hasattr(socket, 'AF_UNIX'): + __all__ += ('open_unix_connection', 'start_unix_server') + +from . import coroutines +from . import events +from . import exceptions +from . import format_helpers +from . import protocols +from .log import logger +from .tasks import sleep + + +_DEFAULT_LIMIT = 2 ** 16 # 64 KiB + + +async def open_connection(host=None, port=None, *, + loop=None, limit=_DEFAULT_LIMIT, **kwds): + """A wrapper for create_connection() returning a (reader, writer) pair. + + The reader returned is a StreamReader instance; the writer is a + StreamWriter instance. + + The arguments are all the usual arguments to create_connection() + except protocol_factory; most common are positional host and port, + with various optional keyword arguments following. + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + (If you want to customize the StreamReader and/or + StreamReaderProtocol classes, just copy the code -- there's + really nothing special here except some convenience.) + """ + if loop is None: + loop = events.get_event_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, loop=loop) + transport, _ = await loop.create_connection( + lambda: protocol, host, port, **kwds) + writer = StreamWriter(transport, protocol, reader, loop) + return reader, writer + + +async def start_server(client_connected_cb, host=None, port=None, *, + loop=None, limit=_DEFAULT_LIMIT, **kwds): + """Start a socket server, call back for each client connected. + + The first parameter, `client_connected_cb`, takes two parameters: + client_reader, client_writer. client_reader is a StreamReader + object, while client_writer is a StreamWriter object. This + parameter can either be a plain callback function or a coroutine; + if it is a coroutine, it will be automatically converted into a + Task. 
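+
+    For example, a minimal echo handler might look like this (an
+    illustrative sketch, not part of this module)::
+
+        async def handle_echo(reader, writer):
+            data = await reader.read(100)
+            writer.write(data)
+            await writer.drain()
+            writer.close()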
+ + The rest of the arguments are all the usual arguments to + loop.create_server() except protocol_factory; most common are + positional host and port, with various optional keyword arguments + following. The return value is the same as loop.create_server(). + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + The return value is the same as loop.create_server(), i.e. a + Server object which can be used to stop the service. + """ + if loop is None: + loop = events.get_event_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + def factory(): + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, client_connected_cb, + loop=loop) + return protocol + + return await loop.create_server(factory, host, port, **kwds) + + +if hasattr(socket, 'AF_UNIX'): + # UNIX Domain Sockets are supported on this platform + + async def open_unix_connection(path=None, *, + loop=None, limit=_DEFAULT_LIMIT, **kwds): + """Similar to `open_connection` but works with UNIX Domain Sockets.""" + if loop is None: + loop = events.get_event_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, loop=loop) + transport, _ = await loop.create_unix_connection( + lambda: protocol, path, **kwds) + writer = StreamWriter(transport, protocol, reader, loop) + return reader, writer + + async def start_unix_server(client_connected_cb, path=None, *, + loop=None, limit=_DEFAULT_LIMIT, **kwds): + """Similar to `start_server` but works with UNIX Domain Sockets.""" + if loop is None: + loop = events.get_event_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + def factory(): + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, client_connected_cb, + loop=loop) + return protocol + + return await loop.create_unix_server(factory, path, **kwds) + + +class FlowControlMixin(protocols.Protocol): + """Reusable flow control logic for StreamWriter.drain(). + + This implements the protocol methods pause_writing(), + resume_writing() and connection_lost(). If the subclass overrides + these it must call the super methods. + + StreamWriter.drain() must wait for _drain_helper() coroutine. + """ + + def __init__(self, loop=None): + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + self._paused = False + self._drain_waiter = None + self._connection_lost = False + + def pause_writing(self): + assert not self._paused + self._paused = True + if self._loop.get_debug(): + logger.debug("%r pauses writing", self) + + def resume_writing(self): + assert self._paused + self._paused = False + if self._loop.get_debug(): + logger.debug("%r resumes writing", self) + + waiter = self._drain_waiter + if waiter is not None: + self._drain_waiter = None + if not waiter.done(): + waiter.set_result(None) + + def connection_lost(self, exc): + self._connection_lost = True + # Wake up the writer if currently paused. 
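+        # The waiter below is created lazily by _drain_helper() while
+        # writing is paused; completing it here (with the error, if any)
+        # unblocks any coroutine suspended in StreamWriter.drain().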
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        if waiter is None:
+            return
+        self._drain_waiter = None
+        if waiter.done():
+            return
+        if exc is None:
+            waiter.set_result(None)
+        else:
+            waiter.set_exception(exc)
+
+    async def _drain_helper(self):
+        if self._connection_lost:
+            raise ConnectionResetError('Connection lost')
+        if not self._paused:
+            return
+        waiter = self._drain_waiter
+        assert waiter is None or waiter.cancelled()
+        waiter = self._loop.create_future()
+        self._drain_waiter = waiter
+        await waiter
+
+    def _get_close_waiter(self, stream):
+        raise NotImplementedError
+
+
+class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
+    """Helper class to adapt between Protocol and StreamReader.
+
+    (This is a helper class instead of making StreamReader itself a
+    Protocol subclass, because the StreamReader has other potential
+    uses, and to prevent the user of the StreamReader from accidentally
+    calling inappropriate methods of the protocol.)
+    """
+
+    _source_traceback = None
+
+    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
+        super().__init__(loop=loop)
+        if stream_reader is not None:
+            self._stream_reader_wr = weakref.ref(stream_reader)
+            self._source_traceback = stream_reader._source_traceback
+        else:
+            self._stream_reader_wr = None
+        if client_connected_cb is not None:
+            # This is a stream created by the `create_server()` function.
+            # Keep a strong reference to the reader until a connection
+            # is established.
+            self._strong_reader = stream_reader
+        self._reject_connection = False
+        self._stream_writer = None
+        self._transport = None
+        self._client_connected_cb = client_connected_cb
+        self._over_ssl = False
+        self._closed = self._loop.create_future()
+
+    @property
+    def _stream_reader(self):
+        if self._stream_reader_wr is None:
+            return None
+        return self._stream_reader_wr()
+
+    def connection_made(self, transport):
+        if self._reject_connection:
+            context = {
+                'message': ('An open stream was garbage collected prior to '
+                            'establishing network connection; '
+                            'call "stream.close()" explicitly.')
+            }
+            if self._source_traceback:
+                context['source_traceback'] = self._source_traceback
+            self._loop.call_exception_handler(context)
+            transport.abort()
+            return
+        self._transport = transport
+        reader = self._stream_reader
+        if reader is not None:
+            reader.set_transport(transport)
+        self._over_ssl = transport.get_extra_info('sslcontext') is not None
+        if self._client_connected_cb is not None:
+            self._stream_writer = StreamWriter(transport, self,
+                                               reader,
+                                               self._loop)
+            res = self._client_connected_cb(reader,
+                                            self._stream_writer)
+            if coroutines.iscoroutine(res):
+                self._loop.create_task(res)
+            self._strong_reader = None
+
+    def connection_lost(self, exc):
+        reader = self._stream_reader
+        if reader is not None:
+            if exc is None:
+                reader.feed_eof()
+            else:
+                reader.set_exception(exc)
+        if not self._closed.done():
+            if exc is None:
+                self._closed.set_result(None)
+            else:
+                self._closed.set_exception(exc)
+        super().connection_lost(exc)
+        self._stream_reader_wr = None
+        self._stream_writer = None
+        self._transport = None
+
+    def data_received(self, data):
+        reader = self._stream_reader
+        if reader is not None:
+            reader.feed_data(data)
+
+    def eof_received(self):
+        reader = self._stream_reader
+        if reader is not None:
+            reader.feed_eof()
+        if self._over_ssl:
+            # Prevent a warning in SSLProtocol.eof_received:
+            # "returning true from eof_received()
+            # has no effect when using ssl"
+            return False
+        return True
+
+    def _get_close_waiter(self,
stream): + return self._closed + + def __del__(self): + # Prevent reports about unhandled exceptions. + # Better than self._closed._log_traceback = False hack + closed = self._closed + if closed.done() and not closed.cancelled(): + closed.exception() + + +class StreamWriter: + """Wraps a Transport. + + This exposes write(), writelines(), [can_]write_eof(), + get_extra_info() and close(). It adds drain() which returns an + optional Future on which you can wait for flow control. It also + adds a transport property which references the Transport + directly. + """ + + def __init__(self, transport, protocol, reader, loop): + self._transport = transport + self._protocol = protocol + # drain() expects that the reader has an exception() method + assert reader is None or isinstance(reader, StreamReader) + self._reader = reader + self._loop = loop + self._complete_fut = self._loop.create_future() + self._complete_fut.set_result(None) + + def __repr__(self): + info = [self.__class__.__name__, f'transport={self._transport!r}'] + if self._reader is not None: + info.append(f'reader={self._reader!r}') + return '<{}>'.format(' '.join(info)) + + @property + def transport(self): + return self._transport + + def write(self, data): + self._transport.write(data) + + def writelines(self, data): + self._transport.writelines(data) + + def write_eof(self): + return self._transport.write_eof() + + def can_write_eof(self): + return self._transport.can_write_eof() + + def close(self): + return self._transport.close() + + def is_closing(self): + return self._transport.is_closing() + + async def wait_closed(self): + await self._protocol._get_close_waiter(self) + + def get_extra_info(self, name, default=None): + return self._transport.get_extra_info(name, default) + + async def drain(self): + """Flush the write buffer. + + The intended use is to write + + w.write(data) + await w.drain() + """ + if self._reader is not None: + exc = self._reader.exception() + if exc is not None: + raise exc + if self._transport.is_closing(): + # Wait for protocol.connection_lost() call + # Raise connection closing error if any, + # ConnectionResetError otherwise + # Yield to the event loop so connection_lost() may be + # called. Without this, _drain_helper() would return + # immediately, and code that calls + # write(...); await drain() + # in a loop would never call connection_lost(), so it + # would not see an error when the socket is closed. + await sleep(0) + await self._protocol._drain_helper() + + +class StreamReader: + + _source_traceback = None + + def __init__(self, limit=_DEFAULT_LIMIT, loop=None): + # The line length limit is a security feature; + # it also doubles as half the buffer limit. + + if limit <= 0: + raise ValueError('Limit cannot be <= 0') + + self._limit = limit + if loop is None: + self._loop = events.get_event_loop() + else: + self._loop = loop + self._buffer = bytearray() + self._eof = False # Whether we're done. 
+        self._waiter = None  # A future used by _wait_for_data()
+        self._exception = None
+        self._transport = None
+        self._paused = False
+        if self._loop.get_debug():
+            self._source_traceback = format_helpers.extract_stack(
+                sys._getframe(1))
+
+    def __repr__(self):
+        info = ['StreamReader']
+        if self._buffer:
+            info.append(f'{len(self._buffer)} bytes')
+        if self._eof:
+            info.append('eof')
+        if self._limit != _DEFAULT_LIMIT:
+            info.append(f'limit={self._limit}')
+        if self._waiter:
+            info.append(f'waiter={self._waiter!r}')
+        if self._exception:
+            info.append(f'exception={self._exception!r}')
+        if self._transport:
+            info.append(f'transport={self._transport!r}')
+        if self._paused:
+            info.append('paused')
+        return '<{}>'.format(' '.join(info))
+
+    def exception(self):
+        return self._exception
+
+    def set_exception(self, exc):
+        self._exception = exc
+
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_exception(exc)
+
+    def _wakeup_waiter(self):
+        """Wakeup read*() functions waiting for data or EOF."""
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            if not waiter.cancelled():
+                waiter.set_result(None)
+
+    def set_transport(self, transport):
+        assert self._transport is None, 'Transport already set'
+        self._transport = transport
+
+    def _maybe_resume_transport(self):
+        if self._paused and len(self._buffer) <= self._limit:
+            self._paused = False
+            self._transport.resume_reading()
+
+    def feed_eof(self):
+        self._eof = True
+        self._wakeup_waiter()
+
+    def at_eof(self):
+        """Return True if the buffer is empty and 'feed_eof' was called."""
+        return self._eof and not self._buffer
+
+    def feed_data(self, data):
+        assert not self._eof, 'feed_data after feed_eof'
+
+        if not data:
+            return
+
+        self._buffer.extend(data)
+        self._wakeup_waiter()
+
+        if (self._transport is not None and
+                not self._paused and
+                len(self._buffer) > 2 * self._limit):
+            try:
+                self._transport.pause_reading()
+            except NotImplementedError:
+                # The transport can't be paused.
+                # We'll just have to buffer all data.
+                # Forget the transport so we don't keep trying.
+                self._transport = None
+            else:
+                self._paused = True
+
+    async def _wait_for_data(self, func_name):
+        """Wait until feed_data() or feed_eof() is called.
+
+        If stream was paused, automatically resume it.
+        """
+        # StreamReader uses a future to link the protocol feed_data() method
+        # to a read coroutine. Running two read coroutines at the same time
+        # would have unexpected behaviour. It would not be possible to know
+        # which coroutine would get the next data.
+        if self._waiter is not None:
+            raise RuntimeError(
+                f'{func_name}() called while another coroutine is '
+                f'already waiting for incoming data')
+
+        assert not self._eof, '_wait_for_data after EOF'
+
+        # Waiting for data while paused would cause a deadlock, so prevent it.
+        # This is essential for readexactly(n) for the case when
+        # n > self._limit.
+        if self._paused:
+            self._paused = False
+            self._transport.resume_reading()
+
+        self._waiter = self._loop.create_future()
+        try:
+            await self._waiter
+        finally:
+            self._waiter = None
+
+    async def readline(self):
+        """Read a chunk of data from the stream until a newline (b'\n') is found.
+
+        On success, return a chunk that ends with the newline. If only a
+        partial line can be read due to EOF, return the incomplete line
+        without the terminating newline. When EOF is reached and no bytes
+        have been read, return an empty bytes object.
+
+        If the limit is reached, ValueError will be raised.
In that case, if
+        a newline was found, the complete line including the newline will be
+        removed from the internal buffer; otherwise, the internal buffer will
+        be cleared. The limit is compared against the part of the line
+        without the newline.
+
+        If the stream was paused, this function will automatically resume it
+        if needed.
+        """
+        sep = b'\n'
+        seplen = len(sep)
+        try:
+            line = await self.readuntil(sep)
+        except exceptions.IncompleteReadError as e:
+            return e.partial
+        except exceptions.LimitOverrunError as e:
+            if self._buffer.startswith(sep, e.consumed):
+                del self._buffer[:e.consumed + seplen]
+            else:
+                self._buffer.clear()
+            self._maybe_resume_transport()
+            raise ValueError(e.args[0])
+        return line
+
+    async def readuntil(self, separator=b'\n'):
+        """Read data from the stream until ``separator`` is found.
+
+        On success, the data and separator will be removed from the
+        internal buffer (consumed). Returned data will include the
+        separator at the end.
+
+        The configured stream limit is used to check the result. The limit
+        sets the maximal length of data that can be returned, not counting
+        the separator.
+
+        If an EOF occurs and the complete separator is still not found,
+        an IncompleteReadError exception will be raised, and the internal
+        buffer will be reset. The IncompleteReadError.partial attribute
+        may contain the separator partially.
+
+        If the data cannot be read because the limit is exceeded, a
+        LimitOverrunError exception will be raised, and the data
+        will be left in the internal buffer, so it can be read again.
+        """
+        seplen = len(separator)
+        if seplen == 0:
+            raise ValueError('Separator should be at least one-byte string')
+
+        if self._exception is not None:
+            raise self._exception
+
+        # Consume the whole buffer except for the last bytes, whose length
+        # is one less than seplen. Let's check corner cases with
+        # separator='SEPARATOR':
+        # * we have received an almost complete separator (without the last
+        #   byte), i.e. buffer='some textSEPARATO'. In this case we
+        #   can safely consume len(separator) - 1 bytes.
+        # * the last byte of the buffer is the first byte of the separator,
+        #   i.e. buffer='abcdefghijklmnopqrS'. We may safely consume
+        #   everything except that last byte, but this requires analyzing
+        #   the bytes of the buffer that match a partial separator.
+        #   This is slow and/or requires an FSM. For this case our
+        #   implementation is not optimal, since it requires rescanning
+        #   data that is known not to belong to the separator. In the
+        #   real world, the separator will not be long enough to cause
+        #   performance problems. Even when reading MIME-encoded
+        #   messages :)
+
+        # `offset` is the number of bytes from the beginning of the buffer
+        # where there is no occurrence of `separator`.
+        offset = 0
+
+        # Loop until we find `separator` in the buffer, exceed the buffer size,
+        # or an EOF has happened.
+        while True:
+            buflen = len(self._buffer)
+
+            # Check if we now have enough data in the buffer for `separator` to
+            # fit.
+            if buflen - offset >= seplen:
+                isep = self._buffer.find(separator, offset)
+
+                if isep != -1:
+                    # `separator` is in the buffer. `isep` will be used later
+                    # to retrieve the data.
+                    break
+
+                # see upper comment for explanation.
+                offset = buflen + 1 - seplen
+                if offset > self._limit:
+                    raise exceptions.LimitOverrunError(
+                        'Separator is not found, and chunk exceed the limit',
+                        offset)
+
+            # A complete message (with the full separator) may be present in
+            # the buffer even when the EOF flag is set. This may happen when
+            # the last chunk adds data which makes the separator be found.
+            # That's why we check for EOF *after* inspecting the buffer.
+            if self._eof:
+                chunk = bytes(self._buffer)
+                self._buffer.clear()
+                raise exceptions.IncompleteReadError(chunk, None)
+
+            # _wait_for_data() will resume reading if stream was paused.
+            await self._wait_for_data('readuntil')
+
+        if isep > self._limit:
+            raise exceptions.LimitOverrunError(
+                'Separator is found, but chunk is longer than limit', isep)
+
+        chunk = self._buffer[:isep + seplen]
+        del self._buffer[:isep + seplen]
+        self._maybe_resume_transport()
+        return bytes(chunk)
+
+    async def read(self, n=-1):
+        """Read up to `n` bytes from the stream.
+
+        If n is not provided, or set to -1, read until EOF and return all
+        read bytes. If EOF was received and the internal buffer is empty,
+        return an empty bytes object.
+
+        If n is zero, return an empty bytes object immediately.
+
+        If n is positive, this function tries to read `n` bytes, and may
+        return fewer bytes than requested, but at least one byte. If EOF
+        was received before any byte is read, this function returns an
+        empty bytes object.
+
+        The returned value is not limited by the limit configured at
+        stream creation.
+
+        If the stream was paused, this function will automatically resume
+        it if needed.
+        """
+
+        if self._exception is not None:
+            raise self._exception
+
+        if n == 0:
+            return b''
+
+        if n < 0:
+            # This used to just loop creating a new waiter hoping to
+            # collect everything in self._buffer, but that would
+            # deadlock if the subprocess sends more than self.limit
+            # bytes.  So just call self.read(self._limit) until EOF.
+            blocks = []
+            while True:
+                block = await self.read(self._limit)
+                if not block:
+                    break
+                blocks.append(block)
+            return b''.join(blocks)
+
+        if not self._buffer and not self._eof:
+            await self._wait_for_data('read')
+
+        # This will work right even if the buffer is less than n bytes
+        data = bytes(self._buffer[:n])
+        del self._buffer[:n]
+
+        self._maybe_resume_transport()
+        return data
+
+    async def readexactly(self, n):
+        """Read exactly `n` bytes.
+
+        Raise an IncompleteReadError if EOF is reached before `n` bytes can
+        be read. The IncompleteReadError.partial attribute of the exception
+        will contain the partial read bytes.
+
+        If n is zero, return an empty bytes object.
+
+        The returned value is not limited by the limit configured at
+        stream creation.
+
+        If the stream was paused, this function will automatically resume
+        it if needed.
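+
+        For example, given a StreamReader ``reader``, a length-prefixed
+        frame might be read like this (an illustrative sketch)::
+
+            header = await reader.readexactly(4)
+            body = await reader.readexactly(int.from_bytes(header, 'big'))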
+ """ + if n < 0: + raise ValueError('readexactly size can not be less than zero') + + if self._exception is not None: + raise self._exception + + if n == 0: + return b'' + + while len(self._buffer) < n: + if self._eof: + incomplete = bytes(self._buffer) + self._buffer.clear() + raise exceptions.IncompleteReadError(incomplete, n) + + await self._wait_for_data('readexactly') + + if len(self._buffer) == n: + data = bytes(self._buffer) + self._buffer.clear() + else: + data = bytes(self._buffer[:n]) + del self._buffer[:n] + self._maybe_resume_transport() + return data + + def __aiter__(self): + return self + + async def __anext__(self): + val = await self.readline() + if val == b'': + raise StopAsyncIteration + return val diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/subprocess.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/subprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..c9506b158302b5e8027a636fdb7c9ca9c965b938 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/subprocess.py @@ -0,0 +1,241 @@ +__all__ = 'create_subprocess_exec', 'create_subprocess_shell' + +import subprocess +import warnings + +from . import events +from . import protocols +from . import streams +from . import tasks +from .log import logger + + +PIPE = subprocess.PIPE +STDOUT = subprocess.STDOUT +DEVNULL = subprocess.DEVNULL + + +class SubprocessStreamProtocol(streams.FlowControlMixin, + protocols.SubprocessProtocol): + """Like StreamReaderProtocol, but for a subprocess.""" + + def __init__(self, limit, loop): + super().__init__(loop=loop) + self._limit = limit + self.stdin = self.stdout = self.stderr = None + self._transport = None + self._process_exited = False + self._pipe_fds = [] + self._stdin_closed = self._loop.create_future() + + def __repr__(self): + info = [self.__class__.__name__] + if self.stdin is not None: + info.append(f'stdin={self.stdin!r}') + if self.stdout is not None: + info.append(f'stdout={self.stdout!r}') + if self.stderr is not None: + info.append(f'stderr={self.stderr!r}') + return '<{}>'.format(' '.join(info)) + + def connection_made(self, transport): + self._transport = transport + + stdout_transport = transport.get_pipe_transport(1) + if stdout_transport is not None: + self.stdout = streams.StreamReader(limit=self._limit, + loop=self._loop) + self.stdout.set_transport(stdout_transport) + self._pipe_fds.append(1) + + stderr_transport = transport.get_pipe_transport(2) + if stderr_transport is not None: + self.stderr = streams.StreamReader(limit=self._limit, + loop=self._loop) + self.stderr.set_transport(stderr_transport) + self._pipe_fds.append(2) + + stdin_transport = transport.get_pipe_transport(0) + if stdin_transport is not None: + self.stdin = streams.StreamWriter(stdin_transport, + protocol=self, + reader=None, + loop=self._loop) + + def pipe_data_received(self, fd, data): + if fd == 1: + reader = self.stdout + elif fd == 2: + reader = self.stderr + else: + reader = None + if reader is not None: + reader.feed_data(data) + + def pipe_connection_lost(self, fd, exc): + if fd == 0: + pipe = self.stdin + if pipe is not None: + pipe.close() + self.connection_lost(exc) + if exc is None: + self._stdin_closed.set_result(None) + else: + self._stdin_closed.set_exception(exc) + return + if fd == 1: + reader = self.stdout + elif fd == 2: + reader = self.stderr + else: + reader = None + if reader is not None: + if exc is None: + reader.feed_eof() + else: + reader.set_exception(exc) + + if fd 
in self._pipe_fds:
+            self._pipe_fds.remove(fd)
+        self._maybe_close_transport()
+
+    def process_exited(self):
+        self._process_exited = True
+        self._maybe_close_transport()
+
+    def _maybe_close_transport(self):
+        if len(self._pipe_fds) == 0 and self._process_exited:
+            self._transport.close()
+            self._transport = None
+
+    def _get_close_waiter(self, stream):
+        if stream is self.stdin:
+            return self._stdin_closed
+
+
+class Process:
+    def __init__(self, transport, protocol, loop):
+        self._transport = transport
+        self._protocol = protocol
+        self._loop = loop
+        self.stdin = protocol.stdin
+        self.stdout = protocol.stdout
+        self.stderr = protocol.stderr
+        self.pid = transport.get_pid()
+
+    def __repr__(self):
+        return f'<{self.__class__.__name__} {self.pid}>'
+
+    @property
+    def returncode(self):
+        return self._transport.get_returncode()
+
+    async def wait(self):
+        """Wait until the process exits and return its return code."""
+        return await self._transport._wait()
+
+    def send_signal(self, signal):
+        self._transport.send_signal(signal)
+
+    def terminate(self):
+        self._transport.terminate()
+
+    def kill(self):
+        self._transport.kill()
+
+    async def _feed_stdin(self, input):
+        debug = self._loop.get_debug()
+        self.stdin.write(input)
+        if debug:
+            logger.debug(
+                '%r communicate: feed stdin (%s bytes)', self, len(input))
+        try:
+            await self.stdin.drain()
+        except (BrokenPipeError, ConnectionResetError) as exc:
+            # communicate() ignores BrokenPipeError and ConnectionResetError
+            if debug:
+                logger.debug('%r communicate: stdin got %r', self, exc)
+
+        if debug:
+            logger.debug('%r communicate: close stdin', self)
+        self.stdin.close()
+
+    async def _noop(self):
+        return None
+
+    async def _read_stream(self, fd):
+        transport = self._transport.get_pipe_transport(fd)
+        if fd == 2:
+            stream = self.stderr
+        else:
+            assert fd == 1
+            stream = self.stdout
+        if self._loop.get_debug():
+            name = 'stdout' if fd == 1 else 'stderr'
+            logger.debug('%r communicate: read %s', self, name)
+        output = await stream.read()
+        if self._loop.get_debug():
+            name = 'stdout' if fd == 1 else 'stderr'
+            logger.debug('%r communicate: close %s', self, name)
+        transport.close()
+        return output
+
+    async def communicate(self, input=None):
+        if input is not None:
+            stdin = self._feed_stdin(input)
+        else:
+            stdin = self._noop()
+        if self.stdout is not None:
+            stdout = self._read_stream(1)
+        else:
+            stdout = self._noop()
+        if self.stderr is not None:
+            stderr = self._read_stream(2)
+        else:
+            stderr = self._noop()
+        stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr,
+                                                   loop=self._loop)
+        await self.wait()
+        return (stdout, stderr)
+
+
+async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
+                                  loop=None, limit=streams._DEFAULT_LIMIT,
+                                  **kwds):
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8 "
+                      "and scheduled for removal in Python 3.10.",
+                      DeprecationWarning,
+                      stacklevel=2
+                      )
+
+    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
+                                                        loop=loop)
+    transport, protocol = await loop.subprocess_shell(
+        protocol_factory,
+        cmd, stdin=stdin, stdout=stdout,
+        stderr=stderr, **kwds)
+    return Process(transport, protocol, loop)
+
+
+async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
+                                 stderr=None, loop=None,
+                                 limit=streams._DEFAULT_LIMIT, **kwds):
+    if loop is None:
+        loop = events.get_event_loop()
+    else:
+        warnings.warn("The loop argument is deprecated since Python 3.8 "
+                      "and scheduled for
removal in Python 3.10.",
+                      DeprecationWarning,
+                      stacklevel=2
+                      )
+    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
+                                                        loop=loop)
+    transport, protocol = await loop.subprocess_exec(
+        protocol_factory,
+        program, *args,
+        stdin=stdin, stdout=stdout,
+        stderr=stderr, **kwds)
+    return Process(transport, protocol, loop)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/tasks.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..709413258381408c1a5b269140a51f1b8f44137f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/tasks.py
@@ -0,0 +1,984 @@
+"""Support for tasks, coroutines and the scheduler."""
+
+__all__ = (
+    'Task', 'create_task',
+    'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
+    'wait', 'wait_for', 'as_completed', 'sleep',
+    'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
+    'current_task', 'all_tasks',
+    '_register_task', '_unregister_task', '_enter_task', '_leave_task',
+)
+
+import concurrent.futures
+import contextvars
+import functools
+import inspect
+import itertools
+import types
+import warnings
+import weakref
+
+from . import base_tasks
+from . import coroutines
+from . import events
+from . import exceptions
+from . import futures
+from .coroutines import _is_coroutine
+
+# Helper to generate new task names.
+# This uses itertools.count() instead of a "+= 1" operation because the latter
+# is not thread safe. See bpo-11866 for a longer explanation.
+_task_name_counter = itertools.count(1).__next__
+
+
+def current_task(loop=None):
+    """Return the currently executed task."""
+    if loop is None:
+        loop = events.get_running_loop()
+    return _current_tasks.get(loop)
+
+
+def all_tasks(loop=None):
+    """Return a set of all tasks for the loop."""
+    if loop is None:
+        loop = events.get_running_loop()
+    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from
+    # another thread while we do so. Therefore we cast it to list prior to
+    # filtering. The list cast itself requires iteration, so we repeat it
+    # several times ignoring RuntimeErrors (which are not very likely to
+    # occur). See issues 34970 and 36607 for details.
+    i = 0
+    while True:
+        try:
+            tasks = list(_all_tasks)
+        except RuntimeError:
+            i += 1
+            if i >= 1000:
+                raise
+        else:
+            break
+    return {t for t in tasks
+            if futures._get_loop(t) is loop and not t.done()}
+
+
+def _all_tasks_compat(loop=None):
+    # Different from "all_tasks()" by returning *all* Tasks, including
+    # the completed ones. Used to implement the deprecated
+    # "Task.all_tasks()" method.
+    if loop is None:
+        loop = events.get_event_loop()
+    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from
+    # another thread while we do so. Therefore we cast it to list prior to
+    # filtering. The list cast itself requires iteration, so we repeat it
+    # several times ignoring RuntimeErrors (which are not very likely to
+    # occur). See issues 34970 and 36607 for details.
+    i = 0
+    while True:
+        try:
+            tasks = list(_all_tasks)
+        except RuntimeError:
+            i += 1
+            if i >= 1000:
+                raise
+        else:
+            break
+    return {t for t in tasks if futures._get_loop(t) is loop}
+
+
+def _set_task_name(task, name):
+    if name is not None:
+        try:
+            set_name = task.set_name
+        except AttributeError:
+            pass
+        else:
+            set_name(name)
+
+
+class Task(futures._PyFuture):  # Inherit Python Task implementation
+                                # from a Python Future implementation.
+ + """A coroutine wrapped in a Future.""" + + # An important invariant maintained while a Task not done: + # + # - Either _fut_waiter is None, and _step() is scheduled; + # - or _fut_waiter is some Future, and _step() is *not* scheduled. + # + # The only transition from the latter to the former is through + # _wakeup(). When _fut_waiter is not None, one of its callbacks + # must be _wakeup(). + + # If False, don't log a message if the task is destroyed whereas its + # status is still pending + _log_destroy_pending = True + + @classmethod + def current_task(cls, loop=None): + """Return the currently running task in an event loop or None. + + By default the current task for the current event loop is returned. + + None is returned when called not in the context of a Task. + """ + warnings.warn("Task.current_task() is deprecated since Python 3.7, " + "use asyncio.current_task() instead", + DeprecationWarning, + stacklevel=2) + if loop is None: + loop = events.get_event_loop() + return current_task(loop) + + @classmethod + def all_tasks(cls, loop=None): + """Return a set of all tasks for an event loop. + + By default all tasks for the current event loop are returned. + """ + warnings.warn("Task.all_tasks() is deprecated since Python 3.7, " + "use asyncio.all_tasks() instead", + DeprecationWarning, + stacklevel=2) + return _all_tasks_compat(loop) + + def __init__(self, coro, *, loop=None, name=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + if not coroutines.iscoroutine(coro): + # raise after Future.__init__(), attrs are required for __del__ + # prevent logging for pending task in __del__ + self._log_destroy_pending = False + raise TypeError(f"a coroutine was expected, got {coro!r}") + + if name is None: + self._name = f'Task-{_task_name_counter()}' + else: + self._name = str(name) + + self._must_cancel = False + self._fut_waiter = None + self._coro = coro + self._context = contextvars.copy_context() + + self._loop.call_soon(self.__step, context=self._context) + _register_task(self) + + def __del__(self): + if self._state == futures._PENDING and self._log_destroy_pending: + context = { + 'task': self, + 'message': 'Task was destroyed but it is pending!', + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + super().__del__() + + def _repr_info(self): + return base_tasks._task_repr_info(self) + + def get_coro(self): + return self._coro + + def get_name(self): + return self._name + + def set_name(self, value): + self._name = str(value) + + def set_result(self, result): + raise RuntimeError('Task does not support set_result operation') + + def set_exception(self, exception): + raise RuntimeError('Task does not support set_exception operation') + + def get_stack(self, *, limit=None): + """Return the list of stack frames for this task's coroutine. + + If the coroutine is not done, this returns the stack where it is + suspended. If the coroutine has completed successfully or was + cancelled, this returns an empty list. If the coroutine was + terminated by an exception, this returns the list of traceback + frames. + + The frames are always ordered from oldest to newest. + + The optional limit gives the maximum number of frames to + return; by default all available frames are returned. Its + meaning differs depending on whether a stack or a traceback is + returned: the newest frames of a stack are returned, but the + oldest frames of a traceback are returned. 
(This matches the + behavior of the traceback module.) + + For reasons beyond our control, only one stack frame is + returned for a suspended coroutine. + """ + return base_tasks._task_get_stack(self, limit) + + def print_stack(self, *, limit=None, file=None): + """Print the stack or traceback for this task's coroutine. + + This produces output similar to that of the traceback module, + for the frames retrieved by get_stack(). The limit argument + is passed to get_stack(). The file argument is an I/O stream + to which the output is written; by default output is written + to sys.stderr. + """ + return base_tasks._task_print_stack(self, limit, file) + + def cancel(self): + """Request that this task cancel itself. + + This arranges for a CancelledError to be thrown into the + wrapped coroutine on the next cycle through the event loop. + The coroutine then has a chance to clean up or even deny + the request using try/except/finally. + + Unlike Future.cancel, this does not guarantee that the + task will be cancelled: the exception might be caught and + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. + + Immediately after this method is called, Task.cancelled() will + not return True (unless the task was already cancelled). A + task will be marked as cancelled when the wrapped coroutine + terminates with a CancelledError exception (even if cancel() + was not called). + """ + self._log_traceback = False + if self.done(): + return False + if self._fut_waiter is not None: + if self._fut_waiter.cancel(): + # Leave self._fut_waiter; it may be a Task that + # catches and ignores the cancellation so we may have + # to cancel it again later. + return True + # It must be the case that self.__step is already scheduled. + self._must_cancel = True + return True + + def __step(self, exc=None): + if self.done(): + raise exceptions.InvalidStateError( + f'_step(): already done: {self!r}, {exc!r}') + if self._must_cancel: + if not isinstance(exc, exceptions.CancelledError): + exc = exceptions.CancelledError() + self._must_cancel = False + coro = self._coro + self._fut_waiter = None + + _enter_task(self._loop, self) + # Call either coro.throw(exc) or coro.send(None). + try: + if exc is None: + # We use the `send` method directly, because coroutines + # don't have `__iter__` and `__next__` methods. + result = coro.send(None) + else: + result = coro.throw(exc) + except StopIteration as exc: + if self._must_cancel: + # Task is cancelled right before coro stops. + self._must_cancel = False + super().cancel() + else: + super().set_result(exc.value) + except exceptions.CancelledError: + super().cancel() # I.e., Future.cancel(self). + except (KeyboardInterrupt, SystemExit) as exc: + super().set_exception(exc) + raise + except BaseException as exc: + super().set_exception(exc) + else: + blocking = getattr(result, '_asyncio_future_blocking', None) + if blocking is not None: + # Yielded Future must come from Future.__iter__(). 
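+                # `_asyncio_future_blocking` is set to True by
+                # Future.__await__ (and Future.__iter__) just before the
+                # future yields itself, so a non-None value here identifies
+                # a genuinely awaited future; misuses are handled below.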
+                if futures._get_loop(result) is not self._loop:
+                    new_exc = RuntimeError(
+                        f'Task {self!r} got Future '
+                        f'{result!r} attached to a different loop')
+                    self._loop.call_soon(
+                        self.__step, new_exc, context=self._context)
+                elif blocking:
+                    if result is self:
+                        new_exc = RuntimeError(
+                            f'Task cannot await on itself: {self!r}')
+                        self._loop.call_soon(
+                            self.__step, new_exc, context=self._context)
+                    else:
+                        result._asyncio_future_blocking = False
+                        result.add_done_callback(
+                            self.__wakeup, context=self._context)
+                        self._fut_waiter = result
+                        if self._must_cancel:
+                            if self._fut_waiter.cancel():
+                                self._must_cancel = False
+                else:
+                    new_exc = RuntimeError(
+                        f'yield was used instead of yield from '
+                        f'in task {self!r} with {result!r}')
+                    self._loop.call_soon(
+                        self.__step, new_exc, context=self._context)
+
+            elif result is None:
+                # Bare yield relinquishes control for one event loop iteration.
+                self._loop.call_soon(self.__step, context=self._context)
+            elif inspect.isgenerator(result):
+                # Yielding a generator is just wrong.
+                new_exc = RuntimeError(
+                    f'yield was used instead of yield from for '
+                    f'generator in task {self!r} with {result!r}')
+                self._loop.call_soon(
+                    self.__step, new_exc, context=self._context)
+            else:
+                # Yielding something else is an error.
+                new_exc = RuntimeError(f'Task got bad yield: {result!r}')
+                self._loop.call_soon(
+                    self.__step, new_exc, context=self._context)
+        finally:
+            _leave_task(self._loop, self)
+            self = None  # Needed to break cycles when an exception occurs.
+
+    def __wakeup(self, future):
+        try:
+            future.result()
+        except BaseException as exc:
+            # This may also be a cancellation.
+            self.__step(exc)
+        else:
+            # Don't pass the value of `future.result()` explicitly,
+            # as `Future.__iter__` and `Future.__await__` don't need it.
+            # If we call `_step(value, None)` instead of `_step()`,
+            # the Python eval loop would use the `.send(value)` method call,
+            # instead of `__next__()`, which is slower for futures
+            # that return non-generator iterators from their `__iter__`.
+            self.__step()
+        self = None  # Needed to break cycles when an exception occurs.
+
+
+_PyTask = Task
+
+
+try:
+    import _asyncio
+except ImportError:
+    pass
+else:
+    # _CTask is needed for tests.
+    Task = _CTask = _asyncio.Task
+
+
+def create_task(coro, *, name=None):
+    """Schedule the execution of a coroutine object in a spawned task.
+
+    Return a Task object.
+    """
+    loop = events.get_running_loop()
+    task = loop.create_task(coro)
+    _set_task_name(task, name)
+    return task
+
+
+# wait() and as_completed() similar to those in PEP 3148.
+
+FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
+FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
+ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
+
+
+async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
+    """Wait for the Futures and coroutines given by fs to complete.
+
+    The fs iterable must not be empty.
+
+    Coroutines will be wrapped in Tasks.
+
+    Returns two sets of Future: (done, pending).
+
+    Usage:
+
+        done, pending = await asyncio.wait(fs)
+
+    Note: This does not raise TimeoutError! Futures that aren't done
+    when the timeout occurs are returned in the second set.
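+
+    For example, a bounded wait that cancels the stragglers (an
+    illustrative sketch)::
+
+        done, pending = await asyncio.wait(fs, timeout=5.0)
+        for t in pending:
+            t.cancel()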
+ """ + if futures.isfuture(fs) or coroutines.iscoroutine(fs): + raise TypeError(f"expect a list of futures, not {type(fs).__name__}") + if not fs: + raise ValueError('Set of coroutines/Futures is empty.') + if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED): + raise ValueError(f'Invalid return_when value: {return_when}') + + if loop is None: + loop = events.get_running_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + fs = {ensure_future(f, loop=loop) for f in set(fs)} + + return await _wait(fs, timeout, return_when, loop) + + +def _release_waiter(waiter, *args): + if not waiter.done(): + waiter.set_result(None) + + +async def wait_for(fut, timeout, *, loop=None): + """Wait for the single Future or coroutine to complete, with timeout. + + Coroutine will be wrapped in Task. + + Returns result of the Future or coroutine. When a timeout occurs, + it cancels the task and raises TimeoutError. To avoid the task + cancellation, wrap it in shield(). + + If the wait is cancelled, the task is also cancelled. + + This function is a coroutine. + """ + if loop is None: + loop = events.get_running_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + if timeout is None: + return await fut + + if timeout <= 0: + fut = ensure_future(fut, loop=loop) + + if fut.done(): + return fut.result() + + await _cancel_and_wait(fut, loop=loop) + try: + fut.result() + except exceptions.CancelledError as exc: + raise exceptions.TimeoutError() from exc + else: + raise exceptions.TimeoutError() + + waiter = loop.create_future() + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + cb = functools.partial(_release_waiter, waiter) + + fut = ensure_future(fut, loop=loop) + fut.add_done_callback(cb) + + try: + # wait until the future completes or the timeout + try: + await waiter + except exceptions.CancelledError: + if fut.done(): + return fut.result() + else: + fut.remove_done_callback(cb) + # We must ensure that the task is not running + # after wait_for() returns. + # See https://bugs.python.org/issue32751 + await _cancel_and_wait(fut, loop=loop) + raise + + if fut.done(): + return fut.result() + else: + fut.remove_done_callback(cb) + # We must ensure that the task is not running + # after wait_for() returns. + # See https://bugs.python.org/issue32751 + await _cancel_and_wait(fut, loop=loop) + raise exceptions.TimeoutError() + finally: + timeout_handle.cancel() + + +async def _wait(fs, timeout, return_when, loop): + """Internal helper for wait(). + + The fs argument must be a collection of Futures. + """ + assert fs, 'Set of Futures is empty.' 
+ waiter = loop.create_future() + timeout_handle = None + if timeout is not None: + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + counter = len(fs) + + def _on_completion(f): + nonlocal counter + counter -= 1 + if (counter <= 0 or + return_when == FIRST_COMPLETED or + return_when == FIRST_EXCEPTION and (not f.cancelled() and + f.exception() is not None)): + if timeout_handle is not None: + timeout_handle.cancel() + if not waiter.done(): + waiter.set_result(None) + + for f in fs: + f.add_done_callback(_on_completion) + + try: + await waiter + finally: + if timeout_handle is not None: + timeout_handle.cancel() + for f in fs: + f.remove_done_callback(_on_completion) + + done, pending = set(), set() + for f in fs: + if f.done(): + done.add(f) + else: + pending.add(f) + return done, pending + + +async def _cancel_and_wait(fut, loop): + """Cancel the *fut* future or task and wait until it completes.""" + + waiter = loop.create_future() + cb = functools.partial(_release_waiter, waiter) + fut.add_done_callback(cb) + + try: + fut.cancel() + # We cannot wait on *fut* directly to make + # sure _cancel_and_wait itself is reliably cancellable. + await waiter + finally: + fut.remove_done_callback(cb) + + +# This is *not* a @coroutine! It is just an iterator (yielding Futures). +def as_completed(fs, *, loop=None, timeout=None): + """Return an iterator whose values are coroutines. + + When waiting for the yielded coroutines you'll get the results (or + exceptions!) of the original Futures (or coroutines), in the order + in which and as soon as they complete. + + This differs from PEP 3148; the proper way to use this is: + + for f in as_completed(fs): + result = await f # The 'await' may raise. + # Use result. + + If a timeout is specified, the 'await' will raise + TimeoutError when the timeout occurs before all Futures are done. + + Note: The futures 'f' are not necessarily members of fs. + """ + if futures.isfuture(fs) or coroutines.iscoroutine(fs): + raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}") + + from .queues import Queue # Import here to avoid circular import problem. + done = Queue(loop=loop) + + if loop is None: + loop = events.get_event_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + todo = {ensure_future(f, loop=loop) for f in set(fs)} + timeout_handle = None + + def _on_timeout(): + for f in todo: + f.remove_done_callback(_on_completion) + done.put_nowait(None) # Queue a dummy value for _wait_for_one(). + todo.clear() # Can't do todo.remove(f) in the loop. + + def _on_completion(f): + if not todo: + return # _on_timeout() was here first. + todo.remove(f) + done.put_nowait(f) + if not todo and timeout_handle is not None: + timeout_handle.cancel() + + async def _wait_for_one(): + f = await done.get() + if f is None: + # Dummy value from _on_timeout(). + raise exceptions.TimeoutError + return f.result() # May raise f.exception(). + + for f in todo: + f.add_done_callback(_on_completion) + if todo and timeout is not None: + timeout_handle = loop.call_later(timeout, _on_timeout) + for _ in range(len(todo)): + yield _wait_for_one() + + +@types.coroutine +def __sleep0(): + """Skip one event loop run cycle. + + This is a private helper for 'asyncio.sleep()', used + when the 'delay' is set to 0. It uses a bare 'yield' + expression (which Task.__step knows how to handle) + instead of creating a Future object. 
+ """ + yield + + +async def sleep(delay, result=None, *, loop=None): + """Coroutine that completes after a given time (in seconds).""" + if delay <= 0: + await __sleep0() + return result + + if loop is None: + loop = events.get_running_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + + future = loop.create_future() + h = loop.call_later(delay, + futures._set_result_unless_cancelled, + future, result) + try: + return await future + finally: + h.cancel() + + +def ensure_future(coro_or_future, *, loop=None): + """Wrap a coroutine or an awaitable in a future. + + If the argument is a Future, it is returned directly. + """ + if coroutines.iscoroutine(coro_or_future): + if loop is None: + loop = events.get_event_loop() + task = loop.create_task(coro_or_future) + if task._source_traceback: + del task._source_traceback[-1] + return task + elif futures.isfuture(coro_or_future): + if loop is not None and loop is not futures._get_loop(coro_or_future): + raise ValueError('The future belongs to a different loop than ' + 'the one specified as the loop argument') + return coro_or_future + elif inspect.isawaitable(coro_or_future): + return ensure_future(_wrap_awaitable(coro_or_future), loop=loop) + else: + raise TypeError('An asyncio.Future, a coroutine or an awaitable is ' + 'required') + + +@types.coroutine +def _wrap_awaitable(awaitable): + """Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). + """ + return (yield from awaitable.__await__()) + +_wrap_awaitable._is_coroutine = _is_coroutine + + +class _GatheringFuture(futures.Future): + """Helper for gather(). + + This overrides cancel() to cancel all the children and act more + like Task.cancel(), which doesn't immediately mark itself as + cancelled. + """ + + def __init__(self, children, *, loop=None): + super().__init__(loop=loop) + self._children = children + self._cancel_requested = False + + def cancel(self): + if self.done(): + return False + ret = False + for child in self._children: + if child.cancel(): + ret = True + if ret: + # If any child tasks were actually cancelled, we should + # propagate the cancellation request regardless of + # *return_exceptions* argument. See issue 32684. + self._cancel_requested = True + return ret + + +def gather(*coros_or_futures, loop=None, return_exceptions=False): + """Return a future aggregating results from the given coroutines/futures. + + Coroutines will be wrapped in a future and scheduled in the event + loop. They will not necessarily be scheduled in the same order as + passed in. + + All futures must share the same event loop. If all the tasks are + done successfully, the returned future's result is the list of + results (in the order of the original sequence, not necessarily + the order of results arrival). If *return_exceptions* is True, + exceptions in the tasks are treated the same as successful + results, and gathered in the result list; otherwise, the first + raised exception will be immediately propagated to the returned + future. + + Cancellation: if the outer Future is cancelled, all children (that + have not completed yet) are also cancelled. If any child is + cancelled, this is treated as if it raised CancelledError -- + the outer Future is *not* cancelled in this case. (This is to + prevent the cancellation of one child to cause other children to + be cancelled.) 
+ + If *return_exceptions* is False, cancelling gather() after it + has been marked done won't cancel any submitted awaitables. + For instance, gather can be marked done after propagating an + exception to the caller, therefore, calling ``gather.cancel()`` + after catching an exception (raised by one of the awaitables) from + gather won't cancel any other awaitables. + """ + if not coros_or_futures: + if loop is None: + loop = events.get_event_loop() + else: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + outer = loop.create_future() + outer.set_result([]) + return outer + + def _done_callback(fut): + nonlocal nfinished + nfinished += 1 + + if outer.done(): + if not fut.cancelled(): + # Mark exception retrieved. + fut.exception() + return + + if not return_exceptions: + if fut.cancelled(): + # Check if 'fut' is cancelled first, as + # 'fut.exception()' will *raise* a CancelledError + # instead of returning it. + exc = exceptions.CancelledError() + outer.set_exception(exc) + return + else: + exc = fut.exception() + if exc is not None: + outer.set_exception(exc) + return + + if nfinished == nfuts: + # All futures are done; create a list of results + # and set it to the 'outer' future. + results = [] + + for fut in children: + if fut.cancelled(): + # Check if 'fut' is cancelled first, as + # 'fut.exception()' will *raise* a CancelledError + # instead of returning it. + res = exceptions.CancelledError() + else: + res = fut.exception() + if res is None: + res = fut.result() + results.append(res) + + if outer._cancel_requested: + # If gather is being cancelled we must propagate the + # cancellation regardless of *return_exceptions* argument. + # See issue 32684. + outer.set_exception(exceptions.CancelledError()) + else: + outer.set_result(results) + + arg_to_fut = {} + children = [] + nfuts = 0 + nfinished = 0 + for arg in coros_or_futures: + if arg not in arg_to_fut: + fut = ensure_future(arg, loop=loop) + if loop is None: + loop = futures._get_loop(fut) + if fut is not arg: + # 'arg' was not a Future, therefore, 'fut' is a new + # Future created specifically for 'arg'. Since the caller + # can't control it, disable the "destroy pending task" + # warning. + fut._log_destroy_pending = False + + nfuts += 1 + arg_to_fut[arg] = fut + fut.add_done_callback(_done_callback) + + else: + # There's a duplicate Future object in coros_or_futures. + fut = arg_to_fut[arg] + + children.append(fut) + + outer = _GatheringFuture(children, loop=loop) + return outer + + +def shield(arg, *, loop=None): + """Wait for a future, shielding it from cancellation. + + The statement + + res = await shield(something()) + + is exactly equivalent to the statement + + res = await something() + + *except* that if the coroutine containing it is cancelled, the + task running in something() is not cancelled. From the POV of + something(), the cancellation did not happen. But its caller is + still cancelled, so the yield-from expression still raises + CancelledError. Note: If something() is cancelled by other means + this will still cancel shield(). 
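+
+    A common pattern combines shield() with wait_for() so that a timeout
+    does not cancel the inner task (an illustrative sketch)::
+
+        try:
+            res = await wait_for(shield(something()), timeout)
+        except asyncio.TimeoutError:
+            res = None  # something() keeps running despite the timeout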
+ + If you want to completely ignore cancellation (not recommended) + you can combine shield() with a try/except clause, as follows: + + try: + res = await shield(something()) + except CancelledError: + res = None + """ + if loop is not None: + warnings.warn("The loop argument is deprecated since Python 3.8, " + "and scheduled for removal in Python 3.10.", + DeprecationWarning, stacklevel=2) + inner = ensure_future(arg, loop=loop) + if inner.done(): + # Shortcut. + return inner + loop = futures._get_loop(inner) + outer = loop.create_future() + + def _inner_done_callback(inner): + if outer.cancelled(): + if not inner.cancelled(): + # Mark inner's result as retrieved. + inner.exception() + return + + if inner.cancelled(): + outer.cancel() + else: + exc = inner.exception() + if exc is not None: + outer.set_exception(exc) + else: + outer.set_result(inner.result()) + + + def _outer_done_callback(outer): + if not inner.done(): + inner.remove_done_callback(_inner_done_callback) + + inner.add_done_callback(_inner_done_callback) + outer.add_done_callback(_outer_done_callback) + return outer + + +def run_coroutine_threadsafe(coro, loop): + """Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. + """ + if not coroutines.iscoroutine(coro): + raise TypeError('A coroutine object is required') + future = concurrent.futures.Future() + + def callback(): + try: + futures._chain_future(ensure_future(coro, loop=loop), future) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if future.set_running_or_notify_cancel(): + future.set_exception(exc) + raise + + loop.call_soon_threadsafe(callback) + return future + + +# WeakSet containing all alive tasks. +_all_tasks = weakref.WeakSet() + +# Dictionary containing tasks that are currently active in +# all running event loops. 
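# Illustrative sketch, not part of the patch: run_coroutine_threadsafe()
# above is the supported way to hand a coroutine to a loop running in
# another thread; the concurrent.futures.Future it returns can be waited
# on synchronously.
import asyncio
import threading

async def add(a, b):
    await asyncio.sleep(0.1)
    return a + b

def worker(loop):
    fut = asyncio.run_coroutine_threadsafe(add(2, 3), loop)
    print(fut.result(timeout=5))    # 5

async def main():
    loop = asyncio.get_running_loop()
    t = threading.Thread(target=worker, args=(loop,))
    t.start()
    await asyncio.sleep(0.5)        # keep the loop running for the worker
    t.join()

asyncio.run(main())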
{EventLoop: Task} +_current_tasks = {} + + +def _register_task(task): + """Register a new task in asyncio as executed by loop.""" + _all_tasks.add(task) + + +def _enter_task(loop, task): + current_task = _current_tasks.get(loop) + if current_task is not None: + raise RuntimeError(f"Cannot enter into task {task!r} while another " + f"task {current_task!r} is being executed.") + _current_tasks[loop] = task + + +def _leave_task(loop, task): + current_task = _current_tasks.get(loop) + if current_task is not task: + raise RuntimeError(f"Leaving task {task!r} does not match " + f"the current task {current_task!r}.") + del _current_tasks[loop] + + +def _unregister_task(task): + """Unregister a task.""" + _all_tasks.discard(task) + + +_py_register_task = _register_task +_py_unregister_task = _unregister_task +_py_enter_task = _enter_task +_py_leave_task = _leave_task + + +try: + from _asyncio import (_register_task, _unregister_task, + _enter_task, _leave_task, + _all_tasks, _current_tasks) +except ImportError: + pass +else: + _c_register_task = _register_task + _c_unregister_task = _unregister_task + _c_enter_task = _enter_task + _c_leave_task = _leave_task diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/transports.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/transports.py new file mode 100644 index 0000000000000000000000000000000000000000..45e155c94cad1b31fb48e12a2098ba41138f4d31 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/transports.py @@ -0,0 +1,329 @@ +"""Abstract Transport class.""" + +__all__ = ( + 'BaseTransport', 'ReadTransport', 'WriteTransport', + 'Transport', 'DatagramTransport', 'SubprocessTransport', +) + + +class BaseTransport: + """Base class for transports.""" + + __slots__ = ('_extra',) + + def __init__(self, extra=None): + if extra is None: + extra = {} + self._extra = extra + + def get_extra_info(self, name, default=None): + """Get optional transport information.""" + return self._extra.get(name, default) + + def is_closing(self): + """Return True if the transport is closing or closed.""" + raise NotImplementedError + + def close(self): + """Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + raise NotImplementedError + + def set_protocol(self, protocol): + """Set a new protocol.""" + raise NotImplementedError + + def get_protocol(self): + """Return the current protocol.""" + raise NotImplementedError + + +class ReadTransport(BaseTransport): + """Interface for read-only transports.""" + + __slots__ = () + + def is_reading(self): + """Return True if the transport is receiving.""" + raise NotImplementedError + + def pause_reading(self): + """Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. + """ + raise NotImplementedError + + def resume_reading(self): + """Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + """ + raise NotImplementedError + + +class WriteTransport(BaseTransport): + """Interface for write-only transports.""" + + __slots__ = () + + def set_write_buffer_limits(self, high=None, low=None): + """Set the high- and low-water limits for write flow control. 
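# Illustrative sketch, not part of the patch: asyncio.all_tasks() and
# asyncio.current_task() are the public views of the _all_tasks /
# _current_tasks registries maintained above (C-accelerated when the
# _asyncio extension module is importable).
import asyncio

async def child():
    await asyncio.sleep(0.1)

async def main():
    asyncio.create_task(child(), name='child')   # "name" keyword: 3.8+
    print(asyncio.current_task().get_name())     # 'Task-1' (the main task)
    print(len(asyncio.all_tasks()))              # 2: main task + child

asyncio.run(main())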
+ + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + """ + raise NotImplementedError + + def get_write_buffer_size(self): + """Return the current size of the write buffer.""" + raise NotImplementedError + + def write(self, data): + """Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + """ + raise NotImplementedError + + def writelines(self, list_of_data): + """Write a list (or any iterable) of data bytes to the transport. + + The default implementation concatenates the arguments and + calls write() on the result. + """ + data = b''.join(list_of_data) + self.write(data) + + def write_eof(self): + """Close the write end after flushing buffered data. + + (This is like typing ^D into a UNIX program reading from stdin.) + + Data may still be received. + """ + raise NotImplementedError + + def can_write_eof(self): + """Return True if this transport supports write_eof(), False if not.""" + raise NotImplementedError + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + raise NotImplementedError + + +class Transport(ReadTransport, WriteTransport): + """Interface representing a bidirectional transport. + + There may be several implementations, but typically, the user does + not implement new transports; rather, the platform provides some + useful transports that are implemented using the platform's best + practices. + + The user never instantiates a transport directly; they call a + utility function, passing it a protocol factory and other + information necessary to create the transport and protocol. (E.g. + EventLoop.create_connection() or EventLoop.create_server().) + + The utility function will asynchronously create a transport and a + protocol and hook them up by calling the protocol's + connection_made() method, passing it the transport. + + The implementation here raises NotImplemented for every method + except writelines(), which calls write() in a loop. + """ + + __slots__ = () + + +class DatagramTransport(BaseTransport): + """Interface for datagram (UDP) transports.""" + + __slots__ = () + + def sendto(self, data, addr=None): + """Send data to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + addr is target socket address. + If addr is None use target address pointed on transport creation. + """ + raise NotImplementedError + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. 
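# Illustrative sketch, not part of the patch: a minimal protocol against the
# Transport interface defined above. write() buffers and never blocks;
# close() flushes buffered data before connection_lost() is called. The
# host/port pair is an arbitrary example value.
import asyncio

class EchoProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        print('connection from', transport.get_extra_info('peername'))
        self.transport = transport

    def data_received(self, data):
        self.transport.write(data)      # buffered, non-blocking
        if data.endswith(b'quit\n'):
            self.transport.close()      # flush, then connection_lost(None)

async def main():
    loop = asyncio.get_running_loop()
    server = await loop.create_server(EchoProtocol, '127.0.0.1', 8888)
    async with server:
        await server.serve_forever()

# asyncio.run(main())                   # binds 127.0.0.1:8888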
+ """ + raise NotImplementedError + + +class SubprocessTransport(BaseTransport): + + __slots__ = () + + def get_pid(self): + """Get subprocess id.""" + raise NotImplementedError + + def get_returncode(self): + """Get subprocess returncode. + + See also + http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode + """ + raise NotImplementedError + + def get_pipe_transport(self, fd): + """Get transport for pipe with number fd.""" + raise NotImplementedError + + def send_signal(self, signal): + """Send signal to subprocess. + + See also: + docs.python.org/3/library/subprocess#subprocess.Popen.send_signal + """ + raise NotImplementedError + + def terminate(self): + """Stop the subprocess. + + Alias for close() method. + + On Posix OSs the method sends SIGTERM to the subprocess. + On Windows the Win32 API function TerminateProcess() + is called to stop the subprocess. + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate + """ + raise NotImplementedError + + def kill(self): + """Kill the subprocess. + + On Posix OSs the function sends SIGKILL to the subprocess. + On Windows kill() is an alias for terminate(). + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.kill + """ + raise NotImplementedError + + +class _FlowControlMixin(Transport): + """All the logic for (write) flow control in a mix-in base class. + + The subclass must implement get_write_buffer_size(). It must call + _maybe_pause_protocol() whenever the write buffer size increases, + and _maybe_resume_protocol() whenever it decreases. It may also + override set_write_buffer_limits() (e.g. to specify different + defaults). + + The subclass constructor must call super().__init__(extra). This + will call set_write_buffer_limits(). + + The user may call set_write_buffer_limits() and + get_write_buffer_size(), and their protocol's pause_writing() and + resume_writing() may be called. 
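# Illustrative sketch, not part of the patch: the protocol side of the
# flow-control contract described above. With high=16384 and low unset,
# _set_write_buffer_limits() defaults low to high // 4 == 4096.
import asyncio

class FlowAwareProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.can_send = True
        transport.set_write_buffer_limits(high=16 * 1024)

    def pause_writing(self):
        # Buffer grew past the high-water mark: stop producing data.
        self.can_send = False

    def resume_writing(self):
        # Buffer drained to the low-water mark: production may resume.
        self.can_send = True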
+ """ + + __slots__ = ('_loop', '_protocol_paused', '_high_water', '_low_water') + + def __init__(self, extra=None, loop=None): + super().__init__(extra) + assert loop is not None + self._loop = loop + self._protocol_paused = False + self._set_write_buffer_limits() + + def _maybe_pause_protocol(self): + size = self.get_write_buffer_size() + if size <= self._high_water: + return + if not self._protocol_paused: + self._protocol_paused = True + try: + self._protocol.pause_writing() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.pause_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + def _maybe_resume_protocol(self): + if (self._protocol_paused and + self.get_write_buffer_size() <= self._low_water): + self._protocol_paused = False + try: + self._protocol.resume_writing() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.resume_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + def get_write_buffer_limits(self): + return (self._low_water, self._high_water) + + def _set_write_buffer_limits(self, high=None, low=None): + if high is None: + if low is None: + high = 64 * 1024 + else: + high = 4 * low + if low is None: + low = high // 4 + + if not high >= low >= 0: + raise ValueError( + f'high ({high!r}) must be >= low ({low!r}) must be >= 0') + + self._high_water = high + self._low_water = low + + def set_write_buffer_limits(self, high=None, low=None): + self._set_write_buffer_limits(high=high, low=low) + self._maybe_pause_protocol() + + def get_write_buffer_size(self): + raise NotImplementedError diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/trsock.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/trsock.py new file mode 100644 index 0000000000000000000000000000000000000000..e9ebcc326142596b289ef74ee3c35460f598b138 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/trsock.py @@ -0,0 +1,206 @@ +import socket +import warnings + + +class TransportSocket: + + """A socket-like wrapper for exposing real transport sockets. + + These objects can be safely returned by APIs like + `transport.get_extra_info('socket')`. All potentially disruptive + operations (like "socket.close()") are banned. + """ + + __slots__ = ('_sock',) + + def __init__(self, sock: socket.socket): + self._sock = sock + + def _na(self, what): + warnings.warn( + f"Using {what} on sockets returned from get_extra_info('socket') " + f"will be prohibited in asyncio 3.9. Please report your use case " + f"to bugs.python.org.", + DeprecationWarning, source=self) + + @property + def family(self): + return self._sock.family + + @property + def type(self): + return self._sock.type + + @property + def proto(self): + return self._sock.proto + + def __repr__(self): + s = ( + f"" + + def __getstate__(self): + raise TypeError("Cannot serialize asyncio.TransportSocket object") + + def fileno(self): + return self._sock.fileno() + + def dup(self): + return self._sock.dup() + + def get_inheritable(self): + return self._sock.get_inheritable() + + def shutdown(self, how): + # asyncio doesn't currently provide a high-level transport API + # to shutdown the connection. 
+ self._sock.shutdown(how) + + def getsockopt(self, *args, **kwargs): + return self._sock.getsockopt(*args, **kwargs) + + def setsockopt(self, *args, **kwargs): + self._sock.setsockopt(*args, **kwargs) + + def getpeername(self): + return self._sock.getpeername() + + def getsockname(self): + return self._sock.getsockname() + + def getsockbyname(self): + return self._sock.getsockbyname() + + def accept(self): + self._na('accept() method') + return self._sock.accept() + + def connect(self, *args, **kwargs): + self._na('connect() method') + return self._sock.connect(*args, **kwargs) + + def connect_ex(self, *args, **kwargs): + self._na('connect_ex() method') + return self._sock.connect_ex(*args, **kwargs) + + def bind(self, *args, **kwargs): + self._na('bind() method') + return self._sock.bind(*args, **kwargs) + + def ioctl(self, *args, **kwargs): + self._na('ioctl() method') + return self._sock.ioctl(*args, **kwargs) + + def listen(self, *args, **kwargs): + self._na('listen() method') + return self._sock.listen(*args, **kwargs) + + def makefile(self): + self._na('makefile() method') + return self._sock.makefile() + + def sendfile(self, *args, **kwargs): + self._na('sendfile() method') + return self._sock.sendfile(*args, **kwargs) + + def close(self): + self._na('close() method') + return self._sock.close() + + def detach(self): + self._na('detach() method') + return self._sock.detach() + + def sendmsg_afalg(self, *args, **kwargs): + self._na('sendmsg_afalg() method') + return self._sock.sendmsg_afalg(*args, **kwargs) + + def sendmsg(self, *args, **kwargs): + self._na('sendmsg() method') + return self._sock.sendmsg(*args, **kwargs) + + def sendto(self, *args, **kwargs): + self._na('sendto() method') + return self._sock.sendto(*args, **kwargs) + + def send(self, *args, **kwargs): + self._na('send() method') + return self._sock.send(*args, **kwargs) + + def sendall(self, *args, **kwargs): + self._na('sendall() method') + return self._sock.sendall(*args, **kwargs) + + def set_inheritable(self, *args, **kwargs): + self._na('set_inheritable() method') + return self._sock.set_inheritable(*args, **kwargs) + + def share(self, process_id): + self._na('share() method') + return self._sock.share(process_id) + + def recv_into(self, *args, **kwargs): + self._na('recv_into() method') + return self._sock.recv_into(*args, **kwargs) + + def recvfrom_into(self, *args, **kwargs): + self._na('recvfrom_into() method') + return self._sock.recvfrom_into(*args, **kwargs) + + def recvmsg_into(self, *args, **kwargs): + self._na('recvmsg_into() method') + return self._sock.recvmsg_into(*args, **kwargs) + + def recvmsg(self, *args, **kwargs): + self._na('recvmsg() method') + return self._sock.recvmsg(*args, **kwargs) + + def recvfrom(self, *args, **kwargs): + self._na('recvfrom() method') + return self._sock.recvfrom(*args, **kwargs) + + def recv(self, *args, **kwargs): + self._na('recv() method') + return self._sock.recv(*args, **kwargs) + + def settimeout(self, value): + if value == 0: + return + raise ValueError( + 'settimeout(): only 0 timeout is allowed on transport sockets') + + def gettimeout(self): + return 0 + + def setblocking(self, flag): + if not flag: + return + raise ValueError( + 'setblocking(): transport sockets cannot be blocking') + + def __enter__(self): + self._na('context manager protocol') + return self._sock.__enter__() + + def __exit__(self, *err): + self._na('context manager protocol') + return self._sock.__exit__(*err) diff --git 
a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/unix_events.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/unix_events.py new file mode 100644 index 0000000000000000000000000000000000000000..0420529c5b822a6ecc90846c7ab88636cf4263e7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/unix_events.py @@ -0,0 +1,1393 @@ +"""Selector event loop for Unix with signal handling.""" + +import errno +import io +import itertools +import os +import selectors +import signal +import socket +import stat +import subprocess +import sys +import threading +import warnings + +from . import base_events +from . import base_subprocess +from . import constants +from . import coroutines +from . import events +from . import exceptions +from . import futures +from . import selector_events +from . import tasks +from . import transports +from .log import logger + + +__all__ = ( + 'SelectorEventLoop', + 'AbstractChildWatcher', 'SafeChildWatcher', + 'FastChildWatcher', + 'MultiLoopChildWatcher', 'ThreadedChildWatcher', + 'DefaultEventLoopPolicy', +) + + +if sys.platform == 'win32': # pragma: no cover + raise ImportError('Signals are not really supported on Windows') + + +def _sighandler_noop(signum, frame): + """Dummy signal handler.""" + pass + + +class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop): + """Unix event loop. + + Adds signal handling and UNIX Domain Socket support to SelectorEventLoop. + """ + + def __init__(self, selector=None): + super().__init__(selector) + self._signal_handlers = {} + + def close(self): + super().close() + if not sys.is_finalizing(): + for sig in list(self._signal_handlers): + self.remove_signal_handler(sig) + else: + if self._signal_handlers: + warnings.warn(f"Closing the loop {self!r} " + f"on interpreter shutdown " + f"stage, skipping signal handlers removal", + ResourceWarning, + source=self) + self._signal_handlers.clear() + + def _process_self_data(self, data): + for signum in data: + if not signum: + # ignore null bytes written by _write_to_self() + continue + self._handle_signal(signum) + + def add_signal_handler(self, sig, callback, *args): + """Add a handler for a signal. UNIX only. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + """ + if (coroutines.iscoroutine(callback) or + coroutines.iscoroutinefunction(callback)): + raise TypeError("coroutines cannot be used " + "with add_signal_handler()") + self._check_signal(sig) + self._check_closed() + try: + # set_wakeup_fd() raises ValueError if this is not the + # main thread. By calling it early we ensure that an + # event loop running in another thread cannot add a signal + # handler. + signal.set_wakeup_fd(self._csock.fileno()) + except (ValueError, OSError) as exc: + raise RuntimeError(str(exc)) + + handle = events.Handle(callback, args, self, None) + self._signal_handlers[sig] = handle + + try: + # Register a dummy signal handler to ask Python to write the signal + # number in the wakeup file descriptor. _process_self_data() will + # read signal numbers from this file descriptor to handle signals. + signal.signal(sig, _sighandler_noop) + + # Set SA_RESTART to limit EINTR occurrences. 
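# Illustrative sketch, not part of the patch: installing loop-level signal
# handlers through the machinery above. Callbacks must be plain callables
# (coroutines are rejected by the TypeError check), and this is Unix-only.
import asyncio
import signal

async def main():
    loop = asyncio.get_running_loop()
    stop = asyncio.Event()
    loop.add_signal_handler(signal.SIGINT, stop.set)
    loop.add_signal_handler(signal.SIGTERM, stop.set)
    await stop.wait()
    loop.remove_signal_handler(signal.SIGTERM)
    print('shutting down')

asyncio.run(main())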
+ signal.siginterrupt(sig, False) + except OSError as exc: + del self._signal_handlers[sig] + if not self._signal_handlers: + try: + signal.set_wakeup_fd(-1) + except (ValueError, OSError) as nexc: + logger.info('set_wakeup_fd(-1) failed: %s', nexc) + + if exc.errno == errno.EINVAL: + raise RuntimeError(f'sig {sig} cannot be caught') + else: + raise + + def _handle_signal(self, sig): + """Internal helper that is the actual signal handler.""" + handle = self._signal_handlers.get(sig) + if handle is None: + return # Assume it's some race condition. + if handle._cancelled: + self.remove_signal_handler(sig) # Remove it properly. + else: + self._add_callback_signalsafe(handle) + + def remove_signal_handler(self, sig): + """Remove a handler for a signal. UNIX only. + + Return True if a signal handler was removed, False if not. + """ + self._check_signal(sig) + try: + del self._signal_handlers[sig] + except KeyError: + return False + + if sig == signal.SIGINT: + handler = signal.default_int_handler + else: + handler = signal.SIG_DFL + + try: + signal.signal(sig, handler) + except OSError as exc: + if exc.errno == errno.EINVAL: + raise RuntimeError(f'sig {sig} cannot be caught') + else: + raise + + if not self._signal_handlers: + try: + signal.set_wakeup_fd(-1) + except (ValueError, OSError) as exc: + logger.info('set_wakeup_fd(-1) failed: %s', exc) + + return True + + def _check_signal(self, sig): + """Internal helper to validate a signal. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + """ + if not isinstance(sig, int): + raise TypeError(f'sig must be an int, not {sig!r}') + + if sig not in signal.valid_signals(): + raise ValueError(f'invalid signal number {sig}') + + def _make_read_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra) + + def _make_write_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra) + + async def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + with events.get_child_watcher() as watcher: + if not watcher.is_active(): + # Check early. + # Raising exception before process creation + # prevents subprocess execution if the watcher + # is not ready to handle it. 
+ raise RuntimeError("asyncio.get_child_watcher() is not activated, " + "subprocess support is not installed.") + waiter = self.create_future() + transp = _UnixSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + + watcher.add_child_handler(transp.get_pid(), + self._child_watcher_callback, transp) + try: + await waiter + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + transp.close() + await transp._wait() + raise + + return transp + + def _child_watcher_callback(self, pid, returncode, transp): + self.call_soon_threadsafe(transp._process_exited, returncode) + + async def create_unix_connection( + self, protocol_factory, path=None, *, + ssl=None, sock=None, + server_hostname=None, + ssl_handshake_timeout=None): + assert server_hostname is None or isinstance(server_hostname, str) + if ssl: + if server_hostname is None: + raise ValueError( + 'you have to pass server_hostname when using ssl') + else: + if server_hostname is not None: + raise ValueError('server_hostname is only meaningful with ssl') + if ssl_handshake_timeout is not None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + path = os.fspath(path) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) + try: + sock.setblocking(False) + await self.sock_connect(sock, path) + except: + sock.close() + raise + + else: + if sock is None: + raise ValueError('no path and sock were specified') + if (sock.family != socket.AF_UNIX or + sock.type != socket.SOCK_STREAM): + raise ValueError( + f'A UNIX Domain Stream Socket was expected, got {sock!r}') + sock.setblocking(False) + + transport, protocol = await self._create_connection_transport( + sock, protocol_factory, ssl, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + return transport, protocol + + async def create_unix_server( + self, protocol_factory, path=None, *, + sock=None, backlog=100, ssl=None, + ssl_handshake_timeout=None, + start_serving=True): + if isinstance(ssl, bool): + raise TypeError('ssl argument must be an SSLContext or None') + + if ssl_handshake_timeout is not None and not ssl: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + path = os.fspath(path) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + + # Check for abstract socket. `str` and `bytes` paths are supported. + if path[0] not in (0, '\x00'): + try: + if stat.S_ISSOCK(os.stat(path).st_mode): + os.remove(path) + except FileNotFoundError: + pass + except OSError as err: + # Directory may have permissions only to create socket. + logger.error('Unable to check or remove stale UNIX socket ' + '%r: %r', path, err) + + try: + sock.bind(path) + except OSError as exc: + sock.close() + if exc.errno == errno.EADDRINUSE: + # Let's improve the error message by adding + # with what exact address it occurs. 
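# Illustrative sketch, not part of the patch: asyncio.open_unix_connection()
# is the stream-level wrapper over create_unix_connection() above. The
# socket path is a hypothetical example and needs a listening server.
import asyncio

async def main(path='/tmp/demo.sock'):
    reader, writer = await asyncio.open_unix_connection(path)
    writer.write(b'ping\n')
    await writer.drain()
    print(await reader.readline())
    writer.close()
    await writer.wait_closed()

# asyncio.run(main())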
+ msg = f'Address {path!r} is already in use' + raise OSError(errno.EADDRINUSE, msg) from None + else: + raise + except: + sock.close() + raise + else: + if sock is None: + raise ValueError( + 'path was not specified, and no sock specified') + + if (sock.family != socket.AF_UNIX or + sock.type != socket.SOCK_STREAM): + raise ValueError( + f'A UNIX Domain Stream Socket was expected, got {sock!r}') + + sock.setblocking(False) + server = base_events.Server(self, [sock], protocol_factory, + ssl, backlog, ssl_handshake_timeout) + if start_serving: + server._start_serving() + # Skip one loop iteration so that all 'loop.add_reader' + # go through. + await tasks.sleep(0, loop=self) + + return server + + async def _sock_sendfile_native(self, sock, file, offset, count): + try: + os.sendfile + except AttributeError as exc: + raise exceptions.SendfileNotAvailableError( + "os.sendfile() is not available") + try: + fileno = file.fileno() + except (AttributeError, io.UnsupportedOperation) as err: + raise exceptions.SendfileNotAvailableError("not a regular file") + try: + fsize = os.fstat(fileno).st_size + except OSError as err: + raise exceptions.SendfileNotAvailableError("not a regular file") + blocksize = count if count else fsize + if not blocksize: + return 0 # empty file + + fut = self.create_future() + self._sock_sendfile_native_impl(fut, None, sock, fileno, + offset, count, blocksize, 0) + return await fut + + def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno, + offset, count, blocksize, total_sent): + fd = sock.fileno() + if registered_fd is not None: + # Remove the callback early. It should be rare that the + # selector says the fd is ready but the call still returns + # EAGAIN, and I am willing to take a hit in that case in + # order to simplify the common case. + self.remove_writer(registered_fd) + if fut.cancelled(): + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + return + if count: + blocksize = count - total_sent + if blocksize <= 0: + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_result(total_sent) + return + + try: + sent = os.sendfile(fd, fileno, offset, blocksize) + except (BlockingIOError, InterruptedError): + if registered_fd is None: + self._sock_add_cancellation_callback(fut, sock) + self.add_writer(fd, self._sock_sendfile_native_impl, fut, + fd, sock, fileno, + offset, count, blocksize, total_sent) + except OSError as exc: + if (registered_fd is not None and + exc.errno == errno.ENOTCONN and + type(exc) is not ConnectionError): + # If we have an ENOTCONN and this isn't a first call to + # sendfile(), i.e. the connection was closed in the middle + # of the operation, normalize the error to ConnectionError + # to make it consistent across all Posix systems. + new_exc = ConnectionError( + "socket is not connected", errno.ENOTCONN) + new_exc.__cause__ = exc + exc = new_exc + if total_sent == 0: + # We can get here for different reasons, the main + # one being 'file' is not a regular mmap(2)-like + # file, in which case we'll fall back on using + # plain send(). 
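# Illustrative sketch, not part of the patch: loop.sock_sendfile() is the
# public entry point that reaches _sock_sendfile_native() above on
# platforms with os.sendfile(), falling back to plain send() elsewhere.
import asyncio
import socket

async def send_file(path, host, port):
    loop = asyncio.get_running_loop()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)
    await loop.sock_connect(sock, (host, port))
    with open(path, 'rb') as f:
        sent = await loop.sock_sendfile(sock, f)
    sock.close()
    return sent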
+ err = exceptions.SendfileNotAvailableError( + "os.sendfile call failed") + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_exception(err) + else: + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_exception(exc) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_exception(exc) + else: + if sent == 0: + # EOF + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_result(total_sent) + else: + offset += sent + total_sent += sent + if registered_fd is None: + self._sock_add_cancellation_callback(fut, sock) + self.add_writer(fd, self._sock_sendfile_native_impl, fut, + fd, sock, fileno, + offset, count, blocksize, total_sent) + + def _sock_sendfile_update_filepos(self, fileno, offset, total_sent): + if total_sent > 0: + os.lseek(fileno, offset, os.SEEK_SET) + + def _sock_add_cancellation_callback(self, fut, sock): + def cb(fut): + if fut.cancelled(): + fd = sock.fileno() + if fd != -1: + self.remove_writer(fd) + fut.add_done_callback(cb) + + +class _UnixReadPipeTransport(transports.ReadTransport): + + max_size = 256 * 1024 # max bytes we read in one event loop iteration + + def __init__(self, loop, pipe, protocol, waiter=None, extra=None): + super().__init__(extra) + self._extra['pipe'] = pipe + self._loop = loop + self._pipe = pipe + self._fileno = pipe.fileno() + self._protocol = protocol + self._closing = False + self._paused = False + + mode = os.fstat(self._fileno).st_mode + if not (stat.S_ISFIFO(mode) or + stat.S_ISSOCK(mode) or + stat.S_ISCHR(mode)): + self._pipe = None + self._fileno = None + self._protocol = None + raise ValueError("Pipe transport is for pipes/sockets only.") + + os.set_blocking(self._fileno, False) + + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop._add_reader, + self._fileno, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._pipe is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append(f'fd={self._fileno}') + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: + polling = selector_events._test_selector_event( + selector, self._fileno, selectors.EVENT_READ) + if polling: + info.append('polling') + else: + info.append('idle') + elif self._pipe is not None: + info.append('open') + else: + info.append('closed') + return '<{}>'.format(' '.join(info)) + + def _read_ready(self): + try: + data = os.read(self._fileno, self.max_size) + except (BlockingIOError, InterruptedError): + pass + except OSError as exc: + self._fatal_error(exc, 'Fatal read error on pipe transport') + else: + if data: + self._protocol.data_received(data) + else: + if self._loop.get_debug(): + logger.info("%r was closed by peer", self) + self._closing = True + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._protocol.eof_received) + self._loop.call_soon(self._call_connection_lost, None) + + def pause_reading(self): + if self._closing or self._paused: + return + self._paused = True + self._loop._remove_reader(self._fileno) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def 
resume_reading(self): + if self._closing or not self._paused: + return + self._paused = False + self._loop._add_reader(self._fileno, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if not self._closing: + self._close(None) + + def __del__(self, _warn=warnings.warn): + if self._pipe is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._pipe.close() + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + # should be called by exception handler only + if (isinstance(exc, OSError) and exc.errno == errno.EIO): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._close(exc) + + def _close(self, exc): + self._closing = True + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._pipe.close() + self._pipe = None + self._protocol = None + self._loop = None + + +class _UnixWritePipeTransport(transports._FlowControlMixin, + transports.WriteTransport): + + def __init__(self, loop, pipe, protocol, waiter=None, extra=None): + super().__init__(extra, loop) + self._extra['pipe'] = pipe + self._pipe = pipe + self._fileno = pipe.fileno() + self._protocol = protocol + self._buffer = bytearray() + self._conn_lost = 0 + self._closing = False # Set when close() or write_eof() called. + + mode = os.fstat(self._fileno).st_mode + is_char = stat.S_ISCHR(mode) + is_fifo = stat.S_ISFIFO(mode) + is_socket = stat.S_ISSOCK(mode) + if not (is_char or is_fifo or is_socket): + self._pipe = None + self._fileno = None + self._protocol = None + raise ValueError("Pipe transport is only for " + "pipes, sockets and character devices") + + os.set_blocking(self._fileno, False) + self._loop.call_soon(self._protocol.connection_made, self) + + # On AIX, the reader trick (to be notified when the read end of the + # socket is closed) only works for sockets. On other platforms it + # works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.) 
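# Illustrative sketch, not part of the patch: loop.connect_read_pipe()
# wraps the read end of a pipe in the _UnixReadPipeTransport defined above.
import asyncio
import os

class Printer(asyncio.Protocol):
    def data_received(self, data):
        print('got', data)

async def main():
    loop = asyncio.get_running_loop()
    rfd, wfd = os.pipe()
    transport, protocol = await loop.connect_read_pipe(
        Printer, os.fdopen(rfd, 'rb'))
    os.write(wfd, b'hello')
    await asyncio.sleep(0.1)      # let the read callback fire
    transport.close()
    os.close(wfd)

asyncio.run(main())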
+ if is_socket or (is_fifo and not sys.platform.startswith("aix")): + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop._add_reader, + self._fileno, self._read_ready) + + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._pipe is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append(f'fd={self._fileno}') + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: + polling = selector_events._test_selector_event( + selector, self._fileno, selectors.EVENT_WRITE) + if polling: + info.append('polling') + else: + info.append('idle') + + bufsize = self.get_write_buffer_size() + info.append(f'bufsize={bufsize}') + elif self._pipe is not None: + info.append('open') + else: + info.append('closed') + return '<{}>'.format(' '.join(info)) + + def get_write_buffer_size(self): + return len(self._buffer) + + def _read_ready(self): + # Pipe was closed by peer. + if self._loop.get_debug(): + logger.info("%r was closed by peer", self) + if self._buffer: + self._close(BrokenPipeError()) + else: + self._close() + + def write(self, data): + assert isinstance(data, (bytes, bytearray, memoryview)), repr(data) + if isinstance(data, bytearray): + data = memoryview(data) + if not data: + return + + if self._conn_lost or self._closing: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('pipe closed by peer or ' + 'os.write(pipe, data) raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. + try: + n = os.write(self._fileno, data) + except (BlockingIOError, InterruptedError): + n = 0 + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._conn_lost += 1 + self._fatal_error(exc, 'Fatal write error on pipe transport') + return + if n == len(data): + return + elif n > 0: + data = memoryview(data)[n:] + self._loop._add_writer(self._fileno, self._write_ready) + + self._buffer += data + self._maybe_pause_protocol() + + def _write_ready(self): + assert self._buffer, 'Data should not be empty' + + try: + n = os.write(self._fileno, self._buffer) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._buffer.clear() + self._conn_lost += 1 + # Remove writer here, _fatal_error() doesn't it + # because _buffer is empty. + self._loop._remove_writer(self._fileno) + self._fatal_error(exc, 'Fatal write error on pipe transport') + else: + if n == len(self._buffer): + self._buffer.clear() + self._loop._remove_writer(self._fileno) + self._maybe_resume_protocol() # May append to buffer. 
+ if self._closing: + self._loop._remove_reader(self._fileno) + self._call_connection_lost(None) + return + elif n > 0: + del self._buffer[:n] + + def can_write_eof(self): + return True + + def write_eof(self): + if self._closing: + return + assert self._pipe + self._closing = True + if not self._buffer: + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, None) + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._pipe is not None and not self._closing: + # write_eof is all what we needed to close the write pipe + self.write_eof() + + def __del__(self, _warn=warnings.warn): + if self._pipe is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._pipe.close() + + def abort(self): + self._close(None) + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + # should be called by exception handler only + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._close(exc) + + def _close(self, exc=None): + self._closing = True + if self._buffer: + self._loop._remove_writer(self._fileno) + self._buffer.clear() + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._pipe.close() + self._pipe = None + self._protocol = None + self._loop = None + + +class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport): + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + stdin_w = None + if stdin == subprocess.PIPE: + # Use a socket pair for stdin, since not all platforms + # support selecting read events on the write end of a + # socket (which we use in order to detect closing of the + # other end). Notably this is needed on AIX, and works + # just fine on other platforms. + stdin, stdin_w = socket.socketpair() + try: + self._proc = subprocess.Popen( + args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, + universal_newlines=False, bufsize=bufsize, **kwargs) + if stdin_w is not None: + stdin.close() + self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize) + stdin_w = None + finally: + if stdin_w is not None: + stdin.close() + stdin_w.close() + + +class AbstractChildWatcher: + """Abstract base class for monitoring child processes. + + Objects derived from this class monitor a collection of subprocesses and + report their termination or interruption by a signal. + + New callbacks are registered with .add_child_handler(). Starting a new + process must be done within a 'with' block to allow the watcher to suspend + its activity until the new process if fully registered (this is needed to + prevent a race condition in some implementations). + + Example: + with watcher: + proc = subprocess.Popen("sleep 1") + watcher.add_child_handler(proc.pid, callback) + + Notes: + Implementations of this class must be thread-safe. + + Since child watcher objects may catch the SIGCHLD signal and call + waitpid(-1), there should be only one active object per process. + """ + + def add_child_handler(self, pid, callback, *args): + """Register a new child handler. 
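# Illustrative sketch, not part of the patch: the high-level subprocess API
# ends up in _UnixSubprocessTransport above, and the child's exit is
# reported through the active child watcher.
import asyncio

async def main():
    proc = await asyncio.create_subprocess_exec(
        'echo', 'hello',
        stdout=asyncio.subprocess.PIPE)
    out, _ = await proc.communicate()
    print(out, proc.returncode)       # b'hello\n' 0

asyncio.run(main())                   # Unix; assumes 'echo' is on PATH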
+ + Arrange for callback(pid, returncode, *args) to be called when + process 'pid' terminates. Specifying another callback for the same + process replaces the previous handler. + + Note: callback() must be thread-safe. + """ + raise NotImplementedError() + + def remove_child_handler(self, pid): + """Removes the handler for process 'pid'. + + The function returns True if the handler was successfully removed, + False if there was nothing to remove.""" + + raise NotImplementedError() + + def attach_loop(self, loop): + """Attach the watcher to an event loop. + + If the watcher was previously attached to an event loop, then it is + first detached before attaching to the new loop. + + Note: loop may be None. + """ + raise NotImplementedError() + + def close(self): + """Close the watcher. + + This must be called to make sure that any underlying resource is freed. + """ + raise NotImplementedError() + + def is_active(self): + """Return ``True`` if the watcher is active and is used by the event loop. + + Return True if the watcher is installed and ready to handle process exit + notifications. + + """ + raise NotImplementedError() + + def __enter__(self): + """Enter the watcher's context and allow starting new processes + + This function must return self""" + raise NotImplementedError() + + def __exit__(self, a, b, c): + """Exit the watcher's context""" + raise NotImplementedError() + + +def _compute_returncode(status): + if os.WIFSIGNALED(status): + # The child process died because of a signal. + return -os.WTERMSIG(status) + elif os.WIFEXITED(status): + # The child process exited (e.g sys.exit()). + return os.WEXITSTATUS(status) + else: + # The child exited, but we don't understand its status. + # This shouldn't happen, but if it does, let's just + # return that status; perhaps that helps debug it. + return status + + +class BaseChildWatcher(AbstractChildWatcher): + + def __init__(self): + self._loop = None + self._callbacks = {} + + def close(self): + self.attach_loop(None) + + def is_active(self): + return self._loop is not None and self._loop.is_running() + + def _do_waitpid(self, expected_pid): + raise NotImplementedError() + + def _do_waitpid_all(self): + raise NotImplementedError() + + def attach_loop(self, loop): + assert loop is None or isinstance(loop, events.AbstractEventLoop) + + if self._loop is not None and loop is None and self._callbacks: + warnings.warn( + 'A loop is being detached ' + 'from a child watcher with pending handlers', + RuntimeWarning) + + if self._loop is not None: + self._loop.remove_signal_handler(signal.SIGCHLD) + + self._loop = loop + if loop is not None: + loop.add_signal_handler(signal.SIGCHLD, self._sig_chld) + + # Prevent a race condition in case a child terminated + # during the switch. + self._do_waitpid_all() + + def _sig_chld(self): + try: + self._do_waitpid_all() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + # self._loop should always be available here + # as '_sig_chld' is added as a signal handler + # in 'attach_loop' + self._loop.call_exception_handler({ + 'message': 'Unknown exception in SIGCHLD handler', + 'exception': exc, + }) + + +class SafeChildWatcher(BaseChildWatcher): + """'Safe' child watcher implementation. + + This implementation avoids disrupting other code spawning processes by + polling explicitly each process in the SIGCHLD handler instead of calling + os.waitpid(-1). 
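# Illustrative sketch, not part of the patch: installing a specific watcher
# instead of the default. Must run in the main thread so attach_loop() can
# subscribe to SIGCHLD.
import asyncio

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)

watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(loop)
asyncio.set_child_watcher(watcher)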
+ + This is a safe solution but it has a significant overhead when handling a + big number of children (O(n) each time SIGCHLD is raised) + """ + + def close(self): + self._callbacks.clear() + super().close() + + def __enter__(self): + return self + + def __exit__(self, a, b, c): + pass + + def add_child_handler(self, pid, callback, *args): + self._callbacks[pid] = (callback, args) + + # Prevent a race condition in case the child is already terminated. + self._do_waitpid(pid) + + def remove_child_handler(self, pid): + try: + del self._callbacks[pid] + return True + except KeyError: + return False + + def _do_waitpid_all(self): + + for pid in list(self._callbacks): + self._do_waitpid(pid) + + def _do_waitpid(self, expected_pid): + assert expected_pid > 0 + + try: + pid, status = os.waitpid(expected_pid, os.WNOHANG) + except ChildProcessError: + # The child process is already reaped + # (may happen if waitpid() is called elsewhere). + pid = expected_pid + returncode = 255 + logger.warning( + "Unknown child process pid %d, will report returncode 255", + pid) + else: + if pid == 0: + # The child process is still alive. + return + + returncode = _compute_returncode(status) + if self._loop.get_debug(): + logger.debug('process %s exited with returncode %s', + expected_pid, returncode) + + try: + callback, args = self._callbacks.pop(pid) + except KeyError: # pragma: no cover + # May happen if .remove_child_handler() is called + # after os.waitpid() returns. + if self._loop.get_debug(): + logger.warning("Child watcher got an unexpected pid: %r", + pid, exc_info=True) + else: + callback(pid, returncode, *args) + + +class FastChildWatcher(BaseChildWatcher): + """'Fast' child watcher implementation. + + This implementation reaps every terminated processes by calling + os.waitpid(-1) directly, possibly breaking other code spawning processes + and waiting for their termination. + + There is no noticeable overhead when handling a big number of children + (O(1) each time a child terminates). + """ + def __init__(self): + super().__init__() + self._lock = threading.Lock() + self._zombies = {} + self._forks = 0 + + def close(self): + self._callbacks.clear() + self._zombies.clear() + super().close() + + def __enter__(self): + with self._lock: + self._forks += 1 + + return self + + def __exit__(self, a, b, c): + with self._lock: + self._forks -= 1 + + if self._forks or not self._zombies: + return + + collateral_victims = str(self._zombies) + self._zombies.clear() + + logger.warning( + "Caught subprocesses termination from unknown pids: %s", + collateral_victims) + + def add_child_handler(self, pid, callback, *args): + assert self._forks, "Must use the context manager" + + with self._lock: + try: + returncode = self._zombies.pop(pid) + except KeyError: + # The child is running. + self._callbacks[pid] = callback, args + return + + # The child is dead already. We can fire the callback. + callback(pid, returncode, *args) + + def remove_child_handler(self, pid): + try: + del self._callbacks[pid] + return True + except KeyError: + return False + + def _do_waitpid_all(self): + # Because of signal coalescing, we must keep calling waitpid() as + # long as we're able to reap a child. + while True: + try: + pid, status = os.waitpid(-1, os.WNOHANG) + except ChildProcessError: + # No more child processes exist. + return + else: + if pid == 0: + # A child process is still alive. 
+ return + + returncode = _compute_returncode(status) + + with self._lock: + try: + callback, args = self._callbacks.pop(pid) + except KeyError: + # unknown child + if self._forks: + # It may not be registered yet. + self._zombies[pid] = returncode + if self._loop.get_debug(): + logger.debug('unknown process %s exited ' + 'with returncode %s', + pid, returncode) + continue + callback = None + else: + if self._loop.get_debug(): + logger.debug('process %s exited with returncode %s', + pid, returncode) + + if callback is None: + logger.warning( + "Caught subprocess termination from unknown pid: " + "%d -> %d", pid, returncode) + else: + callback(pid, returncode, *args) + + +class MultiLoopChildWatcher(AbstractChildWatcher): + """A watcher that doesn't require running loop in the main thread. + + This implementation registers a SIGCHLD signal handler on + instantiation (which may conflict with other code that + install own handler for this signal). + + The solution is safe but it has a significant overhead when + handling a big number of processes (*O(n)* each time a + SIGCHLD is received). + """ + + # Implementation note: + # The class keeps compatibility with AbstractChildWatcher ABC + # To achieve this it has empty attach_loop() method + # and doesn't accept explicit loop argument + # for add_child_handler()/remove_child_handler() + # but retrieves the current loop by get_running_loop() + + def __init__(self): + self._callbacks = {} + self._saved_sighandler = None + + def is_active(self): + return self._saved_sighandler is not None + + def close(self): + self._callbacks.clear() + if self._saved_sighandler is None: + return + + handler = signal.getsignal(signal.SIGCHLD) + if handler != self._sig_chld: + logger.warning("SIGCHLD handler was changed by outside code") + else: + signal.signal(signal.SIGCHLD, self._saved_sighandler) + self._saved_sighandler = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + def add_child_handler(self, pid, callback, *args): + loop = events.get_running_loop() + self._callbacks[pid] = (loop, callback, args) + + # Prevent a race condition in case the child is already terminated. + self._do_waitpid(pid) + + def remove_child_handler(self, pid): + try: + del self._callbacks[pid] + return True + except KeyError: + return False + + def attach_loop(self, loop): + # Don't save the loop but initialize itself if called first time + # The reason to do it here is that attach_loop() is called from + # unix policy only for the main thread. + # Main thread is required for subscription on SIGCHLD signal + if self._saved_sighandler is not None: + return + + self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld) + if self._saved_sighandler is None: + logger.warning("Previous SIGCHLD handler was set by non-Python code, " + "restore to default handler on watcher close.") + self._saved_sighandler = signal.SIG_DFL + + # Set SA_RESTART to limit EINTR occurrences. + signal.siginterrupt(signal.SIGCHLD, False) + + def _do_waitpid_all(self): + for pid in list(self._callbacks): + self._do_waitpid(pid) + + def _do_waitpid(self, expected_pid): + assert expected_pid > 0 + + try: + pid, status = os.waitpid(expected_pid, os.WNOHANG) + except ChildProcessError: + # The child process is already reaped + # (may happen if waitpid() is called elsewhere). 
+ pid = expected_pid + returncode = 255 + logger.warning( + "Unknown child process pid %d, will report returncode 255", + pid) + debug_log = False + else: + if pid == 0: + # The child process is still alive. + return + + returncode = _compute_returncode(status) + debug_log = True + try: + loop, callback, args = self._callbacks.pop(pid) + except KeyError: # pragma: no cover + # May happen if .remove_child_handler() is called + # after os.waitpid() returns. + logger.warning("Child watcher got an unexpected pid: %r", + pid, exc_info=True) + else: + if loop.is_closed(): + logger.warning("Loop %r that handles pid %r is closed", loop, pid) + else: + if debug_log and loop.get_debug(): + logger.debug('process %s exited with returncode %s', + expected_pid, returncode) + loop.call_soon_threadsafe(callback, pid, returncode, *args) + + def _sig_chld(self, signum, frame): + try: + self._do_waitpid_all() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + logger.warning('Unknown exception in SIGCHLD handler', exc_info=True) + + +class ThreadedChildWatcher(AbstractChildWatcher): + """Threaded child watcher implementation. + + The watcher uses a thread per process + for waiting for the process finish. + + It doesn't require subscription on POSIX signal + but a thread creation is not free. + + The watcher has O(1) complexity, its performance doesn't depend + on amount of spawn processes. + """ + + def __init__(self): + self._pid_counter = itertools.count(0) + self._threads = {} + + def is_active(self): + return True + + def close(self): + self._join_threads() + + def _join_threads(self): + """Internal: Join all non-daemon threads""" + threads = [thread for thread in list(self._threads.values()) + if thread.is_alive() and not thread.daemon] + for thread in threads: + thread.join() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + def __del__(self, _warn=warnings.warn): + threads = [thread for thread in list(self._threads.values()) + if thread.is_alive()] + if threads: + _warn(f"{self.__class__} has registered but not finished child processes", + ResourceWarning, + source=self) + + def add_child_handler(self, pid, callback, *args): + loop = events.get_running_loop() + thread = threading.Thread(target=self._do_waitpid, + name=f"waitpid-{next(self._pid_counter)}", + args=(loop, pid, callback, args), + daemon=True) + self._threads[pid] = thread + thread.start() + + def remove_child_handler(self, pid): + # asyncio never calls remove_child_handler() !!! + # The method is no-op but is implemented because + # abstract base classe requires it + return True + + def attach_loop(self, loop): + pass + + def _do_waitpid(self, loop, expected_pid, callback, args): + assert expected_pid > 0 + + try: + pid, status = os.waitpid(expected_pid, 0) + except ChildProcessError: + # The child process is already reaped + # (may happen if waitpid() is called elsewhere). 
+ pid = expected_pid + returncode = 255 + logger.warning( + "Unknown child process pid %d, will report returncode 255", + pid) + else: + returncode = _compute_returncode(status) + if loop.get_debug(): + logger.debug('process %s exited with returncode %s', + expected_pid, returncode) + + if loop.is_closed(): + logger.warning("Loop %r that handles pid %r is closed", loop, pid) + else: + loop.call_soon_threadsafe(callback, pid, returncode, *args) + + self._threads.pop(expected_pid) + + +class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + """UNIX event loop policy with a watcher for child processes.""" + _loop_factory = _UnixSelectorEventLoop + + def __init__(self): + super().__init__() + self._watcher = None + + def _init_watcher(self): + with events._lock: + if self._watcher is None: # pragma: no branch + self._watcher = ThreadedChildWatcher() + if isinstance(threading.current_thread(), + threading._MainThread): + self._watcher.attach_loop(self._local._loop) + + def set_event_loop(self, loop): + """Set the event loop. + + As a side effect, if a child watcher was set before, then calling + .set_event_loop() from the main thread will call .attach_loop(loop) on + the child watcher. + """ + + super().set_event_loop(loop) + + if (self._watcher is not None and + isinstance(threading.current_thread(), threading._MainThread)): + self._watcher.attach_loop(loop) + + def get_child_watcher(self): + """Get the watcher for child processes. + + If not yet set, a ThreadedChildWatcher object is automatically created. + """ + if self._watcher is None: + self._init_watcher() + + return self._watcher + + def set_child_watcher(self, watcher): + """Set the watcher for child processes.""" + + assert watcher is None or isinstance(watcher, AbstractChildWatcher) + + if self._watcher is not None: + self._watcher.close() + + self._watcher = watcher + + +SelectorEventLoop = _UnixSelectorEventLoop +DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/windows_events.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/windows_events.py new file mode 100644 index 0000000000000000000000000000000000000000..a5ad6fc423fbe8006ff2249dcfc2d1955be96d9e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/windows_events.py @@ -0,0 +1,908 @@ +"""Selector and proactor event loops for Windows.""" + +import _overlapped +import _winapi +import errno +import math +import msvcrt +import socket +import struct +import time +import weakref + +from . import events +from . import base_subprocess +from . import futures +from . import exceptions +from . import proactor_events +from . import selector_events +from . import tasks +from . import windows_utils +from .log import logger + + +__all__ = ( + 'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor', + 'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy', + 'WindowsProactorEventLoopPolicy', +) + + +NULL = 0 +INFINITE = 0xffffffff +ERROR_CONNECTION_REFUSED = 1225 +ERROR_CONNECTION_ABORTED = 1236 + +# Initial delay in seconds for connect_pipe() before retrying to connect +CONNECT_PIPE_INIT_DELAY = 0.001 + +# Maximum delay in seconds for connect_pipe() before retrying to connect +CONNECT_PIPE_MAX_DELAY = 0.100 + + +class _OverlappedFuture(futures.Future): + """Subclass of Future which represents an overlapped operation. + + Cancelling it will immediately cancel the overlapped operation. 
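# Illustrative sketch, not part of the patch: Python 3.8 makes
# ProactorEventLoop the Windows default; code that needs
# loop.add_reader()/add_writer() can opt back into the selector loop
# via the policy exported below.
import asyncio
import sys

if sys.platform == 'win32':
    asyncio.set_event_loop_policy(
        asyncio.WindowsSelectorEventLoopPolicy())

asyncio.run(asyncio.sleep(0))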
+ """ + + def __init__(self, ov, *, loop=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + self._ov = ov + + def _repr_info(self): + info = super()._repr_info() + if self._ov is not None: + state = 'pending' if self._ov.pending else 'completed' + info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>') + return info + + def _cancel_overlapped(self): + if self._ov is None: + return + try: + self._ov.cancel() + except OSError as exc: + context = { + 'message': 'Cancelling an overlapped future failed', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + self._ov = None + + def cancel(self): + self._cancel_overlapped() + return super().cancel() + + def set_exception(self, exception): + super().set_exception(exception) + self._cancel_overlapped() + + def set_result(self, result): + super().set_result(result) + self._ov = None + + +class _BaseWaitHandleFuture(futures.Future): + """Subclass of Future which represents a wait handle.""" + + def __init__(self, ov, handle, wait_handle, *, loop=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + # Keep a reference to the Overlapped object to keep it alive until the + # wait is unregistered + self._ov = ov + self._handle = handle + self._wait_handle = wait_handle + + # Should we call UnregisterWaitEx() if the wait completes + # or is cancelled? + self._registered = True + + def _poll(self): + # non-blocking wait: use a timeout of 0 millisecond + return (_winapi.WaitForSingleObject(self._handle, 0) == + _winapi.WAIT_OBJECT_0) + + def _repr_info(self): + info = super()._repr_info() + info.append(f'handle={self._handle:#x}') + if self._handle is not None: + state = 'signaled' if self._poll() else 'waiting' + info.append(state) + if self._wait_handle is not None: + info.append(f'wait_handle={self._wait_handle:#x}') + return info + + def _unregister_wait_cb(self, fut): + # The wait was unregistered: it's not safe to destroy the Overlapped + # object + self._ov = None + + def _unregister_wait(self): + if not self._registered: + return + self._registered = False + + wait_handle = self._wait_handle + self._wait_handle = None + try: + _overlapped.UnregisterWait(wait_handle) + except OSError as exc: + if exc.winerror != _overlapped.ERROR_IO_PENDING: + context = { + 'message': 'Failed to unregister the wait handle', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + return + # ERROR_IO_PENDING means that the unregister is pending + + self._unregister_wait_cb(None) + + def cancel(self): + self._unregister_wait() + return super().cancel() + + def set_exception(self, exception): + self._unregister_wait() + super().set_exception(exception) + + def set_result(self, result): + self._unregister_wait() + super().set_result(result) + + +class _WaitCancelFuture(_BaseWaitHandleFuture): + """Subclass of Future which represents a wait for the cancellation of a + _WaitHandleFuture using an event. 
+ """ + + def __init__(self, ov, event, wait_handle, *, loop=None): + super().__init__(ov, event, wait_handle, loop=loop) + + self._done_callback = None + + def cancel(self): + raise RuntimeError("_WaitCancelFuture must not be cancelled") + + def set_result(self, result): + super().set_result(result) + if self._done_callback is not None: + self._done_callback(self) + + def set_exception(self, exception): + super().set_exception(exception) + if self._done_callback is not None: + self._done_callback(self) + + +class _WaitHandleFuture(_BaseWaitHandleFuture): + def __init__(self, ov, handle, wait_handle, proactor, *, loop=None): + super().__init__(ov, handle, wait_handle, loop=loop) + self._proactor = proactor + self._unregister_proactor = True + self._event = _overlapped.CreateEvent(None, True, False, None) + self._event_fut = None + + def _unregister_wait_cb(self, fut): + if self._event is not None: + _winapi.CloseHandle(self._event) + self._event = None + self._event_fut = None + + # If the wait was cancelled, the wait may never be signalled, so + # it's required to unregister it. Otherwise, IocpProactor.close() will + # wait forever for an event which will never come. + # + # If the IocpProactor already received the event, it's safe to call + # _unregister() because we kept a reference to the Overlapped object + # which is used as a unique key. + self._proactor._unregister(self._ov) + self._proactor = None + + super()._unregister_wait_cb(fut) + + def _unregister_wait(self): + if not self._registered: + return + self._registered = False + + wait_handle = self._wait_handle + self._wait_handle = None + try: + _overlapped.UnregisterWaitEx(wait_handle, self._event) + except OSError as exc: + if exc.winerror != _overlapped.ERROR_IO_PENDING: + context = { + 'message': 'Failed to unregister the wait handle', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + return + # ERROR_IO_PENDING is not an error, the wait was unregistered + + self._event_fut = self._proactor._wait_cancel(self._event, + self._unregister_wait_cb) + + +class PipeServer(object): + """Class representing a pipe server. + + This is much like a bound, listening socket. + """ + def __init__(self, address): + self._address = address + self._free_instances = weakref.WeakSet() + # initialize the pipe attribute before calling _server_pipe_handle() + # because this function can raise an exception and the destructor calls + # the close() method + self._pipe = None + self._accept_pipe_future = None + self._pipe = self._server_pipe_handle(True) + + def _get_unconnected_pipe(self): + # Create new instance and return previous one. This ensures + # that (until the server is closed) there is always at least + # one pipe handle for address. Therefore if a client attempt + # to connect it will not fail with FileNotFoundError. + tmp, self._pipe = self._pipe, self._server_pipe_handle(False) + return tmp + + def _server_pipe_handle(self, first): + # Return a wrapper for a new pipe handle. 
+ if self.closed(): + return None + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + h = _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, + windows_utils.BUFSIZE, windows_utils.BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) + pipe = windows_utils.PipeHandle(h) + self._free_instances.add(pipe) + return pipe + + def closed(self): + return (self._address is None) + + def close(self): + if self._accept_pipe_future is not None: + self._accept_pipe_future.cancel() + self._accept_pipe_future = None + # Close all instances which have not been connected to by a client. + if self._address is not None: + for pipe in self._free_instances: + pipe.close() + self._pipe = None + self._address = None + self._free_instances.clear() + + __del__ = close + + +class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop): + """Windows version of selector event loop.""" + + +class ProactorEventLoop(proactor_events.BaseProactorEventLoop): + """Windows version of proactor event loop using IOCP.""" + + def __init__(self, proactor=None): + if proactor is None: + proactor = IocpProactor() + super().__init__(proactor) + + def run_forever(self): + try: + assert self._self_reading_future is None + self.call_soon(self._loop_self_reading) + super().run_forever() + finally: + if self._self_reading_future is not None: + ov = self._self_reading_future._ov + self._self_reading_future.cancel() + # self_reading_future was just cancelled so if it hasn't been + # finished yet, it never will be (it's possible that it has + # already finished and its callback is waiting in the queue, + # where it could still happen if the event loop is restarted). 
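
A short, guarded sketch of selecting this loop explicitly. On Python 3.8 ProactorEventLoop is already the Windows default, so this only makes the choice visible:

    import asyncio
    import sys

    if sys.platform == 'win32':
        # WindowsProactorEventLoopPolicy (defined near the end of this
        # module) hands out ProactorEventLoop backed by IocpProactor.
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
        asyncio.run(asyncio.sleep(0))
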
+ # Unregister it otherwise IocpProactor.close will wait for it + # forever + if ov is not None: + self._proactor._unregister(ov) + self._self_reading_future = None + + async def create_pipe_connection(self, protocol_factory, address): + f = self._proactor.connect_pipe(address) + pipe = await f + protocol = protocol_factory() + trans = self._make_duplex_pipe_transport(pipe, protocol, + extra={'addr': address}) + return trans, protocol + + async def start_serving_pipe(self, protocol_factory, address): + server = PipeServer(address) + + def loop_accept_pipe(f=None): + pipe = None + try: + if f: + pipe = f.result() + server._free_instances.discard(pipe) + + if server.closed(): + # A client connected before the server was closed: + # drop the client (close the pipe) and exit + pipe.close() + return + + protocol = protocol_factory() + self._make_duplex_pipe_transport( + pipe, protocol, extra={'addr': address}) + + pipe = server._get_unconnected_pipe() + if pipe is None: + return + + f = self._proactor.accept_pipe(pipe) + except OSError as exc: + if pipe and pipe.fileno() != -1: + self.call_exception_handler({ + 'message': 'Pipe accept failed', + 'exception': exc, + 'pipe': pipe, + }) + pipe.close() + elif self._debug: + logger.warning("Accept pipe failed on pipe %r", + pipe, exc_info=True) + except exceptions.CancelledError: + if pipe: + pipe.close() + else: + server._accept_pipe_future = f + f.add_done_callback(loop_accept_pipe) + + self.call_soon(loop_accept_pipe) + return [server] + + async def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + waiter = self.create_future() + transp = _WindowsSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + try: + await waiter + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + transp.close() + await transp._wait() + raise + + return transp + + +class IocpProactor: + """Proactor implementation using IOCP.""" + + def __init__(self, concurrency=0xffffffff): + self._loop = None + self._results = [] + self._iocp = _overlapped.CreateIoCompletionPort( + _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency) + self._cache = {} + self._registered = weakref.WeakSet() + self._unregistered = [] + self._stopped_serving = weakref.WeakSet() + + def _check_closed(self): + if self._iocp is None: + raise RuntimeError('IocpProactor is closed') + + def __repr__(self): + info = ['overlapped#=%s' % len(self._cache), + 'result#=%s' % len(self._results)] + if self._iocp is None: + info.append('closed') + return '<%s %s>' % (self.__class__.__name__, " ".join(info)) + + def set_loop(self, loop): + self._loop = loop + + def select(self, timeout=None): + if not self._results: + self._poll(timeout) + tmp = self._results + self._results = [] + return tmp + + def _result(self, value): + fut = self._loop.create_future() + fut.set_result(value) + return fut + + def recv(self, conn, nbytes, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + try: + if isinstance(conn, socket.socket): + ov.WSARecv(conn.fileno(), nbytes, flags) + else: + ov.ReadFile(conn.fileno(), nbytes) + except BrokenPipeError: + return self._result(b'') + + def finish_recv(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, 
conn, finish_recv) + + def recv_into(self, conn, buf, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + try: + if isinstance(conn, socket.socket): + ov.WSARecvInto(conn.fileno(), buf, flags) + else: + ov.ReadFileInto(conn.fileno(), buf) + except BrokenPipeError: + return self._result(0) + + def finish_recv(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_recv) + + def recvfrom(self, conn, nbytes, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + try: + ov.WSARecvFrom(conn.fileno(), nbytes, flags) + except BrokenPipeError: + return self._result((b'', None)) + + def finish_recv(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_recv) + + def sendto(self, conn, buf, flags=0, addr=None): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + + ov.WSASendTo(conn.fileno(), buf, flags, addr) + + def finish_send(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_send) + + def send(self, conn, buf, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + if isinstance(conn, socket.socket): + ov.WSASend(conn.fileno(), buf, flags) + else: + ov.WriteFile(conn.fileno(), buf) + + def finish_send(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_send) + + def accept(self, listener): + self._register_with_iocp(listener) + conn = self._get_accept_socket(listener.family) + ov = _overlapped.Overlapped(NULL) + ov.AcceptEx(listener.fileno(), conn.fileno()) + + def finish_accept(trans, key, ov): + ov.getresult() + # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work. + buf = struct.pack('@P', listener.fileno()) + conn.setsockopt(socket.SOL_SOCKET, + _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf) + conn.settimeout(listener.gettimeout()) + return conn, conn.getpeername() + + async def accept_coro(future, conn): + # Coroutine closing the accept socket if the future is cancelled + try: + await future + except exceptions.CancelledError: + conn.close() + raise + + future = self._register(ov, listener, finish_accept) + coro = accept_coro(future, conn) + tasks.ensure_future(coro, loop=self._loop) + return future + + def connect(self, conn, address): + if conn.type == socket.SOCK_DGRAM: + # WSAConnect will complete immediately for UDP sockets so we don't + # need to register any IOCP operation + _overlapped.WSAConnect(conn.fileno(), address) + fut = self._loop.create_future() + fut.set_result(None) + return fut + + self._register_with_iocp(conn) + # The socket needs to be locally bound before we call ConnectEx(). 
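
Applications do not call these per-operation methods directly; the proactor loop's sock_*() coroutines dispatch to them. A sketch in which host and port are placeholders and a listening peer is assumed:

    import asyncio
    import socket

    async def ping(host, port):
        # On ProactorEventLoop, sock_connect()/sock_sendall()/sock_recv()
        # are served by IocpProactor.connect()/send()/recv() shown here.
        loop = asyncio.get_running_loop()
        sock = socket.socket()
        sock.setblocking(False)
        await loop.sock_connect(sock, (host, port))
        await loop.sock_sendall(sock, b'ping')
        data = await loop.sock_recv(sock, 1024)
        sock.close()
        return data
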
+ try: + _overlapped.BindLocal(conn.fileno(), conn.family) + except OSError as e: + if e.winerror != errno.WSAEINVAL: + raise + # Probably already locally bound; check using getsockname(). + if conn.getsockname()[1] == 0: + raise + ov = _overlapped.Overlapped(NULL) + ov.ConnectEx(conn.fileno(), address) + + def finish_connect(trans, key, ov): + ov.getresult() + # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work. + conn.setsockopt(socket.SOL_SOCKET, + _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0) + return conn + + return self._register(ov, conn, finish_connect) + + def sendfile(self, sock, file, offset, count): + self._register_with_iocp(sock) + ov = _overlapped.Overlapped(NULL) + offset_low = offset & 0xffff_ffff + offset_high = (offset >> 32) & 0xffff_ffff + ov.TransmitFile(sock.fileno(), + msvcrt.get_osfhandle(file.fileno()), + offset_low, offset_high, + count, 0, 0) + + def finish_sendfile(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + return self._register(ov, sock, finish_sendfile) + + def accept_pipe(self, pipe): + self._register_with_iocp(pipe) + ov = _overlapped.Overlapped(NULL) + connected = ov.ConnectNamedPipe(pipe.fileno()) + + if connected: + # ConnectNamePipe() failed with ERROR_PIPE_CONNECTED which means + # that the pipe is connected. There is no need to wait for the + # completion of the connection. + return self._result(pipe) + + def finish_accept_pipe(trans, key, ov): + ov.getresult() + return pipe + + return self._register(ov, pipe, finish_accept_pipe) + + async def connect_pipe(self, address): + delay = CONNECT_PIPE_INIT_DELAY + while True: + # Unfortunately there is no way to do an overlapped connect to + # a pipe. Call CreateFile() in a loop until it doesn't fail with + # ERROR_PIPE_BUSY. + try: + handle = _overlapped.ConnectPipe(address) + break + except OSError as exc: + if exc.winerror != _overlapped.ERROR_PIPE_BUSY: + raise + + # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later + delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY) + await tasks.sleep(delay) + + return windows_utils.PipeHandle(handle) + + def wait_for_handle(self, handle, timeout=None): + """Wait for a handle. + + Return a Future object. The result of the future is True if the wait + completed, or False if the wait did not complete (on timeout). + """ + return self._wait_for_handle(handle, timeout, False) + + def _wait_cancel(self, event, done_callback): + fut = self._wait_for_handle(event, None, True) + # add_done_callback() cannot be used because the wait may only complete + # in IocpProactor.close(), while the event loop is not running. + fut._done_callback = done_callback + return fut + + def _wait_for_handle(self, handle, timeout, _is_cancel): + self._check_closed() + + if timeout is None: + ms = _winapi.INFINITE + else: + # RegisterWaitForSingleObject() has a resolution of 1 millisecond, + # round away from zero to wait *at least* timeout seconds. + ms = math.ceil(timeout * 1e3) + + # We only create ov so we can use ov.address as a key for the cache. 
+ ov = _overlapped.Overlapped(NULL) + wait_handle = _overlapped.RegisterWaitWithQueue( + handle, self._iocp, ov.address, ms) + if _is_cancel: + f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop) + else: + f = _WaitHandleFuture(ov, handle, wait_handle, self, + loop=self._loop) + if f._source_traceback: + del f._source_traceback[-1] + + def finish_wait_for_handle(trans, key, ov): + # Note that this second wait means that we should only use + # this with handles types where a successful wait has no + # effect. So events or processes are all right, but locks + # or semaphores are not. Also note if the handle is + # signalled and then quickly reset, then we may return + # False even though we have not timed out. + return f._poll() + + self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle) + return f + + def _register_with_iocp(self, obj): + # To get notifications of finished ops on this objects sent to the + # completion port, were must register the handle. + if obj not in self._registered: + self._registered.add(obj) + _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0) + # XXX We could also use SetFileCompletionNotificationModes() + # to avoid sending notifications to completion port of ops + # that succeed immediately. + + def _register(self, ov, obj, callback): + self._check_closed() + + # Return a future which will be set with the result of the + # operation when it completes. The future's value is actually + # the value returned by callback(). + f = _OverlappedFuture(ov, loop=self._loop) + if f._source_traceback: + del f._source_traceback[-1] + if not ov.pending: + # The operation has completed, so no need to postpone the + # work. We cannot take this short cut if we need the + # NumberOfBytes, CompletionKey values returned by + # PostQueuedCompletionStatus(). + try: + value = callback(None, None, ov) + except OSError as e: + f.set_exception(e) + else: + f.set_result(value) + # Even if GetOverlappedResult() was called, we have to wait for the + # notification of the completion in GetQueuedCompletionStatus(). + # Register the overlapped operation to keep a reference to the + # OVERLAPPED object, otherwise the memory is freed and Windows may + # read uninitialized memory. + + # Register the overlapped operation for later. Note that + # we only store obj to prevent it from being garbage + # collected too early. + self._cache[ov.address] = (f, ov, obj, callback) + return f + + def _unregister(self, ov): + """Unregister an overlapped object. + + Call this method when its future has been cancelled. The event can + already be signalled (pending in the proactor event queue). It is also + safe if the event is never signalled (because it was cancelled). + """ + self._check_closed() + self._unregistered.append(ov) + + def _get_accept_socket(self, family): + s = socket.socket(family) + s.settimeout(0) + return s + + def _poll(self, timeout=None): + if timeout is None: + ms = INFINITE + elif timeout < 0: + raise ValueError("negative timeout") + else: + # GetQueuedCompletionStatus() has a resolution of 1 millisecond, + # round away from zero to wait *at least* timeout seconds. 
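
wait_for_handle() is the primitive behind subprocess exit notification on Windows (see _WindowsSubprocessTransport below). A Windows-only sketch; it reaches through the private _proactor attribute, so treat it as illustration rather than supported API:

    import sys

    if sys.platform == 'win32':
        import asyncio

        async def wait_signalled(handle, timeout=None):
            # True once `handle` is signalled, False if `timeout` seconds
            # elapse first (rounded up to whole milliseconds).
            loop = asyncio.get_running_loop()
            return await loop._proactor.wait_for_handle(handle, timeout)
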
+ ms = math.ceil(timeout * 1e3) + if ms >= INFINITE: + raise ValueError("timeout too big") + + while True: + status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms) + if status is None: + break + ms = 0 + + err, transferred, key, address = status + try: + f, ov, obj, callback = self._cache.pop(address) + except KeyError: + if self._loop.get_debug(): + self._loop.call_exception_handler({ + 'message': ('GetQueuedCompletionStatus() returned an ' + 'unexpected event'), + 'status': ('err=%s transferred=%s key=%#x address=%#x' + % (err, transferred, key, address)), + }) + + # key is either zero, or it is used to return a pipe + # handle which should be closed to avoid a leak. + if key not in (0, _overlapped.INVALID_HANDLE_VALUE): + _winapi.CloseHandle(key) + continue + + if obj in self._stopped_serving: + f.cancel() + # Don't call the callback if _register() already read the result or + # if the overlapped has been cancelled + elif not f.done(): + try: + value = callback(transferred, key, ov) + except OSError as e: + f.set_exception(e) + self._results.append(f) + else: + f.set_result(value) + self._results.append(f) + + # Remove unregistered futures + for ov in self._unregistered: + self._cache.pop(ov.address, None) + self._unregistered.clear() + + def _stop_serving(self, obj): + # obj is a socket or pipe handle. It will be closed in + # BaseProactorEventLoop._stop_serving() which will make any + # pending operations fail quickly. + self._stopped_serving.add(obj) + + def close(self): + if self._iocp is None: + # already closed + return + + # Cancel remaining registered operations. + for address, (fut, ov, obj, callback) in list(self._cache.items()): + if fut.cancelled(): + # Nothing to do with cancelled futures + pass + elif isinstance(fut, _WaitCancelFuture): + # _WaitCancelFuture must not be cancelled + pass + else: + try: + fut.cancel() + except OSError as exc: + if self._loop is not None: + context = { + 'message': 'Cancelling a future failed', + 'exception': exc, + 'future': fut, + } + if fut._source_traceback: + context['source_traceback'] = fut._source_traceback + self._loop.call_exception_handler(context) + + # Wait until all cancelled overlapped complete: don't exit with running + # overlapped to prevent a crash. Display progress every second if the + # loop is still running. 
+ msg_update = 1.0 + start_time = time.monotonic() + next_msg = start_time + msg_update + while self._cache: + if next_msg <= time.monotonic(): + logger.debug('%r is running after closing for %.1f seconds', + self, time.monotonic() - start_time) + next_msg = time.monotonic() + msg_update + + # handle a few events, or timeout + self._poll(msg_update) + + self._results = [] + + _winapi.CloseHandle(self._iocp) + self._iocp = None + + def __del__(self): + self.close() + + +class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport): + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + self._proc = windows_utils.Popen( + args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, + bufsize=bufsize, **kwargs) + + def callback(f): + returncode = self._proc.poll() + self._process_exited(returncode) + + f = self._loop._proactor.wait_for_handle(int(self._proc._handle)) + f.add_done_callback(callback) + + +SelectorEventLoop = _WindowsSelectorEventLoop + + +class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + _loop_factory = SelectorEventLoop + + +class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + _loop_factory = ProactorEventLoop + + +DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/windows_utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/windows_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ef277fac3e291c307d5e26ada8ef5e28c47434f1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/asyncio/windows_utils.py @@ -0,0 +1,173 @@ +"""Various Windows specific bits and pieces.""" + +import sys + +if sys.platform != 'win32': # pragma: no cover + raise ImportError('win32 only') + +import _winapi +import itertools +import msvcrt +import os +import subprocess +import tempfile +import warnings + + +__all__ = 'pipe', 'Popen', 'PIPE', 'PipeHandle' + + +# Constants/globals + + +BUFSIZE = 8192 +PIPE = subprocess.PIPE +STDOUT = subprocess.STDOUT +_mmap_counter = itertools.count() + + +# Replacement for os.pipe() using handles instead of fds + + +def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE): + """Like os.pipe() but with overlapped support and using handles not fds.""" + address = tempfile.mktemp( + prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format( + os.getpid(), next(_mmap_counter))) + + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = bufsize, bufsize + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, bufsize + + openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + + if overlapped[0]: + openmode |= _winapi.FILE_FLAG_OVERLAPPED + + if overlapped[1]: + flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED + else: + flags_and_attribs = 0 + + h1 = h2 = None + try: + h1 = _winapi.CreateNamedPipe( + address, openmode, _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) + + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + flags_and_attribs, _winapi.NULL) + + ov = _winapi.ConnectNamedPipe(h1, overlapped=True) + ov.GetOverlappedResult(True) + return h1, h2 + except: + if h1 is not None: + _winapi.CloseHandle(h1) + if h2 is not None: + _winapi.CloseHandle(h2) + raise + + +# Wrapper for a pipe handle + + +class PipeHandle: + """Wrapper for an overlapped pipe handle 
which is vaguely file-object like. + + The IOCP event loop can use these instead of socket objects. + """ + def __init__(self, handle): + self._handle = handle + + def __repr__(self): + if self._handle is not None: + handle = f'handle={self._handle!r}' + else: + handle = 'closed' + return f'<{self.__class__.__name__} {handle}>' + + @property + def handle(self): + return self._handle + + def fileno(self): + if self._handle is None: + raise ValueError("I/O operation on closed pipe") + return self._handle + + def close(self, *, CloseHandle=_winapi.CloseHandle): + if self._handle is not None: + CloseHandle(self._handle) + self._handle = None + + def __del__(self, _warn=warnings.warn): + if self._handle is not None: + _warn(f"unclosed {self!r}", ResourceWarning, source=self) + self.close() + + def __enter__(self): + return self + + def __exit__(self, t, v, tb): + self.close() + + +# Replacement for subprocess.Popen using overlapped pipe handles + + +class Popen(subprocess.Popen): + """Replacement for subprocess.Popen using overlapped pipe handles. + + The stdin, stdout, stderr are None or instances of PipeHandle. + """ + def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds): + assert not kwds.get('universal_newlines') + assert kwds.get('bufsize', 0) == 0 + stdin_rfd = stdout_wfd = stderr_wfd = None + stdin_wh = stdout_rh = stderr_rh = None + if stdin == PIPE: + stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True) + stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY) + else: + stdin_rfd = stdin + if stdout == PIPE: + stdout_rh, stdout_wh = pipe(overlapped=(True, False)) + stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0) + else: + stdout_wfd = stdout + if stderr == PIPE: + stderr_rh, stderr_wh = pipe(overlapped=(True, False)) + stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0) + elif stderr == STDOUT: + stderr_wfd = stdout_wfd + else: + stderr_wfd = stderr + try: + super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd, + stderr=stderr_wfd, **kwds) + except: + for h in (stdin_wh, stdout_rh, stderr_rh): + if h is not None: + _winapi.CloseHandle(h) + raise + else: + if stdin_wh is not None: + self.stdin = PipeHandle(stdin_wh) + if stdout_rh is not None: + self.stdout = PipeHandle(stdout_rh) + if stderr_rh is not None: + self.stderr = PipeHandle(stderr_rh) + finally: + if stdin == PIPE: + os.close(stdin_rfd) + if stdout == PIPE: + os.close(stdout_wfd) + if stderr == PIPE: + os.close(stderr_wfd) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/collections/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/collections/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..064e3622863f3d0990de987d5dedaf99e694bdad --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/collections/__init__.py @@ -0,0 +1,1279 @@ +'''This module implements specialized container datatypes providing +alternatives to Python's general purpose built-in containers, dict, +list, set, and tuple. 
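
Rounding off windows_utils: a Windows-only sketch pairing pipe() with PipeHandle so both raw handles are closed deterministically:

    import sys

    if sys.platform == 'win32':
        from asyncio import windows_utils

        # pipe() returns bare HANDLEs; PipeHandle layers on fileno()/close(),
        # context-manager support, and a ResourceWarning when leaked.
        h1, h2 = windows_utils.pipe(overlapped=(True, False))
        with windows_utils.PipeHandle(h1), windows_utils.PipeHandle(h2):
            pass  # both ends closed on exit
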
+ +* namedtuple factory function for creating tuple subclasses with named fields +* deque list-like container with fast appends and pops on either end +* ChainMap dict-like class for creating a single view of multiple mappings +* Counter dict subclass for counting hashable objects +* OrderedDict dict subclass that remembers the order entries were added +* defaultdict dict subclass that calls a factory function to supply missing values +* UserDict wrapper around dictionary objects for easier dict subclassing +* UserList wrapper around list objects for easier list subclassing +* UserString wrapper around string objects for easier string subclassing + +''' + +__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', + 'UserString', 'Counter', 'OrderedDict', 'ChainMap'] + +import _collections_abc +from operator import itemgetter as _itemgetter, eq as _eq +from keyword import iskeyword as _iskeyword +import sys as _sys +import heapq as _heapq +from _weakref import proxy as _proxy +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap +from reprlib import recursive_repr as _recursive_repr + +try: + from _collections import deque +except ImportError: + pass +else: + _collections_abc.MutableSequence.register(deque) + +try: + from _collections import defaultdict +except ImportError: + pass + + +def __getattr__(name): + # For backwards compatibility, continue to make the collections ABCs + # through Python 3.6 available through the collections module. + # Note, no new collections ABCs were added in Python 3.7 + if name in _collections_abc.__all__: + obj = getattr(_collections_abc, name) + import warnings + warnings.warn("Using or importing the ABCs from 'collections' instead " + "of from 'collections.abc' is deprecated since Python 3.3, " + "and in 3.10 it will stop working", + DeprecationWarning, stacklevel=2) + globals()[name] = obj + return obj + raise AttributeError(f'module {__name__!r} has no attribute {name!r}') + +################################################################################ +### OrderedDict +################################################################################ + +class _OrderedDictKeysView(_collections_abc.KeysView): + + def __reversed__(self): + yield from reversed(self._mapping) + +class _OrderedDictItemsView(_collections_abc.ItemsView): + + def __reversed__(self): + for key in reversed(self._mapping): + yield (key, self._mapping[key]) + +class _OrderedDictValuesView(_collections_abc.ValuesView): + + def __reversed__(self): + for key in reversed(self._mapping): + yield self._mapping[key] + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. 
+ + def __init__(self, other=(), /, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries. Keyword argument order is preserved. + ''' + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(other, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + link.prev = None + link.next = None + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''Remove and return a (key, value) pair from the dictionary. + + Pairs are returned in LIFO order if last is true or FIFO order if false. + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last is false). + + Raise KeyError if the element does not exist. 
+ ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + soft_link = link_next.prev + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + root.prev = soft_link + last.next = link + else: + first = root.next + link.prev = root + link.next = first + first.prev = soft_link + root.next = link + + def __sizeof__(self): + sizeof = _sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + update = __update = _collections_abc.MutableMapping.update + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return _OrderedDictKeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return _OrderedDictItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return _OrderedDictValuesView(self) + + __ne__ = _collections_abc.MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + '''Insert key with a value of default if key is not in the dictionary. + + Return the value for key if key is in the dictionary, else default. + ''' + if key in self: + return self[key] + self[key] = default + return default + + @_recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + return self.__class__, (), inst_dict or None, None, iter(self.items()) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''Create a new ordered dictionary with keys from iterable and values set to value. + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return dict.__eq__(self, other) and all(map(_eq, self, other)) + return dict.__eq__(self, other) + + +try: + from _collections import OrderedDict +except ImportError: + # Leave the pure Python version in place. + pass + + +################################################################################ +### namedtuple +################################################################################ + +try: + from _collections import _tuplegetter +except ImportError: + _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc) + +def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): + """Returns a new subclass of tuple with named fields. 
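
The order-aware methods above, exercised in one runnable snippet:

    from collections import OrderedDict

    od = OrderedDict.fromkeys('abcde')
    od.move_to_end('b')                            # 'b' moves to the right end
    assert ''.join(od) == 'acdeb'
    od.move_to_end('b', last=False)                # ...and back to the front
    assert ''.join(od) == 'bacde'
    assert od.popitem() == ('e', None)             # LIFO by default
    assert od.popitem(last=False) == ('b', None)   # FIFO with last=False
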
+ + >>> Point = namedtuple('Point', ['x', 'y']) + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessible by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Validate the field names. At the user's option, either generate an error + # message or automatically replace the field name with a valid name. + if isinstance(field_names, str): + field_names = field_names.replace(',', ' ').split() + field_names = list(map(str, field_names)) + typename = _sys.intern(str(typename)) + + if rename: + seen = set() + for index, name in enumerate(field_names): + if (not name.isidentifier() + or _iskeyword(name) + or name.startswith('_') + or name in seen): + field_names[index] = f'_{index}' + seen.add(name) + + for name in [typename] + field_names: + if type(name) is not str: + raise TypeError('Type names and field names must be strings') + if not name.isidentifier(): + raise ValueError('Type names and field names must be valid ' + f'identifiers: {name!r}') + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a ' + f'keyword: {name!r}') + + seen = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: ' + f'{name!r}') + if name in seen: + raise ValueError(f'Encountered duplicate field name: {name!r}') + seen.add(name) + + field_defaults = {} + if defaults is not None: + defaults = tuple(defaults) + if len(defaults) > len(field_names): + raise TypeError('Got more default values than field names') + field_defaults = dict(reversed(list(zip(reversed(field_names), + reversed(defaults))))) + + # Variables used in the methods and docstrings + field_names = tuple(map(_sys.intern, field_names)) + num_fields = len(field_names) + arg_list = repr(field_names).replace("'", "")[1:-1] + repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' + tuple_new = tuple.__new__ + _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip + + # Create all the named tuple methods to be added to the class namespace + + s = f'def __new__(_cls, {arg_list}): return _tuple_new(_cls, ({arg_list}))' + namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'} + # Note: exec() has the side-effect of interning the field names + exec(s, namespace) + __new__ = namespace['__new__'] + __new__.__doc__ = f'Create new instance of {typename}({arg_list})' + if defaults is not None: + __new__.__defaults__ = defaults + + @classmethod + def _make(cls, iterable): + result = tuple_new(cls, iterable) + if _len(result) != num_fields: + raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') + return result + + _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' + 'or iterable') + + def _replace(self, /, **kwds): + result = self._make(_map(kwds.pop, field_names, self)) + if kwds: + raise ValueError(f'Got unexpected field names: {list(kwds)!r}') + return result + + _replace.__doc__ = (f'Return a new {typename} object replacing specified ' + 'fields with new values') + + def __repr__(self): + 'Return a nicely formatted representation 
string' + return self.__class__.__name__ + repr_fmt % self + + def _asdict(self): + 'Return a new dict which maps field names to their values.' + return _dict(_zip(self._fields, self)) + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return _tuple(self) + + # Modify function metadata to help with introspection and debugging + for method in (__new__, _make.__func__, _replace, + __repr__, _asdict, __getnewargs__): + method.__qualname__ = f'{typename}.{method.__name__}' + + # Build-up the class namespace dictionary + # and use type() to build the result class + class_namespace = { + '__doc__': f'{typename}({arg_list})', + '__slots__': (), + '_fields': field_names, + '_field_defaults': field_defaults, + # alternate spelling for backward compatibility + '_fields_defaults': field_defaults, + '__new__': __new__, + '_make': _make, + '_replace': _replace, + '__repr__': __repr__, + '_asdict': _asdict, + '__getnewargs__': __getnewargs__, + } + for index, name in enumerate(field_names): + doc = _sys.intern(f'Alias for field number {index}') + class_namespace[name] = _tuplegetter(index, doc) + + result = type(typename, (tuple,), class_namespace) + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython), or where the user has + # specified a particular module. + if module is None: + try: + module = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + if module is not None: + result.__module__ = module + + return result + + +######################################################################## +### Counter +######################################################################## + +def _count_elements(mapping, iterable): + 'Tally elements from the iterable.' + mapping_get = mapping.get + for elem in iterable: + mapping[elem] = mapping_get(elem, 0) + 1 + +try: # Load C helper function if available + from _collections import _count_elements +except ImportError: + pass + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. + + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... 
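
The rename and defaults knobs of the factory above, exercised directly:

    from collections import namedtuple

    # rename=True rewrites invalid or duplicate names as _0, _1, ...;
    # defaults attach to the rightmost fields.
    Row = namedtuple('Row', ['id', 'class', 'id'], rename=True, defaults=(0,))
    assert Row._fields == ('id', '_1', '_2')
    r = Row(1, 2)
    assert r == (1, 2, 0)
    assert r._replace(id=7)._asdict() == {'id': 7, '_1': 2, '_2': 0}
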
c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(self, iterable=None, /, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + super(Counter, self).__init__() + self.update(iterable, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. + + >>> Counter('abracadabra').most_common(3) + [('a', 5), ('b', 2), ('r', 2)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.items(), key=_itemgetter(1), reverse=True) + return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. + return _chain.from_iterable(_starmap(_repeat, self.items())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because the semantics + # would be ambiguous in cases such as Counter.fromkeys('aaabbc', v=2). + # Initializing counters to zero values isn't necessary because zero + # is already the default value for counter lookups. Initializing + # to one is easily accomplished with Counter(set(iterable)). For + # more exotic cases, create a dictionary first using a dictionary + # comprehension or dict.fromkeys(). + raise NotImplementedError( + 'Counter.fromkeys() is undefined. 
Use Counter(iterable) instead.') + + def update(self, iterable=None, /, **kwds): + '''Like dict.update() but add counts instead of replacing them. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if iterable is not None: + if isinstance(iterable, _collections_abc.Mapping): + if self: + self_get = self.get + for elem, count in iterable.items(): + self[elem] = count + self_get(elem, 0) + else: + super(Counter, self).update(iterable) # fast path when counter is empty + else: + _count_elements(self, iterable) + if kwds: + self.update(kwds) + + def subtract(self, iterable=None, /, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if iterable is not None: + self_get = self.get + if isinstance(iterable, _collections_abc.Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' + return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' + if elem in self: + super().__delitem__(elem) + + def __repr__(self): + if not self: + return '%s()' % self.__class__.__name__ + try: + items = ', '.join(map('%r: %r'.__mod__, self.most_common())) + return '%s({%s})' % (self.__class__.__name__, items) + except TypeError: + # handle case where values are not orderable + return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + # + # Rich comparison operators for multiset subset and superset tests + # are deliberately omitted due to semantic conflicts with the + # existing inherited dict equality method. Subset and superset + # semantics ignore zero counts and require that p≤q ∧ p≥q → p=q; + # however, that would not be the case for p=Counter(a=1, b=0) + # and q=Counter(a=1) where the dictionaries are not equal. + + def __add__(self, other): + '''Add counts from two counters. 
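
update() and subtract() follow the additive semantics described above, with zero and negative counts allowed to accumulate:

    from collections import Counter

    c = Counter(a=2)
    c.update(b=1)            # adds counts instead of replacing them
    c.subtract({'a': 3})     # may drive a count below zero
    assert c == Counter({'b': 1, 'a': -1})
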
+ + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. + + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + def __pos__(self): + 'Adds an empty counter, effectively stripping negative and zero counts' + result = Counter() + for elem, count in self.items(): + if count > 0: + result[elem] = count + return result + + def __neg__(self): + '''Subtracts from an empty counter. Strips positive and zero counts, + and flips the sign on negative counts. + + ''' + result = Counter() + for elem, count in self.items(): + if count < 0: + result[elem] = 0 - count + return result + + def _keep_positive(self): + '''Internal method to strip elements with a negative or zero count''' + nonpositive = [elem for elem, count in self.items() if not count > 0] + for elem in nonpositive: + del self[elem] + return self + + def __iadd__(self, other): + '''Inplace add from another counter, keeping only positive counts. + + >>> c = Counter('abbb') + >>> c += Counter('bcc') + >>> c + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] += count + return self._keep_positive() + + def __isub__(self, other): + '''Inplace subtract counter, but keep only results with positive counts. + + >>> c = Counter('abbbc') + >>> c -= Counter('bccd') + >>> c + Counter({'b': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] -= count + return self._keep_positive() + + def __ior__(self, other): + '''Inplace union is the maximum of value from either counter. 
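
The binary multiset operators defined above keep only positive counts in their results:

    from collections import Counter

    a, b = Counter('abbb'), Counter('bcc')
    assert a + b == Counter({'b': 4, 'c': 2, 'a': 1})
    assert a - b == Counter({'b': 2, 'a': 1})            # negatives dropped
    assert (a | b) == Counter({'b': 3, 'c': 2, 'a': 1})  # element-wise max
    assert (a & b) == Counter({'b': 1})                  # element-wise min
    assert +Counter(a=-1, b=2) == Counter(b=2)           # strips counts <= 0
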
+ + >>> c = Counter('abbb') + >>> c |= Counter('bcc') + >>> c + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + for elem, other_count in other.items(): + count = self[elem] + if other_count > count: + self[elem] = other_count + return self._keep_positive() + + def __iand__(self, other): + '''Inplace intersection is the minimum of corresponding counts. + + >>> c = Counter('abbb') + >>> c &= Counter('bcc') + >>> c + Counter({'b': 1}) + + ''' + for elem, count in self.items(): + other_count = other[elem] + if other_count < count: + self[elem] = other_count + return self._keep_positive() + + +######################################################################## +### ChainMap +######################################################################## + +class ChainMap(_collections_abc.MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + be accessed or updated using the *maps* attribute. There is no other + state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + d = {} + for mapping in reversed(self.maps): + d.update(mapping) # reuses stored hash values if possible + return iter(d) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return f'{self.__class__.__name__}({", ".join(map(repr, self.maps))})' + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self, m=None): # like Django's Context.push() + '''New ChainMap with a new map followed by all previous maps. + If no map is provided, an empty dict is used. + ''' + if m is None: + m = {} + return self.__class__(m, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' 
+
+
+################################################################################
+### UserDict
+################################################################################
+
+class UserDict(_collections_abc.MutableMapping):
+
+    # Start by filling-out the abstract methods
+    def __init__(*args, **kwargs):
+        if not args:
+            raise TypeError("descriptor '__init__' of 'UserDict' object "
+                            "needs an argument")
+        self, *args = args
+        if len(args) > 1:
+            raise TypeError('expected at most 1 arguments, got %d' % len(args))
+        if args:
+            dict = args[0]
+        elif 'dict' in kwargs:
+            dict = kwargs.pop('dict')
+            import warnings
+            warnings.warn("Passing 'dict' as keyword argument is deprecated",
+                          DeprecationWarning, stacklevel=2)
+        else:
+            dict = None
+        self.data = {}
+        if dict is not None:
+            self.update(dict)
+        if kwargs:
+            self.update(kwargs)
+    __init__.__text_signature__ = '($self, dict=None, /, **kwargs)'
+
+    def __len__(self): return len(self.data)
+    def __getitem__(self, key):
+        if key in self.data:
+            return self.data[key]
+        if hasattr(self.__class__, "__missing__"):
+            return self.__class__.__missing__(self, key)
+        raise KeyError(key)
+    def __setitem__(self, key, item): self.data[key] = item
+    def __delitem__(self, key): del self.data[key]
+    def __iter__(self):
+        return iter(self.data)
+
+    # Modify __contains__ to work correctly when __missing__ is present
+    def __contains__(self, key):
+        return key in self.data
+
+    # Now, add the methods in dicts but not in MutableMapping
+    def __repr__(self): return repr(self.data)
+    def __copy__(self):
+        inst = self.__class__.__new__(self.__class__)
+        inst.__dict__.update(self.__dict__)
+        # Create a copy and avoid triggering descriptors
+        inst.__dict__["data"] = self.__dict__["data"].copy()
+        return inst
+
+    def copy(self):
+        if self.__class__ is UserDict:
+            return UserDict(self.data.copy())
+        import copy
+        data = self.data
+        try:
+            self.data = {}
+            c = copy.copy(self)
+        finally:
+            self.data = data
+        c.update(self)
+        return c
+
+    @classmethod
+    def fromkeys(cls, iterable, value=None):
+        d = cls()
+        for key in iterable:
+            d[key] = value
+        return d
+
+
+
+################################################################################
+### UserList
+################################################################################
+
+class UserList(_collections_abc.MutableSequence):
+    """A more or less complete user-defined wrapper around list objects."""
+    def __init__(self, initlist=None):
+        self.data = []
+        if initlist is not None:
+            # XXX should this accept an arbitrary sequence?
+ if type(initlist) == type(self.data): + self.data[:] = initlist + elif isinstance(initlist, UserList): + self.data[:] = initlist.data[:] + else: + self.data = list(initlist) + def __repr__(self): return repr(self.data) + def __lt__(self, other): return self.data < self.__cast(other) + def __le__(self, other): return self.data <= self.__cast(other) + def __eq__(self, other): return self.data == self.__cast(other) + def __gt__(self, other): return self.data > self.__cast(other) + def __ge__(self, other): return self.data >= self.__cast(other) + def __cast(self, other): + return other.data if isinstance(other, UserList) else other + def __contains__(self, item): return item in self.data + def __len__(self): return len(self.data) + def __getitem__(self, i): + if isinstance(i, slice): + return self.__class__(self.data[i]) + else: + return self.data[i] + def __setitem__(self, i, item): self.data[i] = item + def __delitem__(self, i): del self.data[i] + def __add__(self, other): + if isinstance(other, UserList): + return self.__class__(self.data + other.data) + elif isinstance(other, type(self.data)): + return self.__class__(self.data + other) + return self.__class__(self.data + list(other)) + def __radd__(self, other): + if isinstance(other, UserList): + return self.__class__(other.data + self.data) + elif isinstance(other, type(self.data)): + return self.__class__(other + self.data) + return self.__class__(list(other) + self.data) + def __iadd__(self, other): + if isinstance(other, UserList): + self.data += other.data + elif isinstance(other, type(self.data)): + self.data += other + else: + self.data += list(other) + return self + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __imul__(self, n): + self.data *= n + return self + def __copy__(self): + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + # Create a copy and avoid triggering descriptors + inst.__dict__["data"] = self.__dict__["data"][:] + return inst + def append(self, item): self.data.append(item) + def insert(self, i, item): self.data.insert(i, item) + def pop(self, i=-1): return self.data.pop(i) + def remove(self, item): self.data.remove(item) + def clear(self): self.data.clear() + def copy(self): return self.__class__(self) + def count(self, item): return self.data.count(item) + def index(self, item, *args): return self.data.index(item, *args) + def reverse(self): self.data.reverse() + def sort(self, /, *args, **kwds): self.data.sort(*args, **kwds) + def extend(self, other): + if isinstance(other, UserList): + self.data.extend(other.data) + else: + self.data.extend(other) + + + +################################################################################ +### UserString +################################################################################ + +class UserString(_collections_abc.Sequence): + def __init__(self, seq): + if isinstance(seq, str): + self.data = seq + elif isinstance(seq, UserString): + self.data = seq.data[:] + else: + self.data = str(seq) + def __str__(self): return str(self.data) + def __repr__(self): return repr(self.data) + def __int__(self): return int(self.data) + def __float__(self): return float(self.data) + def __complex__(self): return complex(self.data) + def __hash__(self): return hash(self.data) + def __getnewargs__(self): + return (self.data[:],) + + def __eq__(self, string): + if isinstance(string, UserString): + return self.data == string.data + return self.data == string + def __lt__(self, string): + if 
isinstance(string, UserString): + return self.data < string.data + return self.data < string + def __le__(self, string): + if isinstance(string, UserString): + return self.data <= string.data + return self.data <= string + def __gt__(self, string): + if isinstance(string, UserString): + return self.data > string.data + return self.data > string + def __ge__(self, string): + if isinstance(string, UserString): + return self.data >= string.data + return self.data >= string + + def __contains__(self, char): + if isinstance(char, UserString): + char = char.data + return char in self.data + + def __len__(self): return len(self.data) + def __getitem__(self, index): return self.__class__(self.data[index]) + def __add__(self, other): + if isinstance(other, UserString): + return self.__class__(self.data + other.data) + elif isinstance(other, str): + return self.__class__(self.data + other) + return self.__class__(self.data + str(other)) + def __radd__(self, other): + if isinstance(other, str): + return self.__class__(other + self.data) + return self.__class__(str(other) + self.data) + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __mod__(self, args): + return self.__class__(self.data % args) + def __rmod__(self, template): + return self.__class__(str(template) % self) + # the following methods are defined in alphabetical order: + def capitalize(self): return self.__class__(self.data.capitalize()) + def casefold(self): + return self.__class__(self.data.casefold()) + def center(self, width, *args): + return self.__class__(self.data.center(width, *args)) + def count(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.count(sub, start, end) + def encode(self, encoding='utf-8', errors='strict'): + encoding = 'utf-8' if encoding is None else encoding + errors = 'strict' if errors is None else errors + return self.data.encode(encoding, errors) + def endswith(self, suffix, start=0, end=_sys.maxsize): + return self.data.endswith(suffix, start, end) + def expandtabs(self, tabsize=8): + return self.__class__(self.data.expandtabs(tabsize)) + def find(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.find(sub, start, end) + def format(self, /, *args, **kwds): + return self.data.format(*args, **kwds) + def format_map(self, mapping): + return self.data.format_map(mapping) + def index(self, sub, start=0, end=_sys.maxsize): + return self.data.index(sub, start, end) + def isalpha(self): return self.data.isalpha() + def isalnum(self): return self.data.isalnum() + def isascii(self): return self.data.isascii() + def isdecimal(self): return self.data.isdecimal() + def isdigit(self): return self.data.isdigit() + def isidentifier(self): return self.data.isidentifier() + def islower(self): return self.data.islower() + def isnumeric(self): return self.data.isnumeric() + def isprintable(self): return self.data.isprintable() + def isspace(self): return self.data.isspace() + def istitle(self): return self.data.istitle() + def isupper(self): return self.data.isupper() + def join(self, seq): return self.data.join(seq) + def ljust(self, width, *args): + return self.__class__(self.data.ljust(width, *args)) + def lower(self): return self.__class__(self.data.lower()) + def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) + maketrans = str.maketrans + def partition(self, sep): + return self.data.partition(sep) + def replace(self, old, new, maxsplit=-1): + if 
isinstance(old, UserString): + old = old.data + if isinstance(new, UserString): + new = new.data + return self.__class__(self.data.replace(old, new, maxsplit)) + def rfind(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.rfind(sub, start, end) + def rindex(self, sub, start=0, end=_sys.maxsize): + return self.data.rindex(sub, start, end) + def rjust(self, width, *args): + return self.__class__(self.data.rjust(width, *args)) + def rpartition(self, sep): + return self.data.rpartition(sep) + def rstrip(self, chars=None): + return self.__class__(self.data.rstrip(chars)) + def split(self, sep=None, maxsplit=-1): + return self.data.split(sep, maxsplit) + def rsplit(self, sep=None, maxsplit=-1): + return self.data.rsplit(sep, maxsplit) + def splitlines(self, keepends=False): return self.data.splitlines(keepends) + def startswith(self, prefix, start=0, end=_sys.maxsize): + return self.data.startswith(prefix, start, end) + def strip(self, chars=None): return self.__class__(self.data.strip(chars)) + def swapcase(self): return self.__class__(self.data.swapcase()) + def title(self): return self.__class__(self.data.title()) + def translate(self, *args): + return self.__class__(self.data.translate(*args)) + def upper(self): return self.__class__(self.data.upper()) + def zfill(self, width): return self.__class__(self.data.zfill(width)) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/concurrent/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/concurrent/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..196d3788575993eb403588b01ac6b0a3fbba13da --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/concurrent/__init__.py @@ -0,0 +1 @@ +# This directory is a Python package. diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/ChangeLog b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/ChangeLog new file mode 100644 index 0000000000000000000000000000000000000000..d7d7e1efdb1d308e22d07e7dca3cab502b204d1d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/ChangeLog @@ -0,0 +1,1591 @@ +Please refer to the IDLEfork and IDLE CVS repositories for +change details subsequent to the 0.8.1 release. + + +IDLEfork ChangeLog +================== + +2001-07-20 11:35 elguavas + + * README.txt, NEWS.txt: bring up to date for 0.8.1 release + +2001-07-19 16:40 elguavas + + * IDLEFORK.html: replaced by IDLEFORK-index.html + +2001-07-19 16:39 elguavas + + * IDLEFORK-index.html: updated placeholder idlefork homepage + +2001-07-19 14:49 elguavas + + * ChangeLog, EditorWindow.py, INSTALLATION, NEWS.txt, README.txt, + TODO.txt, idlever.py: + minor tidy-ups ready for 0.8.1 alpha tarball release + +2001-07-17 15:12 kbk + + * INSTALLATION, setup.py: INSTALLATION: Remove the coexist.patch + instructions + + **************** setup.py: + + Remove the idles script, add some words on IDLE Fork to the + long_description, and clean up some line spacing. + +2001-07-17 15:01 kbk + + * coexist.patch: Put this in the attic, at least for now... + +2001-07-17 14:59 kbk + + * PyShell.py, idle, idles: Implement idle command interface as + suggested by GvR [idle-dev] 16 July **************** PyShell: Added + functionality: + + usage: idle.py [-c command] [-d] [-i] [-r script] [-s] [-t title] + [arg] ... 
+ + idle file(s) (without options) edit the file(s) + + -c cmd run the command in a shell -d enable the + debugger -i open an interactive shell -i file(s) open a + shell and also an editor window for each file -r script run a file + as a script in a shell -s run $IDLESTARTUP or + $PYTHONSTARTUP before anything else -t title set title of shell + window + + Remaining arguments are applied to the command (-c) or script (-r). + + ****************** idles: Removed the idles script, not needed + + ****************** idle: Removed the IdleConf references, not + required anymore + +2001-07-16 17:08 kbk + + * INSTALLATION, coexist.patch: Added installation instructions. + + Added a patch which modifies idlefork so that it can co-exist with + "official" IDLE in the site-packages directory. This patch is not + necessary if only idlefork IDLE is installed. See INSTALLATION for + further details. + +2001-07-16 15:50 kbk + + * idles: Add a script "idles" which opens a Python Shell window. + + The default behaviour of idlefork idle is to open an editor window + instead of a shell. Complex expressions may be run in a fresh + environment by selecting "run". There are times, however, when a + shell is desired. Though one can be started by "idle -t 'foo'", + this script is more convenient. In addition, a shell and an editor + window can be started in parallel by "idles -e foo.py". + +2001-07-16 15:25 kbk + + * PyShell.py: Call out IDLE Fork in startup message. + +2001-07-16 14:00 kbk + + * PyShell.py, setup.py: Add a script "idles" which opens a Python + Shell window. + + The default behaviour of idlefork idle is to open an editor window + instead of a shell. Complex expressions may be run in a fresh + environment by selecting "run". There are times, however, when a + shell is desired. Though one can be started by "idle -t 'foo'", + this script is more convenient. In addition, a shell and an editor + window can be started in parallel by "idles -e foo.py". + +2001-07-15 03:06 kbk + + * pyclbr.py, tabnanny.py: tabnanny and pyclbr are now found in /Lib + +2001-07-15 02:29 kbk + + * BrowserControl.py: Remove, was retained for 1.5.2 support + +2001-07-14 15:48 kbk + + * setup.py: Installing Idle to site-packages via Distutils does not + copy the Idle help.txt file. + + Ref SF Python Patch 422471 + +2001-07-14 15:26 kbk + + * keydefs.py: py-cvs-2001_07_13 (Rev 1.3) merge + + "Make copy, cut and paste events case insensitive. Reported by + Patrick K. O'Brien on idle-dev. (Should other bindings follow + suit?)" --GvR + +2001-07-14 15:21 kbk + + * idle.py: py-cvs-2001_07_13 (Rev 1.4) merge + + "Move the action of loading the configuration to the IdleConf + module rather than the idle.py script. This has advantages and + disadvantages; the biggest advantage being that we can more easily + have an alternative main program." --GvR + +2001-07-14 15:18 kbk + + * extend.txt: py-cvs-2001_07_13 (Rev 1.4) merge + + "Quick update to the extension mechanism (extend.py is gone, long + live config.txt)" --GvR + +2001-07-14 15:15 kbk + + * StackViewer.py: py-cvs-2001_07_13 (Rev 1.16) merge + + "Refactored, with some future plans in mind. This now uses the new + gotofileline() method defined in FileList.py" --GvR + +2001-07-14 15:10 kbk + + * PyShell.py: py-cvs-2001_07_13 (Rev 1.34) merge + + "Amazing. A very subtle change in policy in descr-branch actually + found a bug here. Here's the deal: Class PyShell derives from + class OutputWindow. 
+    Method PyShell.close() wants to invoke its
+    parent method, but because PyShell long ago was inherited from
+    class PyShellEditorWindow, it invokes
+    PyShellEditorWindow.close(self).  Now, class PyShellEditorWindow
+    itself derives from class OutputWindow, and inherits the close()
+    method from there without overriding it.  Under the old rules,
+    PyShellEditorWindow.close would return an unbound method restricted
+    to the class that defined the implementation of close(), which was
+    OutputWindow.close.  Under the new rules, the unbound method is
+    restricted to the class whose method was requested, that is
+    PyShellEditorWindow, and this was correctly trapped as an error."
+    --GvR
+
+2001-07-14 14:59  kbk
+
+    * PyParse.py: py-cvs-2001_07_13 (Rel 1.9) merge
+
+    "Taught IDLE's autoindent parser that "yield" is a keyword that
+    begins a stmt.  Along w/ the preceding change to keyword.py, making
+    all this work w/ a future-stmt just looks harder and harder."
+    --tim_one
+
+    (From Rel 1.8: "Hack to make this still work with Python 1.5.2.
+    ;-( " --fdrake)
+
+2001-07-14 14:51  kbk
+
+    * IdleConf.py: py-cvs-2001_07_13 (Rel 1.7) merge
+
+    "Move the action of loading the configuration to the IdleConf
+    module rather than the idle.py script.  This has advantages and
+    disadvantages; the biggest advantage being that we can more easily
+    have an alternative main program." --GvR
+
+2001-07-14 14:45  kbk
+
+    * FileList.py: py-cvs-2000_07_13 (Rev 1.9) merge
+
+    "Delete goodname() method, which is unused.  Add gotofileline(), a
+    convenience method which I intend to use in a variant.  Rename
+    test() to _test()." --GvR
+
+    This was an interesting merge.  The join completely missed removing
+    goodname(), which was adjacent, but outside of, a small conflict.
+    I only caught it by comparing the 1.1.3.2/1.1.3.3 diff.  CVS ain't
+    infallible.
+
+2001-07-14 13:58  kbk
+
+    * EditorWindow.py: py-cvs-2000_07_13 (Rev 1.38) merge "Remove
+    legacy support for the BrowserControl module; the webbrowser module
+    has been included since Python 2.0, and that is the preferred
+    interface." --fdrake
+
+2001-07-14 13:32  kbk
+
+    * EditorWindow.py, FileList.py, IdleConf.py, PyParse.py,
+    PyShell.py, StackViewer.py, extend.txt, idle.py, keydefs.py: Import
+    the 2001 July 13 23:59 GMT version of Python CVS IDLE on the
+    existing 1.1.3 vendor branch named py-cvs-vendor-branch.  Release
+    tag is py-cvs-2001_07_13.
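The close() bug described above is easier to see with the class shape written out. A minimal Python 3 sketch of the hierarchy (class names from the entry, placeholder bodies; the unbound-method restriction itself no longer exists in Python 3, where super() sidesteps the problem entirely):

    class OutputWindow:
        def close(self):
            print('OutputWindow.close')

    class PyShellEditorWindow(OutputWindow):
        pass                    # inherits close() without overriding it

    class PyShell(OutputWindow):
        def close(self):
            # The buggy call was PyShellEditorWindow.close(self): legal while
            # the unbound method was restricted to OutputWindow (which a
            # PyShell instance is), an error once it was restricted to
            # PyShellEditorWindow (which a PyShell instance is not).
            super().close()     # dispatches along PyShell's actual MRO

    PyShell().close()           # prints 'OutputWindow.close'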
+ +2001-07-14 12:02 kbk + + * Icons/python.gif: py-cvs-rel2_1 (Rev 1.2) merge Copied py-cvs rev + 1.2 changed file to idlefork MAIN + +2001-07-14 11:58 kbk + + * Icons/minusnode.gif: py-cvs-rel2_1 (Rev 1.2) merge Copied py-cvs + 1.2 changed file to idlefork MAIN + +2001-07-14 11:23 kbk + + * ScrolledList.py: py-cvs-rel2_1 (rev 1.5) merge - whitespace + normalization + +2001-07-14 11:20 kbk + + * Separator.py: py-cvs-rel2_1 (Rev 1.3) merge - whitespace + normalization + +2001-07-14 11:16 kbk + + * StackViewer.py: py-cvs-rel2_1 (Rev 1.15) merge - whitespace + normalization + +2001-07-14 11:14 kbk + + * ToolTip.py: py-cvs-rel2_1 (Rev 1.2) merge - whitespace + normalization + +2001-07-14 10:13 kbk + + * PyShell.py: cvs-py-rel2_1 (Rev 1.29 - 1.33) merge + + Merged the following py-cvs revs without conflict: 1.29 Reduce + copyright text output at startup 1.30 Delay setting sys.args until + Tkinter is fully initialized 1.31 Whitespace normalization 1.32 + Turn syntax warning into error when interactive 1.33 Fix warning + initialization bug + + Note that module is extensively modified wrt py-cvs + +2001-07-14 06:33 kbk + + * PyParse.py: py-cvs-rel2_1 (Rev 1.6 - 1.8) merge Fix autoindent + bug and deflect Unicode from text.get() + +2001-07-14 06:00 kbk + + * Percolator.py: py-cvs-rel2_1 (Rev 1.3) "move "from Tkinter import + *" to module level" --jhylton + +2001-07-14 05:57 kbk + + * PathBrowser.py: py-cvs-rel2_1 (Rev 1.6) merge - whitespace + normalization + +2001-07-14 05:49 kbk + + * ParenMatch.py: cvs-py-rel2_1 (Rev 1.5) merge - whitespace + normalization + +2001-07-14 03:57 kbk + + * ObjectBrowser.py: py-cvs-rel2_1 (Rev 1.3) merge "Make the test + program work outside IDLE." -- GvR + +2001-07-14 03:52 kbk + + * MultiStatusBar.py: py-cvs-rel2_1 (Rev 1.2) merge - whitespace + normalization + +2001-07-14 03:44 kbk + + * MultiScrolledLists.py: py-cvs-rel2_1 (Rev 1.2) merge - whitespace + normalization + +2001-07-14 03:40 kbk + + * IdleHistory.py: py-cvs-rel2_1 (Rev 1.4) merge - whitespace + normalization + +2001-07-14 03:38 kbk + + * IdleConf.py: py-cvs-rel2_1 (Rev 1.6) merge - whitespace + normalization + +2001-07-13 14:18 kbk + + * IOBinding.py: py-cvs-rel2_1 (Rev 1.4) merge - move "import *" to + module level + +2001-07-13 14:12 kbk + + * FormatParagraph.py: py-cvs-rel2_1 (Rev 1.9) merge - whitespace + normalization + +2001-07-13 14:07 kbk + + * FileList.py: py-cvs-rel2_1 (Rev 1.8) merge - whitespace + normalization + +2001-07-13 13:35 kbk + + * EditorWindow.py: py-cvs-rel2_1 (Rev 1.33 - 1.37) merge + + VP IDLE version depended on VP's ExecBinding.py and spawn.py to get + the path to the Windows Doc directory (relative to python.exe). + Removed this conflicting code in favor of py-cvs updates which on + Windows use a hard coded path relative to the location of this + module. py-cvs updates include support for webbrowser.py. Module + still has BrowserControl.py for 1.5.2 support. + + At this point, the differences wrt py-cvs relate to menu + functionality. 
+ +2001-07-13 11:30 kbk + + * ConfigParser.py: py-cvs-rel2_1 merge - Remove, lives in /Lib + +2001-07-13 10:10 kbk + + * Delegator.py: py-cvs-rel2_1 (Rev 1.3) merge - whitespace + normalization + +2001-07-13 10:07 kbk + + * Debugger.py: py-cvs-rel2_1 (Rev 1.15) merge - whitespace + normalization + +2001-07-13 10:04 kbk + + * ColorDelegator.py: py-cvs-rel2_1 (Rev 1.11 and 1.12) merge + Colorize "as" after "import" / use DEBUG instead of __debug__ + +2001-07-13 09:54 kbk + + * ClassBrowser.py: py-cvs-rel2_1 (Rev 1.12) merge - whitespace + normalization + +2001-07-13 09:41 kbk + + * BrowserControl.py: py-cvs-rel2_1 (Rev 1.1) merge - New File - + Force HEAD to trunk with -f Note: browser.py was renamed + BrowserControl.py 10 May 2000. It provides a collection of classes + and convenience functions to control external browsers "for 1.5.2 + support". It was removed from py-cvs 18 April 2001. + +2001-07-13 09:10 kbk + + * CallTips.py: py-cvs-rel2_1 (Rev 1.8) merge - whitespace + normalization + +2001-07-13 08:26 kbk + + * CallTipWindow.py: py-cvs-rel2_1 (Rev 1.3) merge - whitespace + normalization + +2001-07-13 08:13 kbk + + * AutoExpand.py: py-cvs-rel1_2 (Rev 1.4) merge, "Add Alt-slash to + Unix keydefs (I somehow need it on RH 6.2). Get rid of assignment + to unused self.text.wordlist." --GvR + +2001-07-12 16:54 elguavas + + * ReplaceDialog.py: py-cvs merge, python 1.5.2 compatibility + +2001-07-12 16:46 elguavas + + * ScriptBinding.py: py-cvs merge, better error dialog + +2001-07-12 16:38 elguavas + + * TODO.txt: py-cvs merge, additions + +2001-07-12 15:35 elguavas + + * WindowList.py: py-cvs merge, correct indentation + +2001-07-12 15:24 elguavas + + * config.txt: py-cvs merge, correct typo + +2001-07-12 15:21 elguavas + + * help.txt: py-cvs merge, update colour changing info + +2001-07-12 14:51 elguavas + + * idle.py: py-cvs merge, idle_dir loading changed + +2001-07-12 14:44 elguavas + + * idlever.py: py-cvs merge, version update + +2001-07-11 12:53 kbk + + * BrowserControl.py: Initial revision + +2001-07-11 12:53 kbk + + * AutoExpand.py, BrowserControl.py, CallTipWindow.py, CallTips.py, + ClassBrowser.py, ColorDelegator.py, Debugger.py, Delegator.py, + EditorWindow.py, FileList.py, FormatParagraph.py, IOBinding.py, + IdleConf.py, IdleHistory.py, MultiScrolledLists.py, + MultiStatusBar.py, ObjectBrowser.py, OutputWindow.py, + ParenMatch.py, PathBrowser.py, Percolator.py, PyParse.py, + PyShell.py, RemoteInterp.py, ReplaceDialog.py, ScriptBinding.py, + ScrolledList.py, Separator.py, StackViewer.py, TODO.txt, + ToolTip.py, WindowList.py, config.txt, help.txt, idle, idle.bat, + idle.py, idlever.py, setup.py, Icons/minusnode.gif, + Icons/python.gif: Import the release 2.1 version of Python CVS IDLE + on the existing 1.1.3 vendor branch named py-cvs-vendor-branch, + with release tag py-cvs-rel2_1. 
+ +2001-07-11 12:34 kbk + + * AutoExpand.py, AutoIndent.py, Bindings.py, CallTipWindow.py, + CallTips.py, ChangeLog, ClassBrowser.py, ColorDelegator.py, + Debugger.py, Delegator.py, EditorWindow.py, FileList.py, + FormatParagraph.py, FrameViewer.py, GrepDialog.py, IOBinding.py, + IdleConf.py, IdleHistory.py, MultiScrolledLists.py, + MultiStatusBar.py, NEWS.txt, ObjectBrowser.py, OldStackViewer.py, + OutputWindow.py, ParenMatch.py, PathBrowser.py, Percolator.py, + PyParse.py, PyShell.py, README.txt, RemoteInterp.py, + ReplaceDialog.py, ScriptBinding.py, ScrolledList.py, + SearchBinding.py, SearchDialog.py, SearchDialogBase.py, + SearchEngine.py, Separator.py, StackViewer.py, TODO.txt, + ToolTip.py, TreeWidget.py, UndoDelegator.py, WidgetRedirector.py, + WindowList.py, ZoomHeight.py, __init__.py, config-unix.txt, + config-win.txt, config.txt, eventparse.py, extend.txt, help.txt, + idle.bat, idle.py, idle.pyw, idlever.py, keydefs.py, pyclbr.py, + tabnanny.py, testcode.py, Icons/folder.gif, Icons/minusnode.gif, + Icons/openfolder.gif, Icons/plusnode.gif, Icons/python.gif, + Icons/tk.gif: Import the 9 March 2000 version of Python CVS IDLE as + 1.1.3 vendor branch named py-cvs-vendor-branch. + +2001-07-04 13:43 kbk + + * Icons/: folder.gif, minusnode.gif, openfolder.gif, plusnode.gif, + python.gif, tk.gif: Null commit with -f option to force an uprev + and put HEADs firmly on the trunk. + +2001-07-04 13:15 kbk + + * AutoExpand.py, AutoIndent.py, Bindings.py, CallTipWindow.py, + CallTips.py, ChangeLog, ClassBrowser.py, ColorDelegator.py, + ConfigParser.py, Debugger.py, Delegator.py, EditorWindow.py, + ExecBinding.py, FileList.py, FormatParagraph.py, FrameViewer.py, + GrepDialog.py, IDLEFORK.html, IOBinding.py, IdleConf.py, + IdleHistory.py, MultiScrolledLists.py, MultiStatusBar.py, NEWS.txt, + ObjectBrowser.py, OldStackViewer.py, OutputWindow.py, + ParenMatch.py, PathBrowser.py, Percolator.py, PyParse.py, + PyShell.py, README.txt, Remote.py, RemoteInterp.py, + ReplaceDialog.py, ScriptBinding.py, ScrolledList.py, + SearchBinding.py, SearchDialog.py, SearchDialogBase.py, + SearchEngine.py, Separator.py, StackViewer.py, TODO.txt, + ToolTip.py, TreeWidget.py, UndoDelegator.py, WidgetRedirector.py, + WindowList.py, ZoomHeight.py, __init__.py, config-unix.txt, + config-win.txt, config.txt, eventparse.py, extend.txt, help.txt, + idle, idle.bat, idle.py, idle.pyw, idlever.py, keydefs.py, + loader.py, protocol.py, pyclbr.py, setup.py, spawn.py, tabnanny.py, + testcode.py: Null commit with -f option to force an uprev and put + HEADs firmly on the trunk. + +2001-06-27 10:24 elguavas + + * IDLEFORK.html: updated contact details + +2001-06-25 17:23 elguavas + + * idle, RemoteInterp.py, setup.py: Initial revision + +2001-06-25 17:23 elguavas + + * idle, RemoteInterp.py, setup.py: import current python cvs idle + as a vendor branch + +2001-06-24 15:10 elguavas + + * IDLEFORK.html: tiny change to test new syncmail setup + +2001-06-24 14:41 elguavas + + * IDLEFORK.html: change to new developer contact, also a test + commit for new syncmail setup + +2001-06-23 18:15 elguavas + + * IDLEFORK.html: tiny test update for revitalised idle-fork + +2000-09-24 17:29 nriley + + * protocol.py: Fixes for Python 1.6 compatibility - socket bind and + connect get a tuple instead two arguments. + +2000-09-24 17:28 nriley + + * spawn.py: Change for Python 1.6 compatibility - UNIX's 'os' + module defines 'spawnv' now, so we check for 'fork' first. 
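The protocol.py note above refers to the address-argument change that arrived with Python 1.6: bind() and connect() take a single (host, port) tuple rather than two positional arguments. A minimal sketch (an editorial illustration; modern Python accepts only the tuple form):

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # s.bind('127.0.0.1', 0)    # old two-argument style -- now a TypeError
    s.bind(('127.0.0.1', 0))    # tuple style; port 0 asks the OS for a free port
    print(s.getsockname())      # e.g. ('127.0.0.1', 49512)
    s.close()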
+ +2000-08-15 22:51 nowonder + + * IDLEFORK.html: + corrected email address + +2000-08-15 22:47 nowonder + + * IDLEFORK.html: + added .html file for http://idlefork.sourceforge.net + +2000-08-15 11:13 dscherer + + * AutoExpand.py, AutoIndent.py, Bindings.py, CallTipWindow.py, + CallTips.py, __init__.py, ChangeLog, ClassBrowser.py, + ColorDelegator.py, ConfigParser.py, Debugger.py, Delegator.py, + FileList.py, FormatParagraph.py, FrameViewer.py, GrepDialog.py, + IOBinding.py, IdleConf.py, IdleHistory.py, MultiScrolledLists.py, + MultiStatusBar.py, NEWS.txt, ObjectBrowser.py, OldStackViewer.py, + OutputWindow.py, ParenMatch.py, PathBrowser.py, Percolator.py, + PyParse.py, PyShell.py, README.txt, ReplaceDialog.py, + ScriptBinding.py, ScrolledList.py, SearchBinding.py, + SearchDialog.py, SearchDialogBase.py, SearchEngine.py, + Separator.py, StackViewer.py, TODO.txt, ToolTip.py, TreeWidget.py, + UndoDelegator.py, WidgetRedirector.py, WindowList.py, help.txt, + ZoomHeight.py, config-unix.txt, config-win.txt, config.txt, + eventparse.py, extend.txt, idle.bat, idle.py, idle.pyw, idlever.py, + keydefs.py, loader.py, pyclbr.py, tabnanny.py, testcode.py, + EditorWindow.py, ExecBinding.py, Remote.py, protocol.py, spawn.py, + Icons/folder.gif, Icons/minusnode.gif, Icons/openfolder.gif, + Icons/plusnode.gif, Icons/python.gif, Icons/tk.gif: Initial + revision + +2000-08-15 11:13 dscherer + + * AutoExpand.py, AutoIndent.py, Bindings.py, CallTipWindow.py, + CallTips.py, __init__.py, ChangeLog, ClassBrowser.py, + ColorDelegator.py, ConfigParser.py, Debugger.py, Delegator.py, + FileList.py, FormatParagraph.py, FrameViewer.py, GrepDialog.py, + IOBinding.py, IdleConf.py, IdleHistory.py, MultiScrolledLists.py, + MultiStatusBar.py, NEWS.txt, ObjectBrowser.py, OldStackViewer.py, + OutputWindow.py, ParenMatch.py, PathBrowser.py, Percolator.py, + PyParse.py, PyShell.py, README.txt, ReplaceDialog.py, + ScriptBinding.py, ScrolledList.py, SearchBinding.py, + SearchDialog.py, SearchDialogBase.py, SearchEngine.py, + Separator.py, StackViewer.py, TODO.txt, ToolTip.py, TreeWidget.py, + UndoDelegator.py, WidgetRedirector.py, WindowList.py, help.txt, + ZoomHeight.py, config-unix.txt, config-win.txt, config.txt, + eventparse.py, extend.txt, idle.bat, idle.py, idle.pyw, idlever.py, + keydefs.py, loader.py, pyclbr.py, tabnanny.py, testcode.py, + EditorWindow.py, ExecBinding.py, Remote.py, protocol.py, spawn.py, + Icons/folder.gif, Icons/minusnode.gif, Icons/openfolder.gif, + Icons/plusnode.gif, Icons/python.gif, Icons/tk.gif: Modified IDLE + from VPython 0.2 + + +original IDLE ChangeLog: +======================== + +Tue Feb 15 18:08:19 2000 Guido van Rossum + + * NEWS.txt: Notice status bar and stack viewer. + + * EditorWindow.py: Support for Moshe's status bar. + + * MultiStatusBar.py: Status bar code -- by Moshe Zadka. + + * OldStackViewer.py: + Adding the old stack viewer implementation back, for the debugger. + + * StackViewer.py: New stack viewer, uses a tree widget. + (XXX: the debugger doesn't yet use this.) + + * WindowList.py: + Correct a typo and remove an unqualified except that was hiding the error. + + * ClassBrowser.py: Add an XXX comment about the ClassBrowser AIP. + + * ChangeLog: Updated change log. + + * NEWS.txt: News update. Probably incomplete; what else is new? + + * README.txt: + Updated for pending IDLE 0.5 release (still very rough -- just getting + it out in a more convenient format than CVS). + + * TODO.txt: Tiny addition. 
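The WindowList note above ("remove an unqualified except that was hiding the error") is the classic bare-except trap: it catches the expected Tk failure and every programming error alike. A generic sketch of the pitfall, not the actual WindowList code:

    from tkinter import TclError

    def wakeup_hiding_bugs(window):
        try:
            window.deiconify()
        except:                  # bare except: swallows TclError, but also
            pass                 # NameError, AttributeError, ... silently

    def wakeup_fixed(window):
        try:
            window.deiconify()
        except TclError:         # only the expected "window was destroyed" case
            pass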
+ +Thu Sep 9 14:16:02 1999 Guido van Rossum + + * TODO.txt: A few new TODO entries. + +Thu Aug 26 23:06:22 1999 Guido van Rossum + + * Bindings.py: Add Python Documentation entry to Help menu. + + * EditorWindow.py: + Find the help.txt file relative to __file__ or ".", not in sys.path. + (Suggested by Moshe Zadka, but implemented differently.) + + Add <> event which, on Unix, brings up Netscape pointing + to http://www.python.doc/current/ (a local copy would be nice but its + location can't be predicted). Windows solution TBD. + +Wed Aug 11 14:55:43 1999 Guido van Rossum + + * TreeWidget.py: + Moshe noticed an inconsistency in his comment, so I'm rephrasing it to + be clearer. + + * TreeWidget.py: + Patch inspired by Moshe Zadka to search for the Icons directory in the + same directory as __file__, rather than searching for it along sys.path. + This works better when idle is a package. + +Thu Jul 15 13:11:02 1999 Guido van Rossum + + * TODO.txt: New wishes. + +Sat Jul 10 13:17:35 1999 Guido van Rossum + + * IdlePrefs.py: + Make the color for stderr red (i.e. the standard warning/danger/stop + color) rather than green. Suggested by Sam Schulenburg. + +Fri Jun 25 17:26:34 1999 Guido van Rossum + + * PyShell.py: Close debugger when closing. This may break a cycle. + + * Debugger.py: Break cycle on close. + + * ClassBrowser.py: Destroy the tree when closing. + + * TreeWidget.py: Add destroy() method to recursively destroy a tree. + + * PyShell.py: Extend _close() to break cycles. + Break some other cycles too (and destroy the root when done). + + * EditorWindow.py: + Add _close() method that does the actual cleanup (close() asks the + user what they want first if there's unsaved stuff, and may cancel). + It closes more than before. + + Add unload_extensions() method to unload all extensions; called from + _close(). It calls an extension's close() method if it has one. + + * Percolator.py: Add close() method that breaks cycles. + + * WidgetRedirector.py: Add unregister() method. + Unregister everything at closing. + Don't call close() in __del__, rely on explicit call to close(). + + * IOBinding.py, FormatParagraph.py, CallTips.py: + Add close() method that breaks a cycle. + +Fri Jun 11 15:03:00 1999 Guido van Rossum + + * AutoIndent.py, EditorWindow.py, FormatParagraph.py: + Tim Peters smart.patch: + + EditorWindow.py: + + + Added get_tabwidth & set_tabwidth "virtual text" methods, that get/set the + widget's view of what a tab means. + + + Moved TK_TABWIDTH_DEFAULT here from AutoIndent. + + + Renamed Mark's get_selection_index to get_selection_indices (sorry, Mark, + but the name was plain wrong ). + + FormatParagraph.py: renamed use of get_selection_index. + + AutoIndent.py: + + + Moved TK_TABWIDTH_DEFAULT to EditorWindow. + + + Rewrote set_indentation_params to use new VTW get/set_tabwidth methods. + + + Changed smart_backspace_event to delete whitespace back to closest + preceding virtual tab stop or real character (note that this may require + inserting characters if backspacing over a tab!). + + + Nuked almost references to the selection tag, in favor of using + get_selection_indices. The sole exception is in set_region, for which no + "set_selection" abstraction has yet been agreed upon. + + + Had too much fun using the spiffy new features of the format-paragraph + cmd. + +Thu Jun 10 17:48:02 1999 Guido van Rossum + + * FormatParagraph.py: + Code by Mark Hammond to format paragraphs embedded in comments. 
+    Read the comments (which I reformatted using the new feature :-)
+    for some limitations.
+
+    * EditorWindow.py:
+    Added abstraction get_selection_index() (Mark Hammond).  Also
+    reformatted some comment blocks to show off a cool feature I'm about
+    to check in next.
+
+    * ClassBrowser.py:
+    Adapt to the new pyclbr's support of listing top-level functions.  If
+    this functionality is not present (e.g. when used with a vintage
+    Python 1.5.2 installation) top-level functions are not listed.
+
+    (Hmm...  Any distribution of IDLE 0.5 should probably include a copy
+    of the new pyclbr.py!)
+
+    * AutoIndent.py:
+    Fix off-by-one error in Tim's recent change to comment_region(): the
+    list of lines returned by get_region() contains an empty line at the
+    end representing the start of the next line, and this shouldn't be
+    commented out!
+
+    * CallTips.py:
+    Mark Hammond writes: Here is another change that allows it to work for
+    class creation - tries to locate an __init__ function.  Also updated
+    the test code to reflect your new "***" change.
+
+    * CallTipWindow.py:
+    Mark Hammond writes: Tim's suggestion of copying the font for the
+    CallTipWindow from the text control makes sense, and actually makes
+    the control look better IMO.
+
+Wed Jun 9 20:34:57 1999  Guido van Rossum
+
+    * CallTips.py:
+    Append "..." if the appropriate flag (for varargs) in co_flags is set.
+    Ditto "***" for kwargs.
+
+Tue Jun 8 13:06:07 1999  Guido van Rossum
+
+    * ReplaceDialog.py:
+    Hmm...  Tim didn't turn "replace all" into a single undo block.
+    I think I like it better if it is, so here.
+
+    * ReplaceDialog.py: Tim Peters: made replacement atomic for undo/redo.
+
+    * AutoIndent.py: Tim Peters:
+
+    + Set usetabs=1.  Editing pyclbr.py was driving me nuts <0.6 wink>.
+    usetabs=1 is the Emacs pymode default too, and thanks to indentwidth !=
+    tabwidth magical usetabs disabling, new files are still created with tabs
+    turned off.  The only implication is that if you open a file whose first
+    indent is a single tab, IDLE will now magically use tabs for that file (and
+    set indentwidth to 8).  Note that the whole scheme doesn't work right for
+    PythonWin, though, since Windows users typically set tabwidth to 4; Mark
+    probably has to hide the IDLE algorithm from them (which he already knows).
+
+    + Changed comment_region_event to stick "##" in front of every line.  The
+    "holes" previously left on blank lines were visually confusing (made it
+    needlessly hard to figure out what to uncomment later).
+
+Mon Jun 7 15:38:40 1999  Guido van Rossum
+
+    * TreeWidget.py, ObjectBrowser.py:
+    Remove unnecessary reference to pyclbr from test() code.
+
+    * PyParse.py: Tim Peters:
+
+    Smarter logic for finding a parse synch point.
+
+    Does a half to a fifth the work in normal cases; don't notice the speedup,
+    but makes more breathing room for other extensions.
+
+    Speeds terrible cases by at least a factor of 10.  "Terrible" == e.g. you put
+    """ at the start of Tkinter.py, undo it, zoom to the bottom, and start
+    typing in code.  Used to take about 8 seconds for ENTER to respond, now some
+    large fraction of a second.  The new code gets indented correctly, despite
+    that it all remains "string colored" until the colorizer catches up (after
+    which, ENTER appears instantaneous again).
+
+Fri Jun 4 19:21:19 1999  Guido van Rossum
+
+    * extend.py: Might as well enable CallTips by default.
+    If there are too many complaints I'll remove it again or fix it.
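The co_flags test in the CallTips entry above is plain code-object introspection; the relevant bits are exposed today as inspect.CO_VARARGS and inspect.CO_VARKEYWORDS. A simplified sketch of the idea (not the historical CallTips code):

    import inspect

    def argtip_suffix(func):
        flags = func.__code__.co_flags
        suffix = ''
        if flags & inspect.CO_VARARGS:        # function declares *args
            suffix += '...'
        if flags & inspect.CO_VARKEYWORDS:    # function declares **kwargs
            suffix += '***'
        return suffix

    def f(a, *args, **kwargs):
        pass

    print(argtip_suffix(f))    # '...***'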
+ +Thu Jun 3 14:32:16 1999 Guido van Rossum + + * AutoIndent.py, EditorWindow.py, PyParse.py: + New offerings by Tim Peters; he writes: + + IDLE is now the first Python editor in the Universe not confused by my + doctest.py . + + As threatened, this defines IDLE's is_char_in_string function as a + method of EditorWindow. You just need to define one similarly in + whatever it is you pass as editwin to AutoIndent; looking at the + EditorWindow.py part of the patch should make this clear. + + * GrepDialog.py: Enclose pattern in quotes in status message. + + * CallTips.py: + Mark Hammond fixed some comments and improved the way the tip text is + constructed. + +Wed Jun 2 18:18:57 1999 Guido van Rossum + + * CallTips.py: + My fix to Mark's code: restore the universal check on . + Always cancel on or . + + * CallTips.py: + A version that Mark Hammond posted to the newsgroup. Has some newer + stuff for getting the tip. Had to fix the Key-( and Key-) events + for Unix. Will have to re-apply my patch for catching KeyRelease and + ButtonRelease events. + + * CallTipWindow.py, CallTips.py: + Call tips by Mark Hammond (plus tiny fix by me.) + + * IdleHistory.py: + Changes by Mark Hammond: (1) support optional output_sep argument to + the constructor so he can eliminate the sys.ps2 that PythonWin leaves + in the source; (2) remove duplicate history items. + + * AutoIndent.py: + Changes by Mark Hammond to allow using IDLE extensions in PythonWin as + well: make three dialog routines instance variables. + + * EditorWindow.py: + Change by Mark Hammond to allow using IDLE extensions in PythonWin as + well: make three dialog routines instance variables. + +Tue Jun 1 20:06:44 1999 Guido van Rossum + + * AutoIndent.py: Hah! A fix of my own to Tim's code! + Unix bindings for <> and <> were + missing, and somehow that meant the events were never generated, + even though they were in the menu. The new Unix bindings are now + the same as the Windows bindings (M-t and M-u). + + * AutoIndent.py, PyParse.py, PyShell.py: Tim Peters again: + + The new version (attached) is fast enough all the time in every real module + I have . You can make it slow by, e.g., creating an open list with + 5,000 90-character identifiers (+ trailing comma) each on its own line, then + adding an item to the end -- but that still consumes less than a second on + my P5-166. Response time in real code appears instantaneous. + + Fixed some bugs. + + New feature: when hitting ENTER and the cursor is beyond the line's leading + indentation, whitespace is removed on both sides of the cursor; before + whitespace was removed only on the left; e.g., assuming the cursor is + between the comma and the space: + + def something(arg1, arg2): + ^ cursor to the left of here, and hit ENTER + arg2): # new line used to end up here + arg2): # but now lines up the way you expect + + New hack: AutoIndent has grown a context_use_ps1 Boolean config option, + defaulting to 0 (false) and set to 1 (only) by PyShell. Reason: handling + the fancy stuff requires looking backward for a parsing synch point; ps1 + lines are the only sensible thing to look for in a shell window, but are a + bad thing to look for in a file window (ps1 lines show up in my module + docstrings often). PythonWin's shell should set this true too. + + Persistent problem: strings containing def/class can still screw things up + completely. No improvement. Simplest workaround is on the user's head, and + consists of inserting e.g. 
+ + def _(): pass + + (or any other def/class) after the end of the multiline string that's + screwing them up. This is especially irksome because IDLE's syntax coloring + is *not* confused, so when this happens the colors don't match the + indentation behavior they see. + + * AutoIndent.py: Tim Peters again: + + [Tim, after adding some bracket smarts to AutoIndent.py] + > ... + > What it can't possibly do without reparsing large gobs of text is + > suggest a reasonable indent level after you've *closed* a bracket + > left open on some previous line. + > ... + + The attached can, and actually fast enough to use -- most of the time. The + code is tricky beyond belief to achieve that, but it works so far; e.g., + + return len(string.expandtabs(str[self.stmt_start : + ^ indents to caret + i], + ^ indents to caret + self.tabwidth)) + 1 + ^ indents to caret + + It's about as smart as pymode now, wrt both bracket and backslash + continuation rules. It does require reparsing large gobs of text, and if it + happens to find something that looks like a "def" or "class" or sys.ps1 + buried in a multiline string, but didn't suck up enough preceding text to + see the start of the string, it's completely hosed. I can't repair that -- + it's just too slow to reparse from the start of the file all the time. + + AutoIndent has grown a new num_context_lines tuple attribute that controls + how far to look back, and-- like other params --this could/should be made + user-overridable at startup and per-file on the fly. + + * PyParse.py: New file by Tim Peters: + + One new file in the attached, PyParse.py. The LineStudier (whatever it was + called ) class was removed from AutoIndent; PyParse subsumes its + functionality. + + * AutoIndent.py: Tim Peters keeps revising this module (more to come): + + Removed "New tabwidth" menu binding. + + Added "a tab means how many spaces?" dialog to block tabify and untabify. I + think prompting for this is good now: they're usually at-most-once-per-file + commands, and IDLE can't let them change tabwidth from the Tk default + anymore, so IDLE can no longer presume to have any idea what a tab means. + + Irony: for the purpose of keeping comments aligned via tabs, Tk's + non-default approach is much nicer than the Emacs/Notepad/Codewright/vi/etc + approach. + + * EditorWindow.py: + 1. Catch NameError on import (could be raised by case mismatch on Windows). + 2. No longer need to reset pyclbr cache and show watch cursor when calling + ClassBrowser -- the ClassBrowser takes care of pyclbr and the TreeWidget + takes care of the watch cursor. + 3. Reset the focus to the current window after error message about class + browser on buffer without filename. + + * Icons/minusnode.gif, Icons/plusnode.gif: Missed a few. + + * ClassBrowser.py, PathBrowser.py: Rewritten based on TreeWidget.py + + * ObjectBrowser.py: Object browser, based on TreeWidget.py. + + * TreeWidget.py: Tree widget done right. + + * ToolTip.py: As yet unused code for tool tips. + + * ScriptBinding.py: + Ensure sys.argv[0] is the script name on Run Script. + + * ZoomHeight.py: Move zoom height functionality to separate function. + + * Icons/folder.gif, Icons/openfolder.gif, Icons/python.gif, Icons/tk.gif: + A few icons used by ../TreeWidget.py and its callers. + + * AutoIndent.py: New version by Tim Peters improves block opening test. + +Fri May 21 04:46:17 1999 Guido van Rossum + + * Attic/History.py, PyShell.py: Rename History to IdleHistory. + Add isatty() to pseudo files. 
+ + * StackViewer.py: Make initial stack viewer wider + + * TODO.txt: New wishes + + * AutoIndent.py, EditorWindow.py, PyShell.py: + Much improved autoindent and handling of tabs, + by Tim Peters. + +Mon May 3 15:49:52 1999 Guido van Rossum + + * AutoIndent.py, EditorWindow.py, FormatParagraph.py, UndoDelegator.py: + Tim Peters writes: + + I'm still unsure, but couldn't stand the virtual event trickery so tried a + different sin (adding undo_block_start/stop methods to the Text instance in + EditorWindow.py). Like it or not, it's efficient and works . Better + idea? + + Give the attached a whirl. Even if you hate the implementation, I think + you'll like the results. Think I caught all the "block edit" cmds, + including Format Paragraph, plus subtler ones involving smart indents and + backspacing. + + * WidgetRedirector.py: Tim Peters writes: + + [W]hile trying to dope out how redirection works, stumbled into two + possible glitches. In the first, it doesn't appear to make sense to try to + rename a command that's already been destroyed; in the second, the name + "previous" doesn't really bring to mind "ignore the previous value" . + +Fri Apr 30 19:39:25 1999 Guido van Rossum + + * __init__.py: Support for using idle as a package. + + * PathBrowser.py: + Avoid listing files more than once (e.g. foomodule.so has two hits: + once for foo + module.so, once for foomodule + .so). + +Mon Apr 26 22:20:38 1999 Guido van Rossum + + * ChangeLog, ColorDelegator.py, PyShell.py: Tim Peters strikes again: + + Ho ho ho -- that's trickier than it sounded! The colorizer is working with + "line.col" strings instead of Text marks, and the absolute coordinates of + the point of interest can change across the self.update call (voice of + baffled experience, when two quick backspaces no longer fooled it, but a + backspace followed by a quick ENTER did ). + + Anyway, the attached appears to do the trick. CPU usage goes way up when + typing quickly into a long triple-quoted string, but the latency is fine for + me (a relatively fast typist on a relatively slow machine). Most of the + changes here are left over from reducing the # of vrbl names to help me + reason about the logic better; I hope the code is a *little* easier to + +Fri Apr 23 14:01:25 1999 Guido van Rossum + + * EditorWindow.py: + Provide full arguments to __import__ so it works in packagized IDLE. + +Thu Apr 22 23:20:17 1999 Guido van Rossum + + * help.txt: + Bunch of updates necessary due to recent changes; added docs for File + menu, command line and color preferences. + + * Bindings.py: Remove obsolete 'script' menu. + + * TODO.txt: Several wishes fulfilled. + + * OutputWindow.py: + Moved classes OnDemandOutputWindow and PseudoFile here, + from ScriptBinding.py where they are no longer needed. + + * ScriptBinding.py: + Mostly rewritten. Instead of the old Run module and Debug module, + there are two new commands: + + Import module (F5) imports or reloads the module and also adds its + name to the __main__ namespace. This gets executed in the PyShell + window under control of its debug settings. + + Run script (Control-F5) is similar but executes the contents of the + file directly in the __main__ namespace. + + * PyShell.py: Nits: document use of $IDLESTARTUP; display idle version + + * idlever.py: New version to celebrate new command line + + * OutputWindow.py: Added flush(), for completeness. + + * PyShell.py: + A lot of changes to make the command line more useful. You can now do: + idle.py -e file ... 
-- to edit files + idle.py script arg ... -- to run a script + idle.py -c cmd arg ... -- to run a command + Other options, see also the usage message (also new!) for more details: + -d -- enable debugger + -s -- run $IDLESTARTUP or $PYTHONSTARTUP + -t title -- set Python Shell window's title + sys.argv is set accordingly, unless -e is used. + sys.path is absolutized, and all relevant paths are inserted into it. + + Other changes: + - the environment in which commands are executed is now the + __main__ module + - explicitly save sys.stdout etc., don't restore from sys.__stdout__ + - new interpreter methods execsource(), execfile(), stuffsource() + - a few small nits + + * TODO.txt: + Some more TODO items. Made up my mind about command line args, + Run/Import, __main__. + + * ColorDelegator.py: + Super-elegant patch by Tim Peters that speeds up colorization + dramatically (up to 15 times he claims). Works by reading more than + one line at a time, up to 100-line chunks (starting with one line and + then doubling up to the limit). On a typical machine (e.g. Tim's + P5-166) this doesn't reduce interactive responsiveness in a noticeable + way. + +Wed Apr 21 15:49:34 1999 Guido van Rossum + + * ColorDelegator.py: + Patch by Tim Peters to speed up colorizing of big multiline strings. + +Tue Apr 20 17:32:52 1999 Guido van Rossum + + * extend.txt: + For an event 'foo-bar', the corresponding method must be called + foo_bar_event(). Therefore, fix the references to zoom_height() in + the example. + + * IdlePrefs.py: Restored the original IDLE color scheme. + + * PyShell.py, IdlePrefs.py, ColorDelegator.py, EditorWindow.py: + Color preferences code by Loren Luke (massaged by me somewhat) + + * SearchEngine.py: + Patch by Mark Favas: it fixes the search engine behaviour where an + unsuccessful search wraps around and re-searches that part of the file + between the start of the search and the end of the file - only really + an issue for very large files, but... (also removes a redundant + m.span() call). + +Mon Apr 19 16:26:02 1999 Guido van Rossum + + * TODO.txt: A few wishes are now fulfilled. + + * AutoIndent.py: Tim Peters implements some of my wishes: + + o Makes the tab key intelligently insert spaces when appropriate + (see Help list banter twixt David Ascher and me; idea stolen from + every other editor on earth ). + + o newline_and_indent_event trims trailing whitespace on the old + line (pymode and Codewright). + + o newline_and_indent_event no longer fooled by trailing whitespace or + comment after ":" (pymode, PTUI). + + o newline_and_indent_event now reduces the new line's indentation after + return, break, continue, raise and pass stmts (pymode). + + The last two are easy to fool in the presence of strings & + continuations, but pymode requires Emacs's high-powered C parsing + functions to avoid that in finite time. + +====================================================================== + Python release 1.5.2c1, IDLE version 0.4 +====================================================================== + +Wed Apr 7 18:41:59 1999 Guido van Rossum + + * README.txt, NEWS.txt: New version. + + * idlever.py: Version bump awaiting impending new release. + (Not much has changed :-( ) + +Mon Mar 29 14:52:28 1999 Guido van Rossum + + * ScriptBinding.py, PyShell.py: + At Tim Peters' recommendation, add a dummy flush() method to + PseudoFile. + +Thu Mar 11 23:21:23 1999 Guido van Rossum + + * PathBrowser.py: Don't crash when sys.path contains an empty string. 
+ + * Attic/Outline.py: This file was never supposed to be part of IDLE. + + * PathBrowser.py: + - Don't crash in the case where a superclass is a string instead of a + pyclbr.Class object; this can happen when the superclass is + unrecognizable (to pyclbr), e.g. when module renaming is used. + + - Show a watch cursor when calling pyclbr (since it may take a while + recursively parsing imported modules!). + +Wed Mar 10 05:18:02 1999 Guido van Rossum + + * EditorWindow.py, Bindings.py: Add PathBrowser to File module + + * PathBrowser.py: "Path browser" - 4 scrolled lists displaying: + directories on sys.path + modules in selected directory + classes in selected module + methods of selected class + + Sinlge clicking in a directory, module or class item updates the next + column with info about the selected item. Double clicking in a + module, class or method item opens the file (and selects the clicked + item if it is a class or method). + + I guess eventually I should be using a tree widget for this, but the + ones I've seen don't work well enough, so for now I use the old + Smalltalk or NeXT style multi-column hierarchical browser. + + * MultiScrolledLists.py: + New utility: multiple scrolled lists in parallel + + * ScrolledList.py: - White background. + - Display "(None)" (or text of your choosing) when empty. + - Don't set the focus. + +====================================================================== + Python release 1.5.2b2, IDLE version 0.3 +====================================================================== + +Wed Feb 17 22:47:41 1999 Guido van Rossum + + * NEWS.txt: News in 0.3. + + * README.txt, idlever.py: Bump version to 0.3. + + * EditorWindow.py: + After all, we don't need to call the callbacks ourselves! + + * WindowList.py: + When deleting, call the callbacks *after* deleting the window from our list! + + * EditorWindow.py: + Fix up the Windows menu via the new callback mechanism instead of + depending on menu post commands (which don't work when the menu is + torn off). + + * WindowList.py: + Support callbacks to patch up Windows menus everywhere. + + * ChangeLog: Oh, why not. Checking in the Emacs-generated change log. + +Tue Feb 16 22:34:17 1999 Guido van Rossum + + * ScriptBinding.py: + Only pop up the stack viewer when requested in the Debug menu. + +Mon Feb 8 22:27:49 1999 Guido van Rossum + + * WindowList.py: Don't crash if a window no longer exists. + + * TODO.txt: Restructured a bit. + +Mon Feb 1 23:06:17 1999 Guido van Rossum + + * PyShell.py: Add current dir or paths of file args to sys.path. + + * Debugger.py: Add canonic() function -- for brand new bdb.py feature. + + * StackViewer.py: Protect against accessing an empty stack. + +Fri Jan 29 20:44:45 1999 Guido van Rossum + + * ZoomHeight.py: + Use only the height to decide whether to zoom in or out. + +Thu Jan 28 22:24:30 1999 Guido van Rossum + + * EditorWindow.py, FileList.py: + Make sure the Tcl variables are shared between windows. + + * PyShell.py, EditorWindow.py, Bindings.py: + Move menu/key binding code from Bindings.py to EditorWindow.py, + with changed APIs -- it makes much more sense there. + Also add a new feature: if the first character of a menu label is + a '!', it gets a checkbox. Checkboxes are bound to Boolean Tcl variables + that can be accessed through the new getvar/setvar/getrawvar API; + the variable is named after the event to which the menu is bound. + + * Debugger.py: Add Quit button to the debugger window. 
+ + * SearchDialog.py: + When find_again() finds exactly the current selection, it's a failure. + + * idle.py, Attic/idle: Rename idle -> idle.py + +Mon Jan 18 15:18:57 1999 Guido van Rossum + + * EditorWindow.py, WindowList.py: Only deiconify when iconic. + + * TODO.txt: Misc + +Tue Jan 12 22:14:34 1999 Guido van Rossum + + * testcode.py, Attic/test.py: + Renamed test.py to testcode.py so one can import Python's + test package from inside IDLE. (Suggested by Jack Jansen.) + + * EditorWindow.py, ColorDelegator.py: + Hack to close a window that is colorizing. + + * Separator.py: Vladimir Marangozov's patch: + The separator dances too much and seems to jump by arbitrary amounts + in arbitrary directions when I try to move it for resizing the frames. + This patch makes it more quiet. + +Mon Jan 11 14:52:40 1999 Guido van Rossum + + * TODO.txt: Some requests have been fulfilled. + + * EditorWindow.py: + Set the cursor to a watch when opening the class browser (which may + take quite a while, browsing multiple files). + + Newer, better center() -- but assumes no wrapping. + + * SearchBinding.py: + Got rid of debug print statement in goto_line_event(). + + * ScriptBinding.py: + I think I like it better if it prints the traceback even when it displays + the stack viewer. + + * Debugger.py: Bind ESC to close-window. + + * ClassBrowser.py: Use a HSeparator between the classes and the items. + Make the list of classes wider by default (40 chars). + Bind ESC to close-window. + + * Separator.py: + Separator classes (draggable divider between two panes). + +Sat Jan 9 22:01:33 1999 Guido van Rossum + + * WindowList.py: + Don't traceback when wakeup() is called when the window has been destroyed. + This can happen when a torn-of Windows menu references closed windows. + And Tim Peters claims that the Windows menu is his favorite to tear off... + + * EditorWindow.py: Allow tearing off of the Windows menu. + + * StackViewer.py: Close on ESC. + + * help.txt: Updated a bunch of things (it was mostly still 0.1!) + + * extend.py: Added ScriptBinding to standard bindings. + + * ScriptBinding.py: + This now actually works. See doc string. It can run a module (i.e. + import or reload) or debug it (same with debugger control). Output + goes to a fresh output window, only created when needed. + +====================================================================== + Python release 1.5.2b1, IDLE version 0.2 +====================================================================== + +Fri Jan 8 17:26:02 1999 Guido van Rossum + + * README.txt, NEWS.txt: What's new in this release. + + * Bindings.py, PyShell.py: + Paul Prescod's patches to allow the stack viewer to pop up when a + traceback is printed. + +Thu Jan 7 00:12:15 1999 Guido van Rossum + + * FormatParagraph.py: + Change paragraph width limit to 70 (like Emacs M-Q). + + * README.txt: + Separating TODO from README. Slight reformulation of features. No + exact release date. + + * TODO.txt: Separating TODO from README. + +Mon Jan 4 21:19:09 1999 Guido van Rossum + + * FormatParagraph.py: + Hm. There was a boundary condition error at the end of the file too. + + * SearchBinding.py: Hm. Add Unix binding for replace, too. + + * keydefs.py: Ran eventparse.py again. + + * FormatParagraph.py: Added Unix Meta-q key binding; + fix find_paragraph when at start of file. + + * AutoExpand.py: Added Meta-/ binding for Unix as alt for Alt-/. + + * SearchBinding.py: + Add unix binding for grep (otherwise the menu entry doesn't work!) 
+
+ * ZoomHeight.py: Adjusted Unix height to work with fvwm96. :=(
+
+ * GrepDialog.py: Need to import sys!
+
+ * help.txt, extend.txt, README.txt: Formatted some paragraphs
+
+ * extend.py, FormatParagraph.py:
+ Add new extension to reformat a (text) paragraph.
+
+ * ZoomHeight.py: Typo in Win specific height setting.
+
+Sun Jan 3 00:47:35 1999 Guido van Rossum
+
+ * AutoIndent.py: Added something like Tim Peters' backspace patch.
+
+ * ZoomHeight.py: Adapted to Unix (i.e., more hardcoded constants).
+
+Sat Jan 2 21:28:54 1999 Guido van Rossum
+
+ * keydefs.py, idlever.py, idle.pyw, idle.bat, help.txt, extend.txt, extend.py, eventparse.py, ZoomHeight.py, WindowList.py, UndoDelegator.py, StackViewer.py, SearchEngine.py, SearchDialogBase.py, SearchDialog.py, ScrolledList.py, SearchBinding.py, ScriptBinding.py, ReplaceDialog.py, Attic/README, README.txt, PyShell.py, Attic/PopupMenu.py, OutputWindow.py, IOBinding.py, Attic/HelpWindow.py, History.py, GrepDialog.py, FileList.py, FrameViewer.py, EditorWindow.py, Debugger.py, Delegator.py, ColorDelegator.py, Bindings.py, ClassBrowser.py, AutoExpand.py, AutoIndent.py:
+ Checking in IDLE 0.2.
+
+ Much has changed -- too much, in fact, to write down.
+ The big news is that there's a standard way to write IDLE extensions;
+ see extend.txt. Some sample extensions have been provided, and
+ some existing code has been converted to extensions. Probably the
+ biggest new user feature is a new search dialog with more options,
+ search and replace, and even search in files (grep).
+
+ This is exactly as downloaded from my laptop after returning
+ from the holidays -- it hasn't even been tested on Unix yet.
+
+Fri Dec 18 15:52:54 1998 Guido van Rossum
+
+ * FileList.py, ClassBrowser.py:
+ Fix the class browser to work even when the file is not on sys.path.
+
+Tue Dec 8 20:39:36 1998 Guido van Rossum
+
+ * Attic/turtle.py: Moved to Python 1.5.2/Lib
+
+Fri Nov 27 03:19:20 1998 Guido van Rossum
+
+ * help.txt: Typo
+
+ * EditorWindow.py, FileList.py: Support underlining of menu labels
+
+ * Bindings.py:
+ New approach, separate tables for menus (platform-independent) and key
+ definitions (platform-specific), and generating accelerator strings
+ automatically from the key definitions.
+
+Mon Nov 16 18:37:42 1998 Guido van Rossum
+
+ * Attic/README: Clarify portability and main program.
+
+ * Attic/README: Added intro for 0.1 release and append Grail notes.
+
+Mon Oct 26 18:49:00 1998 Guido van Rossum
+
+ * Attic/turtle.py: root is now a global called _root
+
+Sat Oct 24 16:38:38 1998 Guido van Rossum
+
+ * Attic/turtle.py: Raise the root window on reset().
+ Different action on WM_DELETE_WINDOW is more likely to do the right thing,
+ allowing us to destroy old windows.
+
+ * Attic/turtle.py:
+ Split the goto() function in two: _goto() is the internal one,
+ using Canvas coordinates, and goto() uses turtle coordinates
+ and accepts variable argument lists.
+
+ * Attic/turtle.py: Cope with destruction of the window
+
+ * Attic/turtle.py: Turtle graphics
+
+ * Debugger.py: Use of Breakpoint class should be bdb.Breakpoint.
+
+Mon Oct 19 03:33:40 1998 Guido van Rossum
+
+ * SearchBinding.py:
+ Speed up the search a bit -- don't drag a mark around...
+
+ * PyShell.py:
+ Change our special entries from <console#N> to <pyshell#N>.
+ Patch linecache.checkcache() to keep our special entries alive.
+ Add popup menu to all editor windows to set a breakpoint.
+
+ * Debugger.py:
+ Use and pass through the 'force' flag to set_dict() where appropriate.
+ Default source and globals checkboxes to false.
+ Don't interact in user_return().
+ Add primitive set_breakpoint() method.
+
+ * ColorDelegator.py:
+ Raise priority of 'sel' tag so its foreground (on Windows) will take
+ priority over text colorization (which on Windows is almost the
+ same color as the selection background).
+
+ Define a tag and color for breakpoints ("BREAK").
+
+ * Attic/PopupMenu.py: Disable "Open stack viewer" and "help" commands.
+
+ * StackViewer.py:
+ Add optional 'force' argument (default 0) to load_dict().
+ If set, redo the display even if it's the same dict.
+
+Fri Oct 16 21:10:12 1998 Guido van Rossum
+
+ * StackViewer.py: Do nothing when loading the same dict as before.
+
+ * PyShell.py: Details for debugger interface.
+
+ * Debugger.py:
+ Restructured and more consistent. Save checkboxes across instantiations.
+
+ * EditorWindow.py, Attic/README, Bindings.py:
+ Get rid of conflicting ^X binding. Use ^W.
+
+ * Debugger.py, StackViewer.py:
+ Debugger can now show local and global variables.
+
+ * Debugger.py: Oops
+
+ * Debugger.py, PyShell.py: Better debugger support (show stack etc).
+
+ * Attic/PopupMenu.py: Follow renames in StackViewer module
+
+ * StackViewer.py:
+ Rename classes to StackViewer (the widget) and StackBrowser (the toplevel).
+
+ * ScrolledList.py: Add close() method
+
+ * EditorWindow.py: Clarify 'Open Module' dialog text
+
+ * StackViewer.py: Restructured into a browser and a widget.
+
+Thu Oct 15 23:27:08 1998 Guido van Rossum
+
+ * ClassBrowser.py, ScrolledList.py:
+ Generalized the scrolled list which is the base for the class and
+ method browser into a separate class in its own module.
+
+ * Attic/test.py: Cosmetic change
+
+ * Debugger.py: Don't show function name if there is none
+
+Wed Oct 14 03:43:05 1998 Guido van Rossum
+
+ * Debugger.py, PyShell.py: Polish the Debugger GUI a bit.
+ Closing it now also does the right thing.
+
+Tue Oct 13 23:51:13 1998 Guido van Rossum
+
+ * Debugger.py, PyShell.py, Bindings.py:
+ Add primitive debugger interface (so far it will step and show you the
+ source, but it doesn't yet show the stack).
+
+ * Attic/README: Misc
+
+ * StackViewer.py: Whoops -- referenced self.top before it was set.
+
+ * help.txt: Added history and completion commands.
+
+ * help.txt: Updated
+
+ * FileList.py: Add class browser functionality.
+
+ * StackViewer.py:
+ Add a close() method and bind to WM_DELETE_WINDOW protocol
+
+ * PyShell.py: Clear the linecache before printing a traceback
+
+ * Bindings.py: Added class browser binding.
+
+ * ClassBrowser.py: Much improved, much left to do.
+
+ * PyShell.py: Make the return key do what I mean more often.
+
+ * ClassBrowser.py:
+ Adding the beginnings of a Class browser. Incomplete, yet.
+
+ * EditorWindow.py, Bindings.py:
+ Add new command, "Open module". You select or type a module name,
+ and it opens the source.
+
+Mon Oct 12 23:59:27 1998 Guido van Rossum
+
+ * PyShell.py: Subsume functionality from Popup menu in Debug menu.
+ Other stuff so the PyShell window can be resurrected from the Windows menu.
+
+ * FileList.py: Get rid of PopUp menu.
+ Create a simple Windows menu. (Imperfect when Untitled windows exist.)
+ Add wakeup() method: deiconify, raise, focus.
+
+ * EditorWindow.py: Generalize menu creation.
+
+ * Bindings.py: Add Debug and Help menu items.
+
+ * EditorWindow.py: Added a menu bar to every window.
+
+ * Bindings.py: Add menu configuration to the event configuration.
+
+ * Attic/PopupMenu.py: Pass a root to the help window.
+
+ * SearchBinding.py:
+ Add parent argument to 'go to line number' dialog box.
+
+Sat Oct 10 19:15:32 1998 Guido van Rossum
+
+ * StackViewer.py:
+ Add a label at the top showing (very basic) help for the stack viewer.
+ Add a label at the bottom showing the exception info.
+
+ * Attic/test.py, Attic/idle: Add Unix main script and test program.
+
+ * idle.pyw, help.txt, WidgetRedirector.py, UndoDelegator.py, StackViewer.py, SearchBinding.py, Attic/README, PyShell.py, Attic/PopupMenu.py, Percolator.py, Outline.py, IOBinding.py, History.py, Attic/HelpWindow.py, FrameViewer.py, FileList.py, EditorWindow.py, Delegator.py, ColorDelegator.py, Bindings.py, AutoIndent.py, AutoExpand.py:
+ Initial checkin of Tk-based Python IDE.
+ Features: text editor with syntax coloring and undo;
+ subclassed into interactive Python shell which adds history.
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/calltip_w.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/calltip_w.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e0404aa49f56287462d48400ecc76c1eaa57262
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/calltip_w.py
@@ -0,0 +1,201 @@
+"""A call-tip window class for Tkinter/IDLE.
+
+After tooltip.py, which uses ideas gleaned from PySol.
+Used by calltip.py.
+"""
+from tkinter import Label, LEFT, SOLID, TclError
+
+from idlelib.tooltip import TooltipBase
+
+HIDE_EVENT = "<<calltipwindow-hide>>"
+HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
+CHECKHIDE_EVENT = "<<calltipwindow-checkhide>>"
+CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
+CHECKHIDE_TIME = 100  # milliseconds
+
+MARK_RIGHT = "calltipwindowregion_right"
+
+
+class CalltipWindow(TooltipBase):
+    """A call-tip widget for tkinter text widgets."""
+
+    def __init__(self, text_widget):
+        """Create a call-tip; shown by showtip().
+
+        text_widget: a Text widget with code for which call-tips are desired
+        """
+        # Note: The Text widget will be accessible as self.anchor_widget
+        super(CalltipWindow, self).__init__(text_widget)
+
+        self.label = self.text = None
+        self.parenline = self.parencol = self.lastline = None
+        self.hideid = self.checkhideid = None
+        self.checkhide_after_id = None
+
+    def get_position(self):
+        """Choose the position of the call-tip."""
+        curline = int(self.anchor_widget.index("insert").split('.')[0])
+        if curline == self.parenline:
+            anchor_index = (self.parenline, self.parencol)
+        else:
+            anchor_index = (curline, 0)
+        box = self.anchor_widget.bbox("%d.%d" % anchor_index)
+        if not box:
+            box = list(self.anchor_widget.bbox("insert"))
+            # align to left of window
+            box[0] = 0
+            box[2] = 0
+        return box[0] + 2, box[1] + box[3]
+
+    def position_window(self):
+        "Reposition the window if needed."
+        curline = int(self.anchor_widget.index("insert").split('.')[0])
+        if curline == self.lastline:
+            return
+        self.lastline = curline
+        self.anchor_widget.see("insert")
+        super(CalltipWindow, self).position_window()
+
+    def showtip(self, text, parenleft, parenright):
+        """Show the call-tip, bind events which will close it and reposition it.
+ + text: the text to display in the call-tip + parenleft: index of the opening parenthesis in the text widget + parenright: index of the closing parenthesis in the text widget, + or the end of the line if there is no closing parenthesis + """ + # Only called in calltip.Calltip, where lines are truncated + self.text = text + if self.tipwindow or not self.text: + return + + self.anchor_widget.mark_set(MARK_RIGHT, parenright) + self.parenline, self.parencol = map( + int, self.anchor_widget.index(parenleft).split(".")) + + super(CalltipWindow, self).showtip() + + self._bind_events() + + def showcontents(self): + """Create the call-tip widget.""" + self.label = Label(self.tipwindow, text=self.text, justify=LEFT, + background="#ffffd0", foreground="black", + relief=SOLID, borderwidth=1, + font=self.anchor_widget['font']) + self.label.pack() + + def checkhide_event(self, event=None): + """Handle CHECK_HIDE_EVENT: call hidetip or reschedule.""" + if not self.tipwindow: + # If the event was triggered by the same event that unbound + # this function, the function will be called nevertheless, + # so do nothing in this case. + return None + + # Hide the call-tip if the insertion cursor moves outside of the + # parenthesis. + curline, curcol = map(int, self.anchor_widget.index("insert").split('.')) + if curline < self.parenline or \ + (curline == self.parenline and curcol <= self.parencol) or \ + self.anchor_widget.compare("insert", ">", MARK_RIGHT): + self.hidetip() + return "break" + + # Not hiding the call-tip. + + self.position_window() + # Re-schedule this function to be called again in a short while. + if self.checkhide_after_id is not None: + self.anchor_widget.after_cancel(self.checkhide_after_id) + self.checkhide_after_id = \ + self.anchor_widget.after(CHECKHIDE_TIME, self.checkhide_event) + return None + + def hide_event(self, event): + """Handle HIDE_EVENT by calling hidetip.""" + if not self.tipwindow: + # See the explanation in checkhide_event. 
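`checkhide_event` above illustrates a general Tk polling idiom: on each check, either hide and stop, or cancel any pending `after` callback and schedule exactly one new one, so callbacks never pile up. Below is a minimal standalone sketch of that reschedule pattern; all names in it are illustrative, not part of IDLE's API.

    import tkinter as tk

    CHECK_MS = 100  # same granularity as CHECKHIDE_TIME above

    class Poller:
        """Re-check a condition every CHECK_MS ms until it says stop."""
        def __init__(self, widget, should_stop):
            self.widget = widget
            self.should_stop = should_stop
            self.after_id = None

        def check(self):
            if self.should_stop():
                return            # like hidetip(): no reschedule
            if self.after_id is not None:
                self.widget.after_cancel(self.after_id)  # never stack callbacks
            self.after_id = self.widget.after(CHECK_MS, self.check)

    root = tk.Tk()
    Poller(root, should_stop=lambda: False).check()
    # root.mainloop()  # run the Tk loop to see the polling happen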
+            return None
+        self.hidetip()
+        return "break"
+
+    def hidetip(self):
+        """Hide the call-tip."""
+        if not self.tipwindow:
+            return
+
+        try:
+            self.label.destroy()
+        except TclError:
+            pass
+        self.label = None
+
+        self.parenline = self.parencol = self.lastline = None
+        try:
+            self.anchor_widget.mark_unset(MARK_RIGHT)
+        except TclError:
+            pass
+
+        try:
+            self._unbind_events()
+        except (TclError, ValueError):
+            # ValueError may be raised by MultiCall
+            pass
+
+        super(CalltipWindow, self).hidetip()
+
+    def _bind_events(self):
+        """Bind event handlers."""
+        self.checkhideid = self.anchor_widget.bind(CHECKHIDE_EVENT,
+                                                   self.checkhide_event)
+        for seq in CHECKHIDE_SEQUENCES:
+            self.anchor_widget.event_add(CHECKHIDE_EVENT, seq)
+        self.anchor_widget.after(CHECKHIDE_TIME, self.checkhide_event)
+        self.hideid = self.anchor_widget.bind(HIDE_EVENT,
+                                              self.hide_event)
+        for seq in HIDE_SEQUENCES:
+            self.anchor_widget.event_add(HIDE_EVENT, seq)
+
+    def _unbind_events(self):
+        """Unbind event handlers."""
+        for seq in CHECKHIDE_SEQUENCES:
+            self.anchor_widget.event_delete(CHECKHIDE_EVENT, seq)
+        self.anchor_widget.unbind(CHECKHIDE_EVENT, self.checkhideid)
+        self.checkhideid = None
+        for seq in HIDE_SEQUENCES:
+            self.anchor_widget.event_delete(HIDE_EVENT, seq)
+        self.anchor_widget.unbind(HIDE_EVENT, self.hideid)
+        self.hideid = None
+
+
+def _calltip_window(parent):  # htest #
+    from tkinter import Toplevel, Text, LEFT, BOTH
+
+    top = Toplevel(parent)
+    top.title("Test call-tips")
+    x, y = map(int, parent.geometry().split('+')[1:])
+    top.geometry("250x100+%d+%d" % (x + 175, y + 150))
+    text = Text(top)
+    text.pack(side=LEFT, fill=BOTH, expand=1)
+    text.insert("insert", "string.split")
+    top.update()
+
+    calltip = CalltipWindow(text)
+    def calltip_show(event):
+        calltip.showtip("(s='Hello world')", "insert", "end")
+    def calltip_hide(event):
+        calltip.hidetip()
+    text.event_add("<<calltip-show>>", "(")
+    text.event_add("<<calltip-hide>>", ")")
+    text.bind("<<calltip-show>>", calltip_show)
+    text.bind("<<calltip-hide>>", calltip_hide)
+
+    text.focus_set()
+
+if __name__ == '__main__':
+    from unittest import main
+    main('idlelib.idle_test.test_calltip_w', verbosity=2, exit=False)
+
+    from idlelib.idle_test.htest import run
+    run(_calltip_window)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/config_key.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/config_key.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ca3a156f4b97f1c2d2a40234a5acc163a433750
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/config_key.py
@@ -0,0 +1,327 @@
+"""
+Dialog for building Tkinter accelerator key bindings
+"""
+from tkinter import Toplevel, Listbox, StringVar, TclError
+from tkinter.ttk import Frame, Button, Checkbutton, Entry, Label, Scrollbar
+from tkinter import messagebox
+from tkinter.simpledialog import _setup_dialog
+import string
+import sys
+
+
+FUNCTION_KEYS = ('F1', 'F2' ,'F3' ,'F4' ,'F5' ,'F6',
+                 'F7', 'F8' ,'F9' ,'F10' ,'F11' ,'F12')
+ALPHANUM_KEYS = tuple(string.ascii_lowercase + string.digits)
+PUNCTUATION_KEYS = tuple('~!@#%^&*()_-+={}[]|;:,.<>/?')
+WHITESPACE_KEYS = ('Tab', 'Space', 'Return')
+EDIT_KEYS = ('BackSpace', 'Delete', 'Insert')
+MOVE_KEYS = ('Home', 'End', 'Page Up', 'Page Down', 'Left Arrow',
+             'Right Arrow', 'Up Arrow', 'Down Arrow')
+AVAILABLE_KEYS = (ALPHANUM_KEYS + PUNCTUATION_KEYS + FUNCTION_KEYS +
+                  WHITESPACE_KEYS + EDIT_KEYS + MOVE_KEYS)
+
+
+def translate_key(key, modifiers):
+    "Translate from keycap symbol to the Tkinter keysym."
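`_bind_events`/`_unbind_events` above use Tk virtual events: several concrete sequences are attached to one abstract name with `event_add`, the handler is bound once to that name, and teardown mirrors setup. A minimal sketch with a hypothetical event name:

    import tkinter as tk

    root = tk.Tk()
    text = tk.Text(root)
    text.pack()

    # One abstract event, two concrete triggers.
    text.event_add("<<demo-hide>>", "<Key-Escape>", "<FocusOut>")
    hide_id = text.bind("<<demo-hide>>", lambda event: print("hide requested"))

    # Teardown mirrors _unbind_events: detach the sequences, then unbind.
    text.event_delete("<<demo-hide>>", "<Key-Escape>", "<FocusOut>")
    text.unbind("<<demo-hide>>", hide_id)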
+ mapping = {'Space':'space', + '~':'asciitilde', '!':'exclam', '@':'at', '#':'numbersign', + '%':'percent', '^':'asciicircum', '&':'ampersand', + '*':'asterisk', '(':'parenleft', ')':'parenright', + '_':'underscore', '-':'minus', '+':'plus', '=':'equal', + '{':'braceleft', '}':'braceright', + '[':'bracketleft', ']':'bracketright', '|':'bar', + ';':'semicolon', ':':'colon', ',':'comma', '.':'period', + '<':'less', '>':'greater', '/':'slash', '?':'question', + 'Page Up':'Prior', 'Page Down':'Next', + 'Left Arrow':'Left', 'Right Arrow':'Right', + 'Up Arrow':'Up', 'Down Arrow': 'Down', 'Tab':'Tab'} + key = mapping.get(key, key) + if 'Shift' in modifiers and key in string.ascii_lowercase: + key = key.upper() + return f'Key-{key}' + + +class GetKeysDialog(Toplevel): + + # Dialog title for invalid key sequence + keyerror_title = 'Key Sequence Error' + + def __init__(self, parent, title, action, current_key_sequences, + *, _htest=False, _utest=False): + """ + parent - parent of this dialog + title - string which is the title of the popup dialog + action - string, the name of the virtual event these keys will be + mapped to + current_key_sequences - list, a list of all key sequence lists + currently mapped to virtual events, for overlap checking + _htest - bool, change box location when running htest + _utest - bool, do not wait when running unittest + """ + Toplevel.__init__(self, parent) + self.withdraw() # Hide while setting geometry. + self.configure(borderwidth=5) + self.resizable(height=False, width=False) + self.title(title) + self.transient(parent) + _setup_dialog(self) + self.grab_set() + self.protocol("WM_DELETE_WINDOW", self.cancel) + self.parent = parent + self.action = action + self.current_key_sequences = current_key_sequences + self.result = '' + self.key_string = StringVar(self) + self.key_string.set('') + # Set self.modifiers, self.modifier_label. + self.set_modifiers_for_platform() + self.modifier_vars = [] + for modifier in self.modifiers: + variable = StringVar(self) + variable.set('') + self.modifier_vars.append(variable) + self.advanced = False + self.create_widgets() + self.update_idletasks() + self.geometry( + "+%d+%d" % ( + parent.winfo_rootx() + + (parent.winfo_width()/2 - self.winfo_reqwidth()/2), + parent.winfo_rooty() + + ((parent.winfo_height()/2 - self.winfo_reqheight()/2) + if not _htest else 150) + ) ) # Center dialog over parent (or below htest box). + if not _utest: + self.deiconify() # Geometry set, unhide. + self.wait_window() + + def showerror(self, *args, **kwargs): + # Make testing easier. Replace in #30751. + messagebox.showerror(*args, **kwargs) + + def create_widgets(self): + self.frame = frame = Frame(self, borderwidth=2, relief='sunken') + frame.pack(side='top', expand=True, fill='both') + + frame_buttons = Frame(self) + frame_buttons.pack(side='bottom', fill='x') + + self.button_ok = Button(frame_buttons, text='OK', + width=8, command=self.ok) + self.button_ok.grid(row=0, column=0, padx=5, pady=5) + self.button_cancel = Button(frame_buttons, text='Cancel', + width=8, command=self.cancel) + self.button_cancel.grid(row=0, column=1, padx=5, pady=5) + + # Basic entry key sequence. 
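With `translate_key` now fully defined, a few spot checks show the mapping in action (assuming the module is importable as `idlelib.config_key`); `build_key_string` later joins selected modifiers and the translated key into a binding string such as `<Control-Key-comma>`.

    from idlelib.config_key import translate_key

    assert translate_key('Page Up', []) == 'Key-Prior'
    assert translate_key(',', []) == 'Key-comma'
    assert translate_key('a', []) == 'Key-a'
    assert translate_key('a', ['Shift']) == 'Key-A'  # Shift upper-cases letters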
+        self.frame_keyseq_basic = Frame(frame, name='keyseq_basic')
+        self.frame_keyseq_basic.grid(row=0, column=0, sticky='nsew',
+                                     padx=5, pady=5)
+        basic_title = Label(self.frame_keyseq_basic,
+                            text=f"New keys for '{self.action}' :")
+        basic_title.pack(anchor='w')
+
+        basic_keys = Label(self.frame_keyseq_basic, justify='left',
+                           textvariable=self.key_string, relief='groove',
+                           borderwidth=2)
+        basic_keys.pack(ipadx=5, ipady=5, fill='x')
+
+        # Basic entry controls.
+        self.frame_controls_basic = Frame(frame)
+        self.frame_controls_basic.grid(row=1, column=0, sticky='nsew', padx=5)
+
+        # Basic entry modifiers.
+        self.modifier_checkbuttons = {}
+        column = 0
+        for modifier, variable in zip(self.modifiers, self.modifier_vars):
+            label = self.modifier_label.get(modifier, modifier)
+            check = Checkbutton(self.frame_controls_basic,
+                                command=self.build_key_string, text=label,
+                                variable=variable, onvalue=modifier, offvalue='')
+            check.grid(row=0, column=column, padx=2, sticky='w')
+            self.modifier_checkbuttons[modifier] = check
+            column += 1
+
+        # Basic entry help text.
+        help_basic = Label(self.frame_controls_basic, justify='left',
+                           text="Select the desired modifier keys\n"+
+                                "above, and the final key from the\n"+
+                                "list on the right.\n\n" +
+                                "Use upper case Symbols when using\n" +
+                                "the Shift modifier. (Letters will be\n" +
+                                "converted automatically.)")
+        help_basic.grid(row=1, column=0, columnspan=4, padx=2, sticky='w')
+
+        # Basic entry key list.
+        self.list_keys_final = Listbox(self.frame_controls_basic, width=15,
+                                       height=10, selectmode='single')
+        self.list_keys_final.insert('end', *AVAILABLE_KEYS)
+        self.list_keys_final.bind('<ButtonRelease-1>', self.final_key_selected)
+        self.list_keys_final.grid(row=0, column=4, rowspan=4, sticky='ns')
+        scroll_keys_final = Scrollbar(self.frame_controls_basic,
+                                      orient='vertical',
+                                      command=self.list_keys_final.yview)
+        self.list_keys_final.config(yscrollcommand=scroll_keys_final.set)
+        scroll_keys_final.grid(row=0, column=5, rowspan=4, sticky='ns')
+        self.button_clear = Button(self.frame_controls_basic,
+                                   text='Clear Keys',
+                                   command=self.clear_key_seq)
+        self.button_clear.grid(row=2, column=0, columnspan=4)
+
+        # Advanced entry key sequence.
+        self.frame_keyseq_advanced = Frame(frame, name='keyseq_advanced')
+        self.frame_keyseq_advanced.grid(row=0, column=0, sticky='nsew',
+                                        padx=5, pady=5)
+        advanced_title = Label(self.frame_keyseq_advanced, justify='left',
+                               text=f"Enter new binding(s) for '{self.action}' :\n" +
+                                    "(These bindings will not be checked for validity!)")
+        advanced_title.pack(anchor='w')
+        self.advanced_keys = Entry(self.frame_keyseq_advanced,
+                                   textvariable=self.key_string)
+        self.advanced_keys.pack(fill='x')
+
+        # Advanced entry help text.
+        self.frame_help_advanced = Frame(frame)
+        self.frame_help_advanced.grid(row=1, column=0, sticky='nsew', padx=5)
+        help_advanced = Label(self.frame_help_advanced, justify='left',
+            text="Key bindings are specified using Tkinter keysyms as\n"+
+                 "in these samples: <Control-f>, <Shift-F2>, <F12>,\n"
+                 "<Control-space>, <Meta-less>, <Control-Alt-Shift-X>.\n"
+                 "Upper case is used when the Shift modifier is present!\n\n" +
+                 "'Emacs style' multi-keystroke bindings are specified as\n" +
+                 "follows: <Control-x><Control-y>, where the first key\n" +
+                 "is the 'do-nothing' keybinding.\n\n" +
+                 "Multiple separate bindings for one action should be\n"+
+                 "separated by a space, eg., <Alt-v> <Meta-v>." )
+        help_advanced.grid(row=0, column=0, sticky='nsew')
+
+        # Switch between basic and advanced.
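The `list_keys_final`/`scroll_keys_final` pairing above is the standard two-way Listbox/Scrollbar contract: the scrollbar's `command` drives `yview`, and the listbox's `yscrollcommand` keeps the slider in sync. The same wiring in isolation:

    import tkinter as tk

    root = tk.Tk()
    listbox = tk.Listbox(root, width=15, height=10, selectmode='single')
    scroll = tk.Scrollbar(root, orient='vertical', command=listbox.yview)
    listbox.config(yscrollcommand=scroll.set)   # listbox -> scrollbar updates
    listbox.pack(side='left', fill='both', expand=True)
    scroll.pack(side='right', fill='y')
    listbox.insert('end', *[f'key {i}' for i in range(50)])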
+ self.button_level = Button(frame, command=self.toggle_level, + text='<< Basic Key Binding Entry') + self.button_level.grid(row=2, column=0, stick='ew', padx=5, pady=5) + self.toggle_level() + + def set_modifiers_for_platform(self): + """Determine list of names of key modifiers for this platform. + + The names are used to build Tk bindings -- it doesn't matter if the + keyboard has these keys; it matters if Tk understands them. The + order is also important: key binding equality depends on it, so + config-keys.def must use the same ordering. + """ + if sys.platform == "darwin": + self.modifiers = ['Shift', 'Control', 'Option', 'Command'] + else: + self.modifiers = ['Control', 'Alt', 'Shift'] + self.modifier_label = {'Control': 'Ctrl'} # Short name. + + def toggle_level(self): + "Toggle between basic and advanced keys." + if self.button_level.cget('text').startswith('Advanced'): + self.clear_key_seq() + self.button_level.config(text='<< Basic Key Binding Entry') + self.frame_keyseq_advanced.lift() + self.frame_help_advanced.lift() + self.advanced_keys.focus_set() + self.advanced = True + else: + self.clear_key_seq() + self.button_level.config(text='Advanced Key Binding Entry >>') + self.frame_keyseq_basic.lift() + self.frame_controls_basic.lift() + self.advanced = False + + def final_key_selected(self, event=None): + "Handler for clicking on key in basic settings list." + self.build_key_string() + + def build_key_string(self): + "Create formatted string of modifiers plus the key." + keylist = modifiers = self.get_modifiers() + final_key = self.list_keys_final.get('anchor') + if final_key: + final_key = translate_key(final_key, modifiers) + keylist.append(final_key) + self.key_string.set(f"<{'-'.join(keylist)}>") + + def get_modifiers(self): + "Return ordered list of modifiers that have been selected." + mod_list = [variable.get() for variable in self.modifier_vars] + return [mod for mod in mod_list if mod] + + def clear_key_seq(self): + "Clear modifiers and keys selection." + self.list_keys_final.select_clear(0, 'end') + self.list_keys_final.yview('moveto', '0.0') + for variable in self.modifier_vars: + variable.set('') + self.key_string.set('') + + def ok(self, event=None): + keys = self.key_string.get().strip() + if not keys: + self.showerror(title=self.keyerror_title, parent=self, + message="No key specified.") + return + if (self.advanced or self.keys_ok(keys)) and self.bind_ok(keys): + self.result = keys + self.grab_release() + self.destroy() + + def cancel(self, event=None): + self.result = '' + self.grab_release() + self.destroy() + + def keys_ok(self, keys): + """Validity check on user's 'basic' keybinding selection. + + Doesn't check the string produced by the advanced dialog because + 'modifiers' isn't set. + """ + final_key = self.list_keys_final.get('anchor') + modifiers = self.get_modifiers() + title = self.keyerror_title + key_sequences = [key for keylist in self.current_key_sequences + for key in keylist] + if not keys.endswith('>'): + self.showerror(title, parent=self, + message='Missing the final Key') + elif (not modifiers + and final_key not in FUNCTION_KEYS + MOVE_KEYS): + self.showerror(title=title, parent=self, + message='No modifier key(s) specified.') + elif (modifiers == ['Shift']) \ + and (final_key not in + FUNCTION_KEYS + MOVE_KEYS + ('Tab', 'Space')): + msg = 'The shift modifier by itself may not be used with'\ + ' this key symbol.' 
+ self.showerror(title=title, parent=self, message=msg) + elif keys in key_sequences: + msg = 'This key combination is already in use.' + self.showerror(title=title, parent=self, message=msg) + else: + return True + return False + + def bind_ok(self, keys): + "Return True if Tcl accepts the new keys else show message." + try: + binding = self.bind(keys, lambda: None) + except TclError as err: + self.showerror( + title=self.keyerror_title, parent=self, + message=(f'The entered key sequence is not accepted.\n\n' + f'Error: {err}')) + return False + else: + self.unbind(keys, binding) + return True + + +if __name__ == '__main__': + from unittest import main + main('idlelib.idle_test.test_config_key', verbosity=2, exit=False) + + from idlelib.idle_test.htest import run + run(GetKeysDialog) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/dynoption.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/dynoption.py new file mode 100644 index 0000000000000000000000000000000000000000..9c6ffa435a1089ed6f8a9cb4728ffa93447b9ee3 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/dynoption.py @@ -0,0 +1,58 @@ +""" +OptionMenu widget modified to allow dynamic menu reconfiguration +and setting of highlightthickness +""" +import copy + +from tkinter import OptionMenu, _setit, StringVar, Button + +class DynOptionMenu(OptionMenu): + """ + unlike OptionMenu, our kwargs can include highlightthickness + """ + def __init__(self, master, variable, value, *values, **kwargs): + # TODO copy value instead of whole dict + kwargsCopy=copy.copy(kwargs) + if 'highlightthickness' in list(kwargs.keys()): + del(kwargs['highlightthickness']) + OptionMenu.__init__(self, master, variable, value, *values, **kwargs) + self.config(highlightthickness=kwargsCopy.get('highlightthickness')) + #self.menu=self['menu'] + self.variable=variable + self.command=kwargs.get('command') + + def SetMenu(self,valueList,value=None): + """ + clear and reload the menu with a new set of options. 
+        valueList - list of new options
+        value - initial value to set the optionmenu's menubutton to
+        """
+        self['menu'].delete(0,'end')
+        for item in valueList:
+            self['menu'].add_command(label=item,
+                    command=_setit(self.variable,item,self.command))
+        if value:
+            self.variable.set(value)
+
+def _dyn_option_menu(parent):  # htest #
+    from tkinter import Toplevel  # + StringVar, Button
+
+    top = Toplevel(parent)
+    top.title("Test dynamic option menu")
+    x, y = map(int, parent.geometry().split('+')[1:])
+    top.geometry("200x100+%d+%d" % (x + 250, y + 175))
+    top.focus_set()
+
+    var = StringVar(top)
+    var.set("Old option set")  # Set the default value
+    dyn = DynOptionMenu(top,var, "old1","old2","old3","old4")
+    dyn.pack()
+
+    def update():
+        dyn.SetMenu(["new1","new2","new3","new4"], value="new option set")
+    button = Button(top, text="Change option set", command=update)
+    button.pack()
+
+if __name__ == '__main__':
+    from idlelib.idle_test.htest import run
+    run(_dyn_option_menu)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/iomenu.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/iomenu.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ebf7089fb9abede84b41c504f4d95cac0923de0
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/iomenu.py
@@ -0,0 +1,434 @@
+import io
+import os
+import shlex
+import sys
+import tempfile
+import tokenize
+
+from tkinter import filedialog
+from tkinter import messagebox
+from tkinter.simpledialog import askstring
+
+import idlelib
+from idlelib.config import idleConf
+
+encoding = 'utf-8'
+if sys.platform == 'win32':
+    errors = 'surrogatepass'
+else:
+    errors = 'surrogateescape'
+
+
+
+class IOBinding:
+# One instance per editor Window so methods know which to save, close.
+# Open returns focus to self.editwin if aborted.
+# EditorWindow.open_module, others, belong here.
+
+    def __init__(self, editwin):
+        self.editwin = editwin
+        self.text = editwin.text
+        self.__id_open = self.text.bind("<<open-window-from-file>>", self.open)
+        self.__id_save = self.text.bind("<<save-window>>", self.save)
+        self.__id_saveas = self.text.bind("<<save-window-as-file>>",
+                                          self.save_as)
+        self.__id_savecopy = self.text.bind("<<save-copy-of-window-as-file>>",
+                                            self.save_a_copy)
+        self.fileencoding = 'utf-8'
+        self.__id_print = self.text.bind("<<print-window>>", self.print_window)
+
+    def close(self):
+        # Undo command bindings
+        self.text.unbind("<<open-window-from-file>>", self.__id_open)
+        self.text.unbind("<<save-window>>", self.__id_save)
+        self.text.unbind("<<save-window-as-file>>",self.__id_saveas)
+        self.text.unbind("<<save-copy-of-window-as-file>>", self.__id_savecopy)
+        self.text.unbind("<<print-window>>", self.__id_print)
+        # Break cycles
+        self.editwin = None
+        self.text = None
+        self.filename_change_hook = None
+
+    def get_saved(self):
+        return self.editwin.get_saved()
+
+    def set_saved(self, flag):
+        self.editwin.set_saved(flag)
+
+    def reset_undo(self):
+        self.editwin.reset_undo()
+
+    filename_change_hook = None
+
+    def set_filename_change_hook(self, hook):
+        self.filename_change_hook = hook
+
+    filename = None
+    dirname = None
+
+    def set_filename(self, filename):
+        if filename and os.path.isdir(filename):
+            self.filename = None
+            self.dirname = filename
+        else:
+            self.filename = filename
+            self.dirname = None
+            self.set_saved(1)
+            if self.filename_change_hook:
+                self.filename_change_hook()
+
+    def open(self, event=None, editFile=None):
+        flist = self.editwin.flist
+        # Save in case parent window is closed (ie, during askopenfile()).
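`SetMenu` above automates a plain-tkinter idiom: empty the menu behind the option button, then re-add one command per value via tkinter's private `_setit` helper, so selecting an entry updates the `StringVar` and the button label. By hand, outside the class, that looks like this sketch:

    from tkinter import Tk, OptionMenu, StringVar, _setit

    root = Tk()
    var = StringVar(root, value='old1')
    opt = OptionMenu(root, var, 'old1', 'old2')
    opt.pack()

    menu = opt['menu']            # the Menu widget behind the button
    menu.delete(0, 'end')         # drop the old choices
    for item in ('new1', 'new2', 'new3'):
        menu.add_command(label=item, command=_setit(var, item))
    var.set('new1')               # keep the button label in sync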
+ if flist: + if not editFile: + filename = self.askopenfile() + else: + filename=editFile + if filename: + # If editFile is valid and already open, flist.open will + # shift focus to its existing window. + # If the current window exists and is a fresh unnamed, + # unmodified editor window (not an interpreter shell), + # pass self.loadfile to flist.open so it will load the file + # in the current window (if the file is not already open) + # instead of a new window. + if (self.editwin and + not getattr(self.editwin, 'interp', None) and + not self.filename and + self.get_saved()): + flist.open(filename, self.loadfile) + else: + flist.open(filename) + else: + if self.text: + self.text.focus_set() + return "break" + + # Code for use outside IDLE: + if self.get_saved(): + reply = self.maybesave() + if reply == "cancel": + self.text.focus_set() + return "break" + if not editFile: + filename = self.askopenfile() + else: + filename=editFile + if filename: + self.loadfile(filename) + else: + self.text.focus_set() + return "break" + + eol_convention = os.linesep # default + + def loadfile(self, filename): + try: + try: + with tokenize.open(filename) as f: + chars = f.read() + fileencoding = f.encoding + eol_convention = f.newlines + converted = False + except (UnicodeDecodeError, SyntaxError): + # Wait for the editor window to appear + self.editwin.text.update() + enc = askstring( + "Specify file encoding", + "The file's encoding is invalid for Python 3.x.\n" + "IDLE will convert it to UTF-8.\n" + "What is the current encoding of the file?", + initialvalue='utf-8', + parent=self.editwin.text) + with open(filename, encoding=enc) as f: + chars = f.read() + fileencoding = f.encoding + eol_convention = f.newlines + converted = True + except OSError as err: + messagebox.showerror("I/O Error", str(err), parent=self.text) + return False + except UnicodeDecodeError: + messagebox.showerror("Decoding Error", + "File %s\nFailed to Decode" % filename, + parent=self.text) + return False + + if not isinstance(eol_convention, str): + # If the file does not contain line separators, it is None. + # If the file contains mixed line separators, it is a tuple. + if eol_convention is not None: + messagebox.showwarning("Mixed Newlines", + "Mixed newlines detected.\n" + "The file will be changed on save.", + parent=self.text) + converted = True + eol_convention = os.linesep # default + + self.text.delete("1.0", "end") + self.set_filename(None) + self.fileencoding = fileencoding + self.eol_convention = eol_convention + self.text.insert("1.0", chars) + self.reset_undo() + self.set_filename(filename) + if converted: + # We need to save the conversion results first + # before being able to execute the code + self.set_saved(False) + self.text.mark_set("insert", "1.0") + self.text.yview("insert") + self.updaterecentfileslist(filename) + return True + + def maybesave(self): + if self.get_saved(): + return "yes" + message = "Do you want to save %s before closing?" 
% ( + self.filename or "this untitled document") + confirm = messagebox.askyesnocancel( + title="Save On Close", + message=message, + default=messagebox.YES, + parent=self.text) + if confirm: + reply = "yes" + self.save(None) + if not self.get_saved(): + reply = "cancel" + elif confirm is None: + reply = "cancel" + else: + reply = "no" + self.text.focus_set() + return reply + + def save(self, event): + if not self.filename: + self.save_as(event) + else: + if self.writefile(self.filename): + self.set_saved(True) + try: + self.editwin.store_file_breaks() + except AttributeError: # may be a PyShell + pass + self.text.focus_set() + return "break" + + def save_as(self, event): + filename = self.asksavefile() + if filename: + if self.writefile(filename): + self.set_filename(filename) + self.set_saved(1) + try: + self.editwin.store_file_breaks() + except AttributeError: + pass + self.text.focus_set() + self.updaterecentfileslist(filename) + return "break" + + def save_a_copy(self, event): + filename = self.asksavefile() + if filename: + self.writefile(filename) + self.text.focus_set() + self.updaterecentfileslist(filename) + return "break" + + def writefile(self, filename): + text = self.fixnewlines() + chars = self.encode(text) + try: + with open(filename, "wb") as f: + f.write(chars) + f.flush() + os.fsync(f.fileno()) + return True + except OSError as msg: + messagebox.showerror("I/O Error", str(msg), + parent=self.text) + return False + + def fixnewlines(self): + "Return text with final \n if needed and os eols." + if (self.text.get("end-2c") != '\n' + and not hasattr(self.editwin, "interp")): # Not shell. + self.text.insert("end-1c", "\n") + text = self.text.get("1.0", "end-1c") + if self.eol_convention != "\n": + text = text.replace("\n", self.eol_convention) + return text + + def encode(self, chars): + if isinstance(chars, bytes): + # This is either plain ASCII, or Tk was returning mixed-encoding + # text to us. Don't try to guess further. + return chars + # Preserve a BOM that might have been present on opening + if self.fileencoding == 'utf-8-sig': + return chars.encode('utf-8-sig') + # See whether there is anything non-ASCII in it. + # If not, no need to figure out the encoding. 
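`writefile` and `fixnewlines` above split saving into two steps: convert internal `'\n'` to the window's EOL convention, then write the encoded bytes in binary mode and force them to disk. A condensed standalone sketch of that save path (the filename is hypothetical):

    import os

    def save_text(filename, text, eol=os.linesep, encoding='utf-8'):
        if eol != '\n':
            text = text.replace('\n', eol)   # as fixnewlines does
        with open(filename, 'wb') as f:      # binary: no newline translation
            f.write(text.encode(encoding))
            f.flush()
            os.fsync(f.fileno())             # push through the OS cache, as writefile does

    save_text('demo.txt', 'line 1\nline 2\n')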
+ try: + return chars.encode('ascii') + except UnicodeEncodeError: + pass + # Check if there is an encoding declared + try: + encoded = chars.encode('ascii', 'replace') + enc, _ = tokenize.detect_encoding(io.BytesIO(encoded).readline) + return chars.encode(enc) + except SyntaxError as err: + failed = str(err) + except UnicodeEncodeError: + failed = "Invalid encoding '%s'" % enc + messagebox.showerror( + "I/O Error", + "%s.\nSaving as UTF-8" % failed, + parent=self.text) + # Fallback: save as UTF-8, with BOM - ignoring the incorrect + # declared encoding + return chars.encode('utf-8-sig') + + def print_window(self, event): + confirm = messagebox.askokcancel( + title="Print", + message="Print to Default Printer", + default=messagebox.OK, + parent=self.text) + if not confirm: + self.text.focus_set() + return "break" + tempfilename = None + saved = self.get_saved() + if saved: + filename = self.filename + # shell undo is reset after every prompt, looks saved, probably isn't + if not saved or filename is None: + (tfd, tempfilename) = tempfile.mkstemp(prefix='IDLE_tmp_') + filename = tempfilename + os.close(tfd) + if not self.writefile(tempfilename): + os.unlink(tempfilename) + return "break" + platform = os.name + printPlatform = True + if platform == 'posix': #posix platform + command = idleConf.GetOption('main','General', + 'print-command-posix') + command = command + " 2>&1" + elif platform == 'nt': #win32 platform + command = idleConf.GetOption('main','General','print-command-win') + else: #no printing for this platform + printPlatform = False + if printPlatform: #we can try to print for this platform + command = command % shlex.quote(filename) + pipe = os.popen(command, "r") + # things can get ugly on NT if there is no printer available. + output = pipe.read().strip() + status = pipe.close() + if status: + output = "Printing failed (exit status 0x%x)\n" % \ + status + output + if output: + output = "Printing command: %s\n" % repr(command) + output + messagebox.showerror("Print status", output, parent=self.text) + else: #no printing for this platform + message = "Printing is not enabled for this platform: %s" % platform + messagebox.showinfo("Print status", message, parent=self.text) + if tempfilename: + os.unlink(tempfilename) + return "break" + + opendialog = None + savedialog = None + + filetypes = ( + ("Python files", "*.py *.pyw", "TEXT"), + ("Text files", "*.txt", "TEXT"), + ("All files", "*"), + ) + + defaultextension = '.py' if sys.platform == 'darwin' else '' + + def askopenfile(self): + dir, base = self.defaultfilename("open") + if not self.opendialog: + self.opendialog = filedialog.Open(parent=self.text, + filetypes=self.filetypes) + filename = self.opendialog.show(initialdir=dir, initialfile=base) + return filename + + def defaultfilename(self, mode="open"): + if self.filename: + return os.path.split(self.filename) + elif self.dirname: + return self.dirname, "" + else: + try: + pwd = os.getcwd() + except OSError: + pwd = "" + return pwd, "" + + def asksavefile(self): + dir, base = self.defaultfilename("save") + if not self.savedialog: + self.savedialog = filedialog.SaveAs( + parent=self.text, + filetypes=self.filetypes, + defaultextension=self.defaultextension) + filename = self.savedialog.show(initialdir=dir, initialfile=base) + return filename + + def updaterecentfileslist(self,filename): + "Update recent file list on all editor windows" + if self.editwin.flist: + self.editwin.update_recent_files_list(filename) + +def _io_binding(parent): # htest # + from tkinter import 
Toplevel, Text
+
+    root = Toplevel(parent)
+    root.title("Test IOBinding")
+    x, y = map(int, parent.geometry().split('+')[1:])
+    root.geometry("+%d+%d" % (x, y + 175))
+    class MyEditWin:
+        def __init__(self, text):
+            self.text = text
+            self.flist = None
+            self.text.bind("<Control-o>", self.open)
+            self.text.bind('<Control-p>', self.print)
+            self.text.bind("<Control-s>", self.save)
+            self.text.bind("<Alt-s>", self.saveas)
+            self.text.bind('<Control-c>', self.savecopy)
+        def get_saved(self): return 0
+        def set_saved(self, flag): pass
+        def reset_undo(self): pass
+        def open(self, event):
+            self.text.event_generate("<<open-window-from-file>>")
+        def print(self, event):
+            self.text.event_generate("<<print-window>>")
+        def save(self, event):
+            self.text.event_generate("<<save-window>>")
+        def saveas(self, event):
+            self.text.event_generate("<<save-window-as-file>>")
+        def savecopy(self, event):
+            self.text.event_generate("<<save-copy-of-window-as-file>>")
+
+    text = Text(root)
+    text.pack()
+    text.focus_set()
+    editwin = MyEditWin(text)
+    IOBinding(editwin)
+
+if __name__ == "__main__":
+    from unittest import main
+    main('idlelib.idle_test.test_iomenu', verbosity=2, exit=False)
+
+    from idlelib.idle_test.htest import run
+    run(_io_binding)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/redirector.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/redirector.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ab34c5acfb22c6683882c80954ab503b40d9e47
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/redirector.py
@@ -0,0 +1,174 @@
+from tkinter import TclError
+
+class WidgetRedirector:
+    """Support for redirecting arbitrary widget subcommands.
+
+    Some Tk operations don't normally pass through tkinter. For example, if a
+    character is inserted into a Text widget by pressing a key, a default Tk
+    binding to the widget's 'insert' operation is activated, and the Tk library
+    processes the insert without calling back into tkinter.
+
+    Although a binding to <Key> could be made via tkinter, what we really want
+    to do is to hook the Tk 'insert' operation itself. For one thing, we want
+    a text.insert call in idle code to have the same effect as a key press.
+
+    When a widget is instantiated, a Tcl command is created whose name is the
+    same as the pathname widget._w. This command is used to invoke the various
+    widget operations, e.g. insert (for a Text widget). We are going to hook
+    this command and provide a facility ('register') to intercept the widget
+    operation. We will also intercept method calls on the tkinter class
+    instance that represents the tk widget.
+
+    In IDLE, WidgetRedirector is used in Percolator to intercept Text
+    commands. The function being registered provides access to the top
+    of a Percolator chain. At the bottom of the chain is a call to the
+    original Tk widget operation.
+    """
+    def __init__(self, widget):
+        '''Initialize attributes and setup redirection.
+
+        _operations: dict mapping operation name to new function.
+        widget: the widget whose tcl command is to be intercepted.
+        tk: widget.tk, a convenience attribute, probably not needed.
+        orig: new name of the original tcl command.
+
+        Since renaming to orig fails with TclError when orig already
+        exists, only one WidgetRedirector can exist for a given widget.
+ ''' + self._operations = {} + self.widget = widget # widget instance + self.tk = tk = widget.tk # widget's root + w = widget._w # widget's (full) Tk pathname + self.orig = w + "_orig" + # Rename the Tcl command within Tcl: + tk.call("rename", w, self.orig) + # Create a new Tcl command whose name is the widget's pathname, and + # whose action is to dispatch on the operation passed to the widget: + tk.createcommand(w, self.dispatch) + + def __repr__(self): + return "%s(%s<%s>)" % (self.__class__.__name__, + self.widget.__class__.__name__, + self.widget._w) + + def close(self): + "Unregister operations and revert redirection created by .__init__." + for operation in list(self._operations): + self.unregister(operation) + widget = self.widget + tk = widget.tk + w = widget._w + # Restore the original widget Tcl command. + tk.deletecommand(w) + tk.call("rename", self.orig, w) + del self.widget, self.tk # Should not be needed + # if instance is deleted after close, as in Percolator. + + def register(self, operation, function): + '''Return OriginalCommand(operation) after registering function. + + Registration adds an operation: function pair to ._operations. + It also adds a widget function attribute that masks the tkinter + class instance method. Method masking operates independently + from command dispatch. + + If a second function is registered for the same operation, the + first function is replaced in both places. + ''' + self._operations[operation] = function + setattr(self.widget, operation, function) + return OriginalCommand(self, operation) + + def unregister(self, operation): + '''Return the function for the operation, or None. + + Deleting the instance attribute unmasks the class attribute. + ''' + if operation in self._operations: + function = self._operations[operation] + del self._operations[operation] + try: + delattr(self.widget, operation) + except AttributeError: + pass + return function + else: + return None + + def dispatch(self, operation, *args): + '''Callback from Tcl which runs when the widget is referenced. + + If an operation has been registered in self._operations, apply the + associated function to the args passed into Tcl. Otherwise, pass the + operation through to Tk via the original Tcl function. + + Note that if a registered function is called, the operation is not + passed through to Tk. Apply the function returned by self.register() + to *args to accomplish that. For an example, see colorizer.py. + + ''' + m = self._operations.get(operation) + try: + if m: + return m(*args) + else: + return self.tk.call((self.orig, operation) + args) + except TclError: + return "" + + +class OriginalCommand: + '''Callable for original tk command that has been redirected. + + Returned by .register; can be used in the function registered. + redir = WidgetRedirector(text) + def my_insert(*args): + print("insert", args) + original_insert(*args) + original_insert = redir.register("insert", my_insert) + ''' + + def __init__(self, redir, operation): + '''Create .tk_call and .orig_and_operation for .__call__ method. + + .redir and .operation store the input args for __repr__. + .tk and .orig copy attributes of .redir (probably not needed). + ''' + self.redir = redir + self.operation = operation + self.tk = redir.tk # redundant with self.redir + self.orig = redir.orig # redundant with self.redir + # These two could be deleted after checking recipient code. 
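The whole interception mechanism in `__init__`/`close`/`dispatch` above boils down to two Tcl operations: `rename` stashes the widget's real command under a new name, and `createcommand` installs a Python callable under the old name. Stripped of the class, the hook looks like this sketch (names are illustrative):

    from tkinter import Tk, Text

    root = Tk()
    text = Text(root)
    w = text._w                       # the widget's Tcl command name
    orig = w + "_orig"

    text.tk.call("rename", w, orig)   # keep the real command under a new name

    def dispatch(operation, *args):
        print("op:", operation, args) # observe every widget operation
        return text.tk.call((orig, operation) + args)

    text.tk.createcommand(w, dispatch)
    text.insert("end", "hello")       # now routed through dispatch()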
+ self.tk_call = redir.tk.call + self.orig_and_operation = (redir.orig, operation) + + def __repr__(self): + return "%s(%r, %r)" % (self.__class__.__name__, + self.redir, self.operation) + + def __call__(self, *args): + return self.tk_call(self.orig_and_operation + args) + + +def _widget_redirector(parent): # htest # + from tkinter import Toplevel, Text + + top = Toplevel(parent) + top.title("Test WidgetRedirector") + x, y = map(int, parent.geometry().split('+')[1:]) + top.geometry("+%d+%d" % (x, y + 175)) + text = Text(top) + text.pack() + text.focus_set() + redir = WidgetRedirector(text) + def my_insert(*args): + print("insert", args) + original_insert(*args) + original_insert = redir.register("insert", my_insert) + +if __name__ == "__main__": + from unittest import main + main('idlelib.idle_test.test_redirector', verbosity=2, exit=False) + + from idlelib.idle_test.htest import run + run(_widget_redirector) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/run.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/run.py new file mode 100644 index 0000000000000000000000000000000000000000..07e9a2bf9ceeae1d356e404c58c2247cd43a688f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/run.py @@ -0,0 +1,623 @@ +""" idlelib.run + +Simplified, pyshell.ModifiedInterpreter spawns a subprocess with +f'''{sys.executable} -c "__import__('idlelib.run').run.main()"''' +'.run' is needed because __import__ returns idlelib, not idlelib.run. +""" +import functools +import io +import linecache +import queue +import sys +import textwrap +import time +import traceback +import _thread as thread +import threading +import warnings + +import idlelib # testing +from idlelib import autocomplete # AutoComplete, fetch_encodings +from idlelib import calltip # Calltip +from idlelib import debugger_r # start_debugger +from idlelib import debugobj_r # remote_object_tree_item +from idlelib import iomenu # encoding +from idlelib import rpc # multiple objects +from idlelib import stackviewer # StackTreeItem +import __main__ + +import tkinter # Use tcl and, if startup fails, messagebox. +if not hasattr(sys.modules['idlelib.run'], 'firstrun'): + # Undo modifications of tkinter by idlelib imports; see bpo-25507. + for mod in ('simpledialog', 'messagebox', 'font', + 'dialog', 'filedialog', 'commondialog', + 'ttk'): + delattr(tkinter, mod) + del sys.modules['tkinter.' + mod] + # Avoid AttributeError if run again; see bpo-37038. + sys.modules['idlelib.run'].firstrun = False + +LOCALHOST = '127.0.0.1' + + +def idle_formatwarning(message, category, filename, lineno, line=None): + """Format warnings the IDLE way.""" + + s = "\nWarning (from warnings module):\n" + s += ' File \"%s\", line %s\n' % (filename, lineno) + if line is None: + line = linecache.getline(filename, lineno) + line = line.strip() + if line: + s += " %s\n" % line + s += "%s: %s\n" % (category.__name__, message) + return s + +def idle_showwarning_subproc( + message, category, filename, lineno, file=None, line=None): + """Show Idle-format warning after replacing warnings.showwarning. + + The only difference is the formatter called. + """ + if file is None: + file = sys.stderr + try: + file.write(idle_formatwarning( + message, category, filename, lineno, line)) + except OSError: + pass # the file (probably stderr) is invalid - this warning gets lost. + +_warnings_showwarning = None + +def capture_warnings(capture): + "Replace warning.showwarning with idle_showwarning_subproc, or reverse." 
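The run.py module docstring's point about `__import__` returning the top-level package is easy to verify from a regular interpreter; `importlib.import_module` is the spelling that returns the submodule directly. (Note that importing `idlelib.run` executes its top-level side effects, such as creating a Tcl interpreter.)

    import importlib

    pkg = __import__('idlelib.run')
    print(pkg.__name__)        # 'idlelib' -- the top package
    print(pkg.run.__name__)    # 'idlelib.run' -- the submodule hangs off it

    mod = importlib.import_module('idlelib.run')
    print(mod.__name__)        # 'idlelib.run'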
+ + global _warnings_showwarning + if capture: + if _warnings_showwarning is None: + _warnings_showwarning = warnings.showwarning + warnings.showwarning = idle_showwarning_subproc + else: + if _warnings_showwarning is not None: + warnings.showwarning = _warnings_showwarning + _warnings_showwarning = None + +capture_warnings(True) +tcl = tkinter.Tcl() + +def handle_tk_events(tcl=tcl): + """Process any tk events that are ready to be dispatched if tkinter + has been imported, a tcl interpreter has been created and tk has been + loaded.""" + tcl.eval("update") + +# Thread shared globals: Establish a queue between a subthread (which handles +# the socket) and the main thread (which runs user code), plus global +# completion, exit and interruptable (the main thread) flags: + +exit_now = False +quitting = False +interruptable = False + +def main(del_exitfunc=False): + """Start the Python execution server in a subprocess + + In the Python subprocess, RPCServer is instantiated with handlerclass + MyHandler, which inherits register/unregister methods from RPCHandler via + the mix-in class SocketIO. + + When the RPCServer 'server' is instantiated, the TCPServer initialization + creates an instance of run.MyHandler and calls its handle() method. + handle() instantiates a run.Executive object, passing it a reference to the + MyHandler object. That reference is saved as attribute rpchandler of the + Executive instance. The Executive methods have access to the reference and + can pass it on to entities that they command + (e.g. debugger_r.Debugger.start_debugger()). The latter, in turn, can + call MyHandler(SocketIO) register/unregister methods via the reference to + register and unregister themselves. + + """ + global exit_now + global quitting + global no_exitfunc + no_exitfunc = del_exitfunc + #time.sleep(15) # test subprocess not responding + try: + assert(len(sys.argv) > 1) + port = int(sys.argv[-1]) + except: + print("IDLE Subprocess: no IP port passed in sys.argv.", + file=sys.__stderr__) + return + + capture_warnings(True) + sys.argv[:] = [""] + sockthread = threading.Thread(target=manage_socket, + name='SockThread', + args=((LOCALHOST, port),)) + sockthread.daemon = True + sockthread.start() + while 1: + try: + if exit_now: + try: + exit() + except KeyboardInterrupt: + # exiting but got an extra KBI? Try again! + continue + try: + request = rpc.request_queue.get(block=True, timeout=0.05) + except queue.Empty: + request = None + # Issue 32207: calling handle_tk_events here adds spurious + # queue.Empty traceback to event handling exceptions. 
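The `while 1` loop in `main` above multiplexes two duties: serve one queued RPC request if one arrives within 0.05 s, otherwise let Tk breathe via `handle_tk_events`. The skeleton of that poll loop, with a hypothetical queue and idle hook:

    import queue

    request_queue = queue.Queue()

    def serve(handle_idle, timeout=0.05):
        while True:
            try:
                request = request_queue.get(block=True, timeout=timeout)
            except queue.Empty:
                handle_idle()           # e.g. tcl.eval("update") above
                continue
            seq, (method, args, kwargs) = request
            method(*args, **kwargs)     # the real loop also queues a response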
+ if request: + seq, (method, args, kwargs) = request + ret = method(*args, **kwargs) + rpc.response_queue.put((seq, ret)) + else: + handle_tk_events() + except KeyboardInterrupt: + if quitting: + exit_now = True + continue + except SystemExit: + capture_warnings(False) + raise + except: + type, value, tb = sys.exc_info() + try: + print_exception() + rpc.response_queue.put((seq, None)) + except: + # Link didn't work, print same exception to __stderr__ + traceback.print_exception(type, value, tb, file=sys.__stderr__) + exit() + else: + continue + +def manage_socket(address): + for i in range(3): + time.sleep(i) + try: + server = MyRPCServer(address, MyHandler) + break + except OSError as err: + print("IDLE Subprocess: OSError: " + err.args[1] + + ", retrying....", file=sys.__stderr__) + socket_error = err + else: + print("IDLE Subprocess: Connection to " + "IDLE GUI failed, exiting.", file=sys.__stderr__) + show_socket_error(socket_error, address) + global exit_now + exit_now = True + return + server.handle_request() # A single request only + +def show_socket_error(err, address): + "Display socket error from manage_socket." + import tkinter + from tkinter.messagebox import showerror + root = tkinter.Tk() + fix_scaling(root) + root.withdraw() + showerror( + "Subprocess Connection Error", + f"IDLE's subprocess can't connect to {address[0]}:{address[1]}.\n" + f"Fatal OSError #{err.errno}: {err.strerror}.\n" + "See the 'Startup failure' section of the IDLE doc, online at\n" + "https://docs.python.org/3/library/idle.html#startup-failure", + parent=root) + root.destroy() + +def print_exception(): + import linecache + linecache.checkcache() + flush_stdout() + efile = sys.stderr + typ, val, tb = excinfo = sys.exc_info() + sys.last_type, sys.last_value, sys.last_traceback = excinfo + seen = set() + + def print_exc(typ, exc, tb): + seen.add(id(exc)) + context = exc.__context__ + cause = exc.__cause__ + if cause is not None and id(cause) not in seen: + print_exc(type(cause), cause, cause.__traceback__) + print("\nThe above exception was the direct cause " + "of the following exception:\n", file=efile) + elif (context is not None and + not exc.__suppress_context__ and + id(context) not in seen): + print_exc(type(context), context, context.__traceback__) + print("\nDuring handling of the above exception, " + "another exception occurred:\n", file=efile) + if tb: + tbe = traceback.extract_tb(tb) + print('Traceback (most recent call last):', file=efile) + exclude = ("run.py", "rpc.py", "threading.py", "queue.py", + "debugger_r.py", "bdb.py") + cleanup_traceback(tbe, exclude) + traceback.print_list(tbe, file=efile) + lines = traceback.format_exception_only(typ, exc) + for line in lines: + print(line, end='', file=efile) + + print_exc(typ, val, tb) + +def cleanup_traceback(tb, exclude): + "Remove excluded traces from beginning/end of tb; get cached lines" + orig_tb = tb[:] + while tb: + for rpcfile in exclude: + if tb[0][0].count(rpcfile): + break # found an exclude, break for: and delete tb[0] + else: + break # no excludes, have left RPC code, break while: + del tb[0] + while tb: + for rpcfile in exclude: + if tb[-1][0].count(rpcfile): + break + else: + break + del tb[-1] + if len(tb) == 0: + # exception was in IDLE internals, don't prune! 
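`print_exc` inside `print_exception` above recurses through `__cause__` and `__context__`, using a `seen` set of ids so exception cycles cannot recurse forever, and prints the oldest exception first. The traversal order in isolation:

    def chain(exc, seen=None):
        """Yield exceptions oldest-first, following cause/context links."""
        seen = seen if seen is not None else set()
        seen.add(id(exc))
        cause, context = exc.__cause__, exc.__context__
        if cause is not None and id(cause) not in seen:
            yield from chain(cause, seen)
        elif (context is not None and not exc.__suppress_context__
              and id(context) not in seen):
            yield from chain(context, seen)
        yield exc

    try:
        try:
            1 / 0
        except ZeroDivisionError as err:
            raise ValueError("bad input") from err
    except ValueError as exc:
        print([type(e).__name__ for e in chain(exc)])
        # ['ZeroDivisionError', 'ValueError']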
+        tb[:] = orig_tb[:]
+        print("** IDLE Internal Exception: ", file=sys.stderr)
+    rpchandler = rpc.objecttable['exec'].rpchandler
+    for i in range(len(tb)):
+        fn, ln, nm, line = tb[i]
+        if nm == '?':
+            nm = "-toplevel-"
+        if not line and fn.startswith("<pyshell#"):
+            line = rpchandler.remotecall('linecache', 'getline',
+                                              (fn, ln), {})
+        tb[i] = fn, ln, nm, line
+
+def flush_stdout():
+    """XXX How to do this now?"""
+
+def exit():
+    """Exit subprocess, possibly after first clearing exit functions.
+
+    If config-main.cfg/.def 'General' 'delete-exitfunc' is True, then any
+    functions registered with atexit will be removed before exiting.
+    (VPython support)
+
+    """
+    if no_exitfunc:
+        import atexit
+        atexit._clear()
+    capture_warnings(False)
+    sys.exit()
+
+
+def fix_scaling(root):
+    """Scale fonts on HiDPI displays."""
+    import tkinter.font
+    scaling = float(root.tk.call('tk', 'scaling'))
+    if scaling > 1.4:
+        for name in tkinter.font.names(root):
+            font = tkinter.font.Font(root=root, name=name, exists=True)
+            size = int(font['size'])
+            if size < 0:
+                font['size'] = round(-0.75*size)
+
+
+def fixdoc(fun, text):
+    tem = (fun.__doc__ + '\n\n') if fun.__doc__ is not None else ''
+    fun.__doc__ = tem + textwrap.fill(textwrap.dedent(text))
+
+RECURSIONLIMIT_DELTA = 30
+
+def install_recursionlimit_wrappers():
+    """Install wrappers to always add 30 to the recursion limit."""
+    # see: bpo-26806
+
+    @functools.wraps(sys.setrecursionlimit)
+    def setrecursionlimit(*args, **kwargs):
+        # mimic the original sys.setrecursionlimit()'s input handling
+        if kwargs:
+            raise TypeError(
+                "setrecursionlimit() takes no keyword arguments")
+        try:
+            limit, = args
+        except ValueError:
+            raise TypeError(f"setrecursionlimit() takes exactly one "
+                            f"argument ({len(args)} given)")
+        if not limit > 0:
+            raise ValueError(
+                "recursion limit must be greater or equal than 1")
+
+        return setrecursionlimit.__wrapped__(limit + RECURSIONLIMIT_DELTA)
+
+    fixdoc(setrecursionlimit, f"""\
+            This IDLE wrapper adds {RECURSIONLIMIT_DELTA} to prevent possible
+            uninterruptible loops.""")
+
+    @functools.wraps(sys.getrecursionlimit)
+    def getrecursionlimit():
+        return getrecursionlimit.__wrapped__() - RECURSIONLIMIT_DELTA
+
+    fixdoc(getrecursionlimit, f"""\
+            This IDLE wrapper subtracts {RECURSIONLIMIT_DELTA} to compensate
+            for the {RECURSIONLIMIT_DELTA} IDLE adds when setting the limit.""")
+
+    # add the delta to the default recursion limit, to compensate
+    sys.setrecursionlimit(sys.getrecursionlimit() + RECURSIONLIMIT_DELTA)
+
+    sys.setrecursionlimit = setrecursionlimit
+    sys.getrecursionlimit = getrecursionlimit
+
+
+def uninstall_recursionlimit_wrappers():
+    """Uninstall the recursion limit wrappers from the sys module.
+
+    IDLE only uses this for tests. Users can import run and call
+    this to remove the wrapping.
+    """
+    if (
+            getattr(sys.setrecursionlimit, '__wrapped__', None) and
+            getattr(sys.getrecursionlimit, '__wrapped__', None)
+    ):
+        sys.setrecursionlimit = sys.setrecursionlimit.__wrapped__
+        sys.getrecursionlimit = sys.getrecursionlimit.__wrapped__
+        sys.setrecursionlimit(sys.getrecursionlimit() - RECURSIONLIMIT_DELTA)
+
+
+class MyRPCServer(rpc.RPCServer):
+
+    def handle_error(self, request, client_address):
+        """Override RPCServer method for IDLE
+
+        Interrupt the MainThread and exit server if link is dropped.
+
+        """
+        global quitting
+        try:
+            raise
+        except SystemExit:
+            raise
+        except EOFError:
+            global exit_now
+            exit_now = True
+            thread.interrupt_main()
+        except:
+            erf = sys.__stderr__
+            print(textwrap.dedent(f"""
+            {'-'*40}
+            Unhandled exception in user code execution server!
+            Thread: {threading.current_thread().name}
+            IDLE Client Address: {client_address}
+            Request: {request!r}
+            """), file=erf)
+            traceback.print_exc(limit=-20, file=erf)
+            print(textwrap.dedent(f"""
+            *** Unrecoverable, server exiting!
+
+            Users should never see this message; it is likely transient.
+            If this recurs, report this with a copy of the message
+            and an explanation of how to make it repeat.
+ {'-'*40}"""), file=erf) + quitting = True + thread.interrupt_main() + + +# Pseudofiles for shell-remote communication (also used in pyshell) + +class StdioFile(io.TextIOBase): + + def __init__(self, shell, tags, encoding='utf-8', errors='strict'): + self.shell = shell + self.tags = tags + self._encoding = encoding + self._errors = errors + + @property + def encoding(self): + return self._encoding + + @property + def errors(self): + return self._errors + + @property + def name(self): + return '<%s>' % self.tags + + def isatty(self): + return True + + +class StdOutputFile(StdioFile): + + def writable(self): + return True + + def write(self, s): + if self.closed: + raise ValueError("write to closed file") + s = str.encode(s, self.encoding, self.errors).decode(self.encoding, self.errors) + return self.shell.write(s, self.tags) + + +class StdInputFile(StdioFile): + _line_buffer = '' + + def readable(self): + return True + + def read(self, size=-1): + if self.closed: + raise ValueError("read from closed file") + if size is None: + size = -1 + elif not isinstance(size, int): + raise TypeError('must be int, not ' + type(size).__name__) + result = self._line_buffer + self._line_buffer = '' + if size < 0: + while True: + line = self.shell.readline() + if not line: break + result += line + else: + while len(result) < size: + line = self.shell.readline() + if not line: break + result += line + self._line_buffer = result[size:] + result = result[:size] + return result + + def readline(self, size=-1): + if self.closed: + raise ValueError("read from closed file") + if size is None: + size = -1 + elif not isinstance(size, int): + raise TypeError('must be int, not ' + type(size).__name__) + line = self._line_buffer or self.shell.readline() + if size < 0: + size = len(line) + eol = line.find('\n', 0, size) + if eol >= 0: + size = eol + 1 + self._line_buffer = line[size:] + return line[:size] + + def close(self): + self.shell.close() + + +class MyHandler(rpc.RPCHandler): + + def handle(self): + """Override base method""" + executive = Executive(self) + self.register("exec", executive) + self.console = self.get_remote_proxy("console") + sys.stdin = StdInputFile(self.console, "stdin", + iomenu.encoding, iomenu.errors) + sys.stdout = StdOutputFile(self.console, "stdout", + iomenu.encoding, iomenu.errors) + sys.stderr = StdOutputFile(self.console, "stderr", + iomenu.encoding, "backslashreplace") + + sys.displayhook = rpc.displayhook + # page help() text to shell. + import pydoc # import must be done here to capture i/o binding + pydoc.pager = pydoc.plainpager + + # Keep a reference to stdin so that it won't try to exit IDLE if + # sys.stdin gets changed from within IDLE's shell. See issue17838. 
+        self._keep_stdin = sys.stdin
+
+        install_recursionlimit_wrappers()
+
+        self.interp = self.get_remote_proxy("interp")
+        rpc.RPCHandler.getresponse(self, myseq=None, wait=0.05)
+
+    def exithook(self):
+        "override SocketIO method - wait for MainThread to shut us down"
+        time.sleep(10)
+
+    def EOFhook(self):
+        "Override SocketIO method - terminate wait on callback and exit thread"
+        global quitting
+        quitting = True
+        thread.interrupt_main()
+
+    def decode_interrupthook(self):
+        "interrupt awakened thread"
+        global quitting
+        quitting = True
+        thread.interrupt_main()
+
+
+class Executive:
+
+    def __init__(self, rpchandler):
+        self.rpchandler = rpchandler
+        if idlelib.testing is False:
+            self.locals = __main__.__dict__
+            self.calltip = calltip.Calltip()
+            self.autocomplete = autocomplete.AutoComplete()
+        else:
+            self.locals = {}
+
+    def runcode(self, code):
+        global interruptable
+        try:
+            self.user_exc_info = None
+            interruptable = True
+            try:
+                exec(code, self.locals)
+            finally:
+                interruptable = False
+        except SystemExit as e:
+            if e.args:  # SystemExit called with an argument.
+                ob = e.args[0]
+                if not isinstance(ob, (type(None), int)):
+                    print('SystemExit: ' + str(ob), file=sys.stderr)
+            # Return to the interactive prompt.
+        except:
+            self.user_exc_info = sys.exc_info()  # For testing, hook, viewer.
+            if quitting:
+                exit()
+            if sys.excepthook is sys.__excepthook__:
+                print_exception()
+            else:
+                try:
+                    sys.excepthook(*self.user_exc_info)
+                except:
+                    self.user_exc_info = sys.exc_info()  # For testing.
+                    print_exception()
+            jit = self.rpchandler.console.getvar("<<toggle-jit-stack-viewer>>")
+            if jit:
+                self.rpchandler.interp.open_remote_stack_viewer()
+        else:
+            flush_stdout()
+
+    def interrupt_the_server(self):
+        if interruptable:
+            thread.interrupt_main()
+
+    def start_the_debugger(self, gui_adap_oid):
+        return debugger_r.start_debugger(self.rpchandler, gui_adap_oid)
+
+    def stop_the_debugger(self, idb_adap_oid):
+        "Unregister the Idb Adapter. Link objects and Idb then subject to GC"
+        self.rpchandler.unregister(idb_adap_oid)
+
+    def get_the_calltip(self, name):
+        return self.calltip.fetch_tip(name)
+
+    def get_the_completion_list(self, what, mode):
+        return self.autocomplete.fetch_completions(what, mode)
+
+    def stackviewer(self, flist_oid=None):
+        if self.user_exc_info:
+            typ, val, tb = self.user_exc_info
+        else:
+            return None
+        flist = None
+        if flist_oid is not None:
+            flist = self.rpchandler.get_remote_proxy(flist_oid)
+        while tb and tb.tb_frame.f_globals["__name__"] in ["rpc", "run"]:
+            tb = tb.tb_next
+        sys.last_type = typ
+        sys.last_value = val
+        item = stackviewer.StackTreeItem(flist, tb)
+        return debugobj_r.remote_object_tree_item(item)
+
+
+if __name__ == '__main__':
+    from unittest import main
+    main('idlelib.idle_test.test_run', verbosity=2)
+
+capture_warnings(False)  # Make sure turned off; see bpo-18081.
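The StdioFile classes above implement a simple contract: readline() pulls whole lines from the shell proxy, while read(size) may consume more than it returns, parking the excess in _line_buffer for the next call. A minimal sketch of that behavior follows; it assumes an environment where idlelib imports, and FakeShell is a hypothetical stand-in for the RPC console proxy that MyHandler normally wires in:

    from idlelib.run import StdInputFile

    class FakeShell:
        """Hypothetical stand-in for the remote console proxy."""
        def __init__(self, lines):
            self.lines = list(lines)
        def readline(self):
            # The real proxy blocks on the GUI; here we just pop canned input.
            return self.lines.pop(0) if self.lines else ''
        def close(self):
            pass

    f = StdInputFile(FakeShell(['spam\n', 'eggs\n']), 'stdin')
    assert f.read(2) == 'sp'        # 'am\n' stays behind in _line_buffer
    assert f.readline() == 'am\n'   # buffered remainder is served first
    assert f.readline() == 'eggs\n'

The buffering means a partial read() never loses the rest of a line fetched from the shell; the next read or readline continues exactly where the previous one stopped.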
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/window.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/window.py new file mode 100644 index 0000000000000000000000000000000000000000..460d5b67948dde2e1eee32fa548fdd289e3512de --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/window.py @@ -0,0 +1,98 @@ +from tkinter import Toplevel, TclError +import sys + + +class WindowList: + + def __init__(self): + self.dict = {} + self.callbacks = [] + + def add(self, window): + window.after_idle(self.call_callbacks) + self.dict[str(window)] = window + + def delete(self, window): + try: + del self.dict[str(window)] + except KeyError: + # Sometimes, destroy() is called twice + pass + self.call_callbacks() + + def add_windows_to_menu(self, menu): + list = [] + for key in self.dict: + window = self.dict[key] + try: + title = window.get_title() + except TclError: + continue + list.append((title, key, window)) + list.sort() + for title, key, window in list: + menu.add_command(label=title, command=window.wakeup) + + def register_callback(self, callback): + self.callbacks.append(callback) + + def unregister_callback(self, callback): + try: + self.callbacks.remove(callback) + except ValueError: + pass + + def call_callbacks(self): + for callback in self.callbacks: + try: + callback() + except: + t, v, tb = sys.exc_info() + print("warning: callback failed in WindowList", t, ":", v) + + +registry = WindowList() + +add_windows_to_menu = registry.add_windows_to_menu +register_callback = registry.register_callback +unregister_callback = registry.unregister_callback + + +class ListedToplevel(Toplevel): + + def __init__(self, master, **kw): + Toplevel.__init__(self, master, kw) + registry.add(self) + self.focused_widget = self + + def destroy(self): + registry.delete(self) + Toplevel.destroy(self) + # If this is Idle's last window then quit the mainloop + # (Needed for clean exit on Windows 98) + if not registry.dict: + self.quit() + + def update_windowlist_registry(self, window): + registry.call_callbacks() + + def get_title(self): + # Subclass can override + return self.wm_title() + + def wakeup(self): + try: + if self.wm_state() == "iconic": + self.wm_withdraw() + self.wm_deiconify() + self.tkraise() + self.focused_widget.focus_set() + except TclError: + # This can happen when the Window menu was torn off. + # Simply ignore it. + pass + + +if __name__ == "__main__": + from unittest import main + main('idlelib.idle_test.test_window', verbosity=2) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/zzdummy.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/zzdummy.py new file mode 100644 index 0000000000000000000000000000000000000000..1247e8f1cc052860b173e9959deaf134d4b6906a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/idlelib/zzdummy.py @@ -0,0 +1,73 @@ +"""Example extension, also used for testing. + +See extend.txt for more details on creating an extension. +See config-extension.def for configuring an extension. +""" + +from idlelib.config import idleConf +from functools import wraps + + +def format_selection(format_line): + "Apply a formatting function to all of the selected lines." 
+
+    @wraps(format_line)
+    def apply(self, event=None):
+        head, tail, chars, lines = self.formatter.get_region()
+        for pos in range(len(lines) - 1):
+            line = lines[pos]
+            lines[pos] = format_line(self, line)
+        self.formatter.set_region(head, tail, chars, lines)
+        return 'break'
+
+    return apply
+
+
+class ZzDummy:
+    """Prepend or remove initial text from selected lines."""
+
+    # Extend the format menu.
+    menudefs = [
+        ('format', [
+            ('Z in', '<<z-in>>'),
+            ('Z out', '<<z-out>>'),
+        ] )
+    ]
+
+    def __init__(self, editwin):
+        "Initialize the settings for this extension."
+        self.editwin = editwin
+        self.text = editwin.text
+        self.formatter = editwin.fregion
+
+    @classmethod
+    def reload(cls):
+        "Load class variables from config."
+        cls.ztext = idleConf.GetOption('extensions', 'ZzDummy', 'z-text')
+
+    @format_selection
+    def z_in_event(self, line):
+        """Insert text at the beginning of each selected line.
+
+        This is bound to the <<z-in>> virtual event when the extensions
+        are loaded.
+        """
+        return f'{self.ztext}{line}'
+
+    @format_selection
+    def z_out_event(self, line):
+        """Remove specific text from the beginning of each selected line.
+
+        This is bound to the <<z-out>> virtual event when the extensions
+        are loaded.
+        """
+        zlength = 0 if not line.startswith(self.ztext) else len(self.ztext)
+        return line[zlength:]
+
+
+ZzDummy.reload()
+
+
+if __name__ == "__main__":
+    import unittest
+    unittest.main('idlelib.idle_test.test_zzdummy', verbosity=2, exit=False)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/Grammar.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/Grammar.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f6c2fc5b1f323153c4b2eaa7ab9ddaa93eb73dd3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/Grammar.txt
@@ -0,0 +1,196 @@
+# Grammar for 2to3. This grammar supports Python 2.x and 3.x.
+
+# NOTE WELL: You should also follow all the steps listed at
+# https://devguide.python.org/grammar/
+
+# Start symbols for the grammar:
+#       file_input is a module or sequence of commands read from an input file;
+#       single_input is a single interactive statement;
+#       eval_input is the input for the eval() and input() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+file_input: (NEWLINE | stmt)* ENDMARKER
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef | async_funcdef)
+async_funcdef: ASYNC funcdef
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+parameters: '(' [typedargslist] ')'
+
+# The following definition for typedarglist is equivalent to this set of rules:
+#
+#     arguments = argument (',' argument)*
+#     argument = tfpdef ['=' test]
+#     kwargs = '**' tname [',']
+#     args = '*' [tname]
+#     kwonly_kwargs = (',' argument)* [',' [kwargs]]
+#     args_kwonly_kwargs = args kwonly_kwargs | kwargs
+#     poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]]
+#     typedargslist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs
+#     typedarglist = (arguments ',' '/' [',' [typedargslist_no_posonly]]) | (typedargslist_no_posonly)
+#
+# It needs to be fully expanded to allow our LL(1) parser to work on it.
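The fully expanded typedargslist follows. As a quick, illustrative check that the expansion really gives the LL(1) parser PEP 570 positional-only parameters, source using '/' should round-trip through lib2to3's public driver. This is a sketch, not part of the grammar file:

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar_no_print_statement,
                      convert=pytree.convert)
    src = "def f(pos, /, normal, *, kw): pass\n"
    assert str(d.parse_string(src)) == src  # parses and round-trips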
+
+typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [
+                     ',' [((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])*
+                            [',' ['**' tname [',']]] | '**' tname [','])
+                           | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])]
+                ] | ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])*
+                     [',' ['**' tname [',']]] | '**' tname [','])
+                     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+
+tname: NAME [':' test]
+tfpdef: tname | '(' tfplist ')'
+tfplist: tfpdef (',' tfpdef)* [',']
+
+# The following definition for varargslist is equivalent to this set of rules:
+#
+#     arguments = argument (',' argument )*
+#     argument = vfpdef ['=' test]
+#     kwargs = '**' vname [',']
+#     args = '*' [vname]
+#     kwonly_kwargs = (',' argument )* [',' [kwargs]]
+#     args_kwonly_kwargs = args kwonly_kwargs | kwargs
+#     poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]]
+#     vararglist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs
+#     varargslist = arguments ',' '/' [','[(vararglist_no_posonly)]] | (vararglist_no_posonly)
+#
+# It needs to be fully expanded to allow our LL(1) parser to work on it.
+
+varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [
+                     ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])*
+                     [',' ['**' vname [',']]] | '**' vname [','])
+                     | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+                     ]] | ((vfpdef ['=' test] ',')*
+                     ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]]| '**' vname [','])
+                     | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+
+vname: NAME
+vfpdef: vname | '(' vfplist ')'
+vfplist: vfpdef (',' vfpdef)* [',']
+
+stmt: simple_stmt | compound_stmt
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | exec_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
+           ('=' (yield_expr|testlist_star_expr))*)
+annassign: ':' test ['=' test]
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal and annotated assignments, additional restrictions enforced by the interpreter
+print_stmt: 'print' ( [ test (',' test)* [','] ] |
+                      '>>' test [ (',' test)+ [','] ] )
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+import_from: ('from' ('.'* dotted_name | '.'+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
+exec_stmt: 'exec' expr ['in' test [',' test]]
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
+if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+            ['else' ':' suite]
+            ['finally' ':' suite] |
+            'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)* ':' suite
+with_item: test ['as' expr]
+with_var: 'as' expr
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test [(',' | 'as') test]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+# Backward compatibility cruft to support:
+# [ x for x in lambda: True, lambda: False if x() ]
+# even while also allowing:
+# lambda x: 5 if x else 2
+# (But not a mix of the two)
+testlist_safe: old_test [(',' old_test)+ [',']]
+old_test: or_test | old_lambdef
+old_lambdef: 'lambda' [varargslist] ':' old_test
+
+namedexpr_test: test [':=' test]
+test: or_test ['if' or_test 'else' test] | lambdef
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: [AWAIT] atom trailer* ['**' factor]
+atom: ('(' [yield_expr|testlist_gexp] ')' |
+       '[' [listmaker] ']' |
+       '{' [dictsetmaker] '}' |
+       '`' testlist1 '`' |
+       NAME | NUMBER | STRING+ | '.' '.' '.')
+listmaker: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
+testlist_gexp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] )
+lambdef: 'lambda' [varargslist] ':' test
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictsetmaker: ( ((test ':' test | '**' expr)
+                 (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+                ((test | star_expr)
+                 (comp_for | (',' (test | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: argument (',' argument)* [',']
+
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
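Because those combinations are "blocked in ast.c" rather than in the grammar, the LL(1) parser itself accepts argument lists that CPython's compiler rejects; the single argument rule this comment introduces follows below. An illustrative sketch, using the same driver setup as above:

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar_no_print_statement,
                      convert=pytree.convert)
    # Parses here, although CPython itself raises SyntaxError because the
    # iterable unpacking follows the keyword unpacking.
    tree = d.parse_string("f(**kwargs, *args)\n")

This is deliberate: 2to3 only needs to round-trip existing source, so fine-grained legality checks stay in the compiler.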
+argument: ( test [comp_for] |
+            test ':=' test |
+            test '=' test |
+            '**' test |
+            '*' test )
+
+comp_iter: comp_for | comp_if
+comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [comp_iter]
+comp_if: 'if' old_test [comp_iter]
+
+testlist1: test (',' test)*
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/PatternGrammar.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/PatternGrammar.txt
new file mode 100644
index 0000000000000000000000000000000000000000..36bf8148273bd7a27b4a76817a776fe2ab234562
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/PatternGrammar.txt
@@ -0,0 +1,28 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# A grammar to describe tree matching patterns.
+# Not shown here:
+# - 'TOKEN' stands for any token (leaf node)
+# - 'any' stands for any node (leaf or interior)
+# With 'any' we can still specify the sub-structure.
+
+# The start symbol is 'Matcher'.
+
+Matcher: Alternatives ENDMARKER
+
+Alternatives: Alternative ('|' Alternative)*
+
+Alternative: (Unit | NegatedUnit)+
+
+Unit: [NAME '='] ( STRING [Repeater]
+                   | NAME [Details] [Repeater]
+                   | '(' Alternatives ')' [Repeater]
+                   | '[' Alternatives ']'
+                 )
+
+NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
+
+Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
+
+Details: '<' Alternatives '>'
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea30561d839798e1ef284fb70adc009fda12db16
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/__init__.py
@@ -0,0 +1 @@
+#empty
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/__main__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..80688baf27abfcaf99ea680787f17212747db5f6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/__main__.py
@@ -0,0 +1,4 @@
+import sys
+from .main import main
+
+sys.exit(main("lib2to3.fixes"))
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/btm_matcher.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/btm_matcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b78868038bda0733f03995e3f31c517c17bdc00
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/btm_matcher.py
@@ -0,0 +1,163 @@
+"""A bottom-up tree matching algorithm implementation meant to speed
+up 2to3's matching process. After the tree patterns are reduced to
+their rarest linear path, a linear Aho-Corasick automaton is
+created. The linear automaton traverses the linear paths from the
+leaves to the root of the AST and returns a set of nodes for further
+matching. This reduces significantly the number of candidate nodes."""
+
+__author__ = "George Boutsioukis <gboutsioukis@gmail.com>"
+
+import logging
+import itertools
+from collections import defaultdict
+
+from . import pytree
+from .btm_utils import reduce_tree
+
+class BMNode(object):
+    """Class for a node of the Aho-Corasick automaton used in matching"""
+    count = itertools.count()
+    def __init__(self):
+        self.transition_table = {}
+        self.fixers = []
+        self.id = next(BMNode.count)
+        self.content = ''
+
+class BottomMatcher(object):
+    """The main matcher class. After instantiating the patterns should
+    be added using the add_fixer method"""
+
+    def __init__(self):
+        self.match = set()
+        self.root = BMNode()
+        self.nodes = [self.root]
+        self.fixers = []
+        self.logger = logging.getLogger("RefactoringTool")
+
+    def add_fixer(self, fixer):
+        """Reduces a fixer's pattern tree to a linear path and adds it
+        to the matcher(a common Aho-Corasick automaton). The fixer is
+        appended on the matching states and called when they are
+        reached"""
+        self.fixers.append(fixer)
+        tree = reduce_tree(fixer.pattern_tree)
+        linear = tree.get_linear_subpattern()
+        match_nodes = self.add(linear, start=self.root)
+        for match_node in match_nodes:
+            match_node.fixers.append(fixer)
+
+    def add(self, pattern, start):
+        "Recursively adds a linear pattern to the AC automaton"
+        #print("adding pattern", pattern, "to", start)
+        if not pattern:
+            #print("empty pattern")
+            return [start]
+        if isinstance(pattern[0], tuple):
+            #alternatives
+            #print("alternatives")
+            match_nodes = []
+            for alternative in pattern[0]:
+                #add all alternatives, and add the rest of the pattern
+                #to each end node
+                end_nodes = self.add(alternative, start=start)
+                for end in end_nodes:
+                    match_nodes.extend(self.add(pattern[1:], end))
+            return match_nodes
+        else:
+            #single token
+            #not last
+            if pattern[0] not in start.transition_table:
+                #transition did not exist, create new
+                next_node = BMNode()
+                start.transition_table[pattern[0]] = next_node
+            else:
+                #transition exists already, follow
+                next_node = start.transition_table[pattern[0]]
+
+            if pattern[1:]:
+                end_nodes = self.add(pattern[1:], start=next_node)
+            else:
+                end_nodes = [next_node]
+            return end_nodes
+
+    def run(self, leaves):
+        """The main interface with the bottom matcher. The tree is
+        traversed from the bottom using the constructed
+        automaton. Nodes are only checked once as the tree is
+        retraversed. When the automaton fails, we give it one more
+        shot(in case the above tree matches as a whole with the
+        rejected leaf), then we break for the next leaf.
There is the + special case of multiple arguments(see code comments) where we + recheck the nodes + + Args: + The leaves of the AST tree to be matched + + Returns: + A dictionary of node matches with fixers as the keys + """ + current_ac_node = self.root + results = defaultdict(list) + for leaf in leaves: + current_ast_node = leaf + while current_ast_node: + current_ast_node.was_checked = True + for child in current_ast_node.children: + # multiple statements, recheck + if isinstance(child, pytree.Leaf) and child.value == ";": + current_ast_node.was_checked = False + break + if current_ast_node.type == 1: + #name + node_token = current_ast_node.value + else: + node_token = current_ast_node.type + + if node_token in current_ac_node.transition_table: + #token matches + current_ac_node = current_ac_node.transition_table[node_token] + for fixer in current_ac_node.fixers: + results[fixer].append(current_ast_node) + else: + #matching failed, reset automaton + current_ac_node = self.root + if (current_ast_node.parent is not None + and current_ast_node.parent.was_checked): + #the rest of the tree upwards has been checked, next leaf + break + + #recheck the rejected node once from the root + if node_token in current_ac_node.transition_table: + #token matches + current_ac_node = current_ac_node.transition_table[node_token] + for fixer in current_ac_node.fixers: + results[fixer].append(current_ast_node) + + current_ast_node = current_ast_node.parent + return results + + def print_ac(self): + "Prints a graphviz diagram of the BM automaton(for debugging)" + print("digraph g{") + def print_node(node): + for subnode_key in node.transition_table.keys(): + subnode = node.transition_table[subnode_key] + print("%d -> %d [label=%s] //%s" % + (node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers))) + if subnode_key == 1: + print(subnode.content) + print_node(subnode) + print_node(self.root) + print("}") + +# taken from pytree.py for debugging; only used by print_ac +_type_reprs = {} +def type_repr(type_num): + global _type_reprs + if not _type_reprs: + from .pygram import python_symbols + # printing tokens is possible but not as useful + # from .pgen2 import token // token.__dict__.items(): + for name, val in python_symbols.__dict__.items(): + if type(val) == int: _type_reprs[val] = name + return _type_reprs.setdefault(type_num, type_num) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/btm_utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/btm_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ff76ba340470729f7e44ec2ce19afc7cc1ca931a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/btm_utils.py @@ -0,0 +1,281 @@ +"Utility functions used by the btm_matcher module" + +from . 
import pytree +from .pgen2 import grammar, token +from .pygram import pattern_symbols, python_symbols + +syms = pattern_symbols +pysyms = python_symbols +tokens = grammar.opmap +token_labels = token + +TYPE_ANY = -1 +TYPE_ALTERNATIVES = -2 +TYPE_GROUP = -3 + +class MinNode(object): + """This class serves as an intermediate representation of the + pattern tree during the conversion to sets of leaf-to-root + subpatterns""" + + def __init__(self, type=None, name=None): + self.type = type + self.name = name + self.children = [] + self.leaf = False + self.parent = None + self.alternatives = [] + self.group = [] + + def __repr__(self): + return str(self.type) + ' ' + str(self.name) + + def leaf_to_root(self): + """Internal method. Returns a characteristic path of the + pattern tree. This method must be run for all leaves until the + linear subpatterns are merged into a single""" + node = self + subp = [] + while node: + if node.type == TYPE_ALTERNATIVES: + node.alternatives.append(subp) + if len(node.alternatives) == len(node.children): + #last alternative + subp = [tuple(node.alternatives)] + node.alternatives = [] + node = node.parent + continue + else: + node = node.parent + subp = None + break + + if node.type == TYPE_GROUP: + node.group.append(subp) + #probably should check the number of leaves + if len(node.group) == len(node.children): + subp = get_characteristic_subpattern(node.group) + node.group = [] + node = node.parent + continue + else: + node = node.parent + subp = None + break + + if node.type == token_labels.NAME and node.name: + #in case of type=name, use the name instead + subp.append(node.name) + else: + subp.append(node.type) + + node = node.parent + return subp + + def get_linear_subpattern(self): + """Drives the leaf_to_root method. The reason that + leaf_to_root must be run multiple times is because we need to + reject 'group' matches; for example the alternative form + (a | b c) creates a group [b c] that needs to be matched. Since + matching multiple linear patterns overcomes the automaton's + capabilities, leaf_to_root merges each group into a single + choice based on 'characteristic'ity, + + i.e. (a|b c) -> (a|b) if b more characteristic than c + + Returns: The most 'characteristic'(as defined by + get_characteristic_subpattern) path for the compiled pattern + tree. + """ + + for l in self.leaves(): + subp = l.leaf_to_root() + if subp: + return subp + + def leaves(self): + "Generator that returns the leaves of the tree" + for child in self.children: + yield from child.leaves() + if not self.children: + yield self + +def reduce_tree(node, parent=None): + """ + Internal function. Reduces a compiled pattern tree to an + intermediate representation suitable for feeding the + automaton. This also trims off any optional pattern elements(like + [a], a*). 
+ """ + + new_node = None + #switch on the node type + if node.type == syms.Matcher: + #skip + node = node.children[0] + + if node.type == syms.Alternatives : + #2 cases + if len(node.children) <= 2: + #just a single 'Alternative', skip this node + new_node = reduce_tree(node.children[0], parent) + else: + #real alternatives + new_node = MinNode(type=TYPE_ALTERNATIVES) + #skip odd children('|' tokens) + for child in node.children: + if node.children.index(child)%2: + continue + reduced = reduce_tree(child, new_node) + if reduced is not None: + new_node.children.append(reduced) + elif node.type == syms.Alternative: + if len(node.children) > 1: + + new_node = MinNode(type=TYPE_GROUP) + for child in node.children: + reduced = reduce_tree(child, new_node) + if reduced: + new_node.children.append(reduced) + if not new_node.children: + # delete the group if all of the children were reduced to None + new_node = None + + else: + new_node = reduce_tree(node.children[0], parent) + + elif node.type == syms.Unit: + if (isinstance(node.children[0], pytree.Leaf) and + node.children[0].value == '('): + #skip parentheses + return reduce_tree(node.children[1], parent) + if ((isinstance(node.children[0], pytree.Leaf) and + node.children[0].value == '[') + or + (len(node.children)>1 and + hasattr(node.children[1], "value") and + node.children[1].value == '[')): + #skip whole unit if its optional + return None + + leaf = True + details_node = None + alternatives_node = None + has_repeater = False + repeater_node = None + has_variable_name = False + + for child in node.children: + if child.type == syms.Details: + leaf = False + details_node = child + elif child.type == syms.Repeater: + has_repeater = True + repeater_node = child + elif child.type == syms.Alternatives: + alternatives_node = child + if hasattr(child, 'value') and child.value == '=': # variable name + has_variable_name = True + + #skip variable name + if has_variable_name: + #skip variable name, '=' + name_leaf = node.children[2] + if hasattr(name_leaf, 'value') and name_leaf.value == '(': + # skip parenthesis + name_leaf = node.children[3] + else: + name_leaf = node.children[0] + + #set node type + if name_leaf.type == token_labels.NAME: + #(python) non-name or wildcard + if name_leaf.value == 'any': + new_node = MinNode(type=TYPE_ANY) + else: + if hasattr(token_labels, name_leaf.value): + new_node = MinNode(type=getattr(token_labels, name_leaf.value)) + else: + new_node = MinNode(type=getattr(pysyms, name_leaf.value)) + + elif name_leaf.type == token_labels.STRING: + #(python) name or character; remove the apostrophes from + #the string value + name = name_leaf.value.strip("'") + if name in tokens: + new_node = MinNode(type=tokens[name]) + else: + new_node = MinNode(type=token_labels.NAME, name=name) + elif name_leaf.type == syms.Alternatives: + new_node = reduce_tree(alternatives_node, parent) + + #handle repeaters + if has_repeater: + if repeater_node.children[0].value == '*': + #reduce to None + new_node = None + elif repeater_node.children[0].value == '+': + #reduce to a single occurrence i.e. 
do nothing + pass + else: + #TODO: handle {min, max} repeaters + raise NotImplementedError + pass + + #add children + if details_node and new_node is not None: + for child in details_node.children[1:-1]: + #skip '<', '>' markers + reduced = reduce_tree(child, new_node) + if reduced is not None: + new_node.children.append(reduced) + if new_node: + new_node.parent = parent + return new_node + + +def get_characteristic_subpattern(subpatterns): + """Picks the most characteristic from a list of linear patterns + Current order used is: + names > common_names > common_chars + """ + if not isinstance(subpatterns, list): + return subpatterns + if len(subpatterns)==1: + return subpatterns[0] + + # first pick out the ones containing variable names + subpatterns_with_names = [] + subpatterns_with_common_names = [] + common_names = ['in', 'for', 'if' , 'not', 'None'] + subpatterns_with_common_chars = [] + common_chars = "[]().,:" + for subpattern in subpatterns: + if any(rec_test(subpattern, lambda x: type(x) is str)): + if any(rec_test(subpattern, + lambda x: isinstance(x, str) and x in common_chars)): + subpatterns_with_common_chars.append(subpattern) + elif any(rec_test(subpattern, + lambda x: isinstance(x, str) and x in common_names)): + subpatterns_with_common_names.append(subpattern) + + else: + subpatterns_with_names.append(subpattern) + + if subpatterns_with_names: + subpatterns = subpatterns_with_names + elif subpatterns_with_common_names: + subpatterns = subpatterns_with_common_names + elif subpatterns_with_common_chars: + subpatterns = subpatterns_with_common_chars + # of the remaining subpatterns pick out the longest one + return max(subpatterns, key=len) + +def rec_test(sequence, test_func): + """Tests test_func on all items of sequence and items of included + sub-iterables""" + for x in sequence: + if isinstance(x, (list, tuple)): + yield from rec_test(x, test_func) + else: + yield test_func(x) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/fixer_base.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/fixer_base.py new file mode 100644 index 0000000000000000000000000000000000000000..df581a4deab9e93aade8591bf13f949050d7e50d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/fixer_base.py @@ -0,0 +1,186 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Base class for fixers (optional, but recommended).""" + +# Python imports +import itertools + +# Local imports +from .patcomp import PatternCompiler +from . import pygram +from .fixer_util import does_tree_import + +class BaseFix(object): + + """Optional base class for fixers. + + The subclass name must be FixFooBar where FooBar is the result of + removing underscores and capitalizing the words of the fix name. + For example, the class name for a fixer named 'has_key' should be + FixHasKey. + """ + + PATTERN = None # Most subclasses should override with a string literal + pattern = None # Compiled pattern, set by compile_pattern() + pattern_tree = None # Tree representation of the pattern + options = None # Options object passed to initializer + filename = None # The filename (set by set_filename) + numbers = itertools.count(1) # For new_name() + used_names = set() # A set of all used NAMEs + order = "post" # Does the fixer prefer pre- or post-order traversal + explicit = False # Is this ignored by refactor.py -f all? 
+ run_order = 5 # Fixers will be sorted by run order before execution + # Lower numbers will be run first. + _accept_type = None # [Advanced and not public] This tells RefactoringTool + # which node type to accept when there's not a pattern. + + keep_line_order = False # For the bottom matcher: match with the + # original line order + BM_compatible = False # Compatibility with the bottom matching + # module; every fixer should set this + # manually + + # Shortcut for access to Python grammar symbols + syms = pygram.python_symbols + + def __init__(self, options, log): + """Initializer. Subclass may override. + + Args: + options: a dict containing the options passed to RefactoringTool + that could be used to customize the fixer through the command line. + log: a list to append warnings and other messages to. + """ + self.options = options + self.log = log + self.compile_pattern() + + def compile_pattern(self): + """Compiles self.PATTERN into self.pattern. + + Subclass may override if it doesn't want to use + self.{pattern,PATTERN} in .match(). + """ + if self.PATTERN is not None: + PC = PatternCompiler() + self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN, + with_tree=True) + + def set_filename(self, filename): + """Set the filename. + + The main refactoring tool should call this. + """ + self.filename = filename + + def match(self, node): + """Returns match for a given parse tree node. + + Should return a true or false object (not necessarily a bool). + It may return a non-empty dict of matching sub-nodes as + returned by a matching pattern. + + Subclass may override. + """ + results = {"node": node} + return self.pattern.match(node, results) and results + + def transform(self, node, results): + """Returns the transformation for a given parse tree node. + + Args: + node: the root of the parse tree that matched the fixer. + results: a dict mapping symbolic names to part of the match. + + Returns: + None, or a node that is a modified copy of the + argument node. The node argument may also be modified in-place to + effect the same change. + + Subclass *must* override. + """ + raise NotImplementedError() + + def new_name(self, template="xxx_todo_changeme"): + """Return a string suitable for use as an identifier + + The new name is guaranteed not to conflict with other identifiers. + """ + name = template + while name in self.used_names: + name = template + str(next(self.numbers)) + self.used_names.add(name) + return name + + def log_message(self, message): + if self.first_log: + self.first_log = False + self.log.append("### In file %s ###" % self.filename) + self.log.append(message) + + def cannot_convert(self, node, reason=None): + """Warn the user that a given chunk of code is not valid Python 3, + but that it cannot be converted automatically. + + First argument is the top-level node for the code in question. + Optional second argument is why it can't be converted. + """ + lineno = node.get_lineno() + for_output = node.clone() + for_output.prefix = "" + msg = "Line %d: could not convert: %s" + self.log_message(msg % (lineno, for_output)) + if reason: + self.log_message(reason) + + def warning(self, node, reason): + """Used for warning the user about possible uncertainty in the + translation. + + First argument is the top-level node for the code in question. + Optional second argument is why it can't be converted. 
+ """ + lineno = node.get_lineno() + self.log_message("Line %d: %s" % (lineno, reason)) + + def start_tree(self, tree, filename): + """Some fixers need to maintain tree-wide state. + This method is called once, at the start of tree fix-up. + + tree - the root node of the tree to be processed. + filename - the name of the file the tree came from. + """ + self.used_names = tree.used_names + self.set_filename(filename) + self.numbers = itertools.count(1) + self.first_log = True + + def finish_tree(self, tree, filename): + """Some fixers need to maintain tree-wide state. + This method is called once, at the conclusion of tree fix-up. + + tree - the root node of the tree to be processed. + filename - the name of the file the tree came from. + """ + pass + + +class ConditionalFix(BaseFix): + """ Base class for fixers which not execute if an import is found. """ + + # This is the name of the import which, if found, will cause the test to be skipped + skip_on = None + + def start_tree(self, *args): + super(ConditionalFix, self).start_tree(*args) + self._should_skip = None + + def should_skip(self, node): + if self._should_skip is not None: + return self._should_skip + pkg = self.skip_on.split(".") + name = pkg[-1] + pkg = ".".join(pkg[:-1]) + self._should_skip = does_tree_import(pkg, name, node) + return self._should_skip diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/fixer_util.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/fixer_util.py new file mode 100644 index 0000000000000000000000000000000000000000..babe6cb3f662a961b2f89d3a9023344327afa036 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/fixer_util.py @@ -0,0 +1,453 @@ +"""Utility functions, node construction macros, etc.""" +# Author: Collin Winter + +# Local imports +from .pgen2 import token +from .pytree import Leaf, Node +from .pygram import python_symbols as syms +from . import patcomp + + +########################################################### +### Common node-construction "macros" +########################################################### + +def KeywordArg(keyword, value): + return Node(syms.argument, + [keyword, Leaf(token.EQUAL, "="), value]) + +def LParen(): + return Leaf(token.LPAR, "(") + +def RParen(): + return Leaf(token.RPAR, ")") + +def Assign(target, source): + """Build an assignment statement""" + if not isinstance(target, list): + target = [target] + if not isinstance(source, list): + source.prefix = " " + source = [source] + + return Node(syms.atom, + target + [Leaf(token.EQUAL, "=", prefix=" ")] + source) + +def Name(name, prefix=None): + """Return a NAME leaf""" + return Leaf(token.NAME, name, prefix=prefix) + +def Attr(obj, attr): + """A node tuple for obj.attr""" + return [obj, Node(syms.trailer, [Dot(), attr])] + +def Comma(): + """A comma leaf""" + return Leaf(token.COMMA, ",") + +def Dot(): + """A period (.) 
leaf""" + return Leaf(token.DOT, ".") + +def ArgList(args, lparen=LParen(), rparen=RParen()): + """A parenthesised argument list, used by Call()""" + node = Node(syms.trailer, [lparen.clone(), rparen.clone()]) + if args: + node.insert_child(1, Node(syms.arglist, args)) + return node + +def Call(func_name, args=None, prefix=None): + """A function call""" + node = Node(syms.power, [func_name, ArgList(args)]) + if prefix is not None: + node.prefix = prefix + return node + +def Newline(): + """A newline literal""" + return Leaf(token.NEWLINE, "\n") + +def BlankLine(): + """A blank line""" + return Leaf(token.NEWLINE, "") + +def Number(n, prefix=None): + return Leaf(token.NUMBER, n, prefix=prefix) + +def Subscript(index_node): + """A numeric or string subscript""" + return Node(syms.trailer, [Leaf(token.LBRACE, "["), + index_node, + Leaf(token.RBRACE, "]")]) + +def String(string, prefix=None): + """A string leaf""" + return Leaf(token.STRING, string, prefix=prefix) + +def ListComp(xp, fp, it, test=None): + """A list comprehension of the form [xp for fp in it if test]. + + If test is None, the "if test" part is omitted. + """ + xp.prefix = "" + fp.prefix = " " + it.prefix = " " + for_leaf = Leaf(token.NAME, "for") + for_leaf.prefix = " " + in_leaf = Leaf(token.NAME, "in") + in_leaf.prefix = " " + inner_args = [for_leaf, fp, in_leaf, it] + if test: + test.prefix = " " + if_leaf = Leaf(token.NAME, "if") + if_leaf.prefix = " " + inner_args.append(Node(syms.comp_if, [if_leaf, test])) + inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)]) + return Node(syms.atom, + [Leaf(token.LBRACE, "["), + inner, + Leaf(token.RBRACE, "]")]) + +def FromImport(package_name, name_leafs): + """ Return an import statement in the form: + from package import name_leafs""" + # XXX: May not handle dotted imports properly (eg, package_name='foo.bar') + #assert package_name == '.' or '.' not in package_name, "FromImport has "\ + # "not been tested with dotted package names -- use at your own "\ + # "peril!" 
+ + for leaf in name_leafs: + # Pull the leaves out of their old tree + leaf.remove() + + children = [Leaf(token.NAME, "from"), + Leaf(token.NAME, package_name, prefix=" "), + Leaf(token.NAME, "import", prefix=" "), + Node(syms.import_as_names, name_leafs)] + imp = Node(syms.import_from, children) + return imp + +def ImportAndCall(node, results, names): + """Returns an import statement and calls a method + of the module: + + import module + module.name()""" + obj = results["obj"].clone() + if obj.type == syms.arglist: + newarglist = obj.clone() + else: + newarglist = Node(syms.arglist, [obj.clone()]) + after = results["after"] + if after: + after = [n.clone() for n in after] + new = Node(syms.power, + Attr(Name(names[0]), Name(names[1])) + + [Node(syms.trailer, + [results["lpar"].clone(), + newarglist, + results["rpar"].clone()])] + after) + new.prefix = node.prefix + return new + + +########################################################### +### Determine whether a node represents a given literal +########################################################### + +def is_tuple(node): + """Does the node represent a tuple literal?""" + if isinstance(node, Node) and node.children == [LParen(), RParen()]: + return True + return (isinstance(node, Node) + and len(node.children) == 3 + and isinstance(node.children[0], Leaf) + and isinstance(node.children[1], Node) + and isinstance(node.children[2], Leaf) + and node.children[0].value == "(" + and node.children[2].value == ")") + +def is_list(node): + """Does the node represent a list literal?""" + return (isinstance(node, Node) + and len(node.children) > 1 + and isinstance(node.children[0], Leaf) + and isinstance(node.children[-1], Leaf) + and node.children[0].value == "[" + and node.children[-1].value == "]") + + +########################################################### +### Misc +########################################################### + +def parenthesize(node): + return Node(syms.atom, [LParen(), node, RParen()]) + + +consuming_calls = {"sorted", "list", "set", "any", "all", "tuple", "sum", + "min", "max", "enumerate"} + +def attr_chain(obj, attr): + """Follow an attribute chain. + + If you have a chain of objects where a.foo -> b, b.foo-> c, etc, + use this to iterate over all objects in the chain. Iteration is + terminated by getattr(x, attr) is None. + + Args: + obj: the starting object + attr: the name of the chaining attribute + + Yields: + Each successive object in the chain. + """ + next = getattr(obj, attr) + while next: + yield next + next = getattr(next, attr) + +p0 = """for_stmt< 'for' any 'in' node=any ':' any* > + | comp_for< 'for' any 'in' node=any any* > + """ +p1 = """ +power< + ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' | + 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) ) + trailer< '(' node=any ')' > + any* +> +""" +p2 = """ +power< + ( 'sorted' | 'enumerate' ) + trailer< '(' arglist ')' > + any* +> +""" +pats_built = False +def in_special_context(node): + """ Returns true if node is in an environment where all that is required + of it is being iterable (ie, it doesn't matter if it returns a list + or an iterator). + See test_map_nochange in test_fixers.py for some examples and tests. 
+ """ + global p0, p1, p2, pats_built + if not pats_built: + p0 = patcomp.compile_pattern(p0) + p1 = patcomp.compile_pattern(p1) + p2 = patcomp.compile_pattern(p2) + pats_built = True + patterns = [p0, p1, p2] + for pattern, parent in zip(patterns, attr_chain(node, "parent")): + results = {} + if pattern.match(parent, results) and results["node"] is node: + return True + return False + +def is_probably_builtin(node): + """ + Check that something isn't an attribute or function name etc. + """ + prev = node.prev_sibling + if prev is not None and prev.type == token.DOT: + # Attribute lookup. + return False + parent = node.parent + if parent.type in (syms.funcdef, syms.classdef): + return False + if parent.type == syms.expr_stmt and parent.children[0] is node: + # Assignment. + return False + if parent.type == syms.parameters or \ + (parent.type == syms.typedargslist and ( + (prev is not None and prev.type == token.COMMA) or + parent.children[0] is node + )): + # The name of an argument. + return False + return True + +def find_indentation(node): + """Find the indentation of *node*.""" + while node is not None: + if node.type == syms.suite and len(node.children) > 2: + indent = node.children[1] + if indent.type == token.INDENT: + return indent.value + node = node.parent + return "" + +########################################################### +### The following functions are to find bindings in a suite +########################################################### + +def make_suite(node): + if node.type == syms.suite: + return node + node = node.clone() + parent, node.parent = node.parent, None + suite = Node(syms.suite, [node]) + suite.parent = parent + return suite + +def find_root(node): + """Find the top level namespace.""" + # Scamper up to the top level namespace + while node.type != syms.file_input: + node = node.parent + if not node: + raise ValueError("root found before file_input node was found.") + return node + +def does_tree_import(package, name, node): + """ Returns true if name is imported from package at the + top level of the tree which node belongs to. + To cover the case of an import like 'import foo', use + None for the package and 'foo' for the name. """ + binding = find_binding(name, find_root(node), package) + return bool(binding) + +def is_import(node): + """Returns true if the node is an import statement.""" + return node.type in (syms.import_name, syms.import_from) + +def touch_import(package, name, node): + """ Works like `does_tree_import` but adds an import statement + if it was not imported. """ + def is_import_stmt(node): + return (node.type == syms.simple_stmt and node.children and + is_import(node.children[0])) + + root = find_root(node) + + if does_tree_import(package, name, root): + return + + # figure out where to insert the new import. First try to find + # the first import and then skip to the last one. + insert_pos = offset = 0 + for idx, node in enumerate(root.children): + if not is_import_stmt(node): + continue + for offset, node2 in enumerate(root.children[idx:]): + if not is_import_stmt(node2): + break + insert_pos = idx + offset + break + + # if there are no imports where we can insert, find the docstring. 
+ # if that also fails, we stick to the beginning of the file + if insert_pos == 0: + for idx, node in enumerate(root.children): + if (node.type == syms.simple_stmt and node.children and + node.children[0].type == token.STRING): + insert_pos = idx + 1 + break + + if package is None: + import_ = Node(syms.import_name, [ + Leaf(token.NAME, "import"), + Leaf(token.NAME, name, prefix=" ") + ]) + else: + import_ = FromImport(package, [Leaf(token.NAME, name, prefix=" ")]) + + children = [import_, Newline()] + root.insert_child(insert_pos, Node(syms.simple_stmt, children)) + + +_def_syms = {syms.classdef, syms.funcdef} +def find_binding(name, node, package=None): + """ Returns the node which binds variable name, otherwise None. + If optional argument package is supplied, only imports will + be returned. + See test cases for examples.""" + for child in node.children: + ret = None + if child.type == syms.for_stmt: + if _find(name, child.children[1]): + return child + n = find_binding(name, make_suite(child.children[-1]), package) + if n: ret = n + elif child.type in (syms.if_stmt, syms.while_stmt): + n = find_binding(name, make_suite(child.children[-1]), package) + if n: ret = n + elif child.type == syms.try_stmt: + n = find_binding(name, make_suite(child.children[2]), package) + if n: + ret = n + else: + for i, kid in enumerate(child.children[3:]): + if kid.type == token.COLON and kid.value == ":": + # i+3 is the colon, i+4 is the suite + n = find_binding(name, make_suite(child.children[i+4]), package) + if n: ret = n + elif child.type in _def_syms and child.children[1].value == name: + ret = child + elif _is_import_binding(child, name, package): + ret = child + elif child.type == syms.simple_stmt: + ret = find_binding(name, child, package) + elif child.type == syms.expr_stmt: + if _find(name, child.children[0]): + ret = child + + if ret: + if not package: + return ret + if is_import(ret): + return ret + return None + +_block_syms = {syms.funcdef, syms.classdef, syms.trailer} +def _find(name, node): + nodes = [node] + while nodes: + node = nodes.pop() + if node.type > 256 and node.type not in _block_syms: + nodes.extend(node.children) + elif node.type == token.NAME and node.value == name: + return node + return None + +def _is_import_binding(node, name, package=None): + """ Will reuturn node if node will import name, or node + will import * from package. None is returned otherwise. + See test cases for examples. """ + + if node.type == syms.import_name and not package: + imp = node.children[1] + if imp.type == syms.dotted_as_names: + for child in imp.children: + if child.type == syms.dotted_as_name: + if child.children[2].value == name: + return node + elif child.type == token.NAME and child.value == name: + return node + elif imp.type == syms.dotted_as_name: + last = imp.children[-1] + if last.type == token.NAME and last.value == name: + return node + elif imp.type == token.NAME and imp.value == name: + return node + elif node.type == syms.import_from: + # str(...) is used to make life easier here, because + # from a.b import parses to ['import', ['a', '.', 'b'], ...] 
+ if package and str(node.children[1]).strip() != package: + return None + n = node.children[3] + if package and _find("as", n): + # See test_from_import_as for explanation + return None + elif n.type == syms.import_as_names and _find(name, n): + return node + elif n.type == syms.import_as_name: + child = n.children[2] + if child.type == token.NAME and child.value == name: + return node + elif n.type == token.NAME and n.value == name: + return node + elif package and n.type == token.STAR: + return node + return None diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/main.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/main.py new file mode 100644 index 0000000000000000000000000000000000000000..d6b708848ede1ac6c2d6dea5ebaca619d80fc170 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/main.py @@ -0,0 +1,268 @@ +""" +Main program for 2to3. +""" + +from __future__ import with_statement, print_function + +import sys +import os +import difflib +import logging +import shutil +import optparse + +from . import refactor + + +def diff_texts(a, b, filename): + """Return a unified diff of two strings.""" + a = a.splitlines() + b = b.splitlines() + return difflib.unified_diff(a, b, filename, filename, + "(original)", "(refactored)", + lineterm="") + + +class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool): + """ + A refactoring tool that can avoid overwriting its input files. + Prints output to stdout. + + Output files can optionally be written to a different directory and or + have an extra file suffix appended to their name for use in situations + where you do not want to replace the input files. + """ + + def __init__(self, fixers, options, explicit, nobackups, show_diffs, + input_base_dir='', output_dir='', append_suffix=''): + """ + Args: + fixers: A list of fixers to import. + options: A dict with RefactoringTool configuration. + explicit: A list of fixers to run even if they are explicit. + nobackups: If true no backup '.bak' files will be created for those + files that are being refactored. + show_diffs: Should diffs of the refactoring be printed to stdout? + input_base_dir: The base directory for all input files. This class + will strip this path prefix off of filenames before substituting + it with output_dir. Only meaningful if output_dir is supplied. + All files processed by refactor() must start with this path. + output_dir: If supplied, all converted files will be written into + this directory tree instead of input_base_dir. + append_suffix: If supplied, all files output by this tool will have + this appended to their filename. Useful for changing .py to + .py3 for example by passing append_suffix='3'. 
+ """ + self.nobackups = nobackups + self.show_diffs = show_diffs + if input_base_dir and not input_base_dir.endswith(os.sep): + input_base_dir += os.sep + self._input_base_dir = input_base_dir + self._output_dir = output_dir + self._append_suffix = append_suffix + super(StdoutRefactoringTool, self).__init__(fixers, options, explicit) + + def log_error(self, msg, *args, **kwargs): + self.errors.append((msg, args, kwargs)) + self.logger.error(msg, *args, **kwargs) + + def write_file(self, new_text, filename, old_text, encoding): + orig_filename = filename + if self._output_dir: + if filename.startswith(self._input_base_dir): + filename = os.path.join(self._output_dir, + filename[len(self._input_base_dir):]) + else: + raise ValueError('filename %s does not start with the ' + 'input_base_dir %s' % ( + filename, self._input_base_dir)) + if self._append_suffix: + filename += self._append_suffix + if orig_filename != filename: + output_dir = os.path.dirname(filename) + if not os.path.isdir(output_dir) and output_dir: + os.makedirs(output_dir) + self.log_message('Writing converted %s to %s.', orig_filename, + filename) + if not self.nobackups: + # Make backup + backup = filename + ".bak" + if os.path.lexists(backup): + try: + os.remove(backup) + except OSError as err: + self.log_message("Can't remove backup %s", backup) + try: + os.rename(filename, backup) + except OSError as err: + self.log_message("Can't rename %s to %s", filename, backup) + # Actually write the new file + write = super(StdoutRefactoringTool, self).write_file + write(new_text, filename, old_text, encoding) + if not self.nobackups: + shutil.copymode(backup, filename) + if orig_filename != filename: + # Preserve the file mode in the new output directory. + shutil.copymode(orig_filename, filename) + + def print_output(self, old, new, filename, equal): + if equal: + self.log_message("No changes to %s", filename) + else: + self.log_message("Refactored %s", filename) + if self.show_diffs: + diff_lines = diff_texts(old, new, filename) + try: + if self.output_lock is not None: + with self.output_lock: + for line in diff_lines: + print(line) + sys.stdout.flush() + else: + for line in diff_lines: + print(line) + except UnicodeEncodeError: + warn("couldn't encode %s's diff for your terminal" % + (filename,)) + return + +def warn(msg): + print("WARNING: %s" % (msg,), file=sys.stderr) + + +def main(fixer_pkg, args=None): + """Main program. + + Args: + fixer_pkg: the name of a package where the fixers are located. + args: optional; a list of command line arguments. If omitted, + sys.argv[1:] is used. + + Returns a suggested exit status (0, 1, 2). 
+ """ + # Set up option parser + parser = optparse.OptionParser(usage="2to3 [options] file|dir ...") + parser.add_option("-d", "--doctests_only", action="store_true", + help="Fix up doctests only") + parser.add_option("-f", "--fix", action="append", default=[], + help="Each FIX specifies a transformation; default: all") + parser.add_option("-j", "--processes", action="store", default=1, + type="int", help="Run 2to3 concurrently") + parser.add_option("-x", "--nofix", action="append", default=[], + help="Prevent a transformation from being run") + parser.add_option("-l", "--list-fixes", action="store_true", + help="List available transformations") + parser.add_option("-p", "--print-function", action="store_true", + help="Modify the grammar so that print() is a function") + parser.add_option("-v", "--verbose", action="store_true", + help="More verbose logging") + parser.add_option("--no-diffs", action="store_true", + help="Don't show diffs of the refactoring") + parser.add_option("-w", "--write", action="store_true", + help="Write back modified files") + parser.add_option("-n", "--nobackups", action="store_true", default=False, + help="Don't write backups for modified files") + parser.add_option("-o", "--output-dir", action="store", type="str", + default="", help="Put output files in this directory " + "instead of overwriting the input files. Requires -n.") + parser.add_option("-W", "--write-unchanged-files", action="store_true", + help="Also write files even if no changes were required" + " (useful with --output-dir); implies -w.") + parser.add_option("--add-suffix", action="store", type="str", default="", + help="Append this string to all output filenames." + " Requires -n if non-empty. " + "ex: --add-suffix='3' will generate .py3 files.") + + # Parse command line arguments + refactor_stdin = False + flags = {} + options, args = parser.parse_args(args) + if options.write_unchanged_files: + flags["write_unchanged_files"] = True + if not options.write: + warn("--write-unchanged-files/-W implies -w.") + options.write = True + # If we allowed these, the original files would be renamed to backup names + # but not replaced. 
+ if options.output_dir and not options.nobackups: + parser.error("Can't use --output-dir/-o without -n.") + if options.add_suffix and not options.nobackups: + parser.error("Can't use --add-suffix without -n.") + + if not options.write and options.no_diffs: + warn("not writing files and not printing diffs; that's not very useful") + if not options.write and options.nobackups: + parser.error("Can't use -n without -w") + if options.list_fixes: + print("Available transformations for the -f/--fix option:") + for fixname in refactor.get_all_fix_names(fixer_pkg): + print(fixname) + if not args: + return 0 + if not args: + print("At least one file or directory argument required.", file=sys.stderr) + print("Use --help to show usage.", file=sys.stderr) + return 2 + if "-" in args: + refactor_stdin = True + if options.write: + print("Can't write to stdin.", file=sys.stderr) + return 2 + if options.print_function: + flags["print_function"] = True + + # Set up logging handler + level = logging.DEBUG if options.verbose else logging.INFO + logging.basicConfig(format='%(name)s: %(message)s', level=level) + logger = logging.getLogger('lib2to3.main') + + # Initialize the refactoring tool + avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg)) + unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix) + explicit = set() + if options.fix: + all_present = False + for fix in options.fix: + if fix == "all": + all_present = True + else: + explicit.add(fixer_pkg + ".fix_" + fix) + requested = avail_fixes.union(explicit) if all_present else explicit + else: + requested = avail_fixes.union(explicit) + fixer_names = requested.difference(unwanted_fixes) + input_base_dir = os.path.commonprefix(args) + if (input_base_dir and not input_base_dir.endswith(os.sep) + and not os.path.isdir(input_base_dir)): + # One or more similar names were passed, their directory is the base. + # os.path.commonprefix() is ignorant of path elements, this corrects + # for that weird API. + input_base_dir = os.path.dirname(input_base_dir) + if options.output_dir: + input_base_dir = input_base_dir.rstrip(os.sep) + logger.info('Output in %r will mirror the input directory %r layout.', + options.output_dir, input_base_dir) + rt = StdoutRefactoringTool( + sorted(fixer_names), flags, sorted(explicit), + options.nobackups, not options.no_diffs, + input_base_dir=input_base_dir, + output_dir=options.output_dir, + append_suffix=options.add_suffix) + + # Refactor all files and directories passed as arguments + if not rt.errors: + if refactor_stdin: + rt.refactor_stdin() + else: + try: + rt.refactor(args, options.write, options.doctests_only, + options.processes) + except refactor.MultiprocessingUnsupported: + assert options.processes > 1 + print("Sorry, -j isn't supported on this platform.", + file=sys.stderr) + return 1 + rt.summarize() + + # Return error status (0 if rt.errors is zero) + return int(bool(rt.errors)) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/patcomp.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/patcomp.py new file mode 100644 index 0000000000000000000000000000000000000000..f57f4954b26ce7f59c44ce7cd67bf966fd594cd5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/patcomp.py @@ -0,0 +1,204 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Pattern compiler. + +The grammar is taken from PatternGrammar.txt. 
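+
+Editor's note -- an illustrative pattern in this grammar (hypothetical,
+not part of the original file), of the sort 2to3 fixers are written in:
+
+    power< 'isinstance' trailer< '(' arglist< any ',' any > ')' > >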
+ +The compiler compiles a pattern to a pytree.*Pattern instance. +""" + +__author__ = "Guido van Rossum " + +# Python imports +import io + +# Fairly local imports +from .pgen2 import driver, literals, token, tokenize, parse, grammar + +# Really local imports +from . import pytree +from . import pygram + + +class PatternSyntaxError(Exception): + pass + + +def tokenize_wrapper(input): + """Tokenizes a string suppressing significant whitespace.""" + skip = {token.NEWLINE, token.INDENT, token.DEDENT} + tokens = tokenize.generate_tokens(io.StringIO(input).readline) + for quintuple in tokens: + type, value, start, end, line_text = quintuple + if type not in skip: + yield quintuple + + +class PatternCompiler(object): + + def __init__(self, grammar_file=None): + """Initializer. + + Takes an optional alternative filename for the pattern grammar. + """ + if grammar_file is None: + self.grammar = pygram.pattern_grammar + self.syms = pygram.pattern_symbols + else: + self.grammar = driver.load_grammar(grammar_file) + self.syms = pygram.Symbols(self.grammar) + self.pygrammar = pygram.python_grammar + self.pysyms = pygram.python_symbols + self.driver = driver.Driver(self.grammar, convert=pattern_convert) + + def compile_pattern(self, input, debug=False, with_tree=False): + """Compiles a pattern string to a nested pytree.*Pattern object.""" + tokens = tokenize_wrapper(input) + try: + root = self.driver.parse_tokens(tokens, debug=debug) + except parse.ParseError as e: + raise PatternSyntaxError(str(e)) from None + if with_tree: + return self.compile_node(root), root + else: + return self.compile_node(root) + + def compile_node(self, node): + """Compiles a node, recursively. + + This is one big switch on the node type. + """ + # XXX Optimize certain Wildcard-containing-Wildcard patterns + # that can be merged + if node.type == self.syms.Matcher: + node = node.children[0] # Avoid unneeded recursion + + if node.type == self.syms.Alternatives: + # Skip the odd children since they are just '|' tokens + alts = [self.compile_node(ch) for ch in node.children[::2]] + if len(alts) == 1: + return alts[0] + p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1) + return p.optimize() + + if node.type == self.syms.Alternative: + units = [self.compile_node(ch) for ch in node.children] + if len(units) == 1: + return units[0] + p = pytree.WildcardPattern([units], min=1, max=1) + return p.optimize() + + if node.type == self.syms.NegatedUnit: + pattern = self.compile_basic(node.children[1:]) + p = pytree.NegatedPattern(pattern) + return p.optimize() + + assert node.type == self.syms.Unit + + name = None + nodes = node.children + if len(nodes) >= 3 and nodes[1].type == token.EQUAL: + name = nodes[0].value + nodes = nodes[2:] + repeat = None + if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater: + repeat = nodes[-1] + nodes = nodes[:-1] + + # Now we've reduced it to: STRING | NAME [Details] | (...) | [...] 
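+        # Editor's note (illustrative): for a unit such as args=any*, the
+        # binding name ("args") and the repeater ("*") have been peeled off
+        # above, leaving just the basic unit ("any") in `nodes`.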
+ pattern = self.compile_basic(nodes, repeat) + + if repeat is not None: + assert repeat.type == self.syms.Repeater + children = repeat.children + child = children[0] + if child.type == token.STAR: + min = 0 + max = pytree.HUGE + elif child.type == token.PLUS: + min = 1 + max = pytree.HUGE + elif child.type == token.LBRACE: + assert children[-1].type == token.RBRACE + assert len(children) in (3, 5) + min = max = self.get_int(children[1]) + if len(children) == 5: + max = self.get_int(children[3]) + else: + assert False + if min != 1 or max != 1: + pattern = pattern.optimize() + pattern = pytree.WildcardPattern([[pattern]], min=min, max=max) + + if name is not None: + pattern.name = name + return pattern.optimize() + + def compile_basic(self, nodes, repeat=None): + # Compile STRING | NAME [Details] | (...) | [...] + assert len(nodes) >= 1 + node = nodes[0] + if node.type == token.STRING: + value = str(literals.evalString(node.value)) + return pytree.LeafPattern(_type_of_literal(value), value) + elif node.type == token.NAME: + value = node.value + if value.isupper(): + if value not in TOKEN_MAP: + raise PatternSyntaxError("Invalid token: %r" % value) + if nodes[1:]: + raise PatternSyntaxError("Can't have details for token") + return pytree.LeafPattern(TOKEN_MAP[value]) + else: + if value == "any": + type = None + elif not value.startswith("_"): + type = getattr(self.pysyms, value, None) + if type is None: + raise PatternSyntaxError("Invalid symbol: %r" % value) + if nodes[1:]: # Details present + content = [self.compile_node(nodes[1].children[1])] + else: + content = None + return pytree.NodePattern(type, content) + elif node.value == "(": + return self.compile_node(nodes[1]) + elif node.value == "[": + assert repeat is None + subpattern = self.compile_node(nodes[1]) + return pytree.WildcardPattern([[subpattern]], min=0, max=1) + assert False, node + + def get_int(self, node): + assert node.type == token.NUMBER + return int(node.value) + + +# Map named tokens to the type value for a LeafPattern +TOKEN_MAP = {"NAME": token.NAME, + "STRING": token.STRING, + "NUMBER": token.NUMBER, + "TOKEN": None} + + +def _type_of_literal(value): + if value[0].isalpha(): + return token.NAME + elif value in grammar.opmap: + return grammar.opmap[value] + else: + return None + + +def pattern_convert(grammar, raw_node_info): + """Converts raw node information to a Node or Leaf instance.""" + type, value, context, children = raw_node_info + if children or type in grammar.number2symbol: + return pytree.Node(type, children, context=context) + else: + return pytree.Leaf(type, value, context=context) + + +def compile_pattern(pattern): + return PatternCompiler().compile_pattern(pattern) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/pygram.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/pygram.py new file mode 100644 index 0000000000000000000000000000000000000000..24d9db9217f131adf50f6eca7d571ce69cbcbc3e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/pygram.py @@ -0,0 +1,43 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Export the Python grammar and symbols.""" + +# Python imports +import os + +# Local imports +from .pgen2 import token +from .pgen2 import driver +from . 
import pytree + +# The grammar file +_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") +_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), + "PatternGrammar.txt") + + +class Symbols(object): + + def __init__(self, grammar): + """Initializer. + + Creates an attribute for each grammar symbol (nonterminal), + whose value is the symbol's type (an int >= 256). + """ + for name, symbol in grammar.symbol2number.items(): + setattr(self, name, symbol) + + +python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE) + +python_symbols = Symbols(python_grammar) + +python_grammar_no_print_statement = python_grammar.copy() +del python_grammar_no_print_statement.keywords["print"] + +python_grammar_no_print_and_exec_statement = python_grammar_no_print_statement.copy() +del python_grammar_no_print_and_exec_statement.keywords["exec"] + +pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE) +pattern_symbols = Symbols(pattern_grammar) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/pytree.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/pytree.py new file mode 100644 index 0000000000000000000000000000000000000000..2a6ef2ef5240a2b1aa9eecb5162368d5649b624c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/pytree.py @@ -0,0 +1,853 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +""" +Python parse tree definitions. + +This is a very concrete parse tree; we need to keep every token and +even the comments and whitespace between tokens. + +There's also a pattern matching implementation here. +""" + +__author__ = "Guido van Rossum " + +import sys +from io import StringIO + +HUGE = 0x7FFFFFFF # maximum repeat count, default max + +_type_reprs = {} +def type_repr(type_num): + global _type_reprs + if not _type_reprs: + from .pygram import python_symbols + # printing tokens is possible but not as useful + # from .pgen2 import token // token.__dict__.items(): + for name, val in python_symbols.__dict__.items(): + if type(val) == int: _type_reprs[val] = name + return _type_reprs.setdefault(type_num, type_num) + +class Base(object): + + """ + Abstract base class for Node and Leaf. + + This provides some default functionality and boilerplate using the + template pattern. + + A node may be a subnode of at most one parent. + """ + + # Default values for instance variables + type = None # int: token number (< 256) or symbol number (>= 256) + parent = None # Parent node pointer, or None + children = () # Tuple of subnodes + was_changed = False + was_checked = False + + def __new__(cls, *args, **kwds): + """Constructor that prevents Base from being instantiated.""" + assert cls is not Base, "Cannot instantiate Base" + return object.__new__(cls) + + def __eq__(self, other): + """ + Compare two nodes for equality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return self._eq(other) + + __hash__ = None # For Py3 compatibility. + + def _eq(self, other): + """ + Compare two nodes for equality. + + This is called by __eq__ and __ne__. It is only called if the two nodes + have the same type. This must be implemented by the concrete subclass. + Nodes should be considered equal if they have the same structure, + ignoring the prefix string and other context information. + """ + raise NotImplementedError + + def clone(self): + """ + Return a cloned (deep) copy of self. 
+ + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def post_order(self): + """ + Return a post-order iterator for the tree. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def pre_order(self): + """ + Return a pre-order iterator for the tree. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def replace(self, new): + """Replace this node with a new one in the parent.""" + assert self.parent is not None, str(self) + assert new is not None + if not isinstance(new, list): + new = [new] + l_children = [] + found = False + for ch in self.parent.children: + if ch is self: + assert not found, (self.parent.children, self, new) + if new is not None: + l_children.extend(new) + found = True + else: + l_children.append(ch) + assert found, (self.children, self, new) + self.parent.changed() + self.parent.children = l_children + for x in new: + x.parent = self.parent + self.parent = None + + def get_lineno(self): + """Return the line number which generated the invocant node.""" + node = self + while not isinstance(node, Leaf): + if not node.children: + return + node = node.children[0] + return node.lineno + + def changed(self): + if self.parent: + self.parent.changed() + self.was_changed = True + + def remove(self): + """ + Remove the node from the tree. Returns the position of the node in its + parent's children before it was removed. + """ + if self.parent: + for i, node in enumerate(self.parent.children): + if node is self: + self.parent.changed() + del self.parent.children[i] + self.parent = None + return i + + @property + def next_sibling(self): + """ + The node immediately following the invocant in their parent's children + list. If the invocant does not have a next sibling, it is None + """ + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + try: + return self.parent.children[i+1] + except IndexError: + return None + + @property + def prev_sibling(self): + """ + The node immediately preceding the invocant in their parent's children + list. If the invocant does not have a previous sibling, it is None. + """ + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i-1] + + def leaves(self): + for child in self.children: + yield from child.leaves() + + def depth(self): + if self.parent is None: + return 0 + return 1 + self.parent.depth() + + def get_suffix(self): + """ + Return the string immediately following the invocant node. This is + effectively equivalent to node.next_sibling.prefix + """ + next_sib = self.next_sibling + if next_sib is None: + return "" + return next_sib.prefix + + if sys.version_info < (3, 0): + def __str__(self): + return str(self).encode("ascii") + +class Node(Base): + + """Concrete implementation for interior nodes.""" + + def __init__(self,type, children, + context=None, + prefix=None, + fixers_applied=None): + """ + Initializer. + + Takes a type constant (a symbol number >= 256), a sequence of + child nodes, and an optional context keyword argument. + + As a side effect, the parent pointers of the children are updated. 
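+
+        Editor's sketch (hypothetical symbol number, not from the original
+        source):
+
+            >>> from lib2to3 import pytree
+            >>> from lib2to3.pgen2 import token
+            >>> leaf = pytree.Leaf(token.NAME, "x")
+            >>> node = pytree.Node(300, [leaf])  # 300: some symbol >= 256
+            >>> leaf.parent is node
+            True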
+ """ + assert type >= 256, type + self.type = type + self.children = list(children) + for ch in self.children: + assert ch.parent is None, repr(ch) + ch.parent = self + if prefix is not None: + self.prefix = prefix + if fixers_applied: + self.fixers_applied = fixers_applied[:] + else: + self.fixers_applied = None + + def __repr__(self): + """Return a canonical string representation.""" + return "%s(%s, %r)" % (self.__class__.__name__, + type_repr(self.type), + self.children) + + def __unicode__(self): + """ + Return a pretty string representation. + + This reproduces the input source exactly. + """ + return "".join(map(str, self.children)) + + if sys.version_info > (3, 0): + __str__ = __unicode__ + + def _eq(self, other): + """Compare two nodes for equality.""" + return (self.type, self.children) == (other.type, other.children) + + def clone(self): + """Return a cloned (deep) copy of self.""" + return Node(self.type, [ch.clone() for ch in self.children], + fixers_applied=self.fixers_applied) + + def post_order(self): + """Return a post-order iterator for the tree.""" + for child in self.children: + yield from child.post_order() + yield self + + def pre_order(self): + """Return a pre-order iterator for the tree.""" + yield self + for child in self.children: + yield from child.pre_order() + + @property + def prefix(self): + """ + The whitespace and comments preceding this node in the input. + """ + if not self.children: + return "" + return self.children[0].prefix + + @prefix.setter + def prefix(self, prefix): + if self.children: + self.children[0].prefix = prefix + + def set_child(self, i, child): + """ + Equivalent to 'node.children[i] = child'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children[i].parent = None + self.children[i] = child + self.changed() + + def insert_child(self, i, child): + """ + Equivalent to 'node.children.insert(i, child)'. This method also sets + the child's parent attribute appropriately. + """ + child.parent = self + self.children.insert(i, child) + self.changed() + + def append_child(self, child): + """ + Equivalent to 'node.children.append(child)'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children.append(child) + self.changed() + + +class Leaf(Base): + + """Concrete implementation for leaf nodes.""" + + # Default values for instance variables + _prefix = "" # Whitespace and comments preceding this token in the input + lineno = 0 # Line where this token starts in the input + column = 0 # Column where this token tarts in the input + + def __init__(self, type, value, + context=None, + prefix=None, + fixers_applied=[]): + """ + Initializer. + + Takes a type constant (a token number < 256), a string value, and an + optional context keyword argument. + """ + assert 0 <= type < 256, type + if context is not None: + self._prefix, (self.lineno, self.column) = context + self.type = type + self.value = value + if prefix is not None: + self._prefix = prefix + self.fixers_applied = fixers_applied[:] + + def __repr__(self): + """Return a canonical string representation.""" + return "%s(%r, %r)" % (self.__class__.__name__, + self.type, + self.value) + + def __unicode__(self): + """ + Return a pretty string representation. + + This reproduces the input source exactly. 
+ """ + return self.prefix + str(self.value) + + if sys.version_info > (3, 0): + __str__ = __unicode__ + + def _eq(self, other): + """Compare two nodes for equality.""" + return (self.type, self.value) == (other.type, other.value) + + def clone(self): + """Return a cloned (deep) copy of self.""" + return Leaf(self.type, self.value, + (self.prefix, (self.lineno, self.column)), + fixers_applied=self.fixers_applied) + + def leaves(self): + yield self + + def post_order(self): + """Return a post-order iterator for the tree.""" + yield self + + def pre_order(self): + """Return a pre-order iterator for the tree.""" + yield self + + @property + def prefix(self): + """ + The whitespace and comments preceding this token in the input. + """ + return self._prefix + + @prefix.setter + def prefix(self, prefix): + self.changed() + self._prefix = prefix + +def convert(gr, raw_node): + """ + Convert raw node information to a Node or Leaf instance. + + This is passed to the parser driver which calls it whenever a reduction of a + grammar rule produces a new complete node, so that the tree is build + strictly bottom-up. + """ + type, value, context, children = raw_node + if children or type in gr.number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. + if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) + + +class BasePattern(object): + + """ + A pattern is a tree matching pattern. + + It looks for a specific node type (token or symbol), and + optionally for a specific content. + + This is an abstract base class. There are three concrete + subclasses: + + - LeafPattern matches a single leaf node; + - NodePattern matches a single node (usually non-leaf); + - WildcardPattern matches a sequence of nodes of variable length. + """ + + # Defaults for instance variables + type = None # Node type (token if < 256, symbol if >= 256) + content = None # Optional content matching pattern + name = None # Optional name used to store match in results dict + + def __new__(cls, *args, **kwds): + """Constructor that prevents BasePattern from being instantiated.""" + assert cls is not BasePattern, "Cannot instantiate BasePattern" + return object.__new__(cls) + + def __repr__(self): + args = [type_repr(self.type), self.content, self.name] + while args and args[-1] is None: + del args[-1] + return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) + + def optimize(self): + """ + A subclass can define this as a hook for optimizations. + + Returns either self or another node with the same effect. + """ + return self + + def match(self, node, results=None): + """ + Does this pattern exactly match a node? + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + Default implementation for non-wildcard patterns. + """ + if self.type is not None and node.type != self.type: + return False + if self.content is not None: + r = None + if results is not None: + r = {} + if not self._submatch(node, r): + return False + if r: + results.update(r) + if results is not None and self.name: + results[self.name] = node + return True + + def match_seq(self, nodes, results=None): + """ + Does this pattern exactly match a sequence of nodes? + + Default implementation for non-wildcard patterns. 
+ """ + if len(nodes) != 1: + return False + return self.match(nodes[0], results) + + def generate_matches(self, nodes): + """ + Generator yielding all matches for this pattern. + + Default implementation for non-wildcard patterns. + """ + r = {} + if nodes and self.match(nodes[0], r): + yield 1, r + + +class LeafPattern(BasePattern): + + def __init__(self, type=None, content=None, name=None): + """ + Initializer. Takes optional type, content, and name. + + The type, if given must be a token type (< 256). If not given, + this matches any *leaf* node; the content may still be required. + + The content, if given, must be a string. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert 0 <= type < 256, type + if content is not None: + assert isinstance(content, str), repr(content) + self.type = type + self.content = content + self.name = name + + def match(self, node, results=None): + """Override match() to insist on a leaf node.""" + if not isinstance(node, Leaf): + return False + return BasePattern.match(self, node, results) + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + return self.content == node.value + + +class NodePattern(BasePattern): + + wildcards = False + + def __init__(self, type=None, content=None, name=None): + """ + Initializer. Takes optional type, content, and name. + + The type, if given, must be a symbol type (>= 256). If the + type is None this matches *any* single node (leaf or not), + except if content is not None, in which it only matches + non-leaf nodes that also match the content pattern. + + The content, if not None, must be a sequence of Patterns that + must match the node's children exactly. If the content is + given, the type must not be None. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert type >= 256, type + if content is not None: + assert not isinstance(content, str), repr(content) + content = list(content) + for i, item in enumerate(content): + assert isinstance(item, BasePattern), (i, item) + if isinstance(item, WildcardPattern): + self.wildcards = True + self.type = type + self.content = content + self.name = name + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + if self.wildcards: + for c, r in generate_matches(self.content, node.children): + if c == len(node.children): + if results is not None: + results.update(r) + return True + return False + if len(self.content) != len(node.children): + return False + for subpattern, child in zip(self.content, node.children): + if not subpattern.match(child, results): + return False + return True + + +class WildcardPattern(BasePattern): + + """ + A wildcard pattern can match zero or more nodes. 
+
+    This has all the flexibility needed to implement patterns like:
+
+    .* .+ .? .{m,n}
+    (a b c | d e | f)
+    (...)* (...)+ (...)? (...){m,n}
+
+    except it always uses non-greedy matching.
+    """
+
+    def __init__(self, content=None, min=0, max=HUGE, name=None):
+        """
+        Initializer.
+
+        Args:
+            content: optional sequence of subsequences of patterns;
+                     if absent, matches one node;
+                     if present, each subsequence is an alternative [*]
+            min: optional minimum number of times to match, default 0
+            max: optional maximum number of times to match, default HUGE
+            name: optional name assigned to this match
+
+        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
+            equivalent to (a b c | d e | f g h); if content is None,
+            this is equivalent to '.' in regular expression terms.
+            The min and max parameters work as follows:
+                min=0, max=maxint:  .*
+                min=1, max=maxint:  .+
+                min=0, max=1:  .?
+                min=1, max=1:  .
+            If content is not None, replace the dot with the parenthesized
+            list of alternatives, e.g. (a b c | d e | f g h)*
+        """
+        assert 0 <= min <= max <= HUGE, (min, max)
+        if content is not None:
+            content = tuple(map(tuple, content))  # Protect against alterations
+            # Check sanity of alternatives
+            assert len(content), repr(content)  # Can't have zero alternatives
+            for alt in content:
+                assert len(alt), repr(alt)  # Can't have empty alternatives
+        self.content = content
+        self.min = min
+        self.max = max
+        self.name = name
+
+    def optimize(self):
+        """Optimize certain stacked wildcard patterns."""
+        subpattern = None
+        if (self.content is not None and
+            len(self.content) == 1 and len(self.content[0]) == 1):
+            subpattern = self.content[0][0]
+        if self.min == 1 and self.max == 1:
+            if self.content is None:
+                return NodePattern(name=self.name)
+            if subpattern is not None and self.name == subpattern.name:
+                return subpattern.optimize()
+        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
+            subpattern.min <= 1 and self.name == subpattern.name):
+            return WildcardPattern(subpattern.content,
+                                   self.min*subpattern.min,
+                                   self.max*subpattern.max,
+                                   subpattern.name)
+        return self
+
+    def match(self, node, results=None):
+        """Does this pattern exactly match a node?"""
+        return self.match_seq([node], results)
+
+    def match_seq(self, nodes, results=None):
+        """Does this pattern exactly match a sequence of nodes?"""
+        for c, r in self.generate_matches(nodes):
+            if c == len(nodes):
+                if results is not None:
+                    results.update(r)
+                    if self.name:
+                        results[self.name] = list(nodes)
+                return True
+        return False
+
+    def generate_matches(self, nodes):
+        """
+        Generator yielding matches for a sequence of nodes.
+
+        Args:
+            nodes: sequence of nodes
+
+        Yields:
+            (count, results) tuples where:
+            count: the match comprises nodes[:count];
+            results: dict containing named submatches.
+        """
+        if self.content is None:
+            # Shortcut for special case (see __init__.__doc__)
+            for count in range(self.min, 1 + min(len(nodes), self.max)):
+                r = {}
+                if self.name:
+                    r[self.name] = nodes[:count]
+                yield count, r
+        elif self.name == "bare_name":
+            yield self._bare_name_matches(nodes)
+        else:
+            # The reason for this is that hitting the recursion limit usually
+            # results in some ugly messages about how RuntimeErrors are being
+            # ignored. We only have to do this on CPython, though, because other
+            # implementations don't have this nasty bug in the first place.
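+            # Editor's note: the hasattr(sys, "getrefcount") test below is a
+            # cheap "running on CPython?" probe; the temporary stderr swap
+            # hides the noise CPython prints when the recursion limit is hit.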
+ if hasattr(sys, "getrefcount"): + save_stderr = sys.stderr + sys.stderr = StringIO() + try: + for count, r in self._recursive_matches(nodes, 0): + if self.name: + r[self.name] = nodes[:count] + yield count, r + except RuntimeError: + # We fall back to the iterative pattern matching scheme if the recursive + # scheme hits the recursion limit. + for count, r in self._iterative_matches(nodes): + if self.name: + r[self.name] = nodes[:count] + yield count, r + finally: + if hasattr(sys, "getrefcount"): + sys.stderr = save_stderr + + def _iterative_matches(self, nodes): + """Helper to iteratively yield the matches.""" + nodelen = len(nodes) + if 0 >= self.min: + yield 0, {} + + results = [] + # generate matches that use just one alt from self.content + for alt in self.content: + for c, r in generate_matches(alt, nodes): + yield c, r + results.append((c, r)) + + # for each match, iterate down the nodes + while results: + new_results = [] + for c0, r0 in results: + # stop if the entire set of nodes has been matched + if c0 < nodelen and c0 <= self.max: + for alt in self.content: + for c1, r1 in generate_matches(alt, nodes[c0:]): + if c1 > 0: + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + new_results.append((c0 + c1, r)) + results = new_results + + def _bare_name_matches(self, nodes): + """Special optimized matcher for bare_name.""" + count = 0 + r = {} + done = False + max = len(nodes) + while not done and count < max: + done = True + for leaf in self.content: + if leaf[0].match(nodes[count], r): + count += 1 + done = False + break + r[self.name] = nodes[:count] + return count, r + + def _recursive_matches(self, nodes, count): + """Helper to recursively yield the matches.""" + assert self.content is not None + if count >= self.min: + yield 0, {} + if count < self.max: + for alt in self.content: + for c0, r0 in generate_matches(alt, nodes): + for c1, r1 in self._recursive_matches(nodes[c0:], count+1): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + + +class NegatedPattern(BasePattern): + + def __init__(self, content=None): + """ + Initializer. + + The argument is either a pattern or None. If it is None, this + only matches an empty sequence (effectively '$' in regex + lingo). If it is not None, this matches whenever the argument + pattern doesn't have any matches. + """ + if content is not None: + assert isinstance(content, BasePattern), repr(content) + self.content = content + + def match(self, node): + # We never match a node in its entirety + return False + + def match_seq(self, nodes): + # We only match an empty sequence of nodes in its entirety + return len(nodes) == 0 + + def generate_matches(self, nodes): + if self.content is None: + # Return a match if there is an empty sequence + if len(nodes) == 0: + yield 0, {} + else: + # Return a match if the argument pattern has no matches + for c, r in self.content.generate_matches(nodes): + return + yield 0, {} + + +def generate_matches(patterns, nodes): + """ + Generator yielding matches for a sequence of patterns and nodes. + + Args: + patterns: a sequence of patterns + nodes: a sequence of nodes + + Yields: + (count, results) tuples where: + count: the entire sequence of patterns matches nodes[:count]; + results: dict containing named submatches. 
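+
+        Editor's sketch (illustrative, not from the original source):
+
+            >>> from lib2to3 import pytree
+            >>> from lib2to3.pgen2 import token
+            >>> pats = [pytree.LeafPattern(token.NAME)]
+            >>> [c for c, r in pytree.generate_matches(
+            ...         pats, [pytree.Leaf(token.NAME, "x")])]
+            [1]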
+ """ + if not patterns: + yield 0, {} + else: + p, rest = patterns[0], patterns[1:] + for c0, r0 in p.generate_matches(nodes): + if not rest: + yield c0, r0 + else: + for c1, r1 in generate_matches(rest, nodes[c0:]): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/refactor.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/refactor.py new file mode 100644 index 0000000000000000000000000000000000000000..55fd60fa27c3da9ba0eed4fff1c472aac62e5d9d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/refactor.py @@ -0,0 +1,728 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Refactoring framework. + +Used as a main program, this can refactor any number of files and/or +recursively descend down directories. Imported as a module, this +provides infrastructure to write your own refactoring tool. +""" + +__author__ = "Guido van Rossum " + + +# Python imports +import io +import os +import pkgutil +import sys +import logging +import operator +import collections +from itertools import chain + +# Local imports +from .pgen2 import driver, tokenize, token +from .fixer_util import find_root +from . import pytree, pygram +from . import btm_matcher as bm + + +def get_all_fix_names(fixer_pkg, remove_prefix=True): + """Return a sorted list of all available fix names in the given package.""" + pkg = __import__(fixer_pkg, [], [], ["*"]) + fix_names = [] + for finder, name, ispkg in pkgutil.iter_modules(pkg.__path__): + if name.startswith("fix_"): + if remove_prefix: + name = name[4:] + fix_names.append(name) + return fix_names + + +class _EveryNode(Exception): + pass + + +def _get_head_types(pat): + """ Accepts a pytree Pattern Node and returns a set + of the pattern types which will match first. """ + + if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)): + # NodePatters must either have no type and no content + # or a type and content -- so they don't get any farther + # Always return leafs + if pat.type is None: + raise _EveryNode + return {pat.type} + + if isinstance(pat, pytree.NegatedPattern): + if pat.content: + return _get_head_types(pat.content) + raise _EveryNode # Negated Patterns don't have a type + + if isinstance(pat, pytree.WildcardPattern): + # Recurse on each node in content + r = set() + for p in pat.content: + for x in p: + r.update(_get_head_types(x)) + return r + + raise Exception("Oh no! I don't understand pattern %s" %(pat)) + + +def _get_headnode_dict(fixer_list): + """ Accepts a list of fixers and returns a dictionary + of head node type --> fixer list. """ + head_nodes = collections.defaultdict(list) + every = [] + for fixer in fixer_list: + if fixer.pattern: + try: + heads = _get_head_types(fixer.pattern) + except _EveryNode: + every.append(fixer) + else: + for node_type in heads: + head_nodes[node_type].append(fixer) + else: + if fixer._accept_type is not None: + head_nodes[fixer._accept_type].append(fixer) + else: + every.append(fixer) + for node_type in chain(pygram.python_grammar.symbol2number.values(), + pygram.python_grammar.tokens): + head_nodes[node_type].extend(every) + return dict(head_nodes) + + +def get_fixers_from_package(pkg_name): + """ + Return the fully qualified names for fixers in the package pkg_name. + """ + return [pkg_name + "." 
+ fix_name + for fix_name in get_all_fix_names(pkg_name, False)] + +def _identity(obj): + return obj + + +def _detect_future_features(source): + have_docstring = False + gen = tokenize.generate_tokens(io.StringIO(source).readline) + def advance(): + tok = next(gen) + return tok[0], tok[1] + ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT}) + features = set() + try: + while True: + tp, value = advance() + if tp in ignore: + continue + elif tp == token.STRING: + if have_docstring: + break + have_docstring = True + elif tp == token.NAME and value == "from": + tp, value = advance() + if tp != token.NAME or value != "__future__": + break + tp, value = advance() + if tp != token.NAME or value != "import": + break + tp, value = advance() + if tp == token.OP and value == "(": + tp, value = advance() + while tp == token.NAME: + features.add(value) + tp, value = advance() + if tp != token.OP or value != ",": + break + tp, value = advance() + else: + break + except StopIteration: + pass + return frozenset(features) + + +class FixerError(Exception): + """A fixer could not be loaded.""" + + +class RefactoringTool(object): + + _default_options = {"print_function" : False, + "write_unchanged_files" : False} + + CLASS_PREFIX = "Fix" # The prefix for fixer classes + FILE_PREFIX = "fix_" # The prefix for modules with a fixer within + + def __init__(self, fixer_names, options=None, explicit=None): + """Initializer. + + Args: + fixer_names: a list of fixers to import + options: a dict with configuration. + explicit: a list of fixers to run even if they are explicit. + """ + self.fixers = fixer_names + self.explicit = explicit or [] + self.options = self._default_options.copy() + if options is not None: + self.options.update(options) + if self.options["print_function"]: + self.grammar = pygram.python_grammar_no_print_statement + else: + self.grammar = pygram.python_grammar + # When this is True, the refactor*() methods will call write_file() for + # files processed even if they were not changed during refactoring. If + # and only if the refactor method's write parameter was True. + self.write_unchanged_files = self.options.get("write_unchanged_files") + self.errors = [] + self.logger = logging.getLogger("RefactoringTool") + self.fixer_log = [] + self.wrote = False + self.driver = driver.Driver(self.grammar, + convert=pytree.convert, + logger=self.logger) + self.pre_order, self.post_order = self.get_fixers() + + + self.files = [] # List of files that were or should be modified + + self.BM = bm.BottomMatcher() + self.bmi_pre_order = [] # Bottom Matcher incompatible fixers + self.bmi_post_order = [] + + for fixer in chain(self.post_order, self.pre_order): + if fixer.BM_compatible: + self.BM.add_fixer(fixer) + # remove fixers that will be handled by the bottom-up + # matcher + elif fixer in self.pre_order: + self.bmi_pre_order.append(fixer) + elif fixer in self.post_order: + self.bmi_post_order.append(fixer) + + self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order) + self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order) + + + + def get_fixers(self): + """Inspects the options to load the requested patterns and handlers. + + Returns: + (pre_order, post_order), where pre_order is the list of fixers that + want a pre-order AST traversal, and post_order is the list that want + post-order traversal. 
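+
+        Editor's note (hypothetical fixer name, not from the original
+        source): a fixer module "fix_print_function" would be expected to
+        define a class "FixPrintFunction", since the class name is derived
+        from the module name:
+
+            >>> "Fix" + "".join(p.title() for p in "print_function".split("_"))
+            'FixPrintFunction'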
+ """ + pre_order_fixers = [] + post_order_fixers = [] + for fix_mod_path in self.fixers: + mod = __import__(fix_mod_path, {}, {}, ["*"]) + fix_name = fix_mod_path.rsplit(".", 1)[-1] + if fix_name.startswith(self.FILE_PREFIX): + fix_name = fix_name[len(self.FILE_PREFIX):] + parts = fix_name.split("_") + class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts]) + try: + fix_class = getattr(mod, class_name) + except AttributeError: + raise FixerError("Can't find %s.%s" % (fix_name, class_name)) from None + fixer = fix_class(self.options, self.fixer_log) + if fixer.explicit and self.explicit is not True and \ + fix_mod_path not in self.explicit: + self.log_message("Skipping optional fixer: %s", fix_name) + continue + + self.log_debug("Adding transformation: %s", fix_name) + if fixer.order == "pre": + pre_order_fixers.append(fixer) + elif fixer.order == "post": + post_order_fixers.append(fixer) + else: + raise FixerError("Illegal fixer order: %r" % fixer.order) + + key_func = operator.attrgetter("run_order") + pre_order_fixers.sort(key=key_func) + post_order_fixers.sort(key=key_func) + return (pre_order_fixers, post_order_fixers) + + def log_error(self, msg, *args, **kwds): + """Called when an error occurs.""" + raise + + def log_message(self, msg, *args): + """Hook to log a message.""" + if args: + msg = msg % args + self.logger.info(msg) + + def log_debug(self, msg, *args): + if args: + msg = msg % args + self.logger.debug(msg) + + def print_output(self, old_text, new_text, filename, equal): + """Called with the old version, new version, and filename of a + refactored file.""" + pass + + def refactor(self, items, write=False, doctests_only=False): + """Refactor a list of files and directories.""" + + for dir_or_file in items: + if os.path.isdir(dir_or_file): + self.refactor_dir(dir_or_file, write, doctests_only) + else: + self.refactor_file(dir_or_file, write, doctests_only) + + def refactor_dir(self, dir_name, write=False, doctests_only=False): + """Descends down a directory and refactor every Python file found. + + Python files are assumed to have a .py extension. + + Files and subdirectories starting with '.' are skipped. + """ + py_ext = os.extsep + "py" + for dirpath, dirnames, filenames in os.walk(dir_name): + self.log_debug("Descending into %s", dirpath) + dirnames.sort() + filenames.sort() + for name in filenames: + if (not name.startswith(".") and + os.path.splitext(name)[1] == py_ext): + fullname = os.path.join(dirpath, name) + self.refactor_file(fullname, write, doctests_only) + # Modify dirnames in-place to remove subdirs with leading dots + dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")] + + def _read_python_source(self, filename): + """ + Do our best to decode a Python source file correctly. + """ + try: + f = open(filename, "rb") + except OSError as err: + self.log_error("Can't open %s: %s", filename, err) + return None, None + try: + encoding = tokenize.detect_encoding(f.readline)[0] + finally: + f.close() + with io.open(filename, "r", encoding=encoding, newline='') as f: + return f.read(), encoding + + def refactor_file(self, filename, write=False, doctests_only=False): + """Refactors a file.""" + input, encoding = self._read_python_source(filename) + if input is None: + # Reading the file failed. 
+ return + input += "\n" # Silence certain parse errors + if doctests_only: + self.log_debug("Refactoring doctests in %s", filename) + output = self.refactor_docstring(input, filename) + if self.write_unchanged_files or output != input: + self.processed_file(output, filename, input, write, encoding) + else: + self.log_debug("No doctest changes in %s", filename) + else: + tree = self.refactor_string(input, filename) + if self.write_unchanged_files or (tree and tree.was_changed): + # The [:-1] is to take off the \n we added earlier + self.processed_file(str(tree)[:-1], filename, + write=write, encoding=encoding) + else: + self.log_debug("No changes in %s", filename) + + def refactor_string(self, data, name): + """Refactor a given input string. + + Args: + data: a string holding the code to be refactored. + name: a human-readable name for use in error/log messages. + + Returns: + An AST corresponding to the refactored input stream; None if + there were errors during the parse. + """ + features = _detect_future_features(data) + if "print_function" in features: + self.driver.grammar = pygram.python_grammar_no_print_statement + try: + tree = self.driver.parse_string(data) + except Exception as err: + self.log_error("Can't parse %s: %s: %s", + name, err.__class__.__name__, err) + return + finally: + self.driver.grammar = self.grammar + tree.future_features = features + self.log_debug("Refactoring %s", name) + self.refactor_tree(tree, name) + return tree + + def refactor_stdin(self, doctests_only=False): + input = sys.stdin.read() + if doctests_only: + self.log_debug("Refactoring doctests in stdin") + output = self.refactor_docstring(input, "") + if self.write_unchanged_files or output != input: + self.processed_file(output, "", input) + else: + self.log_debug("No doctest changes in stdin") + else: + tree = self.refactor_string(input, "") + if self.write_unchanged_files or (tree and tree.was_changed): + self.processed_file(str(tree), "", input) + else: + self.log_debug("No changes in stdin") + + def refactor_tree(self, tree, name): + """Refactors a parse tree (modifying the tree in place). + + For compatible patterns the bottom matcher module is + used. Otherwise the tree is traversed node-to-node for + matches. + + Args: + tree: a pytree.Node instance representing the root of the tree + to be refactored. + name: a human-readable name for this tree. + + Returns: + True if the tree was modified, False otherwise. 
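+
+        Editor's note (illustrative, assuming an already-configured
+        RefactoringTool instance `rt`): refactor_string() is the usual entry
+        point; it parses the source and then calls this method on the
+        resulting tree, e.g.
+
+            tree = rt.refactor_string(source, "<example>")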
+ """ + + for fixer in chain(self.pre_order, self.post_order): + fixer.start_tree(tree, name) + + #use traditional matching for the incompatible fixers + self.traverse_by(self.bmi_pre_order_heads, tree.pre_order()) + self.traverse_by(self.bmi_post_order_heads, tree.post_order()) + + # obtain a set of candidate nodes + match_set = self.BM.run(tree.leaves()) + + while any(match_set.values()): + for fixer in self.BM.fixers: + if fixer in match_set and match_set[fixer]: + #sort by depth; apply fixers from bottom(of the AST) to top + match_set[fixer].sort(key=pytree.Base.depth, reverse=True) + + if fixer.keep_line_order: + #some fixers(eg fix_imports) must be applied + #with the original file's line order + match_set[fixer].sort(key=pytree.Base.get_lineno) + + for node in list(match_set[fixer]): + if node in match_set[fixer]: + match_set[fixer].remove(node) + + try: + find_root(node) + except ValueError: + # this node has been cut off from a + # previous transformation ; skip + continue + + if node.fixers_applied and fixer in node.fixers_applied: + # do not apply the same fixer again + continue + + results = fixer.match(node) + + if results: + new = fixer.transform(node, results) + if new is not None: + node.replace(new) + #new.fixers_applied.append(fixer) + for node in new.post_order(): + # do not apply the fixer again to + # this or any subnode + if not node.fixers_applied: + node.fixers_applied = [] + node.fixers_applied.append(fixer) + + # update the original match set for + # the added code + new_matches = self.BM.run(new.leaves()) + for fxr in new_matches: + if not fxr in match_set: + match_set[fxr]=[] + + match_set[fxr].extend(new_matches[fxr]) + + for fixer in chain(self.pre_order, self.post_order): + fixer.finish_tree(tree, name) + return tree.was_changed + + def traverse_by(self, fixers, traversal): + """Traverse an AST, applying a set of fixers to each node. + + This is a helper method for refactor_tree(). + + Args: + fixers: a list of fixer instances. + traversal: a generator that yields AST nodes. + + Returns: + None + """ + if not fixers: + return + for node in traversal: + for fixer in fixers[node.type]: + results = fixer.match(node) + if results: + new = fixer.transform(node, results) + if new is not None: + node.replace(new) + node = new + + def processed_file(self, new_text, filename, old_text=None, write=False, + encoding=None): + """ + Called when a file has been refactored and there may be changes. + """ + self.files.append(filename) + if old_text is None: + old_text = self._read_python_source(filename)[0] + if old_text is None: + return + equal = old_text == new_text + self.print_output(old_text, new_text, filename, equal) + if equal: + self.log_debug("No changes to %s", filename) + if not self.write_unchanged_files: + return + if write: + self.write_file(new_text, filename, old_text, encoding) + else: + self.log_debug("Not writing changes to %s", filename) + + def write_file(self, new_text, filename, old_text, encoding=None): + """Writes a string to a file. + + It first shows a unified diff between the old text and the new text, and + then rewrites the file; the latter is only done if the write option is + set. 
+ """ + try: + fp = io.open(filename, "w", encoding=encoding, newline='') + except OSError as err: + self.log_error("Can't create %s: %s", filename, err) + return + + with fp: + try: + fp.write(new_text) + except OSError as err: + self.log_error("Can't write %s: %s", filename, err) + self.log_debug("Wrote changes to %s", filename) + self.wrote = True + + PS1 = ">>> " + PS2 = "... " + + def refactor_docstring(self, input, filename): + """Refactors a docstring, looking for doctests. + + This returns a modified version of the input string. It looks + for doctests, which start with a ">>>" prompt, and may be + continued with "..." prompts, as long as the "..." is indented + the same as the ">>>". + + (Unfortunately we can't use the doctest module's parser, + since, like most parsers, it is not geared towards preserving + the original source.) + """ + result = [] + block = None + block_lineno = None + indent = None + lineno = 0 + for line in input.splitlines(keepends=True): + lineno += 1 + if line.lstrip().startswith(self.PS1): + if block is not None: + result.extend(self.refactor_doctest(block, block_lineno, + indent, filename)) + block_lineno = lineno + block = [line] + i = line.find(self.PS1) + indent = line[:i] + elif (indent is not None and + (line.startswith(indent + self.PS2) or + line == indent + self.PS2.rstrip() + "\n")): + block.append(line) + else: + if block is not None: + result.extend(self.refactor_doctest(block, block_lineno, + indent, filename)) + block = None + indent = None + result.append(line) + if block is not None: + result.extend(self.refactor_doctest(block, block_lineno, + indent, filename)) + return "".join(result) + + def refactor_doctest(self, block, lineno, indent, filename): + """Refactors one doctest. + + A doctest is given as a block of lines, the first of which starts + with ">>>" (possibly indented), while the remaining lines start + with "..." (identically indented). + + """ + try: + tree = self.parse_block(block, lineno, indent) + except Exception as err: + if self.logger.isEnabledFor(logging.DEBUG): + for line in block: + self.log_debug("Source: %s", line.rstrip("\n")) + self.log_error("Can't parse docstring in %s line %s: %s: %s", + filename, lineno, err.__class__.__name__, err) + return block + if self.refactor_tree(tree, filename): + new = str(tree).splitlines(keepends=True) + # Undo the adjustment of the line numbers in wrap_toks() below. + clipped, new = new[:lineno-1], new[lineno-1:] + assert clipped == ["\n"] * (lineno-1), clipped + if not new[-1].endswith("\n"): + new[-1] += "\n" + block = [indent + self.PS1 + new.pop(0)] + if new: + block += [indent + self.PS2 + line for line in new] + return block + + def summarize(self): + if self.wrote: + were = "were" + else: + were = "need to be" + if not self.files: + self.log_message("No files %s modified.", were) + else: + self.log_message("Files that %s modified:", were) + for file in self.files: + self.log_message(file) + if self.fixer_log: + self.log_message("Warnings/messages while refactoring:") + for message in self.fixer_log: + self.log_message(message) + if self.errors: + if len(self.errors) == 1: + self.log_message("There was 1 error:") + else: + self.log_message("There were %d errors:", len(self.errors)) + for msg, args, kwds in self.errors: + self.log_message(msg, *args, **kwds) + + def parse_block(self, block, lineno, indent): + """Parses a block into a tree. + + This is necessary to get correct line number / offset information + in the parser diagnostics and embedded into the parse tree. 
+ """ + tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent)) + tree.future_features = frozenset() + return tree + + def wrap_toks(self, block, lineno, indent): + """Wraps a tokenize stream to systematically modify start/end.""" + tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__) + for type, value, (line0, col0), (line1, col1), line_text in tokens: + line0 += lineno - 1 + line1 += lineno - 1 + # Don't bother updating the columns; this is too complicated + # since line_text would also have to be updated and it would + # still break for tokens spanning lines. Let the user guess + # that the column numbers for doctests are relative to the + # end of the prompt string (PS1 or PS2). + yield type, value, (line0, col0), (line1, col1), line_text + + + def gen_lines(self, block, indent): + """Generates lines as expected by tokenize from a list of lines. + + This strips the first len(indent + self.PS1) characters off each line. + """ + prefix1 = indent + self.PS1 + prefix2 = indent + self.PS2 + prefix = prefix1 + for line in block: + if line.startswith(prefix): + yield line[len(prefix):] + elif line == prefix.rstrip() + "\n": + yield "\n" + else: + raise AssertionError("line=%r, prefix=%r" % (line, prefix)) + prefix = prefix2 + while True: + yield "" + + +class MultiprocessingUnsupported(Exception): + pass + + +class MultiprocessRefactoringTool(RefactoringTool): + + def __init__(self, *args, **kwargs): + super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs) + self.queue = None + self.output_lock = None + + def refactor(self, items, write=False, doctests_only=False, + num_processes=1): + if num_processes == 1: + return super(MultiprocessRefactoringTool, self).refactor( + items, write, doctests_only) + try: + import multiprocessing + except ImportError: + raise MultiprocessingUnsupported + if self.queue is not None: + raise RuntimeError("already doing multiple processes") + self.queue = multiprocessing.JoinableQueue() + self.output_lock = multiprocessing.Lock() + processes = [multiprocessing.Process(target=self._child) + for i in range(num_processes)] + try: + for p in processes: + p.start() + super(MultiprocessRefactoringTool, self).refactor(items, write, + doctests_only) + finally: + self.queue.join() + for i in range(num_processes): + self.queue.put(None) + for p in processes: + if p.is_alive(): + p.join() + self.queue = None + + def _child(self): + task = self.queue.get() + while task is not None: + args, kwargs = task + try: + super(MultiprocessRefactoringTool, self).refactor_file( + *args, **kwargs) + finally: + self.queue.task_done() + task = self.queue.get() + + def refactor_file(self, *args, **kwargs): + if self.queue is not None: + self.queue.put((args, kwargs)) + else: + return super(MultiprocessRefactoringTool, self).refactor_file( + *args, **kwargs) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8336f381decaacf58640640a6f05a45ba3a2c3e4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/__init__.py @@ -0,0 +1,37 @@ +# +# Package analogous to 'threading.py' but using processes +# +# multiprocessing/__init__.py +# +# This package is intended to duplicate the functionality (and much of +# the API) of threading.py but uses processes instead of threads. 
A +# subpackage 'multiprocessing.dummy' has the same API but is a simple +# wrapper for 'threading'. +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import sys +from . import context + +# +# Copy stuff from default context +# + +__all__ = [x for x in dir(context._default_context) if not x.startswith('_')] +globals().update((name, getattr(context._default_context, name)) for name in __all__) + +# +# XXX These should not really be documented or public. +# + +SUBDEBUG = 5 +SUBWARNING = 25 + +# +# Alias for main module -- will be reset by bootstrapping child processes +# + +if '__main__' in sys.modules: + sys.modules['__mp_main__'] = sys.modules['__main__'] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/connection.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..8e2facf92a94aa363a653a0032da4ea063f0162c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/connection.py @@ -0,0 +1,973 @@ +# +# A higher level module for using sockets (or Windows named pipes) +# +# multiprocessing/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ] + +import io +import os +import sys +import socket +import struct +import time +import tempfile +import itertools + +import _multiprocessing + +from . import util + +from . import AuthenticationError, BufferTooShort +from .context import reduction +_ForkingPickler = reduction.ForkingPickler + +try: + import _winapi + from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE +except ImportError: + if sys.platform == 'win32': + raise + _winapi = None + +# +# +# + +BUFSIZE = 8192 +# A very generous timeout when it comes to local connections... +CONNECTION_TIMEOUT = 20. + +_mmap_counter = itertools.count() + +default_family = 'AF_INET' +families = ['AF_INET'] + +if hasattr(socket, 'AF_UNIX'): + default_family = 'AF_UNIX' + families += ['AF_UNIX'] + +if sys.platform == 'win32': + default_family = 'AF_PIPE' + families += ['AF_PIPE'] + + +def _init_timeout(timeout=CONNECTION_TIMEOUT): + return time.monotonic() + timeout + +def _check_timeout(t): + return time.monotonic() > t + +# +# +# + +def arbitrary_address(family): + ''' + Return an arbitrary free address for the given family + ''' + if family == 'AF_INET': + return ('localhost', 0) + elif family == 'AF_UNIX': + return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir()) + elif family == 'AF_PIPE': + return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % + (os.getpid(), next(_mmap_counter)), dir="") + else: + raise ValueError('unrecognized family') + +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' 
% family) + +def address_type(address): + ''' + Return the types of the address + + This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' + ''' + if type(address) == tuple: + return 'AF_INET' + elif type(address) is str and address.startswith('\\\\'): + return 'AF_PIPE' + elif type(address) is str or util.is_abstract_socket_namespace(address): + return 'AF_UNIX' + else: + raise ValueError('address type of %r unrecognized' % address) + +# +# Connection classes +# + +class _ConnectionBase: + _handle = None + + def __init__(self, handle, readable=True, writable=True): + handle = handle.__index__() + if handle < 0: + raise ValueError("invalid handle") + if not readable and not writable: + raise ValueError( + "at least one of `readable` and `writable` must be True") + self._handle = handle + self._readable = readable + self._writable = writable + + # XXX should we use util.Finalize instead of a __del__? + + def __del__(self): + if self._handle is not None: + self._close() + + def _check_closed(self): + if self._handle is None: + raise OSError("handle is closed") + + def _check_readable(self): + if not self._readable: + raise OSError("connection is write-only") + + def _check_writable(self): + if not self._writable: + raise OSError("connection is read-only") + + def _bad_message_length(self): + if self._writable: + self._readable = False + else: + self.close() + raise OSError("bad message length") + + @property + def closed(self): + """True if the connection is closed""" + return self._handle is None + + @property + def readable(self): + """True if the connection is readable""" + return self._readable + + @property + def writable(self): + """True if the connection is writable""" + return self._writable + + def fileno(self): + """File descriptor or handle of the connection""" + self._check_closed() + return self._handle + + def close(self): + """Close the connection""" + if self._handle is not None: + try: + self._close() + finally: + self._handle = None + + def send_bytes(self, buf, offset=0, size=None): + """Send the bytes data from a bytes-like object""" + self._check_closed() + self._check_writable() + m = memoryview(buf) + # HACK for byte-indexing of non-bytewise buffers (e.g. array.array) + if m.itemsize > 1: + m = memoryview(bytes(m)) + n = len(m) + if offset < 0: + raise ValueError("offset is negative") + if n < offset: + raise ValueError("buffer length < offset") + if size is None: + size = n - offset + elif size < 0: + raise ValueError("size is negative") + elif offset + size > n: + raise ValueError("buffer length < offset + size") + self._send_bytes(m[offset:offset + size]) + + def send(self, obj): + """Send a (picklable) object""" + self._check_closed() + self._check_writable() + self._send_bytes(_ForkingPickler.dumps(obj)) + + def recv_bytes(self, maxlength=None): + """ + Receive bytes data as a bytes object. + """ + self._check_closed() + self._check_readable() + if maxlength is not None and maxlength < 0: + raise ValueError("negative maxlength") + buf = self._recv_bytes(maxlength) + if buf is None: + self._bad_message_length() + return buf.getvalue() + + def recv_bytes_into(self, buf, offset=0): + """ + Receive bytes data into a writeable bytes-like object. + Return the number of bytes read. 
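+
+        A sketch of typical use (the connection name is illustrative):
+
+            buf = bytearray(1024)
+            n = conn.recv_bytes_into(buf)
+            message = bytes(buf[:n])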
+ """ + self._check_closed() + self._check_readable() + with memoryview(buf) as m: + # Get bytesize of arbitrary buffer + itemsize = m.itemsize + bytesize = itemsize * len(m) + if offset < 0: + raise ValueError("negative offset") + elif offset > bytesize: + raise ValueError("offset too large") + result = self._recv_bytes() + size = result.tell() + if bytesize < offset + size: + raise BufferTooShort(result.getvalue()) + # Message can fit in dest + result.seek(0) + result.readinto(m[offset // itemsize : + (offset + size) // itemsize]) + return size + + def recv(self): + """Receive a (picklable) object""" + self._check_closed() + self._check_readable() + buf = self._recv_bytes() + return _ForkingPickler.loads(buf.getbuffer()) + + def poll(self, timeout=0.0): + """Whether there is any input available to be read""" + self._check_closed() + self._check_readable() + return self._poll(timeout) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +if _winapi: + + class PipeConnection(_ConnectionBase): + """ + Connection class based on a Windows named pipe. + Overlapped I/O is used, so the handles must have been created + with FILE_FLAG_OVERLAPPED. + """ + _got_empty_message = False + + def _close(self, _CloseHandle=_winapi.CloseHandle): + _CloseHandle(self._handle) + + def _send_bytes(self, buf): + ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nwritten, err = ov.GetOverlappedResult(True) + assert err == 0 + assert nwritten == len(buf) + + def _recv_bytes(self, maxsize=None): + if self._got_empty_message: + self._got_empty_message = False + return io.BytesIO() + else: + bsize = 128 if maxsize is None else min(maxsize, 128) + try: + ov, err = _winapi.ReadFile(self._handle, bsize, + overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nread, err = ov.GetOverlappedResult(True) + if err == 0: + f = io.BytesIO() + f.write(ov.getbuffer()) + return f + elif err == _winapi.ERROR_MORE_DATA: + return self._get_more_data(ov, maxsize) + except OSError as e: + if e.winerror == _winapi.ERROR_BROKEN_PIPE: + raise EOFError + else: + raise + raise RuntimeError("shouldn't get here; expected KeyboardInterrupt") + + def _poll(self, timeout): + if (self._got_empty_message or + _winapi.PeekNamedPipe(self._handle)[0] != 0): + return True + return bool(wait([self], timeout)) + + def _get_more_data(self, ov, maxsize): + buf = ov.getbuffer() + f = io.BytesIO() + f.write(buf) + left = _winapi.PeekNamedPipe(self._handle)[1] + assert left > 0 + if maxsize is not None and len(buf) + left > maxsize: + self._bad_message_length() + ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) + rbytes, err = ov.GetOverlappedResult(True) + assert err == 0 + assert rbytes == left + f.write(ov.getbuffer()) + return f + + +class Connection(_ConnectionBase): + """ + Connection class based on an arbitrary file descriptor (Unix only), or + a socket handle (Windows). 
+ """ + + if _winapi: + def _close(self, _close=_multiprocessing.closesocket): + _close(self._handle) + _write = _multiprocessing.send + _read = _multiprocessing.recv + else: + def _close(self, _close=os.close): + _close(self._handle) + _write = os.write + _read = os.read + + def _send(self, buf, write=_write): + remaining = len(buf) + while True: + n = write(self._handle, buf) + remaining -= n + if remaining == 0: + break + buf = buf[n:] + + def _recv(self, size, read=_read): + buf = io.BytesIO() + handle = self._handle + remaining = size + while remaining > 0: + chunk = read(handle, remaining) + n = len(chunk) + if n == 0: + if remaining == size: + raise EOFError + else: + raise OSError("got end of file during message") + buf.write(chunk) + remaining -= n + return buf + + def _send_bytes(self, buf): + n = len(buf) + if n > 0x7fffffff: + pre_header = struct.pack("!i", -1) + header = struct.pack("!Q", n) + self._send(pre_header) + self._send(header) + self._send(buf) + else: + # For wire compatibility with 3.7 and lower + header = struct.pack("!i", n) + if n > 16384: + # The payload is large so Nagle's algorithm won't be triggered + # and we'd better avoid the cost of concatenation. + self._send(header) + self._send(buf) + else: + # Issue #20540: concatenate before sending, to avoid delays due + # to Nagle's algorithm on a TCP socket. + # Also note we want to avoid sending a 0-length buffer separately, + # to avoid "broken pipe" errors if the other end closed the pipe. + self._send(header + buf) + + def _recv_bytes(self, maxsize=None): + buf = self._recv(4) + size, = struct.unpack("!i", buf.getvalue()) + if size == -1: + buf = self._recv(8) + size, = struct.unpack("!Q", buf.getvalue()) + if maxsize is not None and size > maxsize: + return None + return self._recv(size) + + def _poll(self, timeout): + r = wait([self], timeout) + return bool(r) + + +# +# Public functions +# + +class Listener(object): + ''' + Returns a listener object. + + This is a wrapper for a bound socket which is 'listening' for + connections, or for a Windows named pipe. + ''' + def __init__(self, address=None, family=None, backlog=1, authkey=None): + family = family or (address and address_type(address)) \ + or default_family + address = address or arbitrary_address(family) + + _validate_family(family) + if family == 'AF_PIPE': + self._listener = PipeListener(address, backlog) + else: + self._listener = SocketListener(address, family, backlog) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + self._authkey = authkey + + def accept(self): + ''' + Accept a connection on the bound socket or named pipe of `self`. + + Returns a `Connection` object. + ''' + if self._listener is None: + raise OSError('listener is closed') + c = self._listener.accept() + if self._authkey: + deliver_challenge(c, self._authkey) + answer_challenge(c, self._authkey) + return c + + def close(self): + ''' + Close the bound socket or named pipe of `self`. 
+ ''' + listener = self._listener + if listener is not None: + self._listener = None + listener.close() + + @property + def address(self): + return self._listener._address + + @property + def last_accepted(self): + return self._listener._last_accepted + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address, family=None, authkey=None): + ''' + Returns a connection to the address of a `Listener` + ''' + family = family or address_type(address) + _validate_family(family) + if family == 'AF_PIPE': + c = PipeClient(address) + else: + c = SocketClient(address) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if authkey is not None: + answer_challenge(c, authkey) + deliver_challenge(c, authkey) + + return c + + +if sys.platform != 'win32': + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + s1, s2 = socket.socketpair() + s1.setblocking(True) + s2.setblocking(True) + c1 = Connection(s1.detach()) + c2 = Connection(s2.detach()) + else: + fd1, fd2 = os.pipe() + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) + + return c1, c2 + +else: + + def Pipe(duplex=True): + ''' + Returns pair of connection objects at either end of a pipe + ''' + address = arbitrary_address('AF_PIPE') + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = BUFSIZE, BUFSIZE + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, BUFSIZE + + h1 = _winapi.CreateNamedPipe( + address, openmode | _winapi.FILE_FLAG_OVERLAPPED | + _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, + # default security descriptor: the handle cannot be inherited + _winapi.NULL + ) + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + _winapi.SetNamedPipeHandleState( + h2, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + + overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) + _, err = overlapped.GetOverlappedResult(True) + assert err == 0 + + c1 = PipeConnection(h1, writable=duplex) + c2 = PipeConnection(h2, readable=duplex) + + return c1, c2 + +# +# Definitions for connections based on sockets +# + +class SocketListener(object): + ''' + Representation of a socket which is bound to an address and listening + ''' + def __init__(self, address, family, backlog=1): + self._socket = socket.socket(getattr(socket, family)) + try: + # SO_REUSEADDR has different semantics on Windows (issue #2550). 
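+            # On Windows, SO_REUSEADDR would let this socket bind to an
+            # address that is already in use, so it is only set on POSIX.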
+ if os.name == 'posix': + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + self._socket.setblocking(True) + self._socket.bind(address) + self._socket.listen(backlog) + self._address = self._socket.getsockname() + except OSError: + self._socket.close() + raise + self._family = family + self._last_accepted = None + + if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address): + # Linux abstract socket namespaces do not need to be explicitly unlinked + self._unlink = util.Finalize( + self, os.unlink, args=(address,), exitpriority=0 + ) + else: + self._unlink = None + + def accept(self): + s, self._last_accepted = self._socket.accept() + s.setblocking(True) + return Connection(s.detach()) + + def close(self): + try: + self._socket.close() + finally: + unlink = self._unlink + if unlink is not None: + self._unlink = None + unlink() + + +def SocketClient(address): + ''' + Return a connection object connected to the socket given by `address` + ''' + family = address_type(address) + with socket.socket( getattr(socket, family) ) as s: + s.setblocking(True) + s.connect(address) + return Connection(s.detach()) + +# +# Definitions for connections based on named pipes +# + +if sys.platform == 'win32': + + class PipeListener(object): + ''' + Representation of a named pipe + ''' + def __init__(self, address, backlog=None): + self._address = address + self._handle_queue = [self._new_handle(first=True)] + + self._last_accepted = None + util.sub_debug('listener created with address=%r', self._address) + self.close = util.Finalize( + self, PipeListener._finalize_pipe_listener, + args=(self._handle_queue, self._address), exitpriority=0 + ) + + def _new_handle(self, first=False): + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + return _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + + def accept(self): + self._handle_queue.append(self._new_handle()) + handle = self._handle_queue.pop(0) + try: + ov = _winapi.ConnectNamedPipe(handle, overlapped=True) + except OSError as e: + if e.winerror != _winapi.ERROR_NO_DATA: + raise + # ERROR_NO_DATA can occur if a client has already connected, + # written data and then disconnected -- see Issue 14725. 
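+                # The handle is nevertheless usable in that case, so fall
+                # through and return a PipeConnection for it below.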
+ else: + try: + res = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + except: + ov.cancel() + _winapi.CloseHandle(handle) + raise + finally: + _, err = ov.GetOverlappedResult(True) + assert err == 0 + return PipeConnection(handle) + + @staticmethod + def _finalize_pipe_listener(queue, address): + util.sub_debug('closing listener with address=%r', address) + for handle in queue: + _winapi.CloseHandle(handle) + + def PipeClient(address): + ''' + Return a connection object connected to the pipe given by `address` + ''' + t = _init_timeout() + while 1: + try: + _winapi.WaitNamedPipe(address, 1000) + h = _winapi.CreateFile( + address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, + 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + except OSError as e: + if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + raise + else: + break + else: + raise + + _winapi.SetNamedPipeHandleState( + h, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + return PipeConnection(h) + +# +# Authentication stuff +# + +MESSAGE_LENGTH = 20 + +CHALLENGE = b'#CHALLENGE#' +WELCOME = b'#WELCOME#' +FAILURE = b'#FAILURE#' + +def deliver_challenge(connection, authkey): + import hmac + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + message = os.urandom(MESSAGE_LENGTH) + connection.send_bytes(CHALLENGE + message) + digest = hmac.new(authkey, message, 'md5').digest() + response = connection.recv_bytes(256) # reject large message + if response == digest: + connection.send_bytes(WELCOME) + else: + connection.send_bytes(FAILURE) + raise AuthenticationError('digest received was wrong') + +def answer_challenge(connection, authkey): + import hmac + if not isinstance(authkey, bytes): + raise ValueError( + "Authkey must be bytes, not {0!s}".format(type(authkey))) + message = connection.recv_bytes(256) # reject large message + assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message + message = message[len(CHALLENGE):] + digest = hmac.new(authkey, message, 'md5').digest() + connection.send_bytes(digest) + response = connection.recv_bytes(256) # reject large message + if response != WELCOME: + raise AuthenticationError('digest sent was rejected') + +# +# Support for using xmlrpclib for serialization +# + +class ConnectionWrapper(object): + def __init__(self, conn, dumps, loads): + self._conn = conn + self._dumps = dumps + self._loads = loads + for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): + obj = getattr(conn, attr) + setattr(self, attr, obj) + def send(self, obj): + s = self._dumps(obj) + self._conn.send_bytes(s) + def recv(self): + s = self._conn.recv_bytes() + return self._loads(s) + +def _xml_dumps(obj): + return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') + +def _xml_loads(s): + (obj,), method = xmlrpclib.loads(s.decode('utf-8')) + return obj + +class XmlListener(Listener): + def accept(self): + global xmlrpclib + import xmlrpc.client as xmlrpclib + obj = Listener.accept(self) + return ConnectionWrapper(obj, _xml_dumps, _xml_loads) + +def XmlClient(*args, **kwds): + global xmlrpclib + import xmlrpc.client as xmlrpclib + return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) + +# +# Wait +# + +if sys.platform == 'win32': + + def _exhaustive_wait(handles, timeout): + # Return ALL handles which are currently signalled. (Only + # returning the first signalled might create starvation issues.) 
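+        # After the first successful wait, the loop below retries with
+        # timeout=0 so that every already-signalled handle is collected.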
+ L = list(handles) + ready = [] + while L: + res = _winapi.WaitForMultipleObjects(L, False, timeout) + if res == WAIT_TIMEOUT: + break + elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): + res -= WAIT_OBJECT_0 + elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): + res -= WAIT_ABANDONED_0 + else: + raise RuntimeError('Should not get here') + ready.append(L[res]) + L = L[res+1:] + timeout = 0 + return ready + + _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. + ''' + if timeout is None: + timeout = INFINITE + elif timeout < 0: + timeout = 0 + else: + timeout = int(timeout * 1000 + 0.5) + + object_list = list(object_list) + waithandle_to_obj = {} + ov_list = [] + ready_objects = set() + ready_handles = set() + + try: + for o in object_list: + try: + fileno = getattr(o, 'fileno') + except AttributeError: + waithandle_to_obj[o.__index__()] = o + else: + # start an overlapped read of length zero + try: + ov, err = _winapi.ReadFile(fileno(), 0, True) + except OSError as e: + ov, err = None, e.winerror + if err not in _ready_errors: + raise + if err == _winapi.ERROR_IO_PENDING: + ov_list.append(ov) + waithandle_to_obj[ov.event] = o + else: + # If o.fileno() is an overlapped pipe handle and + # err == 0 then there is a zero length message + # in the pipe, but it HAS NOT been consumed... + if ov and sys.getwindowsversion()[:2] >= (6, 2): + # ... except on Windows 8 and later, where + # the message HAS been consumed. + try: + _, err = ov.GetOverlappedResult(False) + except OSError as e: + err = e.winerror + if not err and hasattr(o, '_got_empty_message'): + o._got_empty_message = True + ready_objects.add(o) + timeout = 0 + + ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) + finally: + # request that overlapped reads stop + for ov in ov_list: + ov.cancel() + + # wait for all overlapped reads to stop + for ov in ov_list: + try: + _, err = ov.GetOverlappedResult(True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err != _winapi.ERROR_OPERATION_ABORTED: + o = waithandle_to_obj[ov.event] + ready_objects.add(o) + if err == 0: + # If o.fileno() is an overlapped pipe handle then + # a zero length message HAS been consumed. + if hasattr(o, '_got_empty_message'): + o._got_empty_message = True + + ready_objects.update(waithandle_to_obj[h] for h in ready_handles) + return [o for o in object_list if o in ready_objects] + +else: + + import selectors + + # poll/select have the advantage of not requiring any extra file + # descriptor, contrarily to epoll/kqueue (also, they require a single + # syscall). + if hasattr(selectors, 'PollSelector'): + _WaitSelector = selectors.PollSelector + else: + _WaitSelector = selectors.SelectSelector + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. 
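+
+        A sketch of typical use with Connection objects (the names are
+        illustrative):
+
+            for conn in wait([conn_a, conn_b], timeout=1.0):
+                obj = conn.recv()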
+ ''' + with _WaitSelector() as selector: + for obj in object_list: + selector.register(obj, selectors.EVENT_READ) + + if timeout is not None: + deadline = time.monotonic() + timeout + + while True: + ready = selector.select(timeout) + if ready: + return [key.fileobj for (key, events) in ready] + else: + if timeout is not None: + timeout = deadline - time.monotonic() + if timeout < 0: + return ready + +# +# Make connection and socket objects sharable if possible +# + +if sys.platform == 'win32': + def reduce_connection(conn): + handle = conn.fileno() + with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s: + from . import resource_sharer + ds = resource_sharer.DupSocket(s) + return rebuild_connection, (ds, conn.readable, conn.writable) + def rebuild_connection(ds, readable, writable): + sock = ds.detach() + return Connection(sock.detach(), readable, writable) + reduction.register(Connection, reduce_connection) + + def reduce_pipe_connection(conn): + access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) | + (_winapi.FILE_GENERIC_WRITE if conn.writable else 0)) + dh = reduction.DupHandle(conn.fileno(), access) + return rebuild_pipe_connection, (dh, conn.readable, conn.writable) + def rebuild_pipe_connection(dh, readable, writable): + handle = dh.detach() + return PipeConnection(handle, readable, writable) + reduction.register(PipeConnection, reduce_pipe_connection) + +else: + def reduce_connection(conn): + df = reduction.DupFd(conn.fileno()) + return rebuild_connection, (df, conn.readable, conn.writable) + def rebuild_connection(df, readable, writable): + fd = df.detach() + return Connection(fd, readable, writable) + reduction.register(Connection, reduce_connection) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/context.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/context.py new file mode 100644 index 0000000000000000000000000000000000000000..8d0525d5d62179b0ac6d8ca16231f8d428ff44b3 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/context.py @@ -0,0 +1,362 @@ +import os +import sys +import threading + +from . import process +from . import reduction + +__all__ = () + +# +# Exceptions +# + +class ProcessError(Exception): + pass + +class BufferTooShort(ProcessError): + pass + +class TimeoutError(ProcessError): + pass + +class AuthenticationError(ProcessError): + pass + +# +# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py +# + +class BaseContext(object): + + ProcessError = ProcessError + BufferTooShort = BufferTooShort + TimeoutError = TimeoutError + AuthenticationError = AuthenticationError + + current_process = staticmethod(process.current_process) + parent_process = staticmethod(process.parent_process) + active_children = staticmethod(process.active_children) + + def cpu_count(self): + '''Returns the number of CPUs in the system''' + num = os.cpu_count() + if num is None: + raise NotImplementedError('cannot determine number of cpus') + else: + return num + + def Manager(self): + '''Returns a manager associated with a running server process + + The managers methods such as `Lock()`, `Condition()` and `Queue()` + can be used to create shared objects. 
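+
+        A sketch of typical use (the names are illustrative):
+
+            with ctx.Manager() as manager:
+                d = manager.dict()
+                q = manager.Queue()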
+ ''' + from .managers import SyncManager + m = SyncManager(ctx=self.get_context()) + m.start() + return m + + def Pipe(self, duplex=True): + '''Returns two connection object connected by a pipe''' + from .connection import Pipe + return Pipe(duplex) + + def Lock(self): + '''Returns a non-recursive lock object''' + from .synchronize import Lock + return Lock(ctx=self.get_context()) + + def RLock(self): + '''Returns a recursive lock object''' + from .synchronize import RLock + return RLock(ctx=self.get_context()) + + def Condition(self, lock=None): + '''Returns a condition object''' + from .synchronize import Condition + return Condition(lock, ctx=self.get_context()) + + def Semaphore(self, value=1): + '''Returns a semaphore object''' + from .synchronize import Semaphore + return Semaphore(value, ctx=self.get_context()) + + def BoundedSemaphore(self, value=1): + '''Returns a bounded semaphore object''' + from .synchronize import BoundedSemaphore + return BoundedSemaphore(value, ctx=self.get_context()) + + def Event(self): + '''Returns an event object''' + from .synchronize import Event + return Event(ctx=self.get_context()) + + def Barrier(self, parties, action=None, timeout=None): + '''Returns a barrier object''' + from .synchronize import Barrier + return Barrier(parties, action, timeout, ctx=self.get_context()) + + def Queue(self, maxsize=0): + '''Returns a queue object''' + from .queues import Queue + return Queue(maxsize, ctx=self.get_context()) + + def JoinableQueue(self, maxsize=0): + '''Returns a queue object''' + from .queues import JoinableQueue + return JoinableQueue(maxsize, ctx=self.get_context()) + + def SimpleQueue(self): + '''Returns a queue object''' + from .queues import SimpleQueue + return SimpleQueue(ctx=self.get_context()) + + def Pool(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None): + '''Returns a process pool object''' + from .pool import Pool + return Pool(processes, initializer, initargs, maxtasksperchild, + context=self.get_context()) + + def RawValue(self, typecode_or_type, *args): + '''Returns a shared object''' + from .sharedctypes import RawValue + return RawValue(typecode_or_type, *args) + + def RawArray(self, typecode_or_type, size_or_initializer): + '''Returns a shared array''' + from .sharedctypes import RawArray + return RawArray(typecode_or_type, size_or_initializer) + + def Value(self, typecode_or_type, *args, lock=True): + '''Returns a synchronized shared object''' + from .sharedctypes import Value + return Value(typecode_or_type, *args, lock=lock, + ctx=self.get_context()) + + def Array(self, typecode_or_type, size_or_initializer, *, lock=True): + '''Returns a synchronized shared array''' + from .sharedctypes import Array + return Array(typecode_or_type, size_or_initializer, lock=lock, + ctx=self.get_context()) + + def freeze_support(self): + '''Check whether this is a fake forked process in a frozen executable. + If so then run code specified by commandline and exit. + ''' + if sys.platform == 'win32' and getattr(sys, 'frozen', False): + from .spawn import freeze_support + freeze_support() + + def get_logger(self): + '''Return package logger -- if it does not already exist then + it is created. 
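+
+        The logger is named "multiprocessing"; by default it has no
+        handlers, so use log_to_stderr() below to see its output.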
+ ''' + from .util import get_logger + return get_logger() + + def log_to_stderr(self, level=None): + '''Turn on logging and add a handler which prints to stderr''' + from .util import log_to_stderr + return log_to_stderr(level) + + def allow_connection_pickling(self): + '''Install support for sending connections and sockets + between processes + ''' + # This is undocumented. In previous versions of multiprocessing + # its only effect was to make socket objects inheritable on Windows. + from . import connection + + def set_executable(self, executable): + '''Sets the path to a python.exe or pythonw.exe binary used to run + child processes instead of sys.executable when using the 'spawn' + start method. Useful for people embedding Python. + ''' + from .spawn import set_executable + set_executable(executable) + + def set_forkserver_preload(self, module_names): + '''Set list of module names to try to load in forkserver process. + This is really just a hint. + ''' + from .forkserver import set_forkserver_preload + set_forkserver_preload(module_names) + + def get_context(self, method=None): + if method is None: + return self + try: + ctx = _concrete_contexts[method] + except KeyError: + raise ValueError('cannot find context for %r' % method) from None + ctx._check_available() + return ctx + + def get_start_method(self, allow_none=False): + return self._name + + def set_start_method(self, method, force=False): + raise ValueError('cannot set start method of concrete context') + + @property + def reducer(self): + '''Controls how objects will be reduced to a form that can be + shared with other processes.''' + return globals().get('reduction') + + @reducer.setter + def reducer(self, reduction): + globals()['reduction'] = reduction + + def _check_available(self): + pass + +# +# Type of default context -- underlying context can be set at most once +# + +class Process(process.BaseProcess): + _start_method = None + @staticmethod + def _Popen(process_obj): + return _default_context.get_context().Process._Popen(process_obj) + +class DefaultContext(BaseContext): + Process = Process + + def __init__(self, context): + self._default_context = context + self._actual_context = None + + def get_context(self, method=None): + if method is None: + if self._actual_context is None: + self._actual_context = self._default_context + return self._actual_context + else: + return super().get_context(method) + + def set_start_method(self, method, force=False): + if self._actual_context is not None and not force: + raise RuntimeError('context has already been set') + if method is None and force: + self._actual_context = None + return + self._actual_context = self.get_context(method) + + def get_start_method(self, allow_none=False): + if self._actual_context is None: + if allow_none: + return None + self._actual_context = self._default_context + return self._actual_context._name + + def get_all_start_methods(self): + if sys.platform == 'win32': + return ['spawn'] + else: + methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn'] + if reduction.HAVE_SEND_HANDLE: + methods.append('forkserver') + return methods + + +# +# Context types for fixed start method +# + +if sys.platform != 'win32': + + class ForkProcess(process.BaseProcess): + _start_method = 'fork' + @staticmethod + def _Popen(process_obj): + from .popen_fork import Popen + return Popen(process_obj) + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_posix import 
Popen + return Popen(process_obj) + + class ForkServerProcess(process.BaseProcess): + _start_method = 'forkserver' + @staticmethod + def _Popen(process_obj): + from .popen_forkserver import Popen + return Popen(process_obj) + + class ForkContext(BaseContext): + _name = 'fork' + Process = ForkProcess + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + class ForkServerContext(BaseContext): + _name = 'forkserver' + Process = ForkServerProcess + def _check_available(self): + if not reduction.HAVE_SEND_HANDLE: + raise ValueError('forkserver start method not available') + + _concrete_contexts = { + 'fork': ForkContext(), + 'spawn': SpawnContext(), + 'forkserver': ForkServerContext(), + } + if sys.platform == 'darwin': + # bpo-33725: running arbitrary code after fork() is no longer reliable + # on macOS since macOS 10.14 (Mojave). Use spawn by default instead. + _default_context = DefaultContext(_concrete_contexts['spawn']) + else: + _default_context = DefaultContext(_concrete_contexts['fork']) + +else: + + class SpawnProcess(process.BaseProcess): + _start_method = 'spawn' + @staticmethod + def _Popen(process_obj): + from .popen_spawn_win32 import Popen + return Popen(process_obj) + + class SpawnContext(BaseContext): + _name = 'spawn' + Process = SpawnProcess + + _concrete_contexts = { + 'spawn': SpawnContext(), + } + _default_context = DefaultContext(_concrete_contexts['spawn']) + +# +# Force the start method +# + +def _force_start_method(method): + _default_context._actual_context = _concrete_contexts[method] + +# +# Check that the current thread is spawning a child process +# + +_tls = threading.local() + +def get_spawning_popen(): + return getattr(_tls, 'spawning_popen', None) + +def set_spawning_popen(popen): + _tls.spawning_popen = popen + +def assert_spawning(obj): + if get_spawning_popen() is None: + raise RuntimeError( + '%s objects should only be shared between processes' + ' through inheritance' % type(obj).__name__ + ) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/forkserver.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..215ac39afcaa07988c907e5a60d4685b6da61512 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/forkserver.py @@ -0,0 +1,354 @@ +import errno +import os +import selectors +import signal +import socket +import struct +import sys +import threading +import warnings + +from . import connection +from . import process +from .context import reduction +from . import resource_tracker +from . import spawn +from . 
import util
+
+__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
+           'set_forkserver_preload']
+
+#
+#
+#
+
+MAXFDS_TO_SEND = 256
+SIGNED_STRUCT = struct.Struct('q')     # large enough for pid_t
+
+#
+# Forkserver class
+#
+
+class ForkServer(object):
+
+    def __init__(self):
+        self._forkserver_address = None
+        self._forkserver_alive_fd = None
+        self._forkserver_pid = None
+        self._inherited_fds = None
+        self._lock = threading.Lock()
+        self._preload_modules = ['__main__']
+
+    def _stop(self):
+        # Method used by unit tests to stop the server
+        with self._lock:
+            self._stop_unlocked()
+
+    def _stop_unlocked(self):
+        if self._forkserver_pid is None:
+            return
+
+        # closing the "alive" file descriptor asks the server to stop
+        os.close(self._forkserver_alive_fd)
+        self._forkserver_alive_fd = None
+
+        os.waitpid(self._forkserver_pid, 0)
+        self._forkserver_pid = None
+
+        if not util.is_abstract_socket_namespace(self._forkserver_address):
+            os.unlink(self._forkserver_address)
+        self._forkserver_address = None
+
+    def set_forkserver_preload(self, modules_names):
+        '''Set list of module names to try to load in forkserver process.'''
+        # every entry in the new preload list must be a module name (string)
+        if not all(type(mod) is str for mod in modules_names):
+            raise TypeError('module_names must be a list of strings')
+        self._preload_modules = modules_names
+
+    def get_inherited_fds(self):
+        '''Return list of fds inherited from parent process.
+
+        This returns None if the current process was not started by fork
+        server.
+        '''
+        return self._inherited_fds
+
+    def connect_to_new_process(self, fds):
+        '''Request forkserver to create a child process.
+
+        Returns a pair of fds (status_r, data_w).  The calling process can read
+        the child process's pid and (eventually) its returncode from status_r.
+        The calling process should write to data_w the pickled preparation and
+        process data.
+        '''
+        self.ensure_running()
+        if len(fds) + 4 >= MAXFDS_TO_SEND:
+            raise ValueError('too many fds')
+        with socket.socket(socket.AF_UNIX) as client:
+            client.connect(self._forkserver_address)
+            parent_r, child_w = os.pipe()
+            child_r, parent_w = os.pipe()
+            allfds = [child_r, child_w, self._forkserver_alive_fd,
+                      resource_tracker.getfd()]
+            allfds += fds
+            try:
+                reduction.sendfds(client, allfds)
+                return parent_r, parent_w
+            except:
+                os.close(parent_r)
+                os.close(parent_w)
+                raise
+            finally:
+                os.close(child_r)
+                os.close(child_w)
+
+    def ensure_running(self):
+        '''Make sure that a fork server is running.
+
+        This can be called from any process.  Note that usually a child
+        process will just reuse the forkserver started by its parent, so
+        ensure_running() will do nothing.
+        '''
+        with self._lock:
+            resource_tracker.ensure_running()
+            if self._forkserver_pid is not None:
+                # forkserver was launched before, is it still running?
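+                # waitpid() with WNOHANG returns (0, 0) while the child is
+                # still alive, and (pid, status) once it has exited.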
+ pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG) + if not pid: + # still alive + return + # dead, launch it again + os.close(self._forkserver_alive_fd) + self._forkserver_address = None + self._forkserver_alive_fd = None + self._forkserver_pid = None + + cmd = ('from multiprocessing.forkserver import main; ' + + 'main(%d, %d, %r, **%r)') + + if self._preload_modules: + desired_keys = {'main_path', 'sys_path'} + data = spawn.get_preparation_data('ignore') + data = {x: y for x, y in data.items() if x in desired_keys} + else: + data = {} + + with socket.socket(socket.AF_UNIX) as listener: + address = connection.arbitrary_address('AF_UNIX') + listener.bind(address) + if not util.is_abstract_socket_namespace(address): + os.chmod(address, 0o600) + listener.listen() + + # all client processes own the write end of the "alive" pipe; + # when they all terminate the read end becomes ready. + alive_r, alive_w = os.pipe() + try: + fds_to_pass = [listener.fileno(), alive_r] + cmd %= (listener.fileno(), alive_r, self._preload_modules, + data) + exe = spawn.get_executable() + args = [exe] + util._args_from_interpreter_flags() + args += ['-c', cmd] + pid = util.spawnv_passfds(exe, args, fds_to_pass) + except: + os.close(alive_w) + raise + finally: + os.close(alive_r) + self._forkserver_address = address + self._forkserver_alive_fd = alive_w + self._forkserver_pid = pid + +# +# +# + +def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): + '''Run forkserver.''' + if preload: + if '__main__' in preload and main_path is not None: + process.current_process()._inheriting = True + try: + spawn.import_main_path(main_path) + finally: + del process.current_process()._inheriting + for modname in preload: + try: + __import__(modname) + except ImportError: + pass + + util._close_stdin() + + sig_r, sig_w = os.pipe() + os.set_blocking(sig_r, False) + os.set_blocking(sig_w, False) + + def sigchld_handler(*_unused): + # Dummy signal handler, doesn't do anything + pass + + handlers = { + # unblocking SIGCHLD allows the wakeup fd to notify our event loop + signal.SIGCHLD: sigchld_handler, + # protect the process from ^C + signal.SIGINT: signal.SIG_IGN, + } + old_handlers = {sig: signal.signal(sig, val) + for (sig, val) in handlers.items()} + + # calling os.write() in the Python signal handler is racy + signal.set_wakeup_fd(sig_w) + + # map child pids to client fds + pid_to_fd = {} + + with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ + selectors.DefaultSelector() as selector: + _forkserver._forkserver_address = listener.getsockname() + + selector.register(listener, selectors.EVENT_READ) + selector.register(alive_r, selectors.EVENT_READ) + selector.register(sig_r, selectors.EVENT_READ) + + while True: + try: + while True: + rfds = [key.fileobj for (key, events) in selector.select()] + if rfds: + break + + if alive_r in rfds: + # EOF because no more client processes left + assert os.read(alive_r, 1) == b'', "Not at EOF?" 
+ raise SystemExit + + if sig_r in rfds: + # Got SIGCHLD + os.read(sig_r, 65536) # exhaust + while True: + # Scan for child processes + try: + pid, sts = os.waitpid(-1, os.WNOHANG) + except ChildProcessError: + break + if pid == 0: + break + child_w = pid_to_fd.pop(pid, None) + if child_w is not None: + if os.WIFSIGNALED(sts): + returncode = -os.WTERMSIG(sts) + else: + if not os.WIFEXITED(sts): + raise AssertionError( + "Child {0:n} status is {1:n}".format( + pid,sts)) + returncode = os.WEXITSTATUS(sts) + # Send exit code to client process + try: + write_signed(child_w, returncode) + except BrokenPipeError: + # client vanished + pass + os.close(child_w) + else: + # This shouldn't happen really + warnings.warn('forkserver: waitpid returned ' + 'unexpected pid %d' % pid) + + if listener in rfds: + # Incoming fork request + with listener.accept()[0] as s: + # Receive fds from client + fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) + if len(fds) > MAXFDS_TO_SEND: + raise RuntimeError( + "Too many ({0:n}) fds to send".format( + len(fds))) + child_r, child_w, *fds = fds + s.close() + pid = os.fork() + if pid == 0: + # Child + code = 1 + try: + listener.close() + selector.close() + unused_fds = [alive_r, child_w, sig_r, sig_w] + unused_fds.extend(pid_to_fd.values()) + code = _serve_one(child_r, fds, + unused_fds, + old_handlers) + except Exception: + sys.excepthook(*sys.exc_info()) + sys.stderr.flush() + finally: + os._exit(code) + else: + # Send pid to client process + try: + write_signed(child_w, pid) + except BrokenPipeError: + # client vanished + pass + pid_to_fd[pid] = child_w + os.close(child_r) + for fd in fds: + os.close(fd) + + except OSError as e: + if e.errno != errno.ECONNABORTED: + raise + + +def _serve_one(child_r, fds, unused_fds, handlers): + # close unnecessary stuff and reset signal handlers + signal.set_wakeup_fd(-1) + for sig, val in handlers.items(): + signal.signal(sig, val) + for fd in unused_fds: + os.close(fd) + + (_forkserver._forkserver_alive_fd, + resource_tracker._resource_tracker._fd, + *_forkserver._inherited_fds) = fds + + # Run process object received over pipe + parent_sentinel = os.dup(child_r) + code = spawn._main(child_r, parent_sentinel) + + return code + + +# +# Read and write signed numbers +# + +def read_signed(fd): + data = b'' + length = SIGNED_STRUCT.size + while len(data) < length: + s = os.read(fd, length - len(data)) + if not s: + raise EOFError('unexpected EOF') + data += s + return SIGNED_STRUCT.unpack(data)[0] + +def write_signed(fd, n): + msg = SIGNED_STRUCT.pack(n) + while msg: + nbytes = os.write(fd, msg) + if nbytes == 0: + raise RuntimeError('should not get here') + msg = msg[nbytes:] + +# +# +# + +_forkserver = ForkServer() +ensure_running = _forkserver.ensure_running +get_inherited_fds = _forkserver.get_inherited_fds +connect_to_new_process = _forkserver.connect_to_new_process +set_forkserver_preload = _forkserver.set_forkserver_preload diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/heap.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/heap.py new file mode 100644 index 0000000000000000000000000000000000000000..6217dfe12689b379f2dad6f1e4bc3bbf6af8f60a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/heap.py @@ -0,0 +1,337 @@ +# +# Module which supports allocation of memory from an mmap +# +# multiprocessing/heap.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
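+#
+# A minimal usage sketch (BufferWrapper is defined at the bottom of this
+# module; sharedctypes builds RawValue/RawArray on top of it):
+#
+#     from multiprocessing.heap import BufferWrapper
+#     wrapper = BufferWrapper(1024)        # allocate 1 KiB of shared memory
+#     view = wrapper.create_memoryview()   # writable view over the block
+#     view[:5] = b'hello'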
+# + +import bisect +from collections import defaultdict +import mmap +import os +import sys +import tempfile +import threading + +from .context import reduction, assert_spawning +from . import util + +__all__ = ['BufferWrapper'] + +# +# Inheritable class which wraps an mmap, and from which blocks can be allocated +# + +if sys.platform == 'win32': + + import _winapi + + class Arena(object): + """ + A shared memory area backed by anonymous memory (Windows). + """ + + _rand = tempfile._RandomNameSequence() + + def __init__(self, size): + self.size = size + for i in range(100): + name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) + buf = mmap.mmap(-1, size, tagname=name) + if _winapi.GetLastError() == 0: + break + # We have reopened a preexisting mmap. + buf.close() + else: + raise FileExistsError('Cannot find name for new mmap') + self.name = name + self.buffer = buf + self._state = (self.size, self.name) + + def __getstate__(self): + assert_spawning(self) + return self._state + + def __setstate__(self, state): + self.size, self.name = self._state = state + # Reopen existing mmap + self.buffer = mmap.mmap(-1, self.size, tagname=self.name) + # XXX Temporarily preventing buildbot failures while determining + # XXX the correct long-term fix. See issue 23060 + #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS + +else: + + class Arena(object): + """ + A shared memory area backed by a temporary file (POSIX). + """ + + if sys.platform == 'linux': + _dir_candidates = ['/dev/shm'] + else: + _dir_candidates = [] + + def __init__(self, size, fd=-1): + self.size = size + self.fd = fd + if fd == -1: + # Arena is created anew (if fd != -1, it means we're coming + # from rebuild_arena() below) + self.fd, name = tempfile.mkstemp( + prefix='pym-%d-'%os.getpid(), + dir=self._choose_dir(size)) + os.unlink(name) + util.Finalize(self, os.close, (self.fd,)) + os.ftruncate(self.fd, size) + self.buffer = mmap.mmap(self.fd, self.size) + + def _choose_dir(self, size): + # Choose a non-storage backed directory if possible, + # to improve performance + for d in self._dir_candidates: + st = os.statvfs(d) + if st.f_bavail * st.f_frsize >= size: # enough free space? 
+ return d + return util.get_temp_dir() + + def reduce_arena(a): + if a.fd == -1: + raise ValueError('Arena is unpicklable because ' + 'forking was enabled when it was created') + return rebuild_arena, (a.size, reduction.DupFd(a.fd)) + + def rebuild_arena(size, dupfd): + return Arena(size, dupfd.detach()) + + reduction.register(Arena, reduce_arena) + +# +# Class allowing allocation of chunks of memory from arenas +# + +class Heap(object): + + # Minimum malloc() alignment + _alignment = 8 + + _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB + _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 + + def __init__(self, size=mmap.PAGESIZE): + self._lastpid = os.getpid() + self._lock = threading.Lock() + # Current arena allocation size + self._size = size + # A sorted list of available block sizes in arenas + self._lengths = [] + + # Free block management: + # - map each block size to a list of `(Arena, start, stop)` blocks + self._len_to_seq = {} + # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block + # starting at that offset + self._start_to_block = {} + # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block + # ending at that offset + self._stop_to_block = {} + + # Map arenas to their `(Arena, start, stop)` blocks in use + self._allocated_blocks = defaultdict(set) + self._arenas = [] + + # List of pending blocks to free - see comment in free() below + self._pending_free_blocks = [] + + # Statistics + self._n_mallocs = 0 + self._n_frees = 0 + + @staticmethod + def _roundup(n, alignment): + # alignment must be a power of 2 + mask = alignment - 1 + return (n + mask) & ~mask + + def _new_arena(self, size): + # Create a new arena with at least the given *size* + length = self._roundup(max(self._size, size), mmap.PAGESIZE) + # We carve larger and larger arenas, for efficiency, until we + # reach a large-ish size (roughly L3 cache-sized) + if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: + self._size *= 2 + util.info('allocating a new mmap of length %d', length) + arena = Arena(length) + self._arenas.append(arena) + return (arena, 0, length) + + def _discard_arena(self, arena): + # Possibly delete the given (unused) arena + length = arena.size + # Reusing an existing arena is faster than creating a new one, so + # we only reclaim space if it's large enough. 
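+        # "large enough" means at least _DISCARD_FREE_SPACE_LARGER_THAN
+        # (4 MiB): smaller arenas are kept around for reuse.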
+ if length < self._DISCARD_FREE_SPACE_LARGER_THAN: + return + blocks = self._allocated_blocks.pop(arena) + assert not blocks + del self._start_to_block[(arena, 0)] + del self._stop_to_block[(arena, length)] + self._arenas.remove(arena) + seq = self._len_to_seq[length] + seq.remove((arena, 0, length)) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + def _malloc(self, size): + # returns a large enough block -- it might be much larger + i = bisect.bisect_left(self._lengths, size) + if i == len(self._lengths): + return self._new_arena(size) + else: + length = self._lengths[i] + seq = self._len_to_seq[length] + block = seq.pop() + if not seq: + del self._len_to_seq[length], self._lengths[i] + + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + return block + + def _add_free_block(self, block): + # make block available and try to merge with its neighbours in the arena + (arena, start, stop) = block + + try: + prev_block = self._stop_to_block[(arena, start)] + except KeyError: + pass + else: + start, _ = self._absorb(prev_block) + + try: + next_block = self._start_to_block[(arena, stop)] + except KeyError: + pass + else: + _, stop = self._absorb(next_block) + + block = (arena, start, stop) + length = stop - start + + try: + self._len_to_seq[length].append(block) + except KeyError: + self._len_to_seq[length] = [block] + bisect.insort(self._lengths, length) + + self._start_to_block[(arena, start)] = block + self._stop_to_block[(arena, stop)] = block + + def _absorb(self, block): + # deregister this block so it can be merged with a neighbour + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + + length = stop - start + seq = self._len_to_seq[length] + seq.remove(block) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + return start, stop + + def _remove_allocated_block(self, block): + arena, start, stop = block + blocks = self._allocated_blocks[arena] + blocks.remove((start, stop)) + if not blocks: + # Arena is entirely free, discard it from this process + self._discard_arena(arena) + + def _free_pending_blocks(self): + # Free all the blocks in the pending list - called with the lock held. + while True: + try: + block = self._pending_free_blocks.pop() + except IndexError: + break + self._add_free_block(block) + self._remove_allocated_block(block) + + def free(self, block): + # free a block returned by malloc() + # Since free() can be called asynchronously by the GC, it could happen + # that it's called while self._lock is held: in that case, + # self._lock.acquire() would deadlock (issue #12352). To avoid that, a + # trylock is used instead, and if the lock can't be acquired + # immediately, the block is added to a list of blocks to be freed + # synchronously sometimes later from malloc() or free(), by calling + # _free_pending_blocks() (appending and retrieving from a list is not + # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
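+        # A block must be freed in the process that allocated it; after a
+        # fork, malloc() reinitializes the heap instead of reusing the
+        # parent's bookkeeping.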
+ if os.getpid() != self._lastpid: + raise ValueError( + "My pid ({0:n}) is not last pid {1:n}".format( + os.getpid(),self._lastpid)) + if not self._lock.acquire(False): + # can't acquire the lock right now, add the block to the list of + # pending blocks to free + self._pending_free_blocks.append(block) + else: + # we hold the lock + try: + self._n_frees += 1 + self._free_pending_blocks() + self._add_free_block(block) + self._remove_allocated_block(block) + finally: + self._lock.release() + + def malloc(self, size): + # return a block of right size (possibly rounded up) + if size < 0: + raise ValueError("Size {0:n} out of range".format(size)) + if sys.maxsize <= size: + raise OverflowError("Size {0:n} too large".format(size)) + if os.getpid() != self._lastpid: + self.__init__() # reinitialize after fork + with self._lock: + self._n_mallocs += 1 + # allow pending blocks to be marked available + self._free_pending_blocks() + size = self._roundup(max(size, 1), self._alignment) + (arena, start, stop) = self._malloc(size) + real_stop = start + size + if real_stop < stop: + # if the returned block is larger than necessary, mark + # the remainder available + self._add_free_block((arena, real_stop, stop)) + self._allocated_blocks[arena].add((start, real_stop)) + return (arena, start, real_stop) + +# +# Class wrapping a block allocated out of a Heap -- can be inherited by child process +# + +class BufferWrapper(object): + + _heap = Heap() + + def __init__(self, size): + if size < 0: + raise ValueError("Size {0:n} out of range".format(size)) + if sys.maxsize <= size: + raise OverflowError("Size {0:n} too large".format(size)) + block = BufferWrapper._heap.malloc(size) + self._state = (block, size) + util.Finalize(self, BufferWrapper._heap.free, args=(block,)) + + def create_memoryview(self): + (arena, start, stop), size = self._state + return memoryview(arena.buffer)[start:start+size] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/managers.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/managers.py new file mode 100644 index 0000000000000000000000000000000000000000..85e0d886f56474dd00cc318bafef7b49ae633b75 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/managers.py @@ -0,0 +1,1404 @@ +# +# Module providing manager classes for dealing +# with shared objects +# +# multiprocessing/managers.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token', + 'SharedMemoryManager' ] + +# +# Imports +# + +import sys +import threading +import signal +import array +import queue +import time +import os +from os import getpid + +from traceback import format_exc + +from . import connection +from .context import reduction, get_spawning_popen, ProcessError +from . import pool +from . import process +from . import util +from . import get_context +try: + from . 
import shared_memory + HAS_SHMEM = True +except ImportError: + HAS_SHMEM = False + +# +# Register some things for pickling +# + +def reduce_array(a): + return array.array, (a.typecode, a.tobytes()) +reduction.register(array.array, reduce_array) + +view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] +if view_types[0] is not list: # only needed in Py3.0 + def rebuild_as_list(obj): + return list, (list(obj),) + for view_type in view_types: + reduction.register(view_type, rebuild_as_list) + +# +# Type for identifying shared objects +# + +class Token(object): + ''' + Type to uniquely identify a shared object + ''' + __slots__ = ('typeid', 'address', 'id') + + def __init__(self, typeid, address, id): + (self.typeid, self.address, self.id) = (typeid, address, id) + + def __getstate__(self): + return (self.typeid, self.address, self.id) + + def __setstate__(self, state): + (self.typeid, self.address, self.id) = state + + def __repr__(self): + return '%s(typeid=%r, address=%r, id=%r)' % \ + (self.__class__.__name__, self.typeid, self.address, self.id) + +# +# Function for communication with a manager's server process +# + +def dispatch(c, id, methodname, args=(), kwds={}): + ''' + Send a message to manager using connection `c` and return response + ''' + c.send((id, methodname, args, kwds)) + kind, result = c.recv() + if kind == '#RETURN': + return result + raise convert_to_error(kind, result) + +def convert_to_error(kind, result): + if kind == '#ERROR': + return result + elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'): + if not isinstance(result, str): + raise TypeError( + "Result {0!r} (kind '{1}') type is {2}, not str".format( + result, kind, type(result))) + if kind == '#UNSERIALIZABLE': + return RemoteError('Unserializable message: %s\n' % result) + else: + return RemoteError(result) + else: + return ValueError('Unrecognized message type {!r}'.format(kind)) + +class RemoteError(Exception): + def __str__(self): + return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) + +# +# Functions for finding the method names of an object +# + +def all_methods(obj): + ''' + Return a list of names of methods of `obj` + ''' + temp = [] + for name in dir(obj): + func = getattr(obj, name) + if callable(func): + temp.append(name) + return temp + +def public_methods(obj): + ''' + Return a list of names of methods of `obj` which do not start with '_' + ''' + return [name for name in all_methods(obj) if name[0] != '_'] + +# +# Server which is run in a process controlled by a manager +# + +class Server(object): + ''' + Server class which runs in a process controlled by a manager object + ''' + public = ['shutdown', 'create', 'accept_connection', 'get_methods', + 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] + + def __init__(self, registry, address, authkey, serializer): + if not isinstance(authkey, bytes): + raise TypeError( + "Authkey {0!r} is type {1!s}, not bytes".format( + authkey, type(authkey))) + self.registry = registry + self.authkey = process.AuthenticationString(authkey) + Listener, Client = listener_client[serializer] + + # do authentication later + self.listener = Listener(address=address, backlog=16) + self.address = self.listener.address + + self.id_to_obj = {'0': (None, ())} + self.id_to_refcount = {} + self.id_to_local_proxy_obj = {} + self.mutex = threading.Lock() + + def serve_forever(self): + ''' + Run the server forever + ''' + self.stop_event = threading.Event() + process.current_process()._manager_server = self + try: + accepter = 
threading.Thread(target=self.accepter) + accepter.daemon = True + accepter.start() + try: + while not self.stop_event.is_set(): + self.stop_event.wait(1) + except (KeyboardInterrupt, SystemExit): + pass + finally: + if sys.stdout != sys.__stdout__: # what about stderr? + util.debug('resetting stdout, stderr') + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + sys.exit(0) + + def accepter(self): + while True: + try: + c = self.listener.accept() + except OSError: + continue + t = threading.Thread(target=self.handle_request, args=(c,)) + t.daemon = True + t.start() + + def handle_request(self, c): + ''' + Handle a new connection + ''' + funcname = result = request = None + try: + connection.deliver_challenge(c, self.authkey) + connection.answer_challenge(c, self.authkey) + request = c.recv() + ignore, funcname, args, kwds = request + assert funcname in self.public, '%r unrecognized' % funcname + func = getattr(self, funcname) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + try: + result = func(c, *args, **kwds) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + msg = ('#RETURN', result) + try: + c.send(msg) + except Exception as e: + try: + c.send(('#TRACEBACK', format_exc())) + except Exception: + pass + util.info('Failure to send message: %r', msg) + util.info(' ... request was %r', request) + util.info(' ... exception was %r', e) + + c.close() + + def serve_client(self, conn): + ''' + Handle requests from the proxies in a particular process/thread + ''' + util.debug('starting server thread to service %r', + threading.current_thread().name) + + recv = conn.recv + send = conn.send + id_to_obj = self.id_to_obj + + while not self.stop_event.is_set(): + + try: + methodname = obj = None + request = recv() + ident, methodname, args, kwds = request + try: + obj, exposed, gettypeid = id_to_obj[ident] + except KeyError as ke: + try: + obj, exposed, gettypeid = \ + self.id_to_local_proxy_obj[ident] + except KeyError as second_ke: + raise ke + + if methodname not in exposed: + raise AttributeError( + 'method %r of %r object is not in exposed=%r' % + (methodname, type(obj), exposed) + ) + + function = getattr(obj, methodname) + + try: + res = function(*args, **kwds) + except Exception as e: + msg = ('#ERROR', e) + else: + typeid = gettypeid and gettypeid.get(methodname, None) + if typeid: + rident, rexposed = self.create(conn, typeid, res) + token = Token(typeid, self.address, rident) + msg = ('#PROXY', (rexposed, token)) + else: + msg = ('#RETURN', res) + + except AttributeError: + if methodname is None: + msg = ('#TRACEBACK', format_exc()) + else: + try: + fallback_func = self.fallback_mapping[methodname] + result = fallback_func( + self, conn, ident, obj, *args, **kwds + ) + msg = ('#RETURN', result) + except Exception: + msg = ('#TRACEBACK', format_exc()) + + except EOFError: + util.debug('got EOF -- exiting thread serving %r', + threading.current_thread().name) + sys.exit(0) + + except Exception: + msg = ('#TRACEBACK', format_exc()) + + try: + try: + send(msg) + except Exception as e: + send(('#UNSERIALIZABLE', format_exc())) + except Exception as e: + util.info('exception in thread serving %r', + threading.current_thread().name) + util.info(' ... message was %r', msg) + util.info(' ... 
exception was %r', e) + conn.close() + sys.exit(1) + + def fallback_getvalue(self, conn, ident, obj): + return obj + + def fallback_str(self, conn, ident, obj): + return str(obj) + + def fallback_repr(self, conn, ident, obj): + return repr(obj) + + fallback_mapping = { + '__str__':fallback_str, + '__repr__':fallback_repr, + '#GETVALUE':fallback_getvalue + } + + def dummy(self, c): + pass + + def debug_info(self, c): + ''' + Return some info --- useful to spot problems with refcounting + ''' + # Perhaps include debug info about 'c'? + with self.mutex: + result = [] + keys = list(self.id_to_refcount.keys()) + keys.sort() + for ident in keys: + if ident != '0': + result.append(' %s: refcount=%s\n %s' % + (ident, self.id_to_refcount[ident], + str(self.id_to_obj[ident][0])[:75])) + return '\n'.join(result) + + def number_of_objects(self, c): + ''' + Number of shared objects + ''' + # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0' + return len(self.id_to_refcount) + + def shutdown(self, c): + ''' + Shutdown this process + ''' + try: + util.debug('manager received shutdown message') + c.send(('#RETURN', None)) + except: + import traceback + traceback.print_exc() + finally: + self.stop_event.set() + + def create(*args, **kwds): + ''' + Create a new shared object and return its id + ''' + if len(args) >= 3: + self, c, typeid, *args = args + elif not args: + raise TypeError("descriptor 'create' of 'Server' object " + "needs an argument") + else: + if 'typeid' not in kwds: + raise TypeError('create expected at least 2 positional ' + 'arguments, got %d' % (len(args)-1)) + typeid = kwds.pop('typeid') + if len(args) >= 2: + self, c, *args = args + import warnings + warnings.warn("Passing 'typeid' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + if 'c' not in kwds: + raise TypeError('create expected at least 2 positional ' + 'arguments, got %d' % (len(args)-1)) + c = kwds.pop('c') + self, *args = args + import warnings + warnings.warn("Passing 'c' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + args = tuple(args) + + with self.mutex: + callable, exposed, method_to_typeid, proxytype = \ + self.registry[typeid] + + if callable is None: + if kwds or (len(args) != 1): + raise ValueError( + "Without callable, must have one non-keyword argument") + obj = args[0] + else: + obj = callable(*args, **kwds) + + if exposed is None: + exposed = public_methods(obj) + if method_to_typeid is not None: + if not isinstance(method_to_typeid, dict): + raise TypeError( + "Method_to_typeid {0!r}: type {1!s}, not dict".format( + method_to_typeid, type(method_to_typeid))) + exposed = list(exposed) + list(method_to_typeid) + + ident = '%x' % id(obj) # convert to string because xmlrpclib + # only has 32 bit signed integers + util.debug('%r callable returned object with id %r', typeid, ident) + + self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) + if ident not in self.id_to_refcount: + self.id_to_refcount[ident] = 0 + + self.incref(c, ident) + return ident, tuple(exposed) + create.__text_signature__ = '($self, c, typeid, /, *args, **kwds)' + + def get_methods(self, c, token): + ''' + Return the methods of the shared object indicated by token + ''' + return tuple(self.id_to_obj[token.id][1]) + + def accept_connection(self, c, name): + ''' + Spawn a new thread to serve this connection + ''' + threading.current_thread().name = name + c.send(('#RETURN', None)) + self.serve_client(c) + + def incref(self, c, ident): + with self.mutex: + try: + 
self.id_to_refcount[ident] += 1 + except KeyError as ke: + # If no external references exist but an internal (to the + # manager) still does and a new external reference is created + # from it, restore the manager's tracking of it from the + # previously stashed internal ref. + if ident in self.id_to_local_proxy_obj: + self.id_to_refcount[ident] = 1 + self.id_to_obj[ident] = \ + self.id_to_local_proxy_obj[ident] + obj, exposed, gettypeid = self.id_to_obj[ident] + util.debug('Server re-enabled tracking & INCREF %r', ident) + else: + raise ke + + def decref(self, c, ident): + if ident not in self.id_to_refcount and \ + ident in self.id_to_local_proxy_obj: + util.debug('Server DECREF skipping %r', ident) + return + + with self.mutex: + if self.id_to_refcount[ident] <= 0: + raise AssertionError( + "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format( + ident, self.id_to_obj[ident], + self.id_to_refcount[ident])) + self.id_to_refcount[ident] -= 1 + if self.id_to_refcount[ident] == 0: + del self.id_to_refcount[ident] + + if ident not in self.id_to_refcount: + # Two-step process in case the object turns out to contain other + # proxy objects (e.g. a managed list of managed lists). + # Otherwise, deleting self.id_to_obj[ident] would trigger the + # deleting of the stored value (another managed object) which would + # in turn attempt to acquire the mutex that is already held here. + self.id_to_obj[ident] = (None, (), None) # thread-safe + util.debug('disposing of obj with id %r', ident) + with self.mutex: + del self.id_to_obj[ident] + + +# +# Class to represent state of a manager +# + +class State(object): + __slots__ = ['value'] + INITIAL = 0 + STARTED = 1 + SHUTDOWN = 2 + +# +# Mapping from serializer name to Listener and Client types +# + +listener_client = { + 'pickle' : (connection.Listener, connection.Client), + 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) + } + +# +# Definition of BaseManager +# + +class BaseManager(object): + ''' + Base class for managers + ''' + _registry = {} + _Server = Server + + def __init__(self, address=None, authkey=None, serializer='pickle', + ctx=None): + if authkey is None: + authkey = process.current_process().authkey + self._address = address # XXX not final address if eg ('', 0) + self._authkey = process.AuthenticationString(authkey) + self._state = State() + self._state.value = State.INITIAL + self._serializer = serializer + self._Listener, self._Client = listener_client[serializer] + self._ctx = ctx or get_context() + + def get_server(self): + ''' + Return server object with serve_forever() method and address attribute + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return Server(self._registry, self._address, + self._authkey, self._serializer) + + def connect(self): + ''' + Connect manager object to the server process + ''' + Listener, Client = listener_client[self._serializer] + conn = Client(self._address, authkey=self._authkey) + dispatch(conn, None, 'dummy') + self._state.value = State.STARTED + + def start(self, initializer=None, initargs=()): + ''' + Spawn a server process for this manager object + ''' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started server") + elif self._state.value == State.SHUTDOWN: + raise 
ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + # pipe over which we will retrieve address of server + reader, writer = connection.Pipe(duplex=False) + + # spawn process which runs a server + self._process = self._ctx.Process( + target=type(self)._run_server, + args=(self._registry, self._address, self._authkey, + self._serializer, writer, initializer, initargs), + ) + ident = ':'.join(str(i) for i in self._process._identity) + self._process.name = type(self).__name__ + '-' + ident + self._process.start() + + # get address of server + writer.close() + self._address = reader.recv() + reader.close() + + # register a finalizer + self._state.value = State.STARTED + self.shutdown = util.Finalize( + self, type(self)._finalize_manager, + args=(self._process, self._address, self._authkey, + self._state, self._Client), + exitpriority=0 + ) + + @classmethod + def _run_server(cls, registry, address, authkey, serializer, writer, + initializer=None, initargs=()): + ''' + Create a server, report its address and run it + ''' + # bpo-36368: protect server process from KeyboardInterrupt signals + signal.signal(signal.SIGINT, signal.SIG_IGN) + + if initializer is not None: + initializer(*initargs) + + # create server + server = cls._Server(registry, address, authkey, serializer) + + # inform parent process of the server's address + writer.send(server.address) + writer.close() + + # run the manager + util.info('manager serving at %r', server.address) + server.serve_forever() + + def _create(self, typeid, /, *args, **kwds): + ''' + Create a new shared object; return the token and exposed tuple + ''' + assert self._state.value == State.STARTED, 'server not yet started' + conn = self._Client(self._address, authkey=self._authkey) + try: + id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) + finally: + conn.close() + return Token(typeid, self._address, id), exposed + + def join(self, timeout=None): + ''' + Join the manager process (if it has been spawned) + ''' + if self._process is not None: + self._process.join(timeout) + if not self._process.is_alive(): + self._process = None + + def _debug_info(self): + ''' + Return some info about the servers shared objects and connections + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'debug_info') + finally: + conn.close() + + def _number_of_objects(self): + ''' + Return the number of shared objects + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'number_of_objects') + finally: + conn.close() + + def __enter__(self): + if self._state.value == State.INITIAL: + self.start() + if self._state.value != State.STARTED: + if self._state.value == State.INITIAL: + raise ProcessError("Unable to start server") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("Manager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown() + + @staticmethod + def _finalize_manager(process, address, authkey, state, _Client): + ''' + Shutdown the manager process; will be registered as a finalizer + ''' + if process.is_alive(): + util.info('sending shutdown message to manager') + try: + conn = _Client(address, authkey=authkey) + try: + dispatch(conn, None, 
'shutdown') + finally: + conn.close() + except Exception: + pass + + process.join(timeout=1.0) + if process.is_alive(): + util.info('manager still alive') + if hasattr(process, 'terminate'): + util.info('trying to `terminate()` manager process') + process.terminate() + process.join(timeout=0.1) + if process.is_alive(): + util.info('manager still alive after terminate') + + state.value = State.SHUTDOWN + try: + del BaseProxy._address_to_local[address] + except KeyError: + pass + + @property + def address(self): + return self._address + + @classmethod + def register(cls, typeid, callable=None, proxytype=None, exposed=None, + method_to_typeid=None, create_method=True): + ''' + Register a typeid with the manager type + ''' + if '_registry' not in cls.__dict__: + cls._registry = cls._registry.copy() + + if proxytype is None: + proxytype = AutoProxy + + exposed = exposed or getattr(proxytype, '_exposed_', None) + + method_to_typeid = method_to_typeid or \ + getattr(proxytype, '_method_to_typeid_', None) + + if method_to_typeid: + for key, value in list(method_to_typeid.items()): # isinstance? + assert type(key) is str, '%r is not a string' % key + assert type(value) is str, '%r is not a string' % value + + cls._registry[typeid] = ( + callable, exposed, method_to_typeid, proxytype + ) + + if create_method: + def temp(self, /, *args, **kwds): + util.debug('requesting creation of a shared %r object', typeid) + token, exp = self._create(typeid, *args, **kwds) + proxy = proxytype( + token, self._serializer, manager=self, + authkey=self._authkey, exposed=exp + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + temp.__name__ = typeid + setattr(cls, typeid, temp) + +# +# Subclass of set which get cleared after a fork +# + +class ProcessLocalSet(set): + def __init__(self): + util.register_after_fork(self, lambda obj: obj.clear()) + def __reduce__(self): + return type(self), () + +# +# Definition of BaseProxy +# + +class BaseProxy(object): + ''' + A base for proxies of shared objects + ''' + _address_to_local = {} + _mutex = util.ForkAwareThreadLock() + + def __init__(self, token, serializer, manager=None, + authkey=None, exposed=None, incref=True, manager_owned=False): + with BaseProxy._mutex: + tls_idset = BaseProxy._address_to_local.get(token.address, None) + if tls_idset is None: + tls_idset = util.ForkAwareLocal(), ProcessLocalSet() + BaseProxy._address_to_local[token.address] = tls_idset + + # self._tls is used to record the connection used by this + # thread to communicate with the manager at token.address + self._tls = tls_idset[0] + + # self._idset is used to record the identities of all shared + # objects for which the current process owns references and + # which are in the manager at token.address + self._idset = tls_idset[1] + + self._token = token + self._id = self._token.id + self._manager = manager + self._serializer = serializer + self._Client = listener_client[serializer][1] + + # Should be set to True only when a proxy object is being created + # on the manager server; primary use case: nested proxy objects. + # RebuildProxy detects when a proxy is being created on the manager + # and sets this value appropriately. 
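+        # Hypothetical illustration of the nested case: storing one managed
+        # object inside another ships its proxy to the server, where
+        # RebuildProxy recreates it with manager_owned=True:
+        #
+        #   >>> m = SyncManager(); m.start()
+        #   >>> outer, inner = m.list(), m.list()
+        #   >>> outer.append(inner)   # 'inner' proxy rebuilt server-side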
+ self._owned_by_manager = manager_owned + + if authkey is not None: + self._authkey = process.AuthenticationString(authkey) + elif self._manager is not None: + self._authkey = self._manager._authkey + else: + self._authkey = process.current_process().authkey + + if incref: + self._incref() + + util.register_after_fork(self, BaseProxy._after_fork) + + def _connect(self): + util.debug('making connection to manager') + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'accept_connection', (name,)) + self._tls.connection = conn + + def _callmethod(self, methodname, args=(), kwds={}): + ''' + Try to call a method of the referent and return a copy of the result + ''' + try: + conn = self._tls.connection + except AttributeError: + util.debug('thread %r does not own a connection', + threading.current_thread().name) + self._connect() + conn = self._tls.connection + + conn.send((self._id, methodname, args, kwds)) + kind, result = conn.recv() + + if kind == '#RETURN': + return result + elif kind == '#PROXY': + exposed, token = result + proxytype = self._manager._registry[token.typeid][-1] + token.address = self._token.address + proxy = proxytype( + token, self._serializer, manager=self._manager, + authkey=self._authkey, exposed=exposed + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + raise convert_to_error(kind, result) + + def _getvalue(self): + ''' + Get a copy of the value of the referent + ''' + return self._callmethod('#GETVALUE') + + def _incref(self): + if self._owned_by_manager: + util.debug('owned_by_manager skipped INCREF of %r', self._token.id) + return + + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'incref', (self._id,)) + util.debug('INCREF %r', self._token.id) + + self._idset.add(self._id) + + state = self._manager and self._manager._state + + self._close = util.Finalize( + self, BaseProxy._decref, + args=(self._token, self._authkey, state, + self._tls, self._idset, self._Client), + exitpriority=10 + ) + + @staticmethod + def _decref(token, authkey, state, tls, idset, _Client): + idset.discard(token.id) + + # check whether manager is still alive + if state is None or state.value == State.STARTED: + # tell manager this process no longer cares about referent + try: + util.debug('DECREF %r', token.id) + conn = _Client(token.address, authkey=authkey) + dispatch(conn, None, 'decref', (token.id,)) + except Exception as e: + util.debug('... 
decref failed %s', e) + + else: + util.debug('DECREF %r -- manager already shutdown', token.id) + + # check whether we can close this thread's connection because + # the process owns no more references to objects for this manager + if not idset and hasattr(tls, 'connection'): + util.debug('thread %r has no more proxies so closing conn', + threading.current_thread().name) + tls.connection.close() + del tls.connection + + def _after_fork(self): + self._manager = None + try: + self._incref() + except Exception as e: + # the proxy may just be for a manager which has shutdown + util.info('incref failed: %s' % e) + + def __reduce__(self): + kwds = {} + if get_spawning_popen() is not None: + kwds['authkey'] = self._authkey + + if getattr(self, '_isauto', False): + kwds['exposed'] = self._exposed_ + return (RebuildProxy, + (AutoProxy, self._token, self._serializer, kwds)) + else: + return (RebuildProxy, + (type(self), self._token, self._serializer, kwds)) + + def __deepcopy__(self, memo): + return self._getvalue() + + def __repr__(self): + return '<%s object, typeid %r at %#x>' % \ + (type(self).__name__, self._token.typeid, id(self)) + + def __str__(self): + ''' + Return representation of the referent (or a fall-back if that fails) + ''' + try: + return self._callmethod('__repr__') + except Exception: + return repr(self)[:-1] + "; '__str__()' failed>" + +# +# Function used for unpickling +# + +def RebuildProxy(func, token, serializer, kwds): + ''' + Function used for unpickling proxy objects. + ''' + server = getattr(process.current_process(), '_manager_server', None) + if server and server.address == token.address: + util.debug('Rebuild a proxy owned by manager, token=%r', token) + kwds['manager_owned'] = True + if token.id not in server.id_to_local_proxy_obj: + server.id_to_local_proxy_obj[token.id] = \ + server.id_to_obj[token.id] + incref = ( + kwds.pop('incref', True) and + not getattr(process.current_process(), '_inheriting', False) + ) + return func(token, serializer, incref=incref, **kwds) + +# +# Functions to create proxies and proxy types +# + +def MakeProxyType(name, exposed, _cache={}): + ''' + Return a proxy type whose methods are given by `exposed` + ''' + exposed = tuple(exposed) + try: + return _cache[(name, exposed)] + except KeyError: + pass + + dic = {} + + for meth in exposed: + exec('''def %s(self, /, *args, **kwds): + return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) + + ProxyType = type(name, (BaseProxy,), dic) + ProxyType._exposed_ = exposed + _cache[(name, exposed)] = ProxyType + return ProxyType + + +def AutoProxy(token, serializer, manager=None, authkey=None, + exposed=None, incref=True): + ''' + Return an auto-proxy for `token` + ''' + _Client = listener_client[serializer][1] + + if exposed is None: + conn = _Client(token.address, authkey=authkey) + try: + exposed = dispatch(conn, None, 'get_methods', (token,)) + finally: + conn.close() + + if authkey is None and manager is not None: + authkey = manager._authkey + if authkey is None: + authkey = process.current_process().authkey + + ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) + proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, + incref=incref) + proxy._isauto = True + return proxy + +# +# Types/callables which we will register with SyncManager +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not 
name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + def get(self): + return self._value + def set(self, value): + self._value = value + def __repr__(self): + return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) + value = property(get, set) + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +# +# Proxy types used by SyncManager +# + +class IteratorProxy(BaseProxy): + _exposed_ = ('__next__', 'send', 'throw', 'close') + def __iter__(self): + return self + def __next__(self, *args): + return self._callmethod('__next__', args) + def send(self, *args): + return self._callmethod('send', args) + def throw(self, *args): + return self._callmethod('throw', args) + def close(self, *args): + return self._callmethod('close', args) + + +class AcquirerProxy(BaseProxy): + _exposed_ = ('acquire', 'release') + def acquire(self, blocking=True, timeout=None): + args = (blocking,) if timeout is None else (blocking, timeout) + return self._callmethod('acquire', args) + def release(self): + return self._callmethod('release') + def __enter__(self): + return self._callmethod('acquire') + def __exit__(self, exc_type, exc_val, exc_tb): + return self._callmethod('release') + + +class ConditionProxy(AcquirerProxy): + _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def notify(self, n=1): + return self._callmethod('notify', (n,)) + def notify_all(self): + return self._callmethod('notify_all') + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = time.monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - time.monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +class EventProxy(BaseProxy): + _exposed_ = ('is_set', 'set', 'clear', 'wait') + def is_set(self): + return self._callmethod('is_set') + def set(self): + return self._callmethod('set') + def clear(self): + return self._callmethod('clear') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + + +class BarrierProxy(BaseProxy): + _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset') + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + def abort(self): + return self._callmethod('abort') + def reset(self): + return self._callmethod('reset') + @property + def parties(self): + return self._callmethod('__getattribute__', ('parties',)) + @property + def n_waiting(self): + return self._callmethod('__getattribute__', ('n_waiting',)) + @property + def broken(self): + return self._callmethod('__getattribute__', ('broken',)) + + +class NamespaceProxy(BaseProxy): + _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') + def __getattr__(self, key): + if key[0] == '_': + return object.__getattribute__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__getattribute__', (key,)) + def __setattr__(self, key, value): + if key[0] == '_': + return object.__setattr__(self, key, value) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__setattr__', (key, value)) + def 
__delattr__(self, key): + if key[0] == '_': + return object.__delattr__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__delattr__', (key,)) + + +class ValueProxy(BaseProxy): + _exposed_ = ('get', 'set') + def get(self): + return self._callmethod('get') + def set(self, value): + return self._callmethod('set', (value,)) + value = property(get, set) + + +BaseListProxy = MakeProxyType('BaseListProxy', ( + '__add__', '__contains__', '__delitem__', '__getitem__', '__len__', + '__mul__', '__reversed__', '__rmul__', '__setitem__', + 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', + 'reverse', 'sort', '__imul__' + )) +class ListProxy(BaseListProxy): + def __iadd__(self, value): + self._callmethod('extend', (value,)) + return self + def __imul__(self, value): + self._callmethod('__imul__', (value,)) + return self + + +DictProxy = MakeProxyType('DictProxy', ( + '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__', + '__setitem__', 'clear', 'copy', 'get', 'items', + 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' + )) +DictProxy._method_to_typeid_ = { + '__iter__': 'Iterator', + } + + +ArrayProxy = MakeProxyType('ArrayProxy', ( + '__len__', '__getitem__', '__setitem__' + )) + + +BasePoolProxy = MakeProxyType('PoolProxy', ( + 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', + 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', + )) +BasePoolProxy._method_to_typeid_ = { + 'apply_async': 'AsyncResult', + 'map_async': 'AsyncResult', + 'starmap_async': 'AsyncResult', + 'imap': 'Iterator', + 'imap_unordered': 'Iterator' + } +class PoolProxy(BasePoolProxy): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + +# +# Definition of SyncManager +# + +class SyncManager(BaseManager): + ''' + Subclass of `BaseManager` which supports a number of shared object types. + + The types registered are those intended for the synchronization + of threads, plus `dict`, `list` and `Namespace`. + + The `multiprocessing.Manager()` function creates started instances of + this class. + ''' + +SyncManager.register('Queue', queue.Queue) +SyncManager.register('JoinableQueue', queue.Queue) +SyncManager.register('Event', threading.Event, EventProxy) +SyncManager.register('Lock', threading.Lock, AcquirerProxy) +SyncManager.register('RLock', threading.RLock, AcquirerProxy) +SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) +SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, + AcquirerProxy) +SyncManager.register('Condition', threading.Condition, ConditionProxy) +SyncManager.register('Barrier', threading.Barrier, BarrierProxy) +SyncManager.register('Pool', pool.Pool, PoolProxy) +SyncManager.register('list', list, ListProxy) +SyncManager.register('dict', dict, DictProxy) +SyncManager.register('Value', Value, ValueProxy) +SyncManager.register('Array', Array, ArrayProxy) +SyncManager.register('Namespace', Namespace, NamespaceProxy) + +# types returned by methods of PoolProxy +SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) +SyncManager.register('AsyncResult', create_method=False) + +# +# Definition of SharedMemoryManager and SharedMemoryServer +# + +if HAS_SHMEM: + class _SharedMemoryTracker: + "Manages one or more shared memory segments." 
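+        # Rough lifecycle sketch (illustrative; the names are made up):
+        #
+        #   >>> t = _SharedMemoryTracker("shm_example_ctx")
+        #   >>> t.register_segment("psm_hypothetical")
+        #   >>> t.unlink()   # calls destroy_segment() on every tracked name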
+
+        def __init__(self, name, segment_names=[]):
+            self.shared_memory_context_name = name
+            self.segment_names = segment_names
+
+        def register_segment(self, segment_name):
+            "Adds the supplied shared memory block name to tracker."
+            util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
+            self.segment_names.append(segment_name)
+
+        def destroy_segment(self, segment_name):
+            """Calls unlink() on the shared memory block with the supplied name
+            and removes it from the list of blocks being tracked."""
+            util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
+            self.segment_names.remove(segment_name)
+            segment = shared_memory.SharedMemory(segment_name)
+            segment.close()
+            segment.unlink()
+
+        def unlink(self):
+            "Calls destroy_segment() on all tracked shared memory blocks."
+            for segment_name in self.segment_names[:]:
+                self.destroy_segment(segment_name)
+
+        def __del__(self):
+            util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
+            self.unlink()
+
+        def __getstate__(self):
+            return (self.shared_memory_context_name, self.segment_names)
+
+        def __setstate__(self, state):
+            self.__init__(*state)
+
+
+    class SharedMemoryServer(Server):
+
+        public = Server.public + \
+                 ['track_segment', 'release_segment', 'list_segments']
+
+        def __init__(self, *args, **kwargs):
+            Server.__init__(self, *args, **kwargs)
+            address = self.address
+            # The address of Linux abstract namespaces can be bytes
+            if isinstance(address, bytes):
+                address = os.fsdecode(address)
+            self.shared_memory_context = \
+                _SharedMemoryTracker(f"shm_{address}_{getpid()}")
+            util.debug(f"SharedMemoryServer started by pid {getpid()}")
+
+        def create(*args, **kwargs):
+            """Create a new distributed-shared object (not backed by a shared
+            memory block) and return its id to be used in a Proxy Object."""
+            # Unless set up as a shared proxy, don't make shared_memory_context
+            # a standard part of kwargs.  This makes things easier for supplying
+            # simple functions.
+            if len(args) >= 3:
+                # Bug fix: the original assigned args[2] to a misspelled
+                # name ('typeod') and never bound 'self'; bind both here.
+                self, typeid = args[0], args[2]
+            elif args and 'typeid' in kwargs:
+                self, typeid = args[0], kwargs['typeid']
+            elif not args:
+                raise TypeError("descriptor 'create' of 'SharedMemoryServer' "
+                                "object needs an argument")
+            else:
+                raise TypeError('create expected at least 2 positional '
+                                'arguments, got %d' % (len(args)-1))
+            if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
+                kwargs['shared_memory_context'] = self.shared_memory_context
+            return Server.create(*args, **kwargs)
+        create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)'
+
+        def shutdown(self, c):
+            "Call unlink() on all tracked shared memory, terminate the Server."
+            self.shared_memory_context.unlink()
+            return Server.shutdown(self, c)
+
+        def track_segment(self, c, segment_name):
+            "Adds the supplied shared memory block name to Server's tracker."
+            self.shared_memory_context.register_segment(segment_name)
+
+        def release_segment(self, c, segment_name):
+            """Calls unlink() on the shared memory block with the supplied name
+            and removes it from the tracker instance inside the Server."""
+            self.shared_memory_context.destroy_segment(segment_name)
+
+        def list_segments(self, c):
+            """Returns a list of names of shared memory blocks that the Server
+            is currently tracking."""
+            return self.shared_memory_context.segment_names
+
+
+    class SharedMemoryManager(BaseManager):
+        """Like SyncManager but uses SharedMemoryServer instead of Server.
+ + It provides methods for creating and returning SharedMemory instances + and for creating a list-like object (ShareableList) backed by shared + memory. It also provides methods that create and return Proxy Objects + that support synchronization across processes (i.e. multi-process-safe + locks and semaphores). + """ + + _Server = SharedMemoryServer + + def __init__(self, *args, **kwargs): + if os.name == "posix": + # bpo-36867: Ensure the resource_tracker is running before + # launching the manager process, so that concurrent + # shared_memory manipulation both in the manager and in the + # current process does not create two resource_tracker + # processes. + from . import resource_tracker + resource_tracker.ensure_running() + BaseManager.__init__(self, *args, **kwargs) + util.debug(f"{self.__class__.__name__} created by pid {getpid()}") + + def __del__(self): + util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}") + pass + + def get_server(self): + 'Better than monkeypatching for now; merge into Server ultimately' + if self._state.value != State.INITIAL: + if self._state.value == State.STARTED: + raise ProcessError("Already started SharedMemoryServer") + elif self._state.value == State.SHUTDOWN: + raise ProcessError("SharedMemoryManager has shut down") + else: + raise ProcessError( + "Unknown state {!r}".format(self._state.value)) + return self._Server(self._registry, self._address, + self._authkey, self._serializer) + + def SharedMemory(self, size): + """Returns a new SharedMemory instance with the specified size in + bytes, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sms = shared_memory.SharedMemory(None, create=True, size=size) + try: + dispatch(conn, None, 'track_segment', (sms.name,)) + except BaseException as e: + sms.unlink() + raise e + return sms + + def ShareableList(self, sequence): + """Returns a new ShareableList instance populated with the values + from the input sequence, to be tracked by the manager.""" + with self._Client(self._address, authkey=self._authkey) as conn: + sl = shared_memory.ShareableList(sequence) + try: + dispatch(conn, None, 'track_segment', (sl.shm.name,)) + except BaseException as e: + sl.shm.unlink() + raise e + return sl diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/popen_forkserver.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/popen_forkserver.py new file mode 100644 index 0000000000000000000000000000000000000000..a56eb9bf11080b0d499ae81cfef7b501545cb90e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/popen_forkserver.py @@ -0,0 +1,74 @@ +import io +import os + +from .context import reduction, set_spawning_popen +if not reduction.HAVE_SEND_HANDLE: + raise ImportError('No support for sending fds between processes') +from . import forkserver +from . import popen_fork +from . import spawn +from . 
import util + + +__all__ = ['Popen'] + +# +# Wrapper for an fd used while launching a process +# + +class _DupFd(object): + def __init__(self, ind): + self.ind = ind + def detach(self): + return forkserver.get_inherited_fds()[self.ind] + +# +# Start child process using a server process +# + +class Popen(popen_fork.Popen): + method = 'forkserver' + DupFd = _DupFd + + def __init__(self, process_obj): + self._fds = [] + super().__init__(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return len(self._fds) - 1 + + def _launch(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name) + buf = io.BytesIO() + set_spawning_popen(self) + try: + reduction.dump(prep_data, buf) + reduction.dump(process_obj, buf) + finally: + set_spawning_popen(None) + + self.sentinel, w = forkserver.connect_to_new_process(self._fds) + # Keep a duplicate of the data pipe's write end as a sentinel of the + # parent process used by the child process. + _parent_w = os.dup(w) + self.finalizer = util.Finalize(self, util.close_fds, + (_parent_w, self.sentinel)) + with open(w, 'wb', closefd=True) as f: + f.write(buf.getbuffer()) + self.pid = forkserver.read_signed(self.sentinel) + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + from multiprocessing.connection import wait + timeout = 0 if flag == os.WNOHANG else None + if not wait([self.sentinel], timeout): + return None + try: + self.returncode = forkserver.read_signed(self.sentinel) + except (OSError, EOFError): + # This should not happen usually, but perhaps the forkserver + # process itself got killed + self.returncode = 255 + + return self.returncode diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/queues.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/queues.py new file mode 100644 index 0000000000000000000000000000000000000000..d112db2cd981d116c226806ef3c84037d7b515e7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/queues.py @@ -0,0 +1,368 @@ +# +# Module implementing queues +# +# multiprocessing/queues.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] + +import sys +import os +import threading +import collections +import time +import weakref +import errno + +from queue import Empty, Full + +import _multiprocessing + +from . import connection +from . 
import context +_ForkingPickler = context.reduction.ForkingPickler + +from .util import debug, info, Finalize, register_after_fork, is_exiting + +# +# Queue type using a pipe, buffer and thread +# + +class Queue(object): + + def __init__(self, maxsize=0, *, ctx): + if maxsize <= 0: + # Can raise ImportError (see issues #3770 and #23400) + from .synchronize import SEM_VALUE_MAX as maxsize + self._maxsize = maxsize + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._opid = os.getpid() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + self._sem = ctx.BoundedSemaphore(maxsize) + # For use by concurrent.futures + self._ignore_epipe = False + + self._after_fork() + + if sys.platform != 'win32': + register_after_fork(self, Queue._after_fork) + + def __getstate__(self): + context.assert_spawning(self) + return (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) + + def __setstate__(self, state): + (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) = state + self._after_fork() + + def _after_fork(self): + debug('Queue._after_fork()') + self._notempty = threading.Condition(threading.Lock()) + self._buffer = collections.deque() + self._thread = None + self._jointhread = None + self._joincancelled = False + self._closed = False + self._close = None + self._send_bytes = self._writer.send_bytes + self._recv_bytes = self._reader.recv_bytes + self._poll = self._reader.poll + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._notempty.notify() + + def get(self, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if block and timeout is None: + with self._rlock: + res = self._recv_bytes() + self._sem.release() + else: + if block: + deadline = time.monotonic() + timeout + if not self._rlock.acquire(block, timeout): + raise Empty + try: + if block: + timeout = deadline - time.monotonic() + if not self._poll(timeout): + raise Empty + elif not self._poll(): + raise Empty + res = self._recv_bytes() + self._sem.release() + finally: + self._rlock.release() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def qsize(self): + # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() + return self._maxsize - self._sem._semlock._get_value() + + def empty(self): + return not self._poll() + + def full(self): + return self._sem._semlock._is_zero() + + def get_nowait(self): + return self.get(False) + + def put_nowait(self, obj): + return self.put(obj, False) + + def close(self): + self._closed = True + try: + self._reader.close() + finally: + close = self._close + if close: + self._close = None + close() + + def join_thread(self): + debug('Queue.join_thread()') + assert self._closed, "Queue {0!r} not closed".format(self) + if self._jointhread: + self._jointhread() + + def cancel_join_thread(self): + debug('Queue.cancel_join_thread()') + self._joincancelled = True + try: + self._jointhread.cancel() + except AttributeError: + pass + + def _start_thread(self): + debug('Queue._start_thread()') + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = 
threading.Thread(
+            target=Queue._feed,
+            args=(self._buffer, self._notempty, self._send_bytes,
+                  self._wlock, self._writer.close, self._ignore_epipe,
+                  self._on_queue_feeder_error, self._sem),
+            name='QueueFeederThread'
+        )
+        self._thread.daemon = True
+
+        debug('doing self._thread.start()')
+        self._thread.start()
+        debug('... done self._thread.start()')
+
+        if not self._joincancelled:
+            self._jointhread = Finalize(
+                self._thread, Queue._finalize_join,
+                [weakref.ref(self._thread)],
+                exitpriority=-5
+                )
+
+        # Send sentinel to the thread queue object when garbage collected
+        self._close = Finalize(
+            self, Queue._finalize_close,
+            [self._buffer, self._notempty],
+            exitpriority=10
+            )
+
+    @staticmethod
+    def _finalize_join(twr):
+        debug('joining queue thread')
+        thread = twr()
+        if thread is not None:
+            thread.join()
+            debug('... queue thread joined')
+        else:
+            debug('... queue thread already dead')
+
+    @staticmethod
+    def _finalize_close(buffer, notempty):
+        debug('telling queue thread to quit')
+        with notempty:
+            buffer.append(_sentinel)
+            notempty.notify()
+
+    @staticmethod
+    def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
+              onerror, queue_sem):
+        debug('starting thread to feed data to pipe')
+        nacquire = notempty.acquire
+        nrelease = notempty.release
+        nwait = notempty.wait
+        bpopleft = buffer.popleft
+        sentinel = _sentinel
+        if sys.platform != 'win32':
+            wacquire = writelock.acquire
+            wrelease = writelock.release
+        else:
+            wacquire = None
+
+        while 1:
+            try:
+                nacquire()
+                try:
+                    if not buffer:
+                        nwait()
+                finally:
+                    nrelease()
+                try:
+                    while 1:
+                        obj = bpopleft()
+                        if obj is sentinel:
+                            debug('feeder thread got sentinel -- exiting')
+                            close()
+                            return
+
+                        # serialize the data before acquiring the lock
+                        obj = _ForkingPickler.dumps(obj)
+                        if wacquire is None:
+                            send_bytes(obj)
+                        else:
+                            wacquire()
+                            try:
+                                send_bytes(obj)
+                            finally:
+                                wrelease()
+                except IndexError:
+                    pass
+            except Exception as e:
+                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
+                    return
+                # Since this runs in a daemon thread the resources it uses
+                # may become unusable while the process is cleaning up.
+                # We ignore errors which happen after the process has
+                # started to clean up.
+                if is_exiting():
+                    info('error in queue thread: %s', e)
+                    return
+                else:
+                    # Since the object has not been sent in the queue, we need
+                    # to decrease the size of the queue. The error acts as
+                    # if the object had been silently removed from the queue
+                    # and this step is necessary to have a properly working
+                    # queue.
+                    queue_sem.release()
+                    onerror(e, obj)
+
+    @staticmethod
+    def _on_queue_feeder_error(e, obj):
+        """
+        Private API hook called when feeding data in the background thread
+        raises an exception.  For overriding by concurrent.futures.
+        """
+        import traceback
+        traceback.print_exc()
+
+
+_sentinel = object()
+
+#
+# A queue type which also supports join() and task_done() methods
+#
+# Note that if you do not call task_done() for each finished task then
+# eventually the counter's semaphore may overflow causing Bad Things
+# to happen.
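+#
+# A typical consumer therefore pairs each get() with a task_done(), so
+# that join() can return once every item has been processed.  Sketch
+# (illustrative; process() is a hypothetical handler):
+#
+#     def worker(q):
+#         while True:
+#             item = q.get()
+#             process(item)
+#             q.task_done()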
+# + +class JoinableQueue(Queue): + + def __init__(self, maxsize=0, *, ctx): + Queue.__init__(self, maxsize, ctx=ctx) + self._unfinished_tasks = ctx.Semaphore(0) + self._cond = ctx.Condition() + + def __getstate__(self): + return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) + + def __setstate__(self, state): + Queue.__setstate__(self, state[:-2]) + self._cond, self._unfinished_tasks = state[-2:] + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty, self._cond: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._unfinished_tasks.release() + self._notempty.notify() + + def task_done(self): + with self._cond: + if not self._unfinished_tasks.acquire(False): + raise ValueError('task_done() called too many times') + if self._unfinished_tasks._semlock._is_zero(): + self._cond.notify_all() + + def join(self): + with self._cond: + if not self._unfinished_tasks._semlock._is_zero(): + self._cond.wait() + +# +# Simplified Queue type -- really just a locked pipe +# + +class SimpleQueue(object): + + def __init__(self, *, ctx): + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._poll = self._reader.poll + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + + def empty(self): + return not self._poll() + + def __getstate__(self): + context.assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock) = state + self._poll = self._reader.poll + + def get(self): + with self._rlock: + res = self._reader.recv_bytes() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def put(self, obj): + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self._writer.send_bytes(obj) + else: + with self._wlock: + self._writer.send_bytes(obj) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/reduction.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..5593f0682f7fce3dc90402187c1fd404aa2f8a42 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/reduction.py @@ -0,0 +1,281 @@ +# +# Module which deals with pickling of objects. +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +from abc import ABCMeta +import copyreg +import functools +import io +import os +import pickle +import socket +import sys + +from . 
import context + +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] + + +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + +# +# Pickler subclass +# + +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocessing.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args): + super().__init__(*args) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None): + buf = io.BytesIO() + cls(buf, protocol).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + +def dump(obj, file, protocol=None): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # Windows + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] + import _winapi + + def duplicate(handle, target_process=None, inheritable=False, + *, source_process=None): + '''Duplicate a handle. (target_process is a handle not a pid!)''' + current_process = _winapi.GetCurrentProcess() + if source_process is None: + source_process = current_process + if target_process is None: + target_process = current_process + return _winapi.DuplicateHandle( + source_process, handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) + conn.send(dh) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + return conn.recv().detach() + + class DupHandle(object): + '''Picklable wrapper for a handle.''' + def __init__(self, handle, access, pid=None): + if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. + pid = os.getpid() + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) + try: + self._handle = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), + handle, proc, access, False, 0) + finally: + _winapi.CloseHandle(proc) + self._access = access + self._pid = pid + + def detach(self): + '''Get the handle. This should only be called once.''' + # retrieve handle from process which currently owns it + if self._pid == os.getpid(): + # The handle has already been duplicated for this process. + return self._handle + # We must steal the handle from the process whose pid is self._pid. 
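+            # This mirrors steal_handle() above: open the owning process
+            # with PROCESS_DUP_HANDLE, then duplicate into this process with
+            # DUPLICATE_CLOSE_SOURCE so the source's copy is closed.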
+ proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, + self._pid) + try: + return _winapi.DuplicateHandle( + proc, self._handle, _winapi.GetCurrentProcess(), + self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(proc) + +else: + # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array + + # On MacOSX we should acknowledge receipt of fds -- see Issue14669 + ACKNOWLEDGE = sys.platform == 'darwin' + + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if ACKNOWLEDGE and sock.recv(1) != b'A': + raise RuntimeError('did not receive acknowledgement of fd') + + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size)) + if not msg and not ancdata: + raise EOFError + try: + if ACKNOWLEDGE: + sock.send(b'A') + if len(ancdata) != 1: + raise RuntimeError('received %d items of ancdata' % + len(ancdata)) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + if len(a) % 256 != msg[0]: + raise AssertionError( + "Len is {0:n} but msg[0] is {1!r}".format( + len(a), msg[0])) + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s: + return recvfds(s, 1)[0] + + def DupFd(fd): + '''Return a wrapper for an fd.''' + popen_obj = context.get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from . 
import resource_sharer + return resource_sharer.DupFd(fd) + else: + raise ValueError('SCM_RIGHTS appears not to be available') + +# +# Try making some callable types picklable +# + +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) + +# +# Make sockets picklable +# + +if sys.platform == 'win32': + def _reduce_socket(s): + from .resource_sharer import DupSocket + return _rebuild_socket, (DupSocket(s),) + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) + +else: + def _reduce_socket(s): + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + def _rebuild_socket(df, family, type, proto): + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) + + +class AbstractReducer(metaclass=ABCMeta): + '''Abstract base class for use in implementing a Reduction class + suitable for use in replacing the standard reduction mechanism + used in multiprocessing.''' + ForkingPickler = ForkingPickler + register = register + dump = dump + send_handle = send_handle + recv_handle = recv_handle + + if sys.platform == 'win32': + steal_handle = steal_handle + duplicate = duplicate + DupHandle = DupHandle + else: + sendfds = sendfds + recvfds = recvfds + DupFd = DupFd + + _reduce_method = _reduce_method + _reduce_method_descriptor = _reduce_method_descriptor + _rebuild_partial = _rebuild_partial + _reduce_socket = _reduce_socket + _rebuild_socket = _rebuild_socket + + def __init__(self, *args): + register(type(_C().f), _reduce_method) + register(type(list.append), _reduce_method_descriptor) + register(type(int.__add__), _reduce_method_descriptor) + register(functools.partial, _reduce_partial) + register(socket.socket, _reduce_socket) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/shared_memory.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/shared_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..e5b5e6c098a021d1f2c36f4cfaba677107fc2008 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/shared_memory.py @@ -0,0 +1,514 @@ +"""Provides shared memory for direct access across processes. + +The API of this package is currently provisional. Refer to the +documentation for details. +""" + + +__all__ = [ 'SharedMemory', 'ShareableList' ] + + +from functools import partial +import mmap +import os +import errno +import struct +import secrets + +if os.name == "nt": + import _winapi + _USE_POSIX = False +else: + import _posixshmem + _USE_POSIX = True + + +_O_CREX = os.O_CREAT | os.O_EXCL + +# FreeBSD (and perhaps other BSDs) limit names to 14 characters. 
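+# Worked budget for the constants below: both prefixes are 5 characters
+# long, so _make_filename() uses nbytes = (14 - 5) // 2 == 4, and
+# secrets.token_hex(4) emits 8 hex digits; a name such as '/psm_6d9f2c41'
+# (the token here is made up) therefore stays at 13 <= 14 characters.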
+_SHM_SAFE_NAME_LENGTH = 14 + +# Shared memory block name prefix +if _USE_POSIX: + _SHM_NAME_PREFIX = '/psm_' +else: + _SHM_NAME_PREFIX = 'wnsm_' + + +def _make_filename(): + "Create a random filename for the shared memory object." + # number of random bytes to use for name + nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2 + assert nbytes >= 2, '_SHM_NAME_PREFIX too long' + name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes) + assert len(name) <= _SHM_SAFE_NAME_LENGTH + return name + + +class SharedMemory: + """Creates a new shared memory block or attaches to an existing + shared memory block. + + Every shared memory block is assigned a unique name. This enables + one process to create a shared memory block with a particular name + so that a different process can attach to that same shared memory + block using that same name. + + As a resource for sharing data across processes, shared memory blocks + may outlive the original process that created them. When one process + no longer needs access to a shared memory block that might still be + needed by other processes, the close() method should be called. + When a shared memory block is no longer needed by any process, the + unlink() method should be called to ensure proper cleanup.""" + + # Defaults; enables close() and unlink() to run without errors. + _name = None + _fd = -1 + _mmap = None + _buf = None + _flags = os.O_RDWR + _mode = 0o600 + _prepend_leading_slash = True if _USE_POSIX else False + + def __init__(self, name=None, create=False, size=0): + if not size >= 0: + raise ValueError("'size' must be a positive integer") + if create: + self._flags = _O_CREX | os.O_RDWR + if size == 0: + raise ValueError("'size' must be a positive number different from zero") + if name is None and not self._flags & os.O_EXCL: + raise ValueError("'name' can only be None if create=True") + + if _USE_POSIX: + + # POSIX Shared Memory + + if name is None: + while True: + name = _make_filename() + try: + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + except FileExistsError: + continue + self._name = name + break + else: + name = "/" + name if self._prepend_leading_slash else name + self._fd = _posixshmem.shm_open( + name, + self._flags, + mode=self._mode + ) + self._name = name + try: + if create and size: + os.ftruncate(self._fd, size) + stats = os.fstat(self._fd) + size = stats.st_size + self._mmap = mmap.mmap(self._fd, size) + except OSError: + self.unlink() + raise + + from .resource_tracker import register + register(self._name, "shared_memory") + + else: + + # Windows Named Shared Memory + + if create: + while True: + temp_name = _make_filename() if name is None else name + # Create and reserve shared memory block with this name + # until it can be attached to by mmap. + h_map = _winapi.CreateFileMapping( + _winapi.INVALID_HANDLE_VALUE, + _winapi.NULL, + _winapi.PAGE_READWRITE, + (size >> 32) & 0xFFFFFFFF, + size & 0xFFFFFFFF, + temp_name + ) + try: + last_error_code = _winapi.GetLastError() + if last_error_code == _winapi.ERROR_ALREADY_EXISTS: + if name is not None: + raise FileExistsError( + errno.EEXIST, + os.strerror(errno.EEXIST), + name, + _winapi.ERROR_ALREADY_EXISTS + ) + else: + continue + self._mmap = mmap.mmap(-1, size, tagname=temp_name) + finally: + _winapi.CloseHandle(h_map) + self._name = temp_name + break + + else: + self._name = name + # Dynamically determine the existing named shared memory + # block's size which is likely a multiple of mmap.PAGESIZE. 
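+        # Sketch of the lifecycle this constructor supports (the payload
+        # and size are made up):
+        #
+        #   shm = SharedMemory(create=True, size=16)
+        #   shm.buf[:5] = b'hello'          # write through the memoryview
+        #
+        #   # a second process attaches purely by name:
+        #   other = SharedMemory(name=shm.name)
+        #   assert bytes(other.buf[:5]) == b'hello'
+        #
+        #   other.close()   # every attachment closes its own view,
+        #   shm.close()     # and exactly one of them calls unlink()
+        #   shm.unlink()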
+ h_map = _winapi.OpenFileMapping( + _winapi.FILE_MAP_READ, + False, + name + ) + try: + p_buf = _winapi.MapViewOfFile( + h_map, + _winapi.FILE_MAP_READ, + 0, + 0, + 0 + ) + finally: + _winapi.CloseHandle(h_map) + size = _winapi.VirtualQuerySize(p_buf) + self._mmap = mmap.mmap(-1, size, tagname=name) + + self._size = size + self._buf = memoryview(self._mmap) + + def __del__(self): + try: + self.close() + except OSError: + pass + + def __reduce__(self): + return ( + self.__class__, + ( + self.name, + False, + self.size, + ), + ) + + def __repr__(self): + return f'{self.__class__.__name__}({self.name!r}, size={self.size})' + + @property + def buf(self): + "A memoryview of contents of the shared memory block." + return self._buf + + @property + def name(self): + "Unique name that identifies the shared memory block." + reported_name = self._name + if _USE_POSIX and self._prepend_leading_slash: + if self._name.startswith("/"): + reported_name = self._name[1:] + return reported_name + + @property + def size(self): + "Size in bytes." + return self._size + + def close(self): + """Closes access to the shared memory from this instance but does + not destroy the shared memory block.""" + if self._buf is not None: + self._buf.release() + self._buf = None + if self._mmap is not None: + self._mmap.close() + self._mmap = None + if _USE_POSIX and self._fd >= 0: + os.close(self._fd) + self._fd = -1 + + def unlink(self): + """Requests that the underlying shared memory block be destroyed. + + In order to ensure proper cleanup of resources, unlink should be + called once (and only once) across all processes which have access + to the shared memory block.""" + if _USE_POSIX and self._name: + from .resource_tracker import unregister + _posixshmem.shm_unlink(self._name) + unregister(self._name, "shared_memory") + + +_encoding = "utf8" + +class ShareableList: + """Pattern for a mutable list-like object shareable via a shared + memory block. It differs from the built-in list type in that these + lists can not change their overall length (i.e. no append, insert, + etc.) 
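+
+    For example, a ShareableList built from [1, 2.0, 'x'] always reports
+    len() == 3; slot values (and even their types) can be replaced in
+    place, within the per-slot storage limits described below.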
+ + Because values are packed into a memoryview as bytes, the struct + packing format for any storable value must require no more than 8 + characters to describe its format.""" + + _types_mapping = { + int: "q", + float: "d", + bool: "xxxxxxx?", + str: "%ds", + bytes: "%ds", + None.__class__: "xxxxxx?x", + } + _alignment = 8 + _back_transforms_mapping = { + 0: lambda value: value, # int, float, bool + 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str + 2: lambda value: value.rstrip(b'\x00'), # bytes + 3: lambda _value: None, # None + } + + @staticmethod + def _extract_recreation_code(value): + """Used in concert with _back_transforms_mapping to convert values + into the appropriate Python objects when retrieving them from + the list as well as when storing them.""" + if not isinstance(value, (str, bytes, None.__class__)): + return 0 + elif isinstance(value, str): + return 1 + elif isinstance(value, bytes): + return 2 + else: + return 3 # NoneType + + def __init__(self, sequence=None, *, name=None): + if sequence is not None: + _formats = [ + self._types_mapping[type(item)] + if not isinstance(item, (str, bytes)) + else self._types_mapping[type(item)] % ( + self._alignment * (len(item) // self._alignment + 1), + ) + for item in sequence + ] + self._list_len = len(_formats) + assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len + self._allocated_bytes = tuple( + self._alignment if fmt[-1] != "s" else int(fmt[:-1]) + for fmt in _formats + ) + _recreation_codes = [ + self._extract_recreation_code(item) for item in sequence + ] + requested_size = struct.calcsize( + "q" + self._format_size_metainfo + + "".join(_formats) + + self._format_packing_metainfo + + self._format_back_transform_codes + ) + + else: + requested_size = 8 # Some platforms require > 0. + + if name is not None and sequence is None: + self.shm = SharedMemory(name) + else: + self.shm = SharedMemory(name, create=True, size=requested_size) + + if sequence is not None: + _enc = _encoding + struct.pack_into( + "q" + self._format_size_metainfo, + self.shm.buf, + 0, + self._list_len, + *(self._allocated_bytes) + ) + struct.pack_into( + "".join(_formats), + self.shm.buf, + self._offset_data_start, + *(v.encode(_enc) if isinstance(v, str) else v for v in sequence) + ) + struct.pack_into( + self._format_packing_metainfo, + self.shm.buf, + self._offset_packing_formats, + *(v.encode(_enc) for v in _formats) + ) + struct.pack_into( + self._format_back_transform_codes, + self.shm.buf, + self._offset_back_transform_codes, + *(_recreation_codes) + ) + + else: + self._list_len = len(self) # Obtains size from offset 0 in buffer. + self._allocated_bytes = struct.unpack_from( + self._format_size_metainfo, + self.shm.buf, + 1 * 8 + ) + + def _get_packing_format(self, position): + "Gets the packing format for a single value stored in the list." + position = position if position >= 0 else position + self._list_len + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + v = struct.unpack_from( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8 + )[0] + fmt = v.rstrip(b'\x00') + fmt_as_str = fmt.decode(_encoding) + + return fmt_as_str + + def _get_back_transform(self, position): + "Gets the back transformation function for a single value." 
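+        # Round-trip that the packing formats and back transforms give us
+        # (illustrative values):
+        #
+        #   sl = ShareableList([42, 3.14, 'hi', b'raw', None, True])
+        #   sl[2]                 # -> 'hi' (trailing b'\x00' padding is
+        #                         #    stripped, then decoded back to str)
+        #   sl[2] = 'bye'         # fits in the 8 bytes reserved for 'hi'
+        #   sl[0] = -1            # int slot, stays packed as 'q'
+        #   sl.shm.unlink()       # free the backing block when done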
+ + position = position if position >= 0 else position + self._list_len + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + transform_code = struct.unpack_from( + "b", + self.shm.buf, + self._offset_back_transform_codes + position + )[0] + transform_function = self._back_transforms_mapping[transform_code] + + return transform_function + + def _set_packing_format_and_transform(self, position, fmt_as_str, value): + """Sets the packing format and back transformation code for a + single value in the list at the specified position.""" + + position = position if position >= 0 else position + self._list_len + if (position >= self._list_len) or (self._list_len < 0): + raise IndexError("Requested position out of range.") + + struct.pack_into( + "8s", + self.shm.buf, + self._offset_packing_formats + position * 8, + fmt_as_str.encode(_encoding) + ) + + transform_code = self._extract_recreation_code(value) + struct.pack_into( + "b", + self.shm.buf, + self._offset_back_transform_codes + position, + transform_code + ) + + def __getitem__(self, position): + try: + offset = self._offset_data_start \ + + sum(self._allocated_bytes[:position]) + (v,) = struct.unpack_from( + self._get_packing_format(position), + self.shm.buf, + offset + ) + except IndexError: + raise IndexError("index out of range") + + back_transform = self._get_back_transform(position) + v = back_transform(v) + + return v + + def __setitem__(self, position, value): + try: + offset = self._offset_data_start \ + + sum(self._allocated_bytes[:position]) + current_format = self._get_packing_format(position) + except IndexError: + raise IndexError("assignment index out of range") + + if not isinstance(value, (str, bytes)): + new_format = self._types_mapping[type(value)] + encoded_value = value + else: + encoded_value = (value.encode(_encoding) + if isinstance(value, str) else value) + if len(encoded_value) > self._allocated_bytes[position]: + raise ValueError("bytes/str item exceeds available storage") + if current_format[-1] == "s": + new_format = current_format + else: + new_format = self._types_mapping[str] % ( + self._allocated_bytes[position], + ) + + self._set_packing_format_and_transform( + position, + new_format, + value + ) + struct.pack_into(new_format, self.shm.buf, offset, encoded_value) + + def __reduce__(self): + return partial(self.__class__, name=self.shm.name), () + + def __len__(self): + return struct.unpack_from("q", self.shm.buf, 0)[0] + + def __repr__(self): + return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})' + + @property + def format(self): + "The struct packing format used by all currently stored values." + return "".join( + self._get_packing_format(i) for i in range(self._list_len) + ) + + @property + def _format_size_metainfo(self): + "The struct packing format used for metainfo on storage sizes." + return f"{self._list_len}q" + + @property + def _format_packing_metainfo(self): + "The struct packing format used for the values' packing formats." + return "8s" * self._list_len + + @property + def _format_back_transform_codes(self): + "The struct packing format used for the values' back transforms." 
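+        # Layout recap for a 3-item list (see the _offset_* properties
+        # below): bytes 0..7 hold the list length as one 'q', bytes 8..31
+        # the three allocated sizes ('3q'), then the packed values
+        # themselves, then one 8-byte packing format per item ('8s'),
+        # and finally one back-transform code byte per item ('b').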
+ return "b" * self._list_len + + @property + def _offset_data_start(self): + return (self._list_len + 1) * 8 # 8 bytes per "q" + + @property + def _offset_packing_formats(self): + return self._offset_data_start + sum(self._allocated_bytes) + + @property + def _offset_back_transform_codes(self): + return self._offset_packing_formats + self._list_len * 8 + + def count(self, value): + "L.count(value) -> integer -- return number of occurrences of value." + + return sum(value == entry for entry in self) + + def index(self, value): + """L.index(value) -> integer -- return first index of value. + Raises ValueError if the value is not present.""" + + for position, entry in enumerate(self): + if value == entry: + return position + else: + raise ValueError(f"{value!r} not in this container") diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/synchronize.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/synchronize.py new file mode 100644 index 0000000000000000000000000000000000000000..d0be48f1fd7a8f14b935f74d89c168d831b84dbc --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/synchronize.py @@ -0,0 +1,394 @@ +# +# Module implementing synchronization primitives +# +# multiprocessing/synchronize.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' + ] + +import threading +import sys +import tempfile +import _multiprocessing +import time + +from . import context +from . import process +from . import util + +# Try to import the mp.synchronize module cleanly, if it fails +# raise ImportError for platforms lacking a working sem_open implementation. +# See issue 3770 +try: + from _multiprocessing import SemLock, sem_unlink +except (ImportError): + raise ImportError("This platform lacks a functioning sem_open" + + " implementation, therefore, the required" + + " synchronization primitives needed will not" + + " function, see issue 3770.") + +# +# Constants +# + +RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + +class SemLock(object): + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, *, ctx): + if ctx is None: + ctx = context._default_context.get_context() + name = ctx.get_start_method() + unlink_now = sys.platform == 'win32' or name == 'fork' + for i in range(100): + try: + sl = self._semlock = _multiprocessing.SemLock( + kind, value, maxvalue, self._make_name(), + unlink_now) + except FileExistsError: + pass + else: + break + else: + raise FileExistsError('cannot find name for semaphore') + + util.debug('created semlock with handle %s' % sl.handle) + self._make_methods() + + if sys.platform != 'win32': + def _after_fork(obj): + obj._semlock._after_fork() + util.register_after_fork(self, _after_fork) + + if self._semlock.name is not None: + # We only get here if we are on Unix with forking + # disabled. 
When the object is garbage collected or the + # process shuts down we unlink the semaphore name + from .resource_tracker import register + register(self._semlock.name, "semaphore") + util.Finalize(self, SemLock._cleanup, (self._semlock.name,), + exitpriority=0) + + @staticmethod + def _cleanup(name): + from .resource_tracker import unregister + sem_unlink(name) + unregister(name, "semaphore") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def __enter__(self): + return self._semlock.__enter__() + + def __exit__(self, *args): + return self._semlock.__exit__(*args) + + def __getstate__(self): + context.assert_spawning(self) + sl = self._semlock + if sys.platform == 'win32': + h = context.get_spawning_popen().duplicate_for_child(sl.handle) + else: + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _multiprocessing.SemLock._rebuild(*state) + util.debug('recreated blocker with handle %r' % state[0]) + self._make_methods() + + @staticmethod + def _make_name(): + return '%s-%s' % (process.current_process()._config['semprefix'], + next(SemLock._rand)) + +# +# Semaphore +# + +class Semaphore(SemLock): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) + + def get_value(self): + return self._semlock._get_value() + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s)>' % (self.__class__.__name__, value) + +# +# Bounded semaphore +# + +class BoundedSemaphore(Semaphore): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s, maxvalue=%s)>' % \ + (self.__class__.__name__, value, self._semlock.maxvalue) + +# +# Non-recursive lock +# + +class Lock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + elif self._semlock._get_value() == 1: + name = 'None' + elif self._semlock._count() > 0: + name = 'SomeOtherThread' + else: + name = 'SomeOtherProcess' + except Exception: + name = 'unknown' + return '<%s(owner=%s)>' % (self.__class__.__name__, name) + +# +# Recursive lock +# + +class RLock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + count = self._semlock._count() + elif self._semlock._get_value() == 1: + name, count = 'None', 0 + elif self._semlock._count() > 0: + name, count = 'SomeOtherThread', 'nonzero' + else: + name, count = 'SomeOtherProcess', 'nonzero' + except Exception: + name, count = 'unknown', 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) + +# +# Condition variable +# + +class Condition(object): + + def __init__(self, lock=None, *, ctx): + self._lock = lock or ctx.RLock() + self._sleeping_count = ctx.Semaphore(0) + self._woken_count = ctx.Semaphore(0) + self._wait_semaphore = ctx.Semaphore(0) + self._make_methods() + + def __getstate__(self): + 
context.assert_spawning(self) + return (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) + + def __setstate__(self, state): + (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = (self._sleeping_count._semlock._get_value() - + self._woken_count._semlock._get_value()) + except Exception: + num_waiters = 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) + + def wait(self, timeout=None): + assert self._lock._semlock._is_mine(), \ + 'must acquire() condition before using wait()' + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for i in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for i in range(count): + self._lock.acquire() + + def notify(self, n=1): + assert self._lock._semlock._is_mine(), 'lock is not owned' + assert not self._wait_semaphore.acquire( + False), ('notify: Should not have been able to acquire ' + + '_wait_semaphore') + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res, ('notify: Bug in sleeping_count.acquire' + + '- res should not be False') + + sleepers = 0 + while sleepers < n and self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one sleeper + sleepers += 1 + + if sleepers: + for i in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass + + def notify_all(self): + self.notify(n=sys.maxsize) + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = time.monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - time.monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + +# +# Event +# + +class Event(object): + + def __init__(self, *, ctx): + self._cond = ctx.Condition(ctx.Lock()) + self._flag = ctx.Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False + +# +# Barrier +# + +class Barrier(threading.Barrier): + + def __init__(self, parties, action=None, timeout=None, *, ctx): + import struct + from .heap import BufferWrapper + wrapper = BufferWrapper(struct.calcsize('i') * 2) + cond = 
ctx.Condition() + self.__setstate__((parties, action, timeout, cond, wrapper)) + self._state = 0 + self._count = 0 + + def __setstate__(self, state): + (self._parties, self._action, self._timeout, + self._cond, self._wrapper) = state + self._array = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._parties, self._action, self._timeout, + self._cond, self._wrapper) + + @property + def _state(self): + return self._array[0] + + @_state.setter + def _state(self, value): + self._array[0] = value + + @property + def _count(self): + return self._array[1] + + @_count.setter + def _count(self, value): + self._array[1] = value diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/util.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/util.py new file mode 100644 index 0000000000000000000000000000000000000000..44abfe529fc784c85a723342b74ab9893065f0b5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/multiprocessing/util.py @@ -0,0 +1,489 @@ +# +# Module providing various facilities to other parts of the package +# +# multiprocessing/util.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import os +import itertools +import sys +import weakref +import atexit +import threading # we want threading to install it's + # cleanup function before multiprocessing does +from subprocess import _args_from_interpreter_flags + +from . import process + +__all__ = [ + 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', + 'log_to_stderr', 'get_temp_dir', 'register_after_fork', + 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', + 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', + ] + +# +# Logging +# + +NOTSET = 0 +SUBDEBUG = 5 +DEBUG = 10 +INFO = 20 +SUBWARNING = 25 + +LOGGER_NAME = 'multiprocessing' +DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' + +_logger = None +_log_to_stderr = False + +def sub_debug(msg, *args): + if _logger: + _logger.log(SUBDEBUG, msg, *args) + +def debug(msg, *args): + if _logger: + _logger.log(DEBUG, msg, *args) + +def info(msg, *args): + if _logger: + _logger.log(INFO, msg, *args) + +def sub_warning(msg, *args): + if _logger: + _logger.log(SUBWARNING, msg, *args) + +def get_logger(): + ''' + Returns logger used by multiprocessing + ''' + global _logger + import logging + + logging._acquireLock() + try: + if not _logger: + + _logger = logging.getLogger(LOGGER_NAME) + _logger.propagate = 0 + + # XXX multiprocessing should cleanup before logging + if hasattr(atexit, 'unregister'): + atexit.unregister(_exit_function) + atexit.register(_exit_function) + else: + atexit._exithandlers.remove((_exit_function, (), {})) + atexit._exithandlers.append((_exit_function, (), {})) + + finally: + logging._releaseLock() + + return _logger + +def log_to_stderr(level=None): + ''' + Turn on logging and add a handler which prints to stderr + ''' + global _log_to_stderr + import logging + + logger = get_logger() + formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + if level: + logger.setLevel(level) + _log_to_stderr = True + return _logger + + +# Abstract socket support + +def _platform_supports_abstract_sockets(): + if sys.platform == "linux": + return True + if hasattr(sys, 'getandroidapilevel'): + return True + return False + + +def is_abstract_socket_namespace(address): + if not address: + 
return False + if isinstance(address, bytes): + return address[0] == 0 + elif isinstance(address, str): + return address[0] == "\0" + raise TypeError('address type of {address!r} unrecognized') + + +abstract_sockets_supported = _platform_supports_abstract_sockets() + +# +# Function returning a temp directory which will be removed on exit +# + +def _remove_temp_dir(rmtree, tempdir): + rmtree(tempdir) + + current_process = process.current_process() + # current_process() can be None if the finalizer is called + # late during Python finalization + if current_process is not None: + current_process._config['tempdir'] = None + +def get_temp_dir(): + # get name of a temp directory which will be automatically cleaned up + tempdir = process.current_process()._config.get('tempdir') + if tempdir is None: + import shutil, tempfile + tempdir = tempfile.mkdtemp(prefix='pymp-') + info('created temp directory %s', tempdir) + # keep a strong reference to shutil.rmtree(), since the finalizer + # can be called late during Python shutdown + Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), + exitpriority=-100) + process.current_process()._config['tempdir'] = tempdir + return tempdir + +# +# Support for reinitialization of objects when bootstrapping a child process +# + +_afterfork_registry = weakref.WeakValueDictionary() +_afterfork_counter = itertools.count() + +def _run_after_forkers(): + items = list(_afterfork_registry.items()) + items.sort() + for (index, ident, func), obj in items: + try: + func(obj) + except Exception as e: + info('after forker raised exception %s', e) + +def register_after_fork(obj, func): + _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj + +# +# Finalization using weakrefs +# + +_finalizer_registry = {} +_finalizer_counter = itertools.count() + + +class Finalize(object): + ''' + Class which supports object finalization using weakrefs + ''' + def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): + if (exitpriority is not None) and not isinstance(exitpriority,int): + raise TypeError( + "Exitpriority ({0!r}) must be None or int, not {1!s}".format( + exitpriority, type(exitpriority))) + + if obj is not None: + self._weakref = weakref.ref(obj, self) + elif exitpriority is None: + raise ValueError("Without object, exitpriority cannot be None") + + self._callback = callback + self._args = args + self._kwargs = kwargs or {} + self._key = (exitpriority, next(_finalizer_counter)) + self._pid = os.getpid() + + _finalizer_registry[self._key] = self + + def __call__(self, wr=None, + # Need to bind these locally because the globals can have + # been cleared at shutdown + _finalizer_registry=_finalizer_registry, + sub_debug=sub_debug, getpid=os.getpid): + ''' + Run the callback unless it has already been called or cancelled + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + sub_debug('finalizer no longer registered') + else: + if self._pid != getpid(): + sub_debug('finalizer ignored because different process') + res = None + else: + sub_debug('finalizer calling %s with args %s and kwargs %s', + self._callback, self._args, self._kwargs) + res = self._callback(*self._args, **self._kwargs) + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + return res + + def cancel(self): + ''' + Cancel finalization of the object + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + pass + else: + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + + def 
still_active(self): + ''' + Return whether this finalizer is still waiting to invoke callback + ''' + return self._key in _finalizer_registry + + def __repr__(self): + try: + obj = self._weakref() + except (AttributeError, TypeError): + obj = None + + if obj is None: + return '<%s object, dead>' % self.__class__.__name__ + + x = '<%s object, callback=%s' % ( + self.__class__.__name__, + getattr(self._callback, '__name__', self._callback)) + if self._args: + x += ', args=' + str(self._args) + if self._kwargs: + x += ', kwargs=' + str(self._kwargs) + if self._key[0] is not None: + x += ', exitpriority=' + str(self._key[0]) + return x + '>' + + +def _run_finalizers(minpriority=None): + ''' + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. + ''' + if _finalizer_registry is None: + # This function may be called after this module's globals are + # destroyed. See the _exit_function function in this module for more + # notes. + return + + if minpriority is None: + f = lambda p : p[0] is not None + else: + f = lambda p : p[0] is not None and p[0] >= minpriority + + # Careful: _finalizer_registry may be mutated while this function + # is running (either by a GC run or by another thread). + + # list(_finalizer_registry) should be atomic, while + # list(_finalizer_registry.items()) is not. + keys = [key for key in list(_finalizer_registry) if f(key)] + keys.sort(reverse=True) + + for key in keys: + finalizer = _finalizer_registry.get(key) + # key may have been removed from the registry + if finalizer is not None: + sub_debug('calling %s', finalizer) + try: + finalizer() + except Exception: + import traceback + traceback.print_exc() + + if minpriority is None: + _finalizer_registry.clear() + +# +# Clean up on exit +# + +def is_exiting(): + ''' + Returns true if the process is shutting down + ''' + return _exiting or _exiting is None + +_exiting = False + +def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, + active_children=process.active_children, + current_process=process.current_process): + # We hold on to references to functions in the arglist due to the + # situation described below, where this function is called after this + # module's globals are destroyed. + + global _exiting + + if not _exiting: + _exiting = True + + info('process shutting down') + debug('running all "atexit" finalizers with priority >= 0') + _run_finalizers(0) + + if current_process() is not None: + # We check if the current process is None here because if + # it's None, any call to ``active_children()`` will raise + # an AttributeError (active_children winds up trying to + # get attributes from util._current_process). One + # situation where this can happen is if someone has + # manipulated sys.modules, causing this module to be + # garbage collected. The destructor for the module type + # then replaces all values in the module dict with None. + # For instance, after setuptools runs a test it replaces + # sys.modules with a copy created earlier. See issues + # #9775 and #15881. Also related: #4106, #9205, and + # #9207. 
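+        # Aside: the Finalize objects drained by _run_finalizers(0) above
+        # are registered like this elsewhere in the package (mirrors
+        # get_temp_dir(); the directory here is made up):
+        #
+        #   import shutil, tempfile
+        #   tmpdir = tempfile.mkdtemp(prefix='demo-')
+        #   cleanup = Finalize(None, shutil.rmtree, args=(tmpdir,),
+        #                      exitpriority=-100)
+        #   cleanup.still_active()   # -> True
+        #   cleanup()                # runs rmtree now; later calls no-op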
+ + for p in active_children(): + if p.daemon: + info('calling terminate() for daemon %s', p.name) + p._popen.terminate() + + for p in active_children(): + info('calling join() for process %s', p.name) + p.join() + + debug('running the remaining "atexit" finalizers') + _run_finalizers() + +atexit.register(_exit_function) + +# +# Some fork aware types +# + +class ForkAwareThreadLock(object): + def __init__(self): + self._reset() + register_after_fork(self, ForkAwareThreadLock._reset) + + def _reset(self): + self._lock = threading.Lock() + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + +class ForkAwareLocal(threading.local): + def __init__(self): + register_after_fork(self, lambda obj : obj.__dict__.clear()) + def __reduce__(self): + return type(self), () + +# +# Close fds except those specified +# + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except Exception: + MAXFD = 256 + +def close_all_fds_except(fds): + fds = list(fds) + [-1, MAXFD] + fds.sort() + assert fds[-1] == MAXFD, 'fd too large' + for i in range(len(fds) - 1): + os.closerange(fds[i]+1, fds[i+1]) +# +# Close sys.stdin and replace stdin with os.devnull +# + +def _close_stdin(): + if sys.stdin is None: + return + + try: + sys.stdin.close() + except (OSError, ValueError): + pass + + try: + fd = os.open(os.devnull, os.O_RDONLY) + try: + sys.stdin = open(fd, closefd=False) + except: + os.close(fd) + raise + except (OSError, ValueError): + pass + +# +# Flush standard streams, if any +# + +def _flush_std_streams(): + try: + sys.stdout.flush() + except (AttributeError, ValueError): + pass + try: + sys.stderr.flush() + except (AttributeError, ValueError): + pass + +# +# Start a program with only specified fds kept open +# + +def spawnv_passfds(path, args, passfds): + import _posixsubprocess + passfds = tuple(sorted(map(int, passfds))) + errpipe_read, errpipe_write = os.pipe() + try: + return _posixsubprocess.fork_exec( + args, [os.fsencode(path)], True, passfds, None, None, + -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, + False, False, None) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + + +def close_fds(*fds): + """Close each file descriptor given as an argument""" + for fd in fds: + os.close(fd) + + +def _cleanup_tests(): + """Cleanup multiprocessing resources when multiprocessing tests + completed.""" + + from test import support + + # cleanup multiprocessing + process._cleanup() + + # Stop the ForkServer process if it's running + from multiprocessing import forkserver + forkserver._forkserver._stop() + + # Stop the ResourceTracker process if it's running + from multiprocessing import resource_tracker + resource_tracker._resource_tracker._stop() + + # bpo-37421: Explicitly call _run_finalizers() to remove immediately + # temporary directories created by multiprocessing.util.get_temp_dir(). + _run_finalizers() + support.gc_collect() + + support.reap_children() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/sqlite3/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/sqlite3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c91df27cca70d7fa397c41175d18c900b1747d9 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/sqlite3/__init__.py @@ -0,0 +1,23 @@ +# pysqlite2/__init__.py: the pysqlite2 package. +# +# Copyright (C) 2005 Gerhard Häring +# +# This file is part of pysqlite. 
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+#    claim that you wrote the original software. If you use this software
+#    in a product, an acknowledgment in the product documentation would be
+#    appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+#    misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+from sqlite3.dbapi2 import *
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/sqlite3/dump.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/sqlite3/dump.py
new file mode 100644
index 0000000000000000000000000000000000000000..de9c368be3014e7e55f56a69457a003854381629
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/sqlite3/dump.py
@@ -0,0 +1,70 @@
+# Mimic the sqlite3 console shell's .dump command
+# Author: Paul Kippes
+
+# Every identifier in sql is quoted based on a comment in sqlite
+# documentation "SQLite adds new keywords from time to time when it
+# takes on new features. So to prevent your code from being broken by
+# future enhancements, you should normally quote any identifier that
+# is an English language word, even if you do not have to."
+
+def _iterdump(connection):
+    """
+    Returns an iterator to the dump of the database in an SQL text format.
+
+    Used to produce an SQL dump of the database. Useful to save an in-memory
+    database for later restoration. This function should not be called
+    directly but instead called from the Connection method, iterdump().
+    """
+
+    cu = connection.cursor()
+    yield('BEGIN TRANSACTION;')
+
+    # sqlite_master table contains the SQL CREATE statements for the database.
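+    # Callers normally reach this via Connection.iterdump(), e.g. to
+    # snapshot an in-memory database to a script (sketch; the file and
+    # table names are made up):
+    #
+    #   import sqlite3
+    #   con = sqlite3.connect(':memory:')
+    #   con.execute('CREATE TABLE t(x)')
+    #   con.execute("INSERT INTO t VALUES (1), ('two')")
+    #   with open('dump.sql', 'w') as f:
+    #       for line in con.iterdump():   # BEGIN, DDL, INSERTs, COMMIT
+    #           f.write('%s\n' % line)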
+ q = """ + SELECT "name", "type", "sql" + FROM "sqlite_master" + WHERE "sql" NOT NULL AND + "type" == 'table' + ORDER BY "name" + """ + schema_res = cu.execute(q) + for table_name, type, sql in schema_res.fetchall(): + if table_name == 'sqlite_sequence': + yield('DELETE FROM "sqlite_sequence";') + elif table_name == 'sqlite_stat1': + yield('ANALYZE "sqlite_master";') + elif table_name.startswith('sqlite_'): + continue + # NOTE: Virtual table support not implemented + #elif sql.startswith('CREATE VIRTUAL TABLE'): + # qtable = table_name.replace("'", "''") + # yield("INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"\ + # "VALUES('table','{0}','{0}',0,'{1}');".format( + # qtable, + # sql.replace("''"))) + else: + yield('{0};'.format(sql)) + + # Build the insert statement for each row of the current table + table_name_ident = table_name.replace('"', '""') + res = cu.execute('PRAGMA table_info("{0}")'.format(table_name_ident)) + column_names = [str(table_info[1]) for table_info in res.fetchall()] + q = """SELECT 'INSERT INTO "{0}" VALUES({1})' FROM "{0}";""".format( + table_name_ident, + ",".join("""'||quote("{0}")||'""".format(col.replace('"', '""')) for col in column_names)) + query_res = cu.execute(q) + for row in query_res: + yield("{0};".format(row[0])) + + # Now when the type is 'index', 'trigger', or 'view' + q = """ + SELECT "name", "type", "sql" + FROM "sqlite_master" + WHERE "sql" NOT NULL AND + "type" IN ('index', 'trigger', 'view') + """ + schema_res = cu.execute(q) + for name, type, sql in schema_res.fetchall(): + yield('{0};'.format(sql)) + + yield('COMMIT;') diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/fractalcurves.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/fractalcurves.py new file mode 100644 index 0000000000000000000000000000000000000000..54ade96a0ad05eba6304b643c21906db609903c4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/fractalcurves.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_fractalCurves.py + +This program draws two fractal-curve-designs: +(1) A hilbert curve (in a box) +(2) A combination of Koch-curves. + +The CurvesTurtle class and the fractal-curve- +methods are taken from the PythonCard example +scripts for turtle-graphics. +""" +from turtle import * +from time import sleep, perf_counter as clock + +class CurvesTurtle(Pen): + # example derived from + # Turtle Geometry: The Computer as a Medium for Exploring Mathematics + # by Harold Abelson and Andrea diSessa + # p. 96-98 + def hilbert(self, size, level, parity): + if level == 0: + return + # rotate and draw first subcurve with opposite parity to big curve + self.left(parity * 90) + self.hilbert(size, level - 1, -parity) + # interface to and draw second subcurve with same parity as big curve + self.forward(size) + self.right(parity * 90) + self.hilbert(size, level - 1, parity) + # third subcurve + self.forward(size) + self.hilbert(size, level - 1, parity) + # fourth subcurve + self.right(parity * 90) + self.forward(size) + self.hilbert(size, level - 1, -parity) + # a final turn is needed to make the turtle + # end up facing outward from the large square + self.left(parity * 90) + + # Visual Modeling with Logo: A Structural Approach to Seeing + # by James Clayson + # Koch curve, after Helge von Koch who introduced this geometric figure in 1904 + # p. 
146 + def fractalgon(self, n, rad, lev, dir): + import math + + # if dir = 1 turn outward + # if dir = -1 turn inward + edge = 2 * rad * math.sin(math.pi / n) + self.pu() + self.fd(rad) + self.pd() + self.rt(180 - (90 * (n - 2) / n)) + for i in range(n): + self.fractal(edge, lev, dir) + self.rt(360 / n) + self.lt(180 - (90 * (n - 2) / n)) + self.pu() + self.bk(rad) + self.pd() + + # p. 146 + def fractal(self, dist, depth, dir): + if depth < 1: + self.fd(dist) + return + self.fractal(dist / 3, depth - 1, dir) + self.lt(60 * dir) + self.fractal(dist / 3, depth - 1, dir) + self.rt(120 * dir) + self.fractal(dist / 3, depth - 1, dir) + self.lt(60 * dir) + self.fractal(dist / 3, depth - 1, dir) + +def main(): + ft = CurvesTurtle() + + ft.reset() + ft.speed(0) + ft.ht() + ft.getscreen().tracer(1,0) + ft.pu() + + size = 6 + ft.setpos(-33*size, -32*size) + ft.pd() + + ta=clock() + ft.fillcolor("red") + ft.begin_fill() + ft.fd(size) + + ft.hilbert(size, 6, 1) + + # frame + ft.fd(size) + for i in range(3): + ft.lt(90) + ft.fd(size*(64+i%2)) + ft.pu() + for i in range(2): + ft.fd(size) + ft.rt(90) + ft.pd() + for i in range(4): + ft.fd(size*(66+i%2)) + ft.rt(90) + ft.end_fill() + tb=clock() + res = "Hilbert: %.2fsec. " % (tb-ta) + + sleep(3) + + ft.reset() + ft.speed(0) + ft.ht() + ft.getscreen().tracer(1,0) + + ta=clock() + ft.color("black", "blue") + ft.begin_fill() + ft.fractalgon(3, 250, 4, 1) + ft.end_fill() + ft.begin_fill() + ft.color("red") + ft.fractalgon(3, 200, 4, -1) + ft.end_fill() + tb=clock() + res += "Koch: %.2fsec." % (tb-ta) + return res + +if __name__ == '__main__': + msg = main() + print(msg) + mainloop() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/nim.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/nim.py new file mode 100644 index 0000000000000000000000000000000000000000..9ae6cc5c01b9039981f31f4c28a470ac249ee5dc --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/nim.py @@ -0,0 +1,226 @@ +""" turtle-example-suite: + + tdemo_nim.py + +Play nim against the computer. The player +who takes the last stick is the winner. + +Implements the model-view-controller +design pattern. 
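+
+The computer plays the classical nim-sum strategy: it XORs the three
+row sizes and, whenever that sum is nonzero, shortens one row so that
+the XOR becomes zero. For example, rows of 3, 4 and 5 sticks have
+nim-sum 3 ^ 4 ^ 5 == 2; reducing the 3-row to 3 ^ 2 == 1 stick leaves
+1 ^ 4 ^ 5 == 0, a losing position for the opponent. Only when the
+nim-sum is already zero does computerzug() fall back to a random move.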
+""" + + +import turtle +import random +import time + +SCREENWIDTH = 640 +SCREENHEIGHT = 480 + +MINSTICKS = 7 +MAXSTICKS = 31 + +HUNIT = SCREENHEIGHT // 12 +WUNIT = SCREENWIDTH // ((MAXSTICKS // 5) * 11 + (MAXSTICKS % 5) * 2) + +SCOLOR = (63, 63, 31) +HCOLOR = (255, 204, 204) +COLOR = (204, 204, 255) + +def randomrow(): + return random.randint(MINSTICKS, MAXSTICKS) + +def computerzug(state): + xored = state[0] ^ state[1] ^ state[2] + if xored == 0: + return randommove(state) + for z in range(3): + s = state[z] ^ xored + if s <= state[z]: + move = (z, s) + return move + +def randommove(state): + m = max(state) + while True: + z = random.randint(0,2) + if state[z] > (m > 1): + break + rand = random.randint(m > 1, state[z]-1) + return z, rand + + +class NimModel(object): + def __init__(self, game): + self.game = game + + def setup(self): + if self.game.state not in [Nim.CREATED, Nim.OVER]: + return + self.sticks = [randomrow(), randomrow(), randomrow()] + self.player = 0 + self.winner = None + self.game.view.setup() + self.game.state = Nim.RUNNING + + def move(self, row, col): + maxspalte = self.sticks[row] + self.sticks[row] = col + self.game.view.notify_move(row, col, maxspalte, self.player) + if self.game_over(): + self.game.state = Nim.OVER + self.winner = self.player + self.game.view.notify_over() + elif self.player == 0: + self.player = 1 + row, col = computerzug(self.sticks) + self.move(row, col) + self.player = 0 + + def game_over(self): + return self.sticks == [0, 0, 0] + + def notify_move(self, row, col): + if self.sticks[row] <= col: + return + self.move(row, col) + + +class Stick(turtle.Turtle): + def __init__(self, row, col, game): + turtle.Turtle.__init__(self, visible=False) + self.row = row + self.col = col + self.game = game + x, y = self.coords(row, col) + self.shape("square") + self.shapesize(HUNIT/10.0, WUNIT/20.0) + self.speed(0) + self.pu() + self.goto(x,y) + self.color("white") + self.showturtle() + + def coords(self, row, col): + packet, remainder = divmod(col, 5) + x = (3 + 11 * packet + 2 * remainder) * WUNIT + y = (2 + 3 * row) * HUNIT + return x - SCREENWIDTH // 2 + WUNIT // 2, SCREENHEIGHT // 2 - y - HUNIT // 2 + + def makemove(self, x, y): + if self.game.state != Nim.RUNNING: + return + self.game.controller.notify_move(self.row, self.col) + + +class NimView(object): + def __init__(self, game): + self.game = game + self.screen = game.screen + self.model = game.model + self.screen.colormode(255) + self.screen.tracer(False) + self.screen.bgcolor((240, 240, 255)) + self.writer = turtle.Turtle(visible=False) + self.writer.pu() + self.writer.speed(0) + self.sticks = {} + for row in range(3): + for col in range(MAXSTICKS): + self.sticks[(row, col)] = Stick(row, col, game) + self.display("... a moment please ...") + self.screen.tracer(True) + + def display(self, msg1, msg2=None): + self.screen.tracer(False) + self.writer.clear() + if msg2 is not None: + self.writer.goto(0, - SCREENHEIGHT // 2 + 48) + self.writer.pencolor("red") + self.writer.write(msg2, align="center", font=("Courier",18,"bold")) + self.writer.goto(0, - SCREENHEIGHT // 2 + 20) + self.writer.pencolor("black") + self.writer.write(msg1, align="center", font=("Courier",14,"bold")) + self.screen.tracer(True) + + def setup(self): + self.screen.tracer(False) + for row in range(3): + for col in range(self.model.sticks[row]): + self.sticks[(row, col)].color(SCOLOR) + for row in range(3): + for col in range(self.model.sticks[row], MAXSTICKS): + self.sticks[(row, col)].color("white") + self.display("Your turn! 
Click leftmost stick to remove.") + self.screen.tracer(True) + + def notify_move(self, row, col, maxspalte, player): + if player == 0: + farbe = HCOLOR + for s in range(col, maxspalte): + self.sticks[(row, s)].color(farbe) + else: + self.display(" ... thinking ... ") + time.sleep(0.5) + self.display(" ... thinking ... aaah ...") + farbe = COLOR + for s in range(maxspalte-1, col-1, -1): + time.sleep(0.2) + self.sticks[(row, s)].color(farbe) + self.display("Your turn! Click leftmost stick to remove.") + + def notify_over(self): + if self.game.model.winner == 0: + msg2 = "Congrats. You're the winner!!!" + else: + msg2 = "Sorry, the computer is the winner." + self.display("To play again press space bar. To leave press ESC.", msg2) + + def clear(self): + if self.game.state == Nim.OVER: + self.screen.clear() + + +class NimController(object): + + def __init__(self, game): + self.game = game + self.sticks = game.view.sticks + self.BUSY = False + for stick in self.sticks.values(): + stick.onclick(stick.makemove) + self.game.screen.onkey(self.game.model.setup, "space") + self.game.screen.onkey(self.game.view.clear, "Escape") + self.game.view.display("Press space bar to start game") + self.game.screen.listen() + + def notify_move(self, row, col): + if self.BUSY: + return + self.BUSY = True + self.game.model.notify_move(row, col) + self.BUSY = False + + +class Nim(object): + CREATED = 0 + RUNNING = 1 + OVER = 2 + def __init__(self, screen): + self.state = Nim.CREATED + self.screen = screen + self.model = NimModel(self) + self.view = NimView(self) + self.controller = NimController(self) + + +def main(): + mainscreen = turtle.Screen() + mainscreen.mode("standard") + mainscreen.setup(SCREENWIDTH, SCREENHEIGHT) + nim = Nim(mainscreen) + return "EVENTLOOP" + +if __name__ == "__main__": + main() + turtle.mainloop() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/sorting_animate.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/sorting_animate.py new file mode 100644 index 0000000000000000000000000000000000000000..d25a0ab6cebdc09e79a7a7974345852cd23a469c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/sorting_animate.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 +""" + + sorting_animation.py + +A minimal sorting algorithm animation: +Sorts a shelf of 10 blocks using insertion +sort, selection sort and quicksort. + +Shelfs are implemented using builtin lists. + +Blocks are turtles with shape "square", but +stretched to rectangles by shapesize() + --------------------------------------- + To exit press space button + --------------------------------------- +""" +from turtle import * +import random + + +class Block(Turtle): + + def __init__(self, size): + self.size = size + Turtle.__init__(self, shape="square", visible=False) + self.pu() + self.shapesize(size * 1.5, 1.5, 2) # square-->rectangle + self.fillcolor("black") + self.st() + + def glow(self): + self.fillcolor("red") + + def unglow(self): + self.fillcolor("black") + + def __repr__(self): + return "Block size: {0}".format(self.size) + + +class Shelf(list): + + def __init__(self, y): + "create a shelf. 
y is y-position of first block" + self.y = y + self.x = -150 + + def push(self, d): + width, _, _ = d.shapesize() + # align blocks by the bottom edge + y_offset = width / 2 * 20 + d.sety(self.y + y_offset) + d.setx(self.x + 34 * len(self)) + self.append(d) + + def _close_gap_from_i(self, i): + for b in self[i:]: + xpos, _ = b.pos() + b.setx(xpos - 34) + + def _open_gap_from_i(self, i): + for b in self[i:]: + xpos, _ = b.pos() + b.setx(xpos + 34) + + def pop(self, key): + b = list.pop(self, key) + b.glow() + b.sety(200) + self._close_gap_from_i(key) + return b + + def insert(self, key, b): + self._open_gap_from_i(key) + list.insert(self, key, b) + b.setx(self.x + 34 * key) + width, _, _ = b.shapesize() + # align blocks by the bottom edge + y_offset = width / 2 * 20 + b.sety(self.y + y_offset) + b.unglow() + +def isort(shelf): + length = len(shelf) + for i in range(1, length): + hole = i + while hole > 0 and shelf[i].size < shelf[hole - 1].size: + hole = hole - 1 + shelf.insert(hole, shelf.pop(i)) + return + +def ssort(shelf): + length = len(shelf) + for j in range(0, length - 1): + imin = j + for i in range(j + 1, length): + if shelf[i].size < shelf[imin].size: + imin = i + if imin != j: + shelf.insert(j, shelf.pop(imin)) + +def partition(shelf, left, right, pivot_index): + pivot = shelf[pivot_index] + shelf.insert(right, shelf.pop(pivot_index)) + store_index = left + for i in range(left, right): # range is non-inclusive of ending value + if shelf[i].size < pivot.size: + shelf.insert(store_index, shelf.pop(i)) + store_index = store_index + 1 + shelf.insert(store_index, shelf.pop(right)) # move pivot to correct position + return store_index + +def qsort(shelf, left, right): + if left < right: + pivot_index = left + pivot_new_index = partition(shelf, left, right, pivot_index) + qsort(shelf, left, pivot_new_index - 1) + qsort(shelf, pivot_new_index + 1, right) + +def randomize(): + disable_keys() + clear() + target = list(range(10)) + random.shuffle(target) + for i, t in enumerate(target): + for j in range(i, len(s)): + if s[j].size == t + 1: + s.insert(i, s.pop(j)) + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + +def show_text(text, line=0): + line = 20 * line + goto(0,-250 - line) + write(text, align="center", font=("Courier", 16, "bold")) + +def start_ssort(): + disable_keys() + clear() + show_text("Selection Sort") + ssort(s) + clear() + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + +def start_isort(): + disable_keys() + clear() + show_text("Insertion Sort") + isort(s) + clear() + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + +def start_qsort(): + disable_keys() + clear() + show_text("Quicksort") + qsort(s, 0, len(s) - 1) + clear() + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + +def init_shelf(): + global s + s = Shelf(-200) + vals = (4, 2, 8, 9, 1, 5, 10, 3, 7, 6) + for i in vals: + s.push(Block(i)) + +def disable_keys(): + onkey(None, "s") + onkey(None, "i") + onkey(None, "q") + onkey(None, "r") + +def enable_keys(): + onkey(start_isort, "i") + onkey(start_ssort, "s") + onkey(start_qsort, "q") + onkey(randomize, "r") + onkey(bye, "space") + +def main(): + getscreen().clearscreen() + ht(); penup() + init_shelf() + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + listen() + return "EVENTLOOP" + +instructions1 = "press i for insertion sort, s for selection sort, q for quicksort" +instructions2 = "spacebar to quit, r to randomize" + +if 
__name__=="__main__": + msg = main() + mainloop() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/tree.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/tree.py new file mode 100644 index 0000000000000000000000000000000000000000..98a20da7f15c11d90a4243b8319131ceed2e8f4c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/tree.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_tree.py + +Displays a 'breadth-first-tree' - in contrast +to the classical Logo tree drawing programs, +which use a depth-first-algorithm. + +Uses: +(1) a tree-generator, where the drawing is +quasi the side-effect, whereas the generator +always yields None. +(2) Turtle-cloning: At each branching point +the current pen is cloned. So in the end +there are 1024 turtles. +""" +from turtle import Turtle, mainloop +from time import perf_counter as clock + +def tree(plist, l, a, f): + """ plist is list of pens + l is length of branch + a is half of the angle between 2 branches + f is factor by which branch is shortened + from level to level.""" + if l > 3: + lst = [] + for p in plist: + p.forward(l) + q = p.clone() + p.left(a) + q.right(a) + lst.append(p) + lst.append(q) + for x in tree(lst, l*f, a, f): + yield None + +def maketree(): + p = Turtle() + p.setundobuffer(None) + p.hideturtle() + p.speed(0) + p.getscreen().tracer(30,0) + p.left(90) + p.penup() + p.forward(-210) + p.pendown() + t = tree([p], 200, 65, 0.6375) + for x in t: + pass + +def main(): + a=clock() + maketree() + b=clock() + return "done: %.2f sec." % (b-a) + +if __name__ == "__main__": + msg = main() + print(msg) + mainloop() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/two_canvases.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/two_canvases.py new file mode 100644 index 0000000000000000000000000000000000000000..f3602585ab0592c393fbe9526cb5aae4ae902f6c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/two_canvases.py @@ -0,0 +1,54 @@ +"""turtledemo.two_canvases + +Use TurtleScreen and RawTurtle to draw on two +distinct canvases in a separate window. The +new window must be separately closed in +addition to pressing the STOP button. 
+""" + +from turtle import TurtleScreen, RawTurtle, TK + +def main(): + root = TK.Tk() + cv1 = TK.Canvas(root, width=300, height=200, bg="#ddffff") + cv2 = TK.Canvas(root, width=300, height=200, bg="#ffeeee") + cv1.pack() + cv2.pack() + + s1 = TurtleScreen(cv1) + s1.bgcolor(0.85, 0.85, 1) + s2 = TurtleScreen(cv2) + s2.bgcolor(1, 0.85, 0.85) + + p = RawTurtle(s1) + q = RawTurtle(s2) + + p.color("red", (1, 0.85, 0.85)) + p.width(3) + q.color("blue", (0.85, 0.85, 1)) + q.width(3) + + for t in p,q: + t.shape("turtle") + t.lt(36) + + q.lt(180) + + for t in p, q: + t.begin_fill() + for i in range(5): + for t in p, q: + t.fd(50) + t.lt(72) + for t in p,q: + t.end_fill() + t.lt(54) + t.pu() + t.bk(50) + + return "EVENTLOOP" + + +if __name__ == '__main__': + main() + TK.mainloop() # keep window open until user closes it diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/yinyang.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/yinyang.py new file mode 100644 index 0000000000000000000000000000000000000000..11d1f47cae25491b302f2a387a816c3558130444 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/turtledemo/yinyang.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_yinyang.py + +Another drawing suitable as a beginner's +programming example. + +The small circles are drawn by the circle +command. + +""" + +from turtle import * + +def yin(radius, color1, color2): + width(3) + color("black", color1) + begin_fill() + circle(radius/2., 180) + circle(radius, 180) + left(180) + circle(-radius/2., 180) + end_fill() + left(90) + up() + forward(radius*0.35) + right(90) + down() + color(color1, color2) + begin_fill() + circle(radius*0.15) + end_fill() + left(90) + up() + backward(radius*0.35) + down() + left(90) + +def main(): + reset() + yin(200, "black", "white") + yin(200, "white", "black") + ht() + return "Done!" + +if __name__ == '__main__': + main() + mainloop() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/venv/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/venv/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1a370800ff25204304b345e3ad294bc3a96ad3c5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/venv/__init__.py @@ -0,0 +1,477 @@ +""" +Virtual environment (venv) package for Python. Based on PEP 405. + +Copyright (C) 2011-2014 Vinay Sajip. +Licensed to the PSF under a contributor agreement. +""" +import logging +import os +import shutil +import subprocess +import sys +import sysconfig +import types + +logger = logging.getLogger(__name__) + + +class EnvBuilder: + """ + This class exists to allow virtual environment creation to be + customized. The constructor parameters determine the builder's + behaviour when called upon to create a virtual environment. + + By default, the builder makes the system (global) site-packages dir + *un*available to the created environment. + + If invoked using the Python -m option, the default is to use copying + on Windows platforms but symlinks elsewhere. If instantiated some + other way, the default is to *not* use symlinks. + + :param system_site_packages: If True, the system (global) site-packages + dir is available to created environments. + :param clear: If True, delete the contents of the environment directory if + it already exists, before environment creation. + :param symlinks: If True, attempt to symlink rather than copy files into + virtual environment. 
+ :param upgrade: If True, upgrade an existing virtual environment. + :param with_pip: If True, ensure pip is installed in the virtual + environment + :param prompt: Alternative terminal prefix for the environment. + """ + + def __init__(self, system_site_packages=False, clear=False, + symlinks=False, upgrade=False, with_pip=False, prompt=None): + self.system_site_packages = system_site_packages + self.clear = clear + self.symlinks = symlinks + self.upgrade = upgrade + self.with_pip = with_pip + self.prompt = prompt + + def create(self, env_dir): + """ + Create a virtual environment in a directory. + + :param env_dir: The target directory to create an environment in. + + """ + env_dir = os.path.abspath(env_dir) + context = self.ensure_directories(env_dir) + # See issue 24875. We need system_site_packages to be False + # until after pip is installed. + true_system_site_packages = self.system_site_packages + self.system_site_packages = False + self.create_configuration(context) + self.setup_python(context) + if self.with_pip: + self._setup_pip(context) + if not self.upgrade: + self.setup_scripts(context) + self.post_setup(context) + if true_system_site_packages: + # We had set it to False before, now + # restore it and rewrite the configuration + self.system_site_packages = True + self.create_configuration(context) + + def clear_directory(self, path): + for fn in os.listdir(path): + fn = os.path.join(path, fn) + if os.path.islink(fn) or os.path.isfile(fn): + os.remove(fn) + elif os.path.isdir(fn): + shutil.rmtree(fn) + + def ensure_directories(self, env_dir): + """ + Create the directories for the environment. + + Returns a context object which holds paths in the environment, + for use by subsequent logic. + """ + + def create_if_needed(d): + if not os.path.exists(d): + os.makedirs(d) + elif os.path.islink(d) or os.path.isfile(d): + raise ValueError('Unable to create directory %r' % d) + + if os.path.exists(env_dir) and self.clear: + self.clear_directory(env_dir) + context = types.SimpleNamespace() + context.env_dir = env_dir + context.env_name = os.path.split(env_dir)[1] + prompt = self.prompt if self.prompt is not None else context.env_name + context.prompt = '(%s) ' % prompt + create_if_needed(env_dir) + executable = sys._base_executable + dirname, exename = os.path.split(os.path.abspath(executable)) + context.executable = executable + context.python_dir = dirname + context.python_exe = exename + if sys.platform == 'win32': + binname = 'Scripts' + incpath = 'Include' + libpath = os.path.join(env_dir, 'Lib', 'site-packages') + else: + binname = 'bin' + incpath = 'include' + libpath = os.path.join(env_dir, 'lib', + 'python%d.%d' % sys.version_info[:2], + 'site-packages') + context.inc_path = path = os.path.join(env_dir, incpath) + create_if_needed(path) + create_if_needed(libpath) + # Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX + if ((sys.maxsize > 2**32) and (os.name == 'posix') and + (sys.platform != 'darwin')): + link_path = os.path.join(env_dir, 'lib64') + if not os.path.exists(link_path): # Issue #21643 + os.symlink('lib', link_path) + context.bin_path = binpath = os.path.join(env_dir, binname) + context.bin_name = binname + context.env_exe = os.path.join(binpath, exename) + create_if_needed(binpath) + return context + + def create_configuration(self, context): + """ + Create a configuration file indicating where the environment's Python + was copied from, and whether the system site-packages should be made + available in the environment. 
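EnvBuilder.create() above runs a fixed pipeline: ensure_directories() computes the layout and returns the context object, create_configuration() writes pyvenv.cfg, setup_python() places the interpreter, pip is optionally bootstrapped, then setup_scripts() and post_setup() finish the job. A minimal sketch of driving the class directly (the target path is hypothetical):

import venv

# Build an isolated environment with pip bootstrapped and a custom
# prompt prefix; clear=True wipes any previous contents first.
builder = venv.EnvBuilder(with_pip=True, clear=True, prompt='demo')
builder.create('/tmp/demo-env')   # hypothetical target directory

Note the ordering trick inside create(): system_site_packages is forced to False until after pip is installed (issue 24875), and pyvenv.cfg is rewritten afterwards if the caller actually asked for system site-packages.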
+ + :param context: The information for the environment creation request + being processed. + """ + context.cfg_path = path = os.path.join(context.env_dir, 'pyvenv.cfg') + with open(path, 'w', encoding='utf-8') as f: + f.write('home = %s\n' % context.python_dir) + if self.system_site_packages: + incl = 'true' + else: + incl = 'false' + f.write('include-system-site-packages = %s\n' % incl) + f.write('version = %d.%d.%d\n' % sys.version_info[:3]) + if self.prompt is not None: + f.write(f'prompt = {self.prompt!r}\n') + + if os.name != 'nt': + def symlink_or_copy(self, src, dst, relative_symlinks_ok=False): + """ + Try symlinking a file, and if that fails, fall back to copying. + """ + force_copy = not self.symlinks + if not force_copy: + try: + if not os.path.islink(dst): # can't link to itself! + if relative_symlinks_ok: + assert os.path.dirname(src) == os.path.dirname(dst) + os.symlink(os.path.basename(src), dst) + else: + os.symlink(src, dst) + except Exception: # may need to use a more specific exception + logger.warning('Unable to symlink %r to %r', src, dst) + force_copy = True + if force_copy: + shutil.copyfile(src, dst) + else: + def symlink_or_copy(self, src, dst, relative_symlinks_ok=False): + """ + Try symlinking a file, and if that fails, fall back to copying. + """ + bad_src = os.path.lexists(src) and not os.path.exists(src) + if self.symlinks and not bad_src and not os.path.islink(dst): + try: + if relative_symlinks_ok: + assert os.path.dirname(src) == os.path.dirname(dst) + os.symlink(os.path.basename(src), dst) + else: + os.symlink(src, dst) + return + except Exception: # may need to use a more specific exception + logger.warning('Unable to symlink %r to %r', src, dst) + + # On Windows, we rewrite symlinks to our base python.exe into + # copies of venvlauncher.exe + basename, ext = os.path.splitext(os.path.basename(src)) + srcfn = os.path.join(os.path.dirname(__file__), + "scripts", + "nt", + basename + ext) + # Builds or venv's from builds need to remap source file + # locations, as we do not put them into Lib/venv/scripts + if sysconfig.is_python_build(True) or not os.path.isfile(srcfn): + if basename.endswith('_d'): + ext = '_d' + ext + basename = basename[:-2] + if basename == 'python': + basename = 'venvlauncher' + elif basename == 'pythonw': + basename = 'venvwlauncher' + src = os.path.join(os.path.dirname(src), basename + ext) + else: + if basename.startswith('python'): + scripts = sys.prefix + else: + scripts = os.path.join(os.path.dirname(__file__), "scripts", "nt") + src = os.path.join(scripts, basename + ext) + + if not os.path.exists(src): + if not bad_src: + logger.warning('Unable to copy %r', src) + return + + shutil.copyfile(src, dst) + + def setup_python(self, context): + """ + Set up a Python executable in the environment. + + :param context: The information for the environment creation request + being processed. 
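Both platform variants of symlink_or_copy above implement one policy: try a symlink when permitted, and degrade to a byte-for-byte copy when linking fails (the Windows branch additionally remaps python.exe onto the venv launcher executables). The kernel of that fallback, reduced to a platform-neutral helper (the helper name is mine, not part of the module):

import os
import shutil

def link_or_copy(src, dst):
    # Prefer a symlink; quietly fall back to copying, as EnvBuilder does.
    try:
        os.symlink(src, dst)
    except (OSError, NotImplementedError):
        # e.g. no symlink privilege on Windows, or a filesystem
        # that does not support links at all
        shutil.copyfile(src, dst)

The real method also skips a dst that is already a symlink, logs a warning on failure, and honours relative_symlinks_ok so that bin/python3 can point at its sibling by name rather than by absolute path.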
+ """ + binpath = context.bin_path + path = context.env_exe + copier = self.symlink_or_copy + copier(context.executable, path) + dirname = context.python_dir + if os.name != 'nt': + if not os.path.islink(path): + os.chmod(path, 0o755) + for suffix in ('python', 'python3'): + path = os.path.join(binpath, suffix) + if not os.path.exists(path): + # Issue 18807: make copies if + # symlinks are not wanted + copier(context.env_exe, path, relative_symlinks_ok=True) + if not os.path.islink(path): + os.chmod(path, 0o755) + else: + if self.symlinks: + # For symlinking, we need a complete copy of the root directory + # If symlinks fail, you'll get unnecessary copies of files, but + # we assume that if you've opted into symlinks on Windows then + # you know what you're doing. + suffixes = [ + f for f in os.listdir(dirname) if + os.path.normcase(os.path.splitext(f)[1]) in ('.exe', '.dll') + ] + if sysconfig.is_python_build(True): + suffixes = [ + f for f in suffixes if + os.path.normcase(f).startswith(('python', 'vcruntime')) + ] + else: + suffixes = ['python.exe', 'python_d.exe', 'pythonw.exe', + 'pythonw_d.exe'] + + for suffix in suffixes: + src = os.path.join(dirname, suffix) + if os.path.lexists(src): + copier(src, os.path.join(binpath, suffix)) + + if sysconfig.is_python_build(True): + # copy init.tcl + for root, dirs, files in os.walk(context.python_dir): + if 'init.tcl' in files: + tcldir = os.path.basename(root) + tcldir = os.path.join(context.env_dir, 'Lib', tcldir) + if not os.path.exists(tcldir): + os.makedirs(tcldir) + src = os.path.join(root, 'init.tcl') + dst = os.path.join(tcldir, 'init.tcl') + shutil.copyfile(src, dst) + break + + def _setup_pip(self, context): + """Installs or upgrades pip in a virtual environment""" + # We run ensurepip in isolated mode to avoid side effects from + # environment vars, the current directory and anything else + # intended for the global Python environment + cmd = [context.env_exe, '-Im', 'ensurepip', '--upgrade', + '--default-pip'] + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + + def setup_scripts(self, context): + """ + Set up scripts into the created environment from a directory. + + This method installs the default scripts into the environment + being created. You can prevent the default installation by overriding + this method if you really need to, or if you need to specify + a different location for the scripts to install. By default, the + 'scripts' directory in the venv package is used as the source of + scripts to install. + """ + path = os.path.abspath(os.path.dirname(__file__)) + path = os.path.join(path, 'scripts') + self.install_scripts(context, path) + + def post_setup(self, context): + """ + Hook for post-setup modification of the venv. Subclasses may install + additional packages or scripts here, add activation shell scripts, etc. + + :param context: The information for the environment creation request + being processed. + """ + pass + + def replace_variables(self, text, context): + """ + Replace variable placeholders in script text with context-specific + variables. + + Return the text passed in , but with variables replaced. + + :param text: The text in which to replace placeholder variables. + :param context: The information for the environment creation request + being processed. 
+ """ + text = text.replace('__VENV_DIR__', context.env_dir) + text = text.replace('__VENV_NAME__', context.env_name) + text = text.replace('__VENV_PROMPT__', context.prompt) + text = text.replace('__VENV_BIN_NAME__', context.bin_name) + text = text.replace('__VENV_PYTHON__', context.env_exe) + return text + + def install_scripts(self, context, path): + """ + Install scripts into the created environment from a directory. + + :param context: The information for the environment creation request + being processed. + :param path: Absolute pathname of a directory containing script. + Scripts in the 'common' subdirectory of this directory, + and those in the directory named for the platform + being run on, are installed in the created environment. + Placeholder variables are replaced with environment- + specific values. + """ + binpath = context.bin_path + plen = len(path) + for root, dirs, files in os.walk(path): + if root == path: # at top-level, remove irrelevant dirs + for d in dirs[:]: + if d not in ('common', os.name): + dirs.remove(d) + continue # ignore files in top level + for f in files: + if (os.name == 'nt' and f.startswith('python') + and f.endswith(('.exe', '.pdb'))): + continue + srcfile = os.path.join(root, f) + suffix = root[plen:].split(os.sep)[2:] + if not suffix: + dstdir = binpath + else: + dstdir = os.path.join(binpath, *suffix) + if not os.path.exists(dstdir): + os.makedirs(dstdir) + dstfile = os.path.join(dstdir, f) + with open(srcfile, 'rb') as f: + data = f.read() + if not srcfile.endswith(('.exe', '.pdb')): + try: + data = data.decode('utf-8') + data = self.replace_variables(data, context) + data = data.encode('utf-8') + except UnicodeError as e: + data = None + logger.warning('unable to copy script %r, ' + 'may be binary: %s', srcfile, e) + if data is not None: + with open(dstfile, 'wb') as f: + f.write(data) + shutil.copymode(srcfile, dstfile) + + +def create(env_dir, system_site_packages=False, clear=False, + symlinks=False, with_pip=False, prompt=None): + """Create a virtual environment in a directory.""" + builder = EnvBuilder(system_site_packages=system_site_packages, + clear=clear, symlinks=symlinks, with_pip=with_pip, + prompt=prompt) + builder.create(env_dir) + +def main(args=None): + compatible = True + if sys.version_info < (3, 3): + compatible = False + elif not hasattr(sys, 'base_prefix'): + compatible = False + if not compatible: + raise ValueError('This script is only for use with Python >= 3.3') + else: + import argparse + + parser = argparse.ArgumentParser(prog=__name__, + description='Creates virtual Python ' + 'environments in one or ' + 'more target ' + 'directories.', + epilog='Once an environment has been ' + 'created, you may wish to ' + 'activate it, e.g. 
by ' + 'sourcing an activate script ' + 'in its bin directory.') + parser.add_argument('dirs', metavar='ENV_DIR', nargs='+', + help='A directory to create the environment in.') + parser.add_argument('--system-site-packages', default=False, + action='store_true', dest='system_site', + help='Give the virtual environment access to the ' + 'system site-packages dir.') + if os.name == 'nt': + use_symlinks = False + else: + use_symlinks = True + group = parser.add_mutually_exclusive_group() + group.add_argument('--symlinks', default=use_symlinks, + action='store_true', dest='symlinks', + help='Try to use symlinks rather than copies, ' + 'when symlinks are not the default for ' + 'the platform.') + group.add_argument('--copies', default=not use_symlinks, + action='store_false', dest='symlinks', + help='Try to use copies rather than symlinks, ' + 'even when symlinks are the default for ' + 'the platform.') + parser.add_argument('--clear', default=False, action='store_true', + dest='clear', help='Delete the contents of the ' + 'environment directory if it ' + 'already exists, before ' + 'environment creation.') + parser.add_argument('--upgrade', default=False, action='store_true', + dest='upgrade', help='Upgrade the environment ' + 'directory to use this version ' + 'of Python, assuming Python ' + 'has been upgraded in-place.') + parser.add_argument('--without-pip', dest='with_pip', + default=True, action='store_false', + help='Skips installing or upgrading pip in the ' + 'virtual environment (pip is bootstrapped ' + 'by default)') + parser.add_argument('--prompt', + help='Provides an alternative prompt prefix for ' + 'this environment.') + options = parser.parse_args(args) + if options.upgrade and options.clear: + raise ValueError('you cannot supply --upgrade and --clear together.') + builder = EnvBuilder(system_site_packages=options.system_site, + clear=options.clear, + symlinks=options.symlinks, + upgrade=options.upgrade, + with_pip=options.with_pip, + prompt=options.prompt) + for d in options.dirs: + builder.create(d) + +if __name__ == '__main__': + rc = 1 + try: + main() + rc = 0 + except Exception as e: + print('Error: %s' % e, file=sys.stderr) + sys.exit(rc) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/venv/__main__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/venv/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..912423e4a781988accc165be8f0f6bb2e289482d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/venv/__main__.py @@ -0,0 +1,10 @@ +import sys +from . 
import main + +rc = 1 +try: + main() + rc = 0 +except Exception as e: + print('Error: %s' % e, file=sys.stderr) +sys.exit(rc) diff --git a/my_container_sandbox/workspace/anaconda3/pkgs/libffi-3.4.2-h7f98852_5.tar.bz2 b/my_container_sandbox/workspace/anaconda3/pkgs/libffi-3.4.2-h7f98852_5.tar.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..df72cdd13ebcbfdf0068ca5b8c1b14bf14054997 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/pkgs/libffi-3.4.2-h7f98852_5.tar.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab6e9856c21709b7b517e940ae7028ae0737546122f83c2aa5d692860c3b149e +size 58292 diff --git a/my_container_sandbox/workspace/anaconda3/pkgs/python_abi-3.8-2_cp38.tar.bz2 b/my_container_sandbox/workspace/anaconda3/pkgs/python_abi-3.8-2_cp38.tar.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..e274e0ee53c8eaf56ce13b3c8717a4f0f6e35908 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/pkgs/python_abi-3.8-2_cp38.tar.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8535eaa9225ce212f8ed437c4a7341409903a4c005ecb6d36c63af2793b59689 +size 4018
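The --symlinks/--copies pair in venv's main() above is a compact argparse idiom: two mutually exclusive flags that write opposite booleans into the same dest, with the platform default baked in. Reduced to a self-contained sketch (the program name is hypothetical):

import argparse
import os

parser = argparse.ArgumentParser(prog='envtool')    # hypothetical CLI
use_symlinks = os.name != 'nt'                      # symlinks everywhere but Windows
group = parser.add_mutually_exclusive_group()
group.add_argument('--symlinks', dest='symlinks', action='store_true',
                   default=use_symlinks,
                   help='Force symlinks even where copies are the default.')
group.add_argument('--copies', dest='symlinks', action='store_false',
                   help='Force copies even where symlinks are the default.')

print(parser.parse_args([]).symlinks)               # the platform default
print(parser.parse_args(['--copies']).symlinks)     # always False

Because both actions share dest='symlinks', exactly one boolean survives parsing, and the mutually exclusive group makes argparse reject '--symlinks --copies' with a usage error instead of silently letting the last flag win.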