diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/__init__.py b/.venv/Lib/site-packages/pkg_resources/_vendor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34e3a9950cc557879af8d797f9382b18a870fb56 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py @@ -0,0 +1,36 @@ +"""Read resources contained within a package.""" + +from ._common import ( + as_file, + files, + Package, +) + +from ._legacy import ( + contents, + open_binary, + read_binary, + open_text, + read_text, + is_resource, + path, + Resource, +) + +from .abc import ResourceReader + + +__all__ = [ + 'Package', + 'Resource', + 'ResourceReader', + 'as_file', + 'contents', + 'files', + 'is_resource', + 'open_binary', + 'open_text', + 'path', + 'read_binary', + 'read_text', +] diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py new file mode 100644 index 0000000000000000000000000000000000000000..ea363d86a564b5450666aa00aecd46353326a75a --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py @@ -0,0 +1,170 @@ +from contextlib import suppress +from io import TextIOWrapper + +from . import abc + + +class SpecLoaderAdapter: + """ + Adapt a package spec to adapt the underlying loader. + """ + + def __init__(self, spec, adapter=lambda spec: spec.loader): + self.spec = spec + self.loader = adapter(spec) + + def __getattr__(self, name): + return getattr(self.spec, name) + + +class TraversableResourcesLoader: + """ + Adapt a loader to provide TraversableResources. + """ + + def __init__(self, spec): + self.spec = spec + + def get_resource_reader(self, name): + return CompatibilityFiles(self.spec)._native() + + +def _io_wrapper(file, mode='r', *args, **kwargs): + if mode == 'r': + return TextIOWrapper(file, *args, **kwargs) + elif mode == 'rb': + return file + raise ValueError( + "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode) + ) + + +class CompatibilityFiles: + """ + Adapter for an existing or non-existent resource reader + to provide a compatibility .files(). + """ + + class SpecPath(abc.Traversable): + """ + Path tied to a module spec. + Can be read and exposes the resource reader children. + """ + + def __init__(self, spec, reader): + self._spec = spec + self._reader = reader + + def iterdir(self): + if not self._reader: + return iter(()) + return iter( + CompatibilityFiles.ChildPath(self._reader, path) + for path in self._reader.contents() + ) + + def is_file(self): + return False + + is_dir = is_file + + def joinpath(self, other): + if not self._reader: + return CompatibilityFiles.OrphanPath(other) + return CompatibilityFiles.ChildPath(self._reader, other) + + @property + def name(self): + return self._spec.name + + def open(self, mode='r', *args, **kwargs): + return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs) + + class ChildPath(abc.Traversable): + """ + Path tied to a resource reader child. + Can be read but doesn't expose any meaningful children. 
+ """ + + def __init__(self, reader, name): + self._reader = reader + self._name = name + + def iterdir(self): + return iter(()) + + def is_file(self): + return self._reader.is_resource(self.name) + + def is_dir(self): + return not self.is_file() + + def joinpath(self, other): + return CompatibilityFiles.OrphanPath(self.name, other) + + @property + def name(self): + return self._name + + def open(self, mode='r', *args, **kwargs): + return _io_wrapper( + self._reader.open_resource(self.name), mode, *args, **kwargs + ) + + class OrphanPath(abc.Traversable): + """ + Orphan path, not tied to a module spec or resource reader. + Can't be read and doesn't expose any meaningful children. + """ + + def __init__(self, *path_parts): + if len(path_parts) < 1: + raise ValueError('Need at least one path part to construct a path') + self._path = path_parts + + def iterdir(self): + return iter(()) + + def is_file(self): + return False + + is_dir = is_file + + def joinpath(self, other): + return CompatibilityFiles.OrphanPath(*self._path, other) + + @property + def name(self): + return self._path[-1] + + def open(self, mode='r', *args, **kwargs): + raise FileNotFoundError("Can't open orphan path") + + def __init__(self, spec): + self.spec = spec + + @property + def _reader(self): + with suppress(AttributeError): + return self.spec.loader.get_resource_reader(self.spec.name) + + def _native(self): + """ + Return the native reader if it supports files(). + """ + reader = self._reader + return reader if hasattr(reader, 'files') else self + + def __getattr__(self, attr): + return getattr(self._reader, attr) + + def files(self): + return CompatibilityFiles.SpecPath(self.spec, self._reader) + + +def wrap_spec(package): + """ + Construct a package spec with traversable compatibility + on the spec/loader/reader. + """ + return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..3c6de1cfb2e7b8f4ae95100589c4eaa84fb99926 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py @@ -0,0 +1,207 @@ +import os +import pathlib +import tempfile +import functools +import contextlib +import types +import importlib +import inspect +import warnings +import itertools + +from typing import Union, Optional, cast +from .abc import ResourceReader, Traversable + +from ._compat import wrap_spec + +Package = Union[types.ModuleType, str] +Anchor = Package + + +def package_to_anchor(func): + """ + Replace 'package' parameter as 'anchor' and warn about the change. + + Other errors should fall through. + + >>> files('a', 'b') + Traceback (most recent call last): + TypeError: files() takes from 0 to 1 positional arguments but 2 were given + """ + undefined = object() + + @functools.wraps(func) + def wrapper(anchor=undefined, package=undefined): + if package is not undefined: + if anchor is not undefined: + return func(anchor, package) + warnings.warn( + "First parameter to files is renamed to 'anchor'", + DeprecationWarning, + stacklevel=2, + ) + return func(package) + elif anchor is undefined: + return func() + return func(anchor) + + return wrapper + + +@package_to_anchor +def files(anchor: Optional[Anchor] = None) -> Traversable: + """ + Get a Traversable resource for an anchor. 
+ """ + return from_package(resolve(anchor)) + + +def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]: + """ + Return the package's loader if it's a ResourceReader. + """ + # We can't use + # a issubclass() check here because apparently abc.'s __subclasscheck__() + # hook wants to create a weak reference to the object, but + # zipimport.zipimporter does not support weak references, resulting in a + # TypeError. That seems terrible. + spec = package.__spec__ + reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore + if reader is None: + return None + return reader(spec.name) # type: ignore + + +@functools.singledispatch +def resolve(cand: Optional[Anchor]) -> types.ModuleType: + return cast(types.ModuleType, cand) + + +@resolve.register +def _(cand: str) -> types.ModuleType: + return importlib.import_module(cand) + + +@resolve.register +def _(cand: None) -> types.ModuleType: + return resolve(_infer_caller().f_globals['__name__']) + + +def _infer_caller(): + """ + Walk the stack and find the frame of the first caller not in this module. + """ + + def is_this_file(frame_info): + return frame_info.filename == __file__ + + def is_wrapper(frame_info): + return frame_info.function == 'wrapper' + + not_this_file = itertools.filterfalse(is_this_file, inspect.stack()) + # also exclude 'wrapper' due to singledispatch in the call stack + callers = itertools.filterfalse(is_wrapper, not_this_file) + return next(callers).frame + + +def from_package(package: types.ModuleType): + """ + Return a Traversable object for the given package. + + """ + spec = wrap_spec(package) + reader = spec.loader.get_resource_reader(spec.name) + return reader.files() + + +@contextlib.contextmanager +def _tempfile( + reader, + suffix='', + # gh-93353: Keep a reference to call os.remove() in late Python + # finalization. + *, + _os_remove=os.remove, +): + # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' + # blocks due to the need to close the temporary file to work on Windows + # properly. + fd, raw_path = tempfile.mkstemp(suffix=suffix) + try: + try: + os.write(fd, reader()) + finally: + os.close(fd) + del reader + yield pathlib.Path(raw_path) + finally: + try: + _os_remove(raw_path) + except FileNotFoundError: + pass + + +def _temp_file(path): + return _tempfile(path.read_bytes, suffix=path.name) + + +def _is_present_dir(path: Traversable) -> bool: + """ + Some Traversables implement ``is_dir()`` to raise an + exception (i.e. ``FileNotFoundError``) when the + directory doesn't exist. This function wraps that call + to always return a boolean and only return True + if there's a dir and it exists. + """ + with contextlib.suppress(FileNotFoundError): + return path.is_dir() + return False + + +@functools.singledispatch +def as_file(path): + """ + Given a Traversable object, return that object as a + path on the local file system in a context manager. + """ + return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) + + +@as_file.register(pathlib.Path) +@contextlib.contextmanager +def _(path): + """ + Degenerate behavior for pathlib.Path objects. + """ + yield path + + +@contextlib.contextmanager +def _temp_path(dir: tempfile.TemporaryDirectory): + """ + Wrap tempfile.TemporyDirectory to return a pathlib object. + """ + with dir as result: + yield pathlib.Path(result) + + +@contextlib.contextmanager +def _temp_dir(path): + """ + Given a traversable dir, recursively replicate the whole tree + to the file system in a context manager. 
+ """ + assert path.is_dir() + with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: + yield _write_contents(temp_dir, path) + + +def _write_contents(target, source): + child = target.joinpath(source.name) + if source.is_dir(): + child.mkdir() + for item in source.iterdir(): + _write_contents(child, item) + else: + child.write_bytes(source.read_bytes()) + return child diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..8b5b1d280f3c7b45cee54338f60d5271a7510c2e --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py @@ -0,0 +1,108 @@ +# flake8: noqa + +import abc +import os +import sys +import pathlib +from contextlib import suppress +from typing import Union + + +if sys.version_info >= (3, 10): + from zipfile import Path as ZipPath # type: ignore +else: + from ..zipp import Path as ZipPath # type: ignore + + +try: + from typing import runtime_checkable # type: ignore +except ImportError: + + def runtime_checkable(cls): # type: ignore + return cls + + +try: + from typing import Protocol # type: ignore +except ImportError: + Protocol = abc.ABC # type: ignore + + +class TraversableResourcesLoader: + """ + Adapt loaders to provide TraversableResources and other + compatibility. + + Used primarily for Python 3.9 and earlier where the native + loaders do not yet implement TraversableResources. + """ + + def __init__(self, spec): + self.spec = spec + + @property + def path(self): + return self.spec.origin + + def get_resource_reader(self, name): + from . import readers, _adapters + + def _zip_reader(spec): + with suppress(AttributeError): + return readers.ZipReader(spec.loader, spec.name) + + def _namespace_reader(spec): + with suppress(AttributeError, ValueError): + return readers.NamespaceReader(spec.submodule_search_locations) + + def _available_reader(spec): + with suppress(AttributeError): + return spec.loader.get_resource_reader(spec.name) + + def _native_reader(spec): + reader = _available_reader(spec) + return reader if hasattr(reader, 'files') else None + + def _file_reader(spec): + try: + path = pathlib.Path(self.path) + except TypeError: + return None + if path.exists(): + return readers.FileReader(self) + + return ( + # native reader if it supplies 'files' + _native_reader(self.spec) + or + # local ZipReader if a zip module + _zip_reader(self.spec) + or + # local NamespaceReader if a namespace module + _namespace_reader(self.spec) + or + # local FileReader + _file_reader(self.spec) + # fallback - adapt the spec ResourceReader to TraversableReader + or _adapters.CompatibilityFiles(self.spec) + ) + + +def wrap_spec(package): + """ + Construct a package spec with traversable compatibility + on the spec/loader/reader. + + Supersedes _adapters.wrap_spec to use TraversableResourcesLoader + from above for older Python compatibility (<3.10). + """ + from . 
import _adapters + + return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) + + +if sys.version_info >= (3, 9): + StrPath = Union[str, os.PathLike[str]] +else: + # PathLike is only subscriptable at runtime in 3.9+ + StrPath = Union[str, "os.PathLike[str]"] diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py new file mode 100644 index 0000000000000000000000000000000000000000..cce05582ffc6fe6d72027194f4ccc44ee42f1fcd --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py @@ -0,0 +1,35 @@ +from itertools import filterfalse + +from typing import ( + Callable, + Iterable, + Iterator, + Optional, + Set, + TypeVar, + Union, +) + +# Type and type variable definitions +_T = TypeVar('_T') +_U = TypeVar('_U') + + +def unique_everseen( + iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None +) -> Iterator[_T]: + "List unique elements, preserving order. Remember all elements ever seen." + # unique_everseen('AAAABBBCCDAABBB') --> A B C D + # unique_everseen('ABBCcAD', str.lower) --> A B C D + seen: Set[Union[_T, _U]] = set() + seen_add = seen.add + if key is None: + for element in filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element + else: + for element in iterable: + k = key(element) + if k not in seen: + seen_add(k) + yield element diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..b1ea8105dad6e27eefd5a34f64dfee974a5c4f71 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py @@ -0,0 +1,120 @@ +import functools +import os +import pathlib +import types +import warnings + +from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any + +from . import _common + +Package = Union[types.ModuleType, str] +Resource = str + + +def deprecated(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + warnings.warn( + f"{func.__name__} is deprecated. Use files() instead. " + "Refer to https://importlib-resources.readthedocs.io" + "/en/latest/using.html#migrating-from-legacy for migration advice.", + DeprecationWarning, + stacklevel=2, + ) + return func(*args, **kwargs) + + return wrapper + + +def normalize_path(path: Any) -> str: + """Normalize a path by ensuring it is a string. + + If the resulting string contains path separators, an exception is raised. 
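+
+    For example:
+
+    >>> normalize_path('data.txt')
+    'data.txt'
+    >>> normalize_path('subdir/data.txt')
+    Traceback (most recent call last):
+    ...
+    ValueError: 'subdir/data.txt' must be only a file name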
+ """ + str_path = str(path) + parent, file_name = os.path.split(str_path) + if parent: + raise ValueError(f'{path!r} must be only a file name') + return file_name + + +@deprecated +def open_binary(package: Package, resource: Resource) -> BinaryIO: + """Return a file-like object opened for binary reading of the resource.""" + return (_common.files(package) / normalize_path(resource)).open('rb') + + +@deprecated +def read_binary(package: Package, resource: Resource) -> bytes: + """Return the binary contents of the resource.""" + return (_common.files(package) / normalize_path(resource)).read_bytes() + + +@deprecated +def open_text( + package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict', +) -> TextIO: + """Return a file-like object opened for text reading of the resource.""" + return (_common.files(package) / normalize_path(resource)).open( + 'r', encoding=encoding, errors=errors + ) + + +@deprecated +def read_text( + package: Package, + resource: Resource, + encoding: str = 'utf-8', + errors: str = 'strict', +) -> str: + """Return the decoded string of the resource. + + The decoding-related arguments have the same semantics as those of + bytes.decode(). + """ + with open_text(package, resource, encoding, errors) as fp: + return fp.read() + + +@deprecated +def contents(package: Package) -> Iterable[str]: + """Return an iterable of entries in `package`. + + Note that not all entries are resources. Specifically, directories are + not considered resources. Use `is_resource()` on each entry returned here + to check if it is a resource or not. + """ + return [path.name for path in _common.files(package).iterdir()] + + +@deprecated +def is_resource(package: Package, name: str) -> bool: + """True if `name` is a resource inside `package`. + + Directories are *not* resources. + """ + resource = normalize_path(name) + return any( + traversable.name == resource and traversable.is_file() + for traversable in _common.files(package).iterdir() + ) + + +@deprecated +def path( + package: Package, + resource: Resource, +) -> ContextManager[pathlib.Path]: + """A context manager providing a file path object to the resource. + + If the resource does not already exist on its own on the file system, + a temporary file will be created. If the file was created, the file + will be deleted upon exiting the context manager (no exception is + raised if the file was deleted prior to the context manager + exiting). + """ + return _common.as_file(_common.files(package) / normalize_path(resource)) diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/abc.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/abc.py new file mode 100644 index 0000000000000000000000000000000000000000..23b6aeafe4f43d097734e186907232513ad27a3c --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/abc.py @@ -0,0 +1,170 @@ +import abc +import io +import itertools +import pathlib +from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional + +from ._compat import runtime_checkable, Protocol, StrPath + + +__all__ = ["ResourceReader", "Traversable", "TraversableResources"] + + +class ResourceReader(metaclass=abc.ABCMeta): + """Abstract base class for loaders to provide resource reading support.""" + + @abc.abstractmethod + def open_resource(self, resource: Text) -> BinaryIO: + """Return an opened, file-like object for binary reading. + + The 'resource' argument is expected to represent only a file name. 
+ If the resource cannot be found, FileNotFoundError is raised. + """ + # This deliberately raises FileNotFoundError instead of + # NotImplementedError so that if this method is accidentally called, + # it'll still do the right thing. + raise FileNotFoundError + + @abc.abstractmethod + def resource_path(self, resource: Text) -> Text: + """Return the file system path to the specified resource. + + The 'resource' argument is expected to represent only a file name. + If the resource does not exist on the file system, raise + FileNotFoundError. + """ + # This deliberately raises FileNotFoundError instead of + # NotImplementedError so that if this method is accidentally called, + # it'll still do the right thing. + raise FileNotFoundError + + @abc.abstractmethod + def is_resource(self, path: Text) -> bool: + """Return True if the named 'path' is a resource. + + Files are resources, directories are not. + """ + raise FileNotFoundError + + @abc.abstractmethod + def contents(self) -> Iterable[str]: + """Return an iterable of entries in `package`.""" + raise FileNotFoundError + + +class TraversalError(Exception): + pass + + +@runtime_checkable +class Traversable(Protocol): + """ + An object with a subset of pathlib.Path methods suitable for + traversing directories and opening files. + + Any exceptions that occur when accessing the backing resource + may propagate unaltered. + """ + + @abc.abstractmethod + def iterdir(self) -> Iterator["Traversable"]: + """ + Yield Traversable objects in self + """ + + def read_bytes(self) -> bytes: + """ + Read contents of self as bytes + """ + with self.open('rb') as strm: + return strm.read() + + def read_text(self, encoding: Optional[str] = None) -> str: + """ + Read contents of self as text + """ + with self.open(encoding=encoding) as strm: + return strm.read() + + @abc.abstractmethod + def is_dir(self) -> bool: + """ + Return True if self is a directory + """ + + @abc.abstractmethod + def is_file(self) -> bool: + """ + Return True if self is a file + """ + + def joinpath(self, *descendants: StrPath) -> "Traversable": + """ + Return Traversable resolved with any descendants applied. + + Each descendant should be a path segment relative to self + and each may contain multiple levels separated by + ``posixpath.sep`` (``/``). + """ + if not descendants: + return self + names = itertools.chain.from_iterable( + path.parts for path in map(pathlib.PurePosixPath, descendants) + ) + target = next(names) + matches = ( + traversable for traversable in self.iterdir() if traversable.name == target + ) + try: + match = next(matches) + except StopIteration: + raise TraversalError( + "Target not found during traversal.", target, list(names) + ) + return match.joinpath(*names) + + def __truediv__(self, child: StrPath) -> "Traversable": + """ + Return Traversable child in self + """ + return self.joinpath(child) + + @abc.abstractmethod + def open(self, mode='r', *args, **kwargs): + """ + mode may be 'r' or 'rb' to open as text or binary. Return a handle + suitable for reading (same as pathlib.Path.open). + + When opening as text, accepts encoding parameters such as those + accepted by io.TextIOWrapper. + """ + + @property + @abc.abstractmethod + def name(self) -> str: + """ + The base name of this object without any parent references. + """ + + +class TraversableResources(ResourceReader): + """ + The required interface for providing traversable + resources. 
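+
+    Subclasses need only supply ``files()``; the remaining
+    ``ResourceReader`` methods are derived from it below.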
+ """ + + @abc.abstractmethod + def files(self) -> "Traversable": + """Return a Traversable object for the loaded package.""" + + def open_resource(self, resource: StrPath) -> io.BufferedReader: + return self.files().joinpath(resource).open('rb') + + def resource_path(self, resource: Any) -> NoReturn: + raise FileNotFoundError(resource) + + def is_resource(self, path: StrPath) -> bool: + return self.files().joinpath(path).is_file() + + def contents(self) -> Iterator[str]: + return (item.name for item in self.files().iterdir()) diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/readers.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/readers.py new file mode 100644 index 0000000000000000000000000000000000000000..ab34db74091c8a04ee9004ce9a786de3146ec917 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/readers.py @@ -0,0 +1,120 @@ +import collections +import pathlib +import operator + +from . import abc + +from ._itertools import unique_everseen +from ._compat import ZipPath + + +def remove_duplicates(items): + return iter(collections.OrderedDict.fromkeys(items)) + + +class FileReader(abc.TraversableResources): + def __init__(self, loader): + self.path = pathlib.Path(loader.path).parent + + def resource_path(self, resource): + """ + Return the file system path to prevent + `resources.path()` from creating a temporary + copy. + """ + return str(self.path.joinpath(resource)) + + def files(self): + return self.path + + +class ZipReader(abc.TraversableResources): + def __init__(self, loader, module): + _, _, name = module.rpartition('.') + self.prefix = loader.prefix.replace('\\', '/') + name + '/' + self.archive = loader.archive + + def open_resource(self, resource): + try: + return super().open_resource(resource) + except KeyError as exc: + raise FileNotFoundError(exc.args[0]) + + def is_resource(self, path): + # workaround for `zipfile.Path.is_file` returning true + # for non-existent paths. + target = self.files().joinpath(path) + return target.is_file() and target.exists() + + def files(self): + return ZipPath(self.archive, self.prefix) + + +class MultiplexedPath(abc.Traversable): + """ + Given a series of Traversable objects, implement a merged + version of the interface across all objects. Useful for + namespace packages which may be multihomed at a single + name. + """ + + def __init__(self, *paths): + self._paths = list(map(pathlib.Path, remove_duplicates(paths))) + if not self._paths: + message = 'MultiplexedPath must contain at least one path' + raise FileNotFoundError(message) + if not all(path.is_dir() for path in self._paths): + raise NotADirectoryError('MultiplexedPath only supports directories') + + def iterdir(self): + files = (file for path in self._paths for file in path.iterdir()) + return unique_everseen(files, key=operator.attrgetter('name')) + + def read_bytes(self): + raise FileNotFoundError(f'{self} is not a file') + + def read_text(self, *args, **kwargs): + raise FileNotFoundError(f'{self} is not a file') + + def is_dir(self): + return True + + def is_file(self): + return False + + def joinpath(self, *descendants): + try: + return super().joinpath(*descendants) + except abc.TraversalError: + # One of the paths did not resolve (a directory does not exist). + # Just return something that will not exist. 
+            return self._paths[0].joinpath(*descendants)
+
+    def open(self, *args, **kwargs):
+        raise FileNotFoundError(f'{self} is not a file')
+
+    @property
+    def name(self):
+        return self._paths[0].name
+
+    def __repr__(self):
+        paths = ', '.join(f"'{path}'" for path in self._paths)
+        return f'MultiplexedPath({paths})'
+
+
+class NamespaceReader(abc.TraversableResources):
+    def __init__(self, namespace_path):
+        if 'NamespacePath' not in str(namespace_path):
+            raise ValueError('Invalid path')
+        self.path = MultiplexedPath(*list(namespace_path))
+
+    def resource_path(self, resource):
+        """
+        Return the file system path to prevent
+        `resources.path()` from creating a temporary
+        copy.
+        """
+        return str(self.path.joinpath(resource))
+
+    def files(self):
+        return self.path
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..7770c922c84fabe0031333a4de305dd6d6852911
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py
@@ -0,0 +1,106 @@
+"""
+Interface adapters for low-level readers.
+"""
+
+import abc
+import io
+import itertools
+from typing import BinaryIO, List
+
+from .abc import Traversable, TraversableResources
+
+
+class SimpleReader(abc.ABC):
+    """
+    The minimum, low-level interface required from a resource
+    provider.
+    """
+
+    @property
+    @abc.abstractmethod
+    def package(self) -> str:
+        """
+        The name of the package for which this reader loads resources.
+        """
+
+    @abc.abstractmethod
+    def children(self) -> List['SimpleReader']:
+        """
+        Obtain an iterable of SimpleReader for available
+        child containers (e.g. directories).
+        """
+
+    @abc.abstractmethod
+    def resources(self) -> List[str]:
+        """
+        Obtain available named resources for this virtual package.
+        """
+
+    @abc.abstractmethod
+    def open_binary(self, resource: str) -> BinaryIO:
+        """
+        Obtain a file-like object for a named resource.
+        """
+
+    @property
+    def name(self):
+        return self.package.split('.')[-1]
+
+
+class ResourceContainer(Traversable):
+    """
+    Traversable container for a package's resources via its reader.
+    """
+
+    def __init__(self, reader: SimpleReader):
+        self.reader = reader
+
+    def is_dir(self):
+        return True
+
+    def is_file(self):
+        return False
+
+    def iterdir(self):
+        # resources() is a method on SimpleReader; call it to get the names.
+        files = (ResourceHandle(self, name) for name in self.reader.resources())
+        dirs = map(ResourceContainer, self.reader.children())
+        return itertools.chain(files, dirs)
+
+    def open(self, *args, **kwargs):
+        raise IsADirectoryError()
+
+
+class ResourceHandle(Traversable):
+    """
+    Handle to a named resource in a ResourceReader.
+    """
+
+    def __init__(self, parent: ResourceContainer, name: str):
+        self.parent = parent
+        self.name = name  # type: ignore
+
+    def is_file(self):
+        return True
+
+    def is_dir(self):
+        return False
+
+    def open(self, mode='r', *args, **kwargs):
+        stream = self.parent.reader.open_binary(self.name)
+        if 'b' not in mode:
+            # wrap the binary stream for text access rather than discarding it
+            stream = io.TextIOWrapper(stream, *args, **kwargs)
+        return stream
+
+    def joinpath(self, name):
+        raise RuntimeError("Cannot traverse into a resource")
+
+
+class TraversableReader(TraversableResources, SimpleReader):
+    """
+    A TraversableResources based on SimpleReader. Resource providers
+    may derive from this class to provide the TraversableResources
+    interface by supplying the SimpleReader interface.
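+
+    A minimal sketch (``MemReader`` is a hypothetical in-memory reader,
+    defined here only for illustration):
+
+    >>> class MemReader(TraversableReader):
+    ...     package = 'demo'
+    ...     def children(self):
+    ...         return []
+    ...     def resources(self):
+    ...         return ['data.txt']
+    ...     def open_binary(self, resource):
+    ...         return io.BytesIO(b'hello')
+    >>> handle = next(iter(MemReader().files().iterdir()))
+    >>> handle.name
+    'data.txt'
+    >>> handle.read_bytes()
+    b'hello'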
+ """ + + def files(self): + return ResourceContainer(self) diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/__init__.py b/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/context.py b/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/context.py new file mode 100644 index 0000000000000000000000000000000000000000..b0d1ef37cbccbf20c0606fd1132bf58c26d91da0 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/context.py @@ -0,0 +1,288 @@ +import os +import subprocess +import contextlib +import functools +import tempfile +import shutil +import operator +import warnings + + +@contextlib.contextmanager +def pushd(dir): + """ + >>> tmp_path = getfixture('tmp_path') + >>> with pushd(tmp_path): + ... assert os.getcwd() == os.fspath(tmp_path) + >>> assert os.getcwd() != os.fspath(tmp_path) + """ + + orig = os.getcwd() + os.chdir(dir) + try: + yield dir + finally: + os.chdir(orig) + + +@contextlib.contextmanager +def tarball_context(url, target_dir=None, runner=None, pushd=pushd): + """ + Get a tarball, extract it, change to that directory, yield, then + clean up. + `runner` is the function to invoke commands. + `pushd` is a context manager for changing the directory. + """ + if target_dir is None: + target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '') + if runner is None: + runner = functools.partial(subprocess.check_call, shell=True) + else: + warnings.warn("runner parameter is deprecated", DeprecationWarning) + # In the tar command, use --strip-components=1 to strip the first path and + # then + # use -C to cause the files to be extracted to {target_dir}. This ensures + # that we always know where the files were extracted. + runner('mkdir {target_dir}'.format(**vars())) + try: + getter = 'wget {url} -O -' + extract = 'tar x{compression} --strip-components=1 -C {target_dir}' + cmd = ' | '.join((getter, extract)) + runner(cmd.format(compression=infer_compression(url), **vars())) + with pushd(target_dir): + yield target_dir + finally: + runner('rm -Rf {target_dir}'.format(**vars())) + + +def infer_compression(url): + """ + Given a URL or filename, infer the compression code for tar. + + >>> infer_compression('http://foo/bar.tar.gz') + 'z' + >>> infer_compression('http://foo/bar.tgz') + 'z' + >>> infer_compression('file.bz') + 'j' + >>> infer_compression('file.xz') + 'J' + """ + # cheat and just assume it's the last two characters + compression_indicator = url[-2:] + mapping = dict(gz='z', bz='j', xz='J') + # Assume 'z' (gzip) if no match + return mapping.get(compression_indicator, 'z') + + +@contextlib.contextmanager +def temp_dir(remover=shutil.rmtree): + """ + Create a temporary directory context. Pass a custom remover + to override the removal behavior. + + >>> import pathlib + >>> with temp_dir() as the_dir: + ... assert os.path.isdir(the_dir) + ... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents') + >>> assert not os.path.exists(the_dir) + """ + temp_dir = tempfile.mkdtemp() + try: + yield temp_dir + finally: + remover(temp_dir) + + +@contextlib.contextmanager +def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir): + """ + Check out the repo indicated by url. + + If dest_ctx is supplied, it should be a context manager + to yield the target directory for the check out. 
+ """ + exe = 'git' if 'git' in url else 'hg' + with dest_ctx() as repo_dir: + cmd = [exe, 'clone', url, repo_dir] + if branch: + cmd.extend(['--branch', branch]) + devnull = open(os.path.devnull, 'w') + stdout = devnull if quiet else None + subprocess.check_call(cmd, stdout=stdout) + yield repo_dir + + +@contextlib.contextmanager +def null(): + """ + A null context suitable to stand in for a meaningful context. + + >>> with null() as value: + ... assert value is None + """ + yield + + +class ExceptionTrap: + """ + A context manager that will catch certain exceptions and provide an + indication they occurred. + + >>> with ExceptionTrap() as trap: + ... raise Exception() + >>> bool(trap) + True + + >>> with ExceptionTrap() as trap: + ... pass + >>> bool(trap) + False + + >>> with ExceptionTrap(ValueError) as trap: + ... raise ValueError("1 + 1 is not 3") + >>> bool(trap) + True + >>> trap.value + ValueError('1 + 1 is not 3') + >>> trap.tb + + + >>> with ExceptionTrap(ValueError) as trap: + ... raise Exception() + Traceback (most recent call last): + ... + Exception + + >>> bool(trap) + False + """ + + exc_info = None, None, None + + def __init__(self, exceptions=(Exception,)): + self.exceptions = exceptions + + def __enter__(self): + return self + + @property + def type(self): + return self.exc_info[0] + + @property + def value(self): + return self.exc_info[1] + + @property + def tb(self): + return self.exc_info[2] + + def __exit__(self, *exc_info): + type = exc_info[0] + matches = type and issubclass(type, self.exceptions) + if matches: + self.exc_info = exc_info + return matches + + def __bool__(self): + return bool(self.type) + + def raises(self, func, *, _test=bool): + """ + Wrap func and replace the result with the truth + value of the trap (True if an exception occurred). + + First, give the decorator an alias to support Python 3.8 + Syntax. + + >>> raises = ExceptionTrap(ValueError).raises + + Now decorate a function that always fails. + + >>> @raises + ... def fail(): + ... raise ValueError('failed') + >>> fail() + True + """ + + @functools.wraps(func) + def wrapper(*args, **kwargs): + with ExceptionTrap(self.exceptions) as trap: + func(*args, **kwargs) + return _test(trap) + + return wrapper + + def passes(self, func): + """ + Wrap func and replace the result with the truth + value of the trap (True if no exception). + + First, give the decorator an alias to support Python 3.8 + Syntax. + + >>> passes = ExceptionTrap(ValueError).passes + + Now decorate a function that always fails. + + >>> @passes + ... def fail(): + ... raise ValueError('failed') + + >>> fail() + False + """ + return self.raises(func, _test=operator.not_) + + +class suppress(contextlib.suppress, contextlib.ContextDecorator): + """ + A version of contextlib.suppress with decorator support. + + >>> @suppress(KeyError) + ... def key_error(): + ... {}[''] + >>> key_error() + """ + + +class on_interrupt(contextlib.ContextDecorator): + """ + Replace a KeyboardInterrupt with SystemExit(1) + + >>> def do_interrupt(): + ... raise KeyboardInterrupt() + >>> on_interrupt('error')(do_interrupt)() + Traceback (most recent call last): + ... + SystemExit: 1 + >>> on_interrupt('error', code=255)(do_interrupt)() + Traceback (most recent call last): + ... + SystemExit: 255 + >>> on_interrupt('suppress')(do_interrupt)() + >>> with __import__('pytest').raises(KeyboardInterrupt): + ... 
+    ...     on_interrupt('ignore')(do_interrupt)()
+    """
+
+    def __init__(
+        self,
+        action='error',
+        # py3.7 compat
+        # /,
+        code=1,
+    ):
+        self.action = action
+        self.code = code
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exctype, excinst, exctb):
+        if exctype is not KeyboardInterrupt or self.action == 'ignore':
+            return
+        elif self.action == 'error':
+            raise SystemExit(self.code) from excinst
+        return self.action == 'suppress'
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/functools.py b/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/functools.py
new file mode 100644
index 0000000000000000000000000000000000000000..67aeadc353ce372565a86da03913c80f14a2fddf
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/functools.py
@@ -0,0 +1,556 @@
+import functools
+import time
+import inspect
+import collections
+import types
+import itertools
+import warnings
+
+# bind the short name used below (e.g. in print_yielded)
+from pkg_resources.extern import more_itertools
+
+from typing import Callable, TypeVar
+
+
+CallableT = TypeVar("CallableT", bound=Callable[..., object])
+
+
+def compose(*funcs):
+    """
+    Compose any number of unary functions into a single unary function.
+
+    >>> import textwrap
+    >>> expected = str.strip(textwrap.dedent(compose.__doc__))
+    >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
+    >>> strip_and_dedent(compose.__doc__) == expected
+    True
+
+    Compose also allows the innermost function to take arbitrary arguments.
+
+    >>> round_three = lambda x: round(x, ndigits=3)
+    >>> f = compose(round_three, int.__truediv__)
+    >>> [f(3*x, x+1) for x in range(1,10)]
+    [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
+    """
+
+    def compose_two(f1, f2):
+        return lambda *args, **kwargs: f1(f2(*args, **kwargs))
+
+    return functools.reduce(compose_two, funcs)
+
+
+def method_caller(method_name, *args, **kwargs):
+    """
+    Return a function that will call a named method on the
+    target object with optional positional and keyword
+    arguments.
+
+    >>> lower = method_caller('lower')
+    >>> lower('MyString')
+    'mystring'
+    """
+
+    def call_method(target):
+        func = getattr(target, method_name)
+        return func(*args, **kwargs)
+
+    return call_method
+
+
+def once(func):
+    """
+    Decorate func so it's only ever called the first time.
+
+    This decorator can ensure that an expensive or non-idempotent function
+    will not be expensive on subsequent calls and is idempotent.
+
+    >>> add_three = once(lambda a: a+3)
+    >>> add_three(3)
+    6
+    >>> add_three(9)
+    6
+    >>> add_three('12')
+    6
+
+    To reset the stored value, simply clear the property ``saved_result``.
+
+    >>> del add_three.saved_result
+    >>> add_three(9)
+    12
+    >>> add_three(8)
+    12
+
+    Or invoke 'reset()' on it.
+
+    >>> add_three.reset()
+    >>> add_three(-3)
+    0
+    >>> add_three(0)
+    0
+    """
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        if not hasattr(wrapper, 'saved_result'):
+            wrapper.saved_result = func(*args, **kwargs)
+        return wrapper.saved_result
+
+    wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
+    return wrapper
+
+
+def method_cache(
+    method: CallableT,
+    cache_wrapper: Callable[
+        [CallableT], CallableT
+    ] = functools.lru_cache(),  # type: ignore[assignment]
+) -> CallableT:
+    """
+    Wrap lru_cache to support storing the cache data in the object instances.
+
+    Abstracts the common paradigm where the method explicitly saves an
+    underscore-prefixed protected property on first call and returns that
+    subsequently.
+
+    >>> class MyClass:
+    ...     calls = 0
+    ...
+    ...     @method_cache
+    ...
def method(self, value): + ... self.calls += 1 + ... return value + + >>> a = MyClass() + >>> a.method(3) + 3 + >>> for x in range(75): + ... res = a.method(x) + >>> a.calls + 75 + + Note that the apparent behavior will be exactly like that of lru_cache + except that the cache is stored on each instance, so values in one + instance will not flush values from another, and when an instance is + deleted, so are the cached values for that instance. + + >>> b = MyClass() + >>> for x in range(35): + ... res = b.method(x) + >>> b.calls + 35 + >>> a.method(0) + 0 + >>> a.calls + 75 + + Note that if method had been decorated with ``functools.lru_cache()``, + a.calls would have been 76 (due to the cached value of 0 having been + flushed by the 'b' instance). + + Clear the cache with ``.cache_clear()`` + + >>> a.method.cache_clear() + + Same for a method that hasn't yet been called. + + >>> c = MyClass() + >>> c.method.cache_clear() + + Another cache wrapper may be supplied: + + >>> cache = functools.lru_cache(maxsize=2) + >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) + >>> a = MyClass() + >>> a.method2() + 3 + + Caution - do not subsequently wrap the method with another decorator, such + as ``@property``, which changes the semantics of the function. + + See also + http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ + for another implementation and additional justification. + """ + + def wrapper(self: object, *args: object, **kwargs: object) -> object: + # it's the first call, replace the method with a cached, bound method + bound_method: CallableT = types.MethodType( # type: ignore[assignment] + method, self + ) + cached_method = cache_wrapper(bound_method) + setattr(self, method.__name__, cached_method) + return cached_method(*args, **kwargs) + + # Support cache clear even before cache has been created. + wrapper.cache_clear = lambda: None # type: ignore[attr-defined] + + return ( # type: ignore[return-value] + _special_method_cache(method, cache_wrapper) or wrapper + ) + + +def _special_method_cache(method, cache_wrapper): + """ + Because Python treats special methods differently, it's not + possible to use instance attributes to implement the cached + methods. + + Instead, install the wrapper method under a different name + and return a simple proxy to that wrapper. + + https://github.com/jaraco/jaraco.functools/issues/5 + """ + name = method.__name__ + special_names = '__getattr__', '__getitem__' + if name not in special_names: + return + + wrapper_name = '__cached' + name + + def proxy(self, *args, **kwargs): + if wrapper_name not in vars(self): + bound = types.MethodType(method, self) + cache = cache_wrapper(bound) + setattr(self, wrapper_name, cache) + else: + cache = getattr(self, wrapper_name) + return cache(*args, **kwargs) + + return proxy + + +def apply(transform): + """ + Decorate a function with a transform function that is + invoked on results returned from the decorated function. + + >>> @apply(reversed) + ... def get_numbers(start): + ... "doc for get_numbers" + ... return range(start, start+3) + >>> list(get_numbers(4)) + [6, 5, 4] + >>> get_numbers.__doc__ + 'doc for get_numbers' + """ + + def wrap(func): + return functools.wraps(func)(compose(transform, func)) + + return wrap + + +def result_invoke(action): + r""" + Decorate a function with an action function that is + invoked on the results returned from the decorated + function (for its side-effect), then return the original + result. + + >>> @result_invoke(print) + ... 
def add_two(a, b): + ... return a + b + >>> x = add_two(2, 3) + 5 + >>> x + 5 + """ + + def wrap(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + result = func(*args, **kwargs) + action(result) + return result + + return wrapper + + return wrap + + +def invoke(f, *args, **kwargs): + """ + Call a function for its side effect after initialization. + + The benefit of using the decorator instead of simply invoking a function + after defining it is that it makes explicit the author's intent for the + function to be called immediately. Whereas if one simply calls the + function immediately, it's less obvious if that was intentional or + incidental. It also avoids repeating the name - the two actions, defining + the function and calling it immediately are modeled separately, but linked + by the decorator construct. + + The benefit of having a function construct (opposed to just invoking some + behavior inline) is to serve as a scope in which the behavior occurs. It + avoids polluting the global namespace with local variables, provides an + anchor on which to attach documentation (docstring), keeps the behavior + logically separated (instead of conceptually separated or not separated at + all), and provides potential to re-use the behavior for testing or other + purposes. + + This function is named as a pithy way to communicate, "call this function + primarily for its side effect", or "while defining this function, also + take it aside and call it". It exists because there's no Python construct + for "define and call" (nor should there be, as decorators serve this need + just fine). The behavior happens immediately and synchronously. + + >>> @invoke + ... def func(): print("called") + called + >>> func() + called + + Use functools.partial to pass parameters to the initial call + + >>> @functools.partial(invoke, name='bingo') + ... def func(name): print("called with", name) + called with bingo + """ + f(*args, **kwargs) + return f + + +def call_aside(*args, **kwargs): + """ + Deprecated name for invoke. + """ + warnings.warn("call_aside is deprecated, use invoke", DeprecationWarning) + return invoke(*args, **kwargs) + + +class Throttler: + """ + Rate-limit a function (or other callable) + """ + + def __init__(self, func, max_rate=float('Inf')): + if isinstance(func, Throttler): + func = func.func + self.func = func + self.max_rate = max_rate + self.reset() + + def reset(self): + self.last_called = 0 + + def __call__(self, *args, **kwargs): + self._wait() + return self.func(*args, **kwargs) + + def _wait(self): + "ensure at least 1/max_rate seconds from last call" + elapsed = time.time() - self.last_called + must_wait = 1 / self.max_rate - elapsed + time.sleep(max(0, must_wait)) + self.last_called = time.time() + + def __get__(self, obj, type=None): + return first_invoke(self._wait, functools.partial(self.func, obj)) + + +def first_invoke(func1, func2): + """ + Return a function that when invoked will invoke func1 without + any parameters (for its side-effect) and then invoke func2 + with whatever parameters were passed, returning its result. + """ + + def wrapper(*args, **kwargs): + func1() + return func2(*args, **kwargs) + + return wrapper + + +def retry_call(func, cleanup=lambda: None, retries=0, trap=()): + """ + Given a callable func, trap the indicated exceptions + for up to 'retries' times, invoking cleanup on the + exception. On the final attempt, allow any exceptions + to propagate. 
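+
+    For example, trapping ValueError from a callable that fails once
+    before succeeding:
+
+    >>> calls = []
+    >>> def flaky():
+    ...     calls.append(1)
+    ...     if len(calls) < 2:
+    ...         raise ValueError('not yet')
+    ...     return 'ok'
+    >>> retry_call(flaky, retries=2, trap=ValueError)
+    'ok'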
+ """ + attempts = itertools.count() if retries == float('inf') else range(retries) + for attempt in attempts: + try: + return func() + except trap: + cleanup() + + return func() + + +def retry(*r_args, **r_kwargs): + """ + Decorator wrapper for retry_call. Accepts arguments to retry_call + except func and then returns a decorator for the decorated function. + + Ex: + + >>> @retry(retries=3) + ... def my_func(a, b): + ... "this is my funk" + ... print(a, b) + >>> my_func.__doc__ + 'this is my funk' + """ + + def decorate(func): + @functools.wraps(func) + def wrapper(*f_args, **f_kwargs): + bound = functools.partial(func, *f_args, **f_kwargs) + return retry_call(bound, *r_args, **r_kwargs) + + return wrapper + + return decorate + + +def print_yielded(func): + """ + Convert a generator into a function that prints all yielded elements + + >>> @print_yielded + ... def x(): + ... yield 3; yield None + >>> x() + 3 + None + """ + print_all = functools.partial(map, print) + print_results = compose(more_itertools.consume, print_all, func) + return functools.wraps(func)(print_results) + + +def pass_none(func): + """ + Wrap func so it's not called if its first param is None + + >>> print_text = pass_none(print) + >>> print_text('text') + text + >>> print_text(None) + """ + + @functools.wraps(func) + def wrapper(param, *args, **kwargs): + if param is not None: + return func(param, *args, **kwargs) + + return wrapper + + +def assign_params(func, namespace): + """ + Assign parameters from namespace where func solicits. + + >>> def func(x, y=3): + ... print(x, y) + >>> assigned = assign_params(func, dict(x=2, z=4)) + >>> assigned() + 2 3 + + The usual errors are raised if a function doesn't receive + its required parameters: + + >>> assigned = assign_params(func, dict(y=3, z=4)) + >>> assigned() + Traceback (most recent call last): + TypeError: func() ...argument... + + It even works on methods: + + >>> class Handler: + ... def meth(self, arg): + ... print(arg) + >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))() + crystal + """ + sig = inspect.signature(func) + params = sig.parameters.keys() + call_ns = {k: namespace[k] for k in params if k in namespace} + return functools.partial(func, **call_ns) + + +def save_method_args(method): + """ + Wrap a method such that when it is called, the args and kwargs are + saved on the method. + + >>> class MyClass: + ... @save_method_args + ... def method(self, a, b): + ... print(a, b) + >>> my_ob = MyClass() + >>> my_ob.method(1, 2) + 1 2 + >>> my_ob._saved_method.args + (1, 2) + >>> my_ob._saved_method.kwargs + {} + >>> my_ob.method(a=3, b='foo') + 3 foo + >>> my_ob._saved_method.args + () + >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') + True + + The arguments are stored on the instance, allowing for + different instance to save different args. + + >>> your_ob = MyClass() + >>> your_ob.method({str('x'): 3}, b=[4]) + {'x': 3} [4] + >>> your_ob._saved_method.args + ({'x': 3},) + >>> my_ob._saved_method.args + () + """ + args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') + + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + attr_name = '_saved_' + method.__name__ + attr = args_and_kwargs(args, kwargs) + setattr(self, attr_name, attr) + return method(self, *args, **kwargs) + + return wrapper + + +def except_(*exceptions, replace=None, use=None): + """ + Replace the indicated exceptions, if raised, with the indicated + literal replacement or evaluated expression (if present). 
+ + >>> safe_int = except_(ValueError)(int) + >>> safe_int('five') + >>> safe_int('5') + 5 + + Specify a literal replacement with ``replace``. + + >>> safe_int_r = except_(ValueError, replace=0)(int) + >>> safe_int_r('five') + 0 + + Provide an expression to ``use`` to pass through particular parameters. + + >>> safe_int_pt = except_(ValueError, use='args[0]')(int) + >>> safe_int_pt('five') + 'five' + + """ + + def decorate(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except exceptions: + try: + return eval(use) + except TypeError: + return replace + + return wrapper + + return decorate diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py b/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c466378ceba69a335d2beb4d3af92703d52b3831 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py @@ -0,0 +1,599 @@ +import re +import itertools +import textwrap +import functools + +try: + from importlib.resources import files # type: ignore +except ImportError: # pragma: nocover + from pkg_resources.extern.importlib_resources import files # type: ignore + +from pkg_resources.extern.jaraco.functools import compose, method_cache +from pkg_resources.extern.jaraco.context import ExceptionTrap + + +def substitution(old, new): + """ + Return a function that will perform a substitution on a string + """ + return lambda s: s.replace(old, new) + + +def multi_substitution(*substitutions): + """ + Take a sequence of pairs specifying substitutions, and create + a function that performs those substitutions. + + >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo') + 'baz' + """ + substitutions = itertools.starmap(substitution, substitutions) + # compose function applies last function first, so reverse the + # substitutions to get the expected order. + substitutions = reversed(tuple(substitutions)) + return compose(*substitutions) + + +class FoldedCase(str): + """ + A case insensitive string class; behaves just like str + except compares equal when the only variation is case. + + >>> s = FoldedCase('hello world') + + >>> s == 'Hello World' + True + + >>> 'Hello World' == s + True + + >>> s != 'Hello World' + False + + >>> s.index('O') + 4 + + >>> s.split('O') + ['hell', ' w', 'rld'] + + >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) + ['alpha', 'Beta', 'GAMMA'] + + Sequence membership is straightforward. + + >>> "Hello World" in [s] + True + >>> s in ["Hello World"] + True + + You may test for set inclusion, but candidate and elements + must both be folded. + + >>> FoldedCase("Hello World") in {s} + True + >>> s in {FoldedCase("Hello World")} + True + + String inclusion works as long as the FoldedCase object + is on the right. 
+ + >>> "hello" in FoldedCase("Hello World") + True + + But not if the FoldedCase object is on the left: + + >>> FoldedCase('hello') in 'Hello World' + False + + In that case, use ``in_``: + + >>> FoldedCase('hello').in_('Hello World') + True + + >>> FoldedCase('hello') > FoldedCase('Hello') + False + """ + + def __lt__(self, other): + return self.lower() < other.lower() + + def __gt__(self, other): + return self.lower() > other.lower() + + def __eq__(self, other): + return self.lower() == other.lower() + + def __ne__(self, other): + return self.lower() != other.lower() + + def __hash__(self): + return hash(self.lower()) + + def __contains__(self, other): + return super().lower().__contains__(other.lower()) + + def in_(self, other): + "Does self appear in other?" + return self in FoldedCase(other) + + # cache lower since it's likely to be called frequently. + @method_cache + def lower(self): + return super().lower() + + def index(self, sub): + return self.lower().index(sub.lower()) + + def split(self, splitter=' ', maxsplit=0): + pattern = re.compile(re.escape(splitter), re.I) + return pattern.split(self, maxsplit) + + +# Python 3.8 compatibility +_unicode_trap = ExceptionTrap(UnicodeDecodeError) + + +@_unicode_trap.passes +def is_decodable(value): + r""" + Return True if the supplied value is decodable (using the default + encoding). + + >>> is_decodable(b'\xff') + False + >>> is_decodable(b'\x32') + True + """ + value.decode() + + +def is_binary(value): + r""" + Return True if the value appears to be binary (that is, it's a byte + string and isn't decodable). + + >>> is_binary(b'\xff') + True + >>> is_binary('\xff') + False + """ + return isinstance(value, bytes) and not is_decodable(value) + + +def trim(s): + r""" + Trim something like a docstring to remove the whitespace that + is common due to indentation and formatting. + + >>> trim("\n\tfoo = bar\n\t\tbar = baz\n") + 'foo = bar\n\tbar = baz' + """ + return textwrap.dedent(s).strip() + + +def wrap(s): + """ + Wrap lines of text, retaining existing newlines as + paragraph markers. + + >>> print(wrap(lorem_ipsum)) + Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do + eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad + minim veniam, quis nostrud exercitation ullamco laboris nisi ut + aliquip ex ea commodo consequat. Duis aute irure dolor in + reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla + pariatur. Excepteur sint occaecat cupidatat non proident, sunt in + culpa qui officia deserunt mollit anim id est laborum. + + Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam + varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus + magna felis sollicitudin mauris. Integer in mauris eu nibh euismod + gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis + risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, + eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas + fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla + a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, + neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing + sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque + nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus + quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, + molestie eu, feugiat in, orci. In hac habitasse platea dictumst. 
+ """ + paragraphs = s.splitlines() + wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs) + return '\n\n'.join(wrapped) + + +def unwrap(s): + r""" + Given a multi-line string, return an unwrapped version. + + >>> wrapped = wrap(lorem_ipsum) + >>> wrapped.count('\n') + 20 + >>> unwrapped = unwrap(wrapped) + >>> unwrapped.count('\n') + 1 + >>> print(unwrapped) + Lorem ipsum dolor sit amet, consectetur adipiscing ... + Curabitur pretium tincidunt lacus. Nulla gravida orci ... + + """ + paragraphs = re.split(r'\n\n+', s) + cleaned = (para.replace('\n', ' ') for para in paragraphs) + return '\n'.join(cleaned) + + + + +class Splitter(object): + """object that will split a string with the given arguments for each call + + >>> s = Splitter(',') + >>> s('hello, world, this is your, master calling') + ['hello', ' world', ' this is your', ' master calling'] + """ + + def __init__(self, *args): + self.args = args + + def __call__(self, s): + return s.split(*self.args) + + +def indent(string, prefix=' ' * 4): + """ + >>> indent('foo') + ' foo' + """ + return prefix + string + + +class WordSet(tuple): + """ + Given an identifier, return the words that identifier represents, + whether in camel case, underscore-separated, etc. + + >>> WordSet.parse("camelCase") + ('camel', 'Case') + + >>> WordSet.parse("under_sep") + ('under', 'sep') + + Acronyms should be retained + + >>> WordSet.parse("firstSNL") + ('first', 'SNL') + + >>> WordSet.parse("you_and_I") + ('you', 'and', 'I') + + >>> WordSet.parse("A simple test") + ('A', 'simple', 'test') + + Multiple caps should not interfere with the first cap of another word. + + >>> WordSet.parse("myABCClass") + ('my', 'ABC', 'Class') + + The result is a WordSet, so you can get the form you need. + + >>> WordSet.parse("myABCClass").underscore_separated() + 'my_ABC_Class' + + >>> WordSet.parse('a-command').camel_case() + 'ACommand' + + >>> WordSet.parse('someIdentifier').lowered().space_separated() + 'some identifier' + + Slices of the result should return another WordSet. + + >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated() + 'out_of_context' + + >>> WordSet.from_class_name(WordSet()).lowered().space_separated() + 'word set' + + >>> example = WordSet.parse('figured it out') + >>> example.headless_camel_case() + 'figuredItOut' + >>> example.dash_separated() + 'figured-it-out' + + """ + + _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))') + + def capitalized(self): + return WordSet(word.capitalize() for word in self) + + def lowered(self): + return WordSet(word.lower() for word in self) + + def camel_case(self): + return ''.join(self.capitalized()) + + def headless_camel_case(self): + words = iter(self) + first = next(words).lower() + new_words = itertools.chain((first,), WordSet(words).camel_case()) + return ''.join(new_words) + + def underscore_separated(self): + return '_'.join(self) + + def dash_separated(self): + return '-'.join(self) + + def space_separated(self): + return ' '.join(self) + + def trim_right(self, item): + """ + Remove the item from the end of the set. + + >>> WordSet.parse('foo bar').trim_right('foo') + ('foo', 'bar') + >>> WordSet.parse('foo bar').trim_right('bar') + ('foo',) + >>> WordSet.parse('').trim_right('bar') + () + """ + return self[:-1] if self and self[-1] == item else self + + def trim_left(self, item): + """ + Remove the item from the beginning of the set. 
+
+        >>> WordSet.parse('foo bar').trim_left('foo')
+        ('bar',)
+        >>> WordSet.parse('foo bar').trim_left('bar')
+        ('foo', 'bar')
+        >>> WordSet.parse('').trim_left('bar')
+        ()
+        """
+        return self[1:] if self and self[0] == item else self
+
+    def trim(self, item):
+        """
+        >>> WordSet.parse('foo bar').trim('foo')
+        ('bar',)
+        """
+        return self.trim_left(item).trim_right(item)
+
+    def __getitem__(self, item):
+        result = super(WordSet, self).__getitem__(item)
+        if isinstance(item, slice):
+            result = WordSet(result)
+        return result
+
+    @classmethod
+    def parse(cls, identifier):
+        matches = cls._pattern.finditer(identifier)
+        return WordSet(match.group(0) for match in matches)
+
+    @classmethod
+    def from_class_name(cls, subject):
+        return cls.parse(subject.__class__.__name__)
+
+
+# for backward compatibility
+words = WordSet.parse
+
+
+def simple_html_strip(s):
+    r"""
+    Remove HTML from the string `s`.
+
+    >>> str(simple_html_strip(''))
+    ''
+
+    >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
+    A stormy day in paradise
+
+    >>> print(simple_html_strip('Somebody <broken-tag> tell the truth.'))
+    Somebody  tell the truth.
+
+    >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
+    What about
+    multiple lines?
+    """
+    html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
+    texts = (match.group(3) or '' for match in html_stripper.finditer(s))
+    return ''.join(texts)
+
+
+class SeparatedValues(str):
+    """
+    A string separated by a separator. Overrides __iter__ for getting
+    the values.
+
+    >>> list(SeparatedValues('a,b,c'))
+    ['a', 'b', 'c']
+
+    Whitespace is stripped and empty values are discarded.
+
+    >>> list(SeparatedValues(' a, b , c, '))
+    ['a', 'b', 'c']
+    """
+
+    separator = ','
+
+    def __iter__(self):
+        parts = self.split(self.separator)
+        return filter(None, (part.strip() for part in parts))
+
+
+class Stripper:
+    r"""
+    Given a series of lines, find the common prefix and strip it from them.
+
+    >>> lines = [
+    ...     'abcdefg\n',
+    ...     'abc\n',
+    ...     'abcde\n',
+    ... ]
+    >>> res = Stripper.strip_prefix(lines)
+    >>> res.prefix
+    'abc'
+    >>> list(res.lines)
+    ['defg\n', '\n', 'de\n']
+
+    If no prefix is common, nothing should be stripped.
+
+    >>> lines = [
+    ...     'abcd\n',
+    ...     '1234\n',
+    ... ]
+    >>> res = Stripper.strip_prefix(lines)
+    >>> res.prefix
+    ''
+    >>> list(res.lines)
+    ['abcd\n', '1234\n']
+    """
+
+    def __init__(self, prefix, lines):
+        self.prefix = prefix
+        self.lines = map(self, lines)
+
+    @classmethod
+    def strip_prefix(cls, lines):
+        prefix_lines, lines = itertools.tee(lines)
+        prefix = functools.reduce(cls.common_prefix, prefix_lines)
+        return cls(prefix, lines)
+
+    def __call__(self, line):
+        if not self.prefix:
+            return line
+        null, prefix, rest = line.partition(self.prefix)
+        return rest
+
+    @staticmethod
+    def common_prefix(s1, s2):
+        """
+        Return the common prefix of two lines.
+        """
+        index = min(len(s1), len(s2))
+        while s1[:index] != s2[:index]:
+            index -= 1
+        return s1[:index]
+
+
+def remove_prefix(text, prefix):
+    """
+    Remove the prefix from the text if it exists.
+
+    >>> remove_prefix('underwhelming performance', 'underwhelming ')
+    'performance'
+
+    >>> remove_prefix('something special', 'sample')
+    'something special'
+    """
+    null, prefix, rest = text.rpartition(prefix)
+    return rest
+
+
+def remove_suffix(text, suffix):
+    """
+    Remove the suffix from the text if it exists.
+
+    >>> remove_suffix('name.git', '.git')
+    'name'
+
+    >>> remove_suffix('something special', 'sample')
+    'something special'
+    """
+    rest, suffix, null = text.partition(suffix)
+    return rest
+
+
+def normalize_newlines(text):
+    r"""
+    Replace alternate newlines with the canonical newline.
+
+    >>> normalize_newlines('Lorem Ipsum\u2029')
+    'Lorem Ipsum\n'
+    >>> normalize_newlines('Lorem Ipsum\r\n')
+    'Lorem Ipsum\n'
+    >>> normalize_newlines('Lorem Ipsum\x85')
+    'Lorem Ipsum\n'
+    """
+    newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
+    pattern = '|'.join(newlines)
+    return re.sub(pattern, '\n', text)
+
+
+def _nonblank(str):
+    return str and not str.startswith('#')
+
+
+@functools.singledispatch
+def yield_lines(iterable):
+    r"""
+    Yield valid lines of a string or iterable.
+
+    >>> list(yield_lines(''))
+    []
+    >>> list(yield_lines(['foo', 'bar']))
+    ['foo', 'bar']
+    >>> list(yield_lines('foo\nbar'))
+    ['foo', 'bar']
+    >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
+    ['foo', 'baz #comment']
+    >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
+    ['foo', 'bar', 'baz', 'bing']
+    """
+    return itertools.chain.from_iterable(map(yield_lines, iterable))
+
+
+@yield_lines.register(str)
+def _(text):
+    return filter(_nonblank, map(str.strip, text.splitlines()))
+
+
+def drop_comment(line):
+    """
+    Drop comments.
+
+    >>> drop_comment('foo # bar')
+    'foo'
+
+    A hash without a space may be in a URL.
+
+    >>> drop_comment('http://example.com/foo#bar')
+    'http://example.com/foo#bar'
+    """
+    return line.partition(' #')[0]
+
+
+def join_continuation(lines):
+    r"""
+    Join lines continued by a trailing backslash.
+
+    >>> list(join_continuation(['foo \\', 'bar', 'baz']))
+    ['foobar', 'baz']
+    >>> list(join_continuation(['foo \\', 'bar', 'baz']))
+    ['foobar', 'baz']
+    >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
+    ['foobarbaz']
+
+    Not sure why, but...
+    The character preceding the backslash is also elided.
+
+    >>> list(join_continuation(['goo\\', 'dly']))
+    ['godly']
+
+    A terrible idea, but...
+    If no line is available to continue, suppress the lines.
+
+    >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
+    ['foo']
+    """
+    lines = iter(lines)
+    for item in lines:
+        while item.endswith('\\'):
+            try:
+                item = item[:-2].strip() + next(lines)
+            except StopIteration:
+                return
+        yield item
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/__init__.py b/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..66443971dfdc53bd58e3aa141d0cf48cda0daf2f
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/__init__.py
@@ -0,0 +1,6 @@
+"""More routines for operating on iterables, beyond itertools"""
+
+from .more import *  # noqa
+from .recipes import *  # noqa
+
+__version__ = '9.1.0'
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/more.py b/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/more.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0e2d3de92d1c4acd6e47943d57a7c57ca675afe
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/more.py
@@ -0,0 +1,4391 @@
+import warnings
+
+from collections import Counter, defaultdict, deque, abc
+from collections.abc import Sequence
+from functools import partial, reduce, wraps
+from heapq import heapify, heapreplace, heappop
+from itertools import (
+    chain,
+    compress,
+    count,
+    cycle,
+    dropwhile,
+    groupby,
+    islice,
+    repeat,
+    starmap,
+    takewhile,
+    tee,
+    zip_longest,
+)
+from math import exp, factorial, floor, log
+from queue import Empty, Queue
+from random import random, randrange, uniform
+from operator import itemgetter, mul, sub, gt, lt, ge, le
+from sys import hexversion, maxsize
+from time import monotonic
+
+from .recipes import (
+    _marker,
+    _zip_equal,
+    UnequalIterablesError,
+    consume,
+    flatten,
+    pairwise,
+    powerset,
+    take,
+    unique_everseen,
+    all_equal,
+)
+
+__all__ = [
+    'AbortThread',
+    'SequenceView',
+    'UnequalIterablesError',
+    'adjacent',
+    'all_unique',
+    'always_iterable',
+    'always_reversible',
+    'bucket',
+    'callback_iter',
+    'chunked',
+    'chunked_even',
+    'circular_shifts',
+    'collapse',
+    'combination_index',
+    'consecutive_groups',
+    'constrained_batches',
+    'consumer',
+    'count_cycle',
+    'countable',
+    'difference',
+    'distinct_combinations',
+    'distinct_permutations',
+    'distribute',
+    'divide',
+    'duplicates_everseen',
+    'duplicates_justseen',
+    'exactly_n',
+    'filter_except',
+    'first',
+    'gray_product',
+    'groupby_transform',
+    'ichunked',
+    'iequals',
+    'ilen',
+    'interleave',
+    'interleave_evenly',
+    'interleave_longest',
+    'intersperse',
+    'is_sorted',
+    'islice_extended',
+    'iterate',
+    'last',
+    'locate',
+    'longest_common_prefix',
+    'lstrip',
+    'make_decorator',
+    'map_except',
+    'map_if',
+    'map_reduce',
+    'mark_ends',
+    'minmax',
+    'nth_or_last',
+    'nth_permutation',
+    'nth_product',
+    'numeric_range',
+    'one',
+    'only',
+    'padded',
+    'partitions',
+    'peekable',
+    'permutation_index',
+    'product_index',
+    'raise_',
+    'repeat_each',
+    'repeat_last',
+    'replace',
+    'rlocate',
+    'rstrip',
+    'run_length',
+    'sample',
+    'seekable',
+    'set_partitions',
+    'side_effect',
+    'sliced',
+    'sort_together',
+    'split_after',
+    'split_at',
+    'split_before',
+    'split_into',
+    'split_when',
+    'spy',
+    'stagger',
+    'strip',
+    'strictly_n',
+    'substrings',
+    'substrings_indexes',
+    'time_limited',
+    'unique_in_window',
+    'unique_to_each',
+    'unzip',
+    'value_chain',
+    'windowed',
+    'windowed_complete',
+    'with_iter',
+    'zip_broadcast',
+    'zip_equal',
+    'zip_offset',
+]
+
+
+def chunked(iterable, n, strict=False):
+    """Break *iterable* into lists of length *n*:
+
+    >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
+    [[1, 2, 3], [4, 5, 6]]
+
+    By default, the last yielded list will have fewer than *n* elements
+    if the length of *iterable* is not divisible by *n*:
+
+    >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
+    [[1, 2, 3], [4, 5, 6], [7, 8]]
+
+    To use a fill-in value instead, see the :func:`grouper` recipe.
+
+    If the length of *iterable* is not divisible by *n* and *strict* is
+    ``True``, then ``ValueError`` will be raised before the last
+    list is yielded.
+
+    """
+    iterator = iter(partial(take, n, iter(iterable)), [])
+    if strict:
+        if n is None:
+            raise ValueError('n must not be None when using strict mode.')
+
+        def ret():
+            for chunk in iterator:
+                if len(chunk) != n:
+                    raise ValueError('iterable is not divisible by n.')
+                yield chunk
+
+        return iter(ret())
+    else:
+        return iterator
+
+
+def first(iterable, default=_marker):
+    """Return the first item of *iterable*, or *default* if *iterable* is
+    empty.
+
+    >>> first([0, 1, 2, 3])
+    0
+    >>> first([], 'some default')
+    'some default'
+
+    If *default* is not provided and there are no items in the iterable,
+    raise ``ValueError``.
+
+    :func:`first` is useful when you have a generator of expensive-to-retrieve
+    values and want any arbitrary one. It is marginally shorter than
+    ``next(iter(iterable), default)``.
+
+    """
+    try:
+        return next(iter(iterable))
+    except StopIteration as e:
+        if default is _marker:
+            raise ValueError(
+                'first() was called on an empty iterable, and no '
+                'default value was provided.'
+            ) from e
+        return default
+
+
+def last(iterable, default=_marker):
+    """Return the last item of *iterable*, or *default* if *iterable* is
+    empty.
+
+    >>> last([0, 1, 2, 3])
+    3
+    >>> last([], 'some default')
+    'some default'
+
+    If *default* is not provided and there are no items in the iterable,
+    raise ``ValueError``.
+    """
+    try:
+        if isinstance(iterable, Sequence):
+            return iterable[-1]
+        # Work around https://bugs.python.org/issue38525
+        elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
+            return next(reversed(iterable))
+        else:
+            return deque(iterable, maxlen=1)[-1]
+    except (IndexError, TypeError, StopIteration):
+        if default is _marker:
+            raise ValueError(
+                'last() was called on an empty iterable, and no default was '
+                'provided.'
+            )
+        return default
+
+
+def nth_or_last(iterable, n, default=_marker):
+    """Return the nth or the last item of *iterable*,
+    or *default* if *iterable* is empty.
+ + >>> nth_or_last([0, 1, 2, 3], 2) + 2 + >>> nth_or_last([0, 1], 2) + 1 + >>> nth_or_last([], 0, 'some default') + 'some default' + + If *default* is not provided and there are no items in the iterable, + raise ``ValueError``. + """ + return last(islice(iterable, n + 1), default=default) + + +class peekable: + """Wrap an iterator to allow lookahead and prepending elements. + + Call :meth:`peek` on the result to get the value that will be returned + by :func:`next`. This won't advance the iterator: + + >>> p = peekable(['a', 'b']) + >>> p.peek() + 'a' + >>> next(p) + 'a' + + Pass :meth:`peek` a default value to return that instead of raising + ``StopIteration`` when the iterator is exhausted. + + >>> p = peekable([]) + >>> p.peek('hi') + 'hi' + + peekables also offer a :meth:`prepend` method, which "inserts" items + at the head of the iterable: + + >>> p = peekable([1, 2, 3]) + >>> p.prepend(10, 11, 12) + >>> next(p) + 10 + >>> p.peek() + 11 + >>> list(p) + [11, 12, 1, 2, 3] + + peekables can be indexed. Index 0 is the item that will be returned by + :func:`next`, index 1 is the item after that, and so on: + The values up to the given index will be cached. + + >>> p = peekable(['a', 'b', 'c', 'd']) + >>> p[0] + 'a' + >>> p[1] + 'b' + >>> next(p) + 'a' + + Negative indexes are supported, but be aware that they will cache the + remaining items in the source iterator, which may require significant + storage. + + To check whether a peekable is exhausted, check its truth value: + + >>> p = peekable(['a', 'b']) + >>> if p: # peekable has items + ... list(p) + ['a', 'b'] + >>> if not p: # peekable is exhausted + ... list(p) + [] + + """ + + def __init__(self, iterable): + self._it = iter(iterable) + self._cache = deque() + + def __iter__(self): + return self + + def __bool__(self): + try: + self.peek() + except StopIteration: + return False + return True + + def peek(self, default=_marker): + """Return the item that will be next returned from ``next()``. + + Return ``default`` if there are no items left. If ``default`` is not + provided, raise ``StopIteration``. + + """ + if not self._cache: + try: + self._cache.append(next(self._it)) + except StopIteration: + if default is _marker: + raise + return default + return self._cache[0] + + def prepend(self, *items): + """Stack up items to be the next ones returned from ``next()`` or + ``self.peek()``. The items will be returned in + first in, first out order:: + + >>> p = peekable([1, 2, 3]) + >>> p.prepend(10, 11, 12) + >>> next(p) + 10 + >>> list(p) + [11, 12, 1, 2, 3] + + It is possible, by prepending items, to "resurrect" a peekable that + previously raised ``StopIteration``. + + >>> p = peekable([]) + >>> next(p) + Traceback (most recent call last): + ... + StopIteration + >>> p.prepend(1) + >>> next(p) + 1 + >>> next(p) + Traceback (most recent call last): + ... 
+ StopIteration + + """ + self._cache.extendleft(reversed(items)) + + def __next__(self): + if self._cache: + return self._cache.popleft() + + return next(self._it) + + def _get_slice(self, index): + # Normalize the slice's arguments + step = 1 if (index.step is None) else index.step + if step > 0: + start = 0 if (index.start is None) else index.start + stop = maxsize if (index.stop is None) else index.stop + elif step < 0: + start = -1 if (index.start is None) else index.start + stop = (-maxsize - 1) if (index.stop is None) else index.stop + else: + raise ValueError('slice step cannot be zero') + + # If either the start or stop index is negative, we'll need to cache + # the rest of the iterable in order to slice from the right side. + if (start < 0) or (stop < 0): + self._cache.extend(self._it) + # Otherwise we'll need to find the rightmost index and cache to that + # point. + else: + n = min(max(start, stop) + 1, maxsize) + cache_len = len(self._cache) + if n >= cache_len: + self._cache.extend(islice(self._it, n - cache_len)) + + return list(self._cache)[index] + + def __getitem__(self, index): + if isinstance(index, slice): + return self._get_slice(index) + + cache_len = len(self._cache) + if index < 0: + self._cache.extend(self._it) + elif index >= cache_len: + self._cache.extend(islice(self._it, index + 1 - cache_len)) + + return self._cache[index] + + +def consumer(func): + """Decorator that automatically advances a PEP-342-style "reverse iterator" + to its first yield point so you don't have to call ``next()`` on it + manually. + + >>> @consumer + ... def tally(): + ... i = 0 + ... while True: + ... print('Thing number %s is %s.' % (i, (yield))) + ... i += 1 + ... + >>> t = tally() + >>> t.send('red') + Thing number 0 is red. + >>> t.send('fish') + Thing number 1 is fish. + + Without the decorator, you would have to call ``next(t)`` before + ``t.send()`` could be used. + + """ + + @wraps(func) + def wrapper(*args, **kwargs): + gen = func(*args, **kwargs) + next(gen) + return gen + + return wrapper + + +def ilen(iterable): + """Return the number of items in *iterable*. + + >>> ilen(x for x in range(1000000) if x % 3 == 0) + 333334 + + This consumes the iterable, so handle with care. + + """ + # This approach was selected because benchmarks showed it's likely the + # fastest of the known implementations at the time of writing. + # See GitHub tracker: #236, #230. + counter = count() + deque(zip(iterable, counter), maxlen=0) + return next(counter) + + +def iterate(func, start): + """Return ``start``, ``func(start)``, ``func(func(start))``, ... + + >>> from itertools import islice + >>> list(islice(iterate(lambda x: 2*x, 1), 10)) + [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + """ + while True: + yield start + start = func(start) + + +def with_iter(context_manager): + """Wrap an iterable in a ``with`` statement, so it closes once exhausted. + + For example, this will close the file when the iterator is exhausted:: + + upper_lines = (line.upper() for line in with_iter(open('foo'))) + + Any context manager which returns an iterable is a candidate for + ``with_iter``. + + """ + with context_manager as iterable: + yield from iterable + + +def one(iterable, too_short=None, too_long=None): + """Return the first item from *iterable*, which is expected to contain only + that item. Raise an exception if *iterable* is empty or has more than one + item. + + :func:`one` is useful for ensuring that an iterable contains only one item. 
+
+    For example, it can be used to retrieve the result of a database query
+    that is expected to return a single row.
+
+    If *iterable* is empty, ``ValueError`` will be raised. You may specify a
+    different exception with the *too_short* keyword:
+
+    >>> it = []
+    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: too few items in iterable (expected 1)
+    >>> too_short = IndexError('too few items')
+    >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    IndexError: too few items
+
+    Similarly, if *iterable* contains more than one item, ``ValueError`` will
+    be raised. You may specify a different exception with the *too_long*
+    keyword:
+
+    >>> it = ['too', 'many']
+    >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: Expected exactly one item in iterable, but got 'too',
+    'many', and perhaps more.
+    >>> too_long = RuntimeError
+    >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    RuntimeError
+
+    Note that :func:`one` attempts to advance *iterable* twice to ensure there
+    is only one item. See :func:`spy` or :func:`peekable` to check iterable
+    contents less destructively.
+
+    """
+    it = iter(iterable)
+
+    try:
+        first_value = next(it)
+    except StopIteration as e:
+        raise (
+            too_short or ValueError('too few items in iterable (expected 1)')
+        ) from e
+
+    try:
+        second_value = next(it)
+    except StopIteration:
+        pass
+    else:
+        msg = (
+            'Expected exactly one item in iterable, but got {!r}, {!r}, '
+            'and perhaps more.'.format(first_value, second_value)
+        )
+        raise too_long or ValueError(msg)
+
+    return first_value
+
+
+def raise_(exception, *args):
+    raise exception(*args)
+
+
+def strictly_n(iterable, n, too_short=None, too_long=None):
+    """Validate that *iterable* has exactly *n* items and return them if
+    it does. If it has fewer than *n* items, call function *too_short*
+    with the number of items found. If it has more than *n* items, call
+    function *too_long* with ``n + 1``.
+
+    >>> iterable = ['a', 'b', 'c', 'd']
+    >>> n = 4
+    >>> list(strictly_n(iterable, n))
+    ['a', 'b', 'c', 'd']
+
+    By default, *too_short* and *too_long* are functions that raise
+    ``ValueError``.
+
+    >>> list(strictly_n('ab', 3))  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: too few items in iterable (got 2)
+
+    >>> list(strictly_n('abc', 2))  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: too many items in iterable (got at least 3)
+
+    You can instead supply functions that do something else.
+    *too_short* will be called with the number of items in *iterable*.
+    *too_long* will be called with `n + 1`.
+
+    >>> def too_short(item_count):
+    ...     raise RuntimeError
+    >>> it = strictly_n('abcd', 6, too_short=too_short)
+    >>> list(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    RuntimeError
+
+    >>> def too_long(item_count):
+    ...     print('The boss is going to hear about this')
+    >>> it = strictly_n('abcdef', 4, too_long=too_long)
+    >>> list(it)
+    The boss is going to hear about this
+    ['a', 'b', 'c', 'd']
+
+    """
+    if too_short is None:
+        too_short = lambda item_count: raise_(
+            ValueError,
+            'Too few items in iterable (got {})'.format(item_count),
+        )
+
+    if too_long is None:
+        too_long = lambda item_count: raise_(
+            ValueError,
+            'Too many items in iterable (got at least {})'.format(item_count),
+        )
+
+    it = iter(iterable)
+    for i in range(n):
+        try:
+            item = next(it)
+        except StopIteration:
+            too_short(i)
+            return
+        else:
+            yield item
+
+    try:
+        next(it)
+    except StopIteration:
+        pass
+    else:
+        too_long(n + 1)
+
+
+def distinct_permutations(iterable, r=None):
+    """Yield successive distinct permutations of the elements in *iterable*.
+
+    >>> sorted(distinct_permutations([1, 0, 1]))
+    [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
+
+    Equivalent to ``set(permutations(iterable))``, except duplicates are not
+    generated and thrown away. For larger input sequences this is much more
+    efficient.
+
+    Duplicate permutations arise when there are duplicated elements in the
+    input iterable. The number of items returned is
+    `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
+    items input, and each `x_i` is the count of a distinct item in the input
+    sequence.
+
+    If *r* is given, only the *r*-length permutations are yielded.
+
+    >>> sorted(distinct_permutations([1, 0, 1], r=2))
+    [(0, 1), (1, 0), (1, 1)]
+    >>> sorted(distinct_permutations(range(3), r=2))
+    [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
+
+    """
+
+    # Algorithm: https://w.wiki/Qai
+    def _full(A):
+        while True:
+            # Yield the permutation we have
+            yield tuple(A)
+
+            # Find the largest index i such that A[i] < A[i + 1]
+            for i in range(size - 2, -1, -1):
+                if A[i] < A[i + 1]:
+                    break
+            # If no such index exists, this permutation is the last one
+            else:
+                return
+
+            # Find the largest index j greater than i such that A[i] < A[j]
+            for j in range(size - 1, i, -1):
+                if A[i] < A[j]:
+                    break
+
+            # Swap the value of A[i] with that of A[j], then reverse the
+            # sequence from A[i + 1] to form the new permutation
+            A[i], A[j] = A[j], A[i]
+            A[i + 1 :] = A[: i - size : -1]  # A[i + 1:][::-1]
+
+    # Algorithm: modified from the above
+    def _partial(A, r):
+        # Split A into the first r items and the remaining items
+        head, tail = A[:r], A[r:]
+        right_head_indexes = range(r - 1, -1, -1)
+        left_tail_indexes = range(len(tail))
+
+        while True:
+            # Yield the permutation we have
+            yield tuple(head)
+
+            # Starting from the right, find the first index of the head with
+            # value smaller than the maximum value of the tail - call it i.
+            pivot = tail[-1]
+            for i in right_head_indexes:
+                if head[i] < pivot:
+                    break
+                pivot = head[i]
+            else:
+                return
+
+            # Starting from the left, find the first value of the tail
+            # with a value greater than head[i] and swap.
+            for j in left_tail_indexes:
+                if tail[j] > head[i]:
+                    head[i], tail[j] = tail[j], head[i]
+                    break
+            # If we didn't find one, start from the right and find the first
+            # index of the head with a value greater than head[i] and swap.
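+            # (This fallback always succeeds: reaching this branch implies
+            # i < r - 1, and the pivot scan above guarantees that
+            # head[i + 1] is greater than head[i].)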
+ else: + for j in right_head_indexes: + if head[j] > head[i]: + head[i], head[j] = head[j], head[i] + break + + # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)] + tail += head[: i - r : -1] # head[i + 1:][::-1] + i += 1 + head[i:], tail[:] = tail[: r - i], tail[r - i :] + + items = sorted(iterable) + + size = len(items) + if r is None: + r = size + + if 0 < r <= size: + return _full(items) if (r == size) else _partial(items, r) + + return iter(() if r else ((),)) + + +def intersperse(e, iterable, n=1): + """Intersperse filler element *e* among the items in *iterable*, leaving + *n* items between each filler element. + + >>> list(intersperse('!', [1, 2, 3, 4, 5])) + [1, '!', 2, '!', 3, '!', 4, '!', 5] + + >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) + [1, 2, None, 3, 4, None, 5] + + """ + if n == 0: + raise ValueError('n must be > 0') + elif n == 1: + # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2... + # islice(..., 1, None) -> x_0, e, x_1, e, x_2... + return islice(interleave(repeat(e), iterable), 1, None) + else: + # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... + # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... + # flatten(...) -> x_0, x_1, e, x_2, x_3... + filler = repeat([e]) + chunks = chunked(iterable, n) + return flatten(islice(interleave(filler, chunks), 1, None)) + + +def unique_to_each(*iterables): + """Return the elements from each of the input iterables that aren't in the + other input iterables. + + For example, suppose you have a set of packages, each with a set of + dependencies:: + + {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} + + If you remove one package, which dependencies can also be removed? + + If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not + associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for + ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: + + >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) + [['A'], ['C'], ['D']] + + If there are duplicates in one input iterable that aren't in the others + they will be duplicated in the output. Input order is preserved:: + + >>> unique_to_each("mississippi", "missouri") + [['p', 'p'], ['o', 'u', 'r']] + + It is assumed that the elements of each iterable are hashable. + + """ + pool = [list(it) for it in iterables] + counts = Counter(chain.from_iterable(map(set, pool))) + uniques = {element for element in counts if counts[element] == 1} + return [list(filter(uniques.__contains__, it)) for it in pool] + + +def windowed(seq, n, fillvalue=None, step=1): + """Return a sliding window of width *n* over the given iterable. 
+
+    >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
+    >>> list(all_windows)
+    [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
+
+    When the window is larger than the iterable, *fillvalue* is used in place
+    of missing values:
+
+    >>> list(windowed([1, 2, 3], 4))
+    [(1, 2, 3, None)]
+
+    Each window will advance in increments of *step*:
+
+    >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
+    [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
+
+    To slide into the iterable's items, use :func:`chain` to add filler items
+    to the left:
+
+    >>> iterable = [1, 2, 3, 4]
+    >>> n = 3
+    >>> padding = [None] * (n - 1)
+    >>> list(windowed(chain(padding, iterable), 3))
+    [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
+    """
+    if n < 0:
+        raise ValueError('n must be >= 0')
+    if n == 0:
+        yield tuple()
+        return
+    if step < 1:
+        raise ValueError('step must be >= 1')
+
+    window = deque(maxlen=n)
+    i = n
+    for _ in map(window.append, seq):
+        i -= 1
+        if not i:
+            i = step
+            yield tuple(window)
+
+    size = len(window)
+    if size == 0:
+        return
+    elif size < n:
+        yield tuple(chain(window, repeat(fillvalue, n - size)))
+    elif 0 < i < min(step, n):
+        window += (fillvalue,) * i
+        yield tuple(window)
+
+
+def substrings(iterable):
+    """Yield all of the substrings of *iterable*.
+
+    >>> [''.join(s) for s in substrings('more')]
+    ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
+
+    Note that non-string iterables can also be subdivided.
+
+    >>> list(substrings([0, 1, 2]))
+    [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
+
+    """
+    # The length-1 substrings
+    seq = []
+    for item in iter(iterable):
+        seq.append(item)
+        yield (item,)
+    seq = tuple(seq)
+    item_count = len(seq)
+
+    # And the rest
+    for n in range(2, item_count + 1):
+        for i in range(item_count - n + 1):
+            yield seq[i : i + n]
+
+
+def substrings_indexes(seq, reverse=False):
+    """Yield all substrings and their positions in *seq*
+
+    The items yielded will be a tuple of the form ``(substr, i, j)``, where
+    ``substr == seq[i:j]``.
+
+    This function only works for iterables that support slicing, such as
+    ``str`` objects.
+
+    >>> for item in substrings_indexes('more'):
+    ...    print(item)
+    ('m', 0, 1)
+    ('o', 1, 2)
+    ('r', 2, 3)
+    ('e', 3, 4)
+    ('mo', 0, 2)
+    ('or', 1, 3)
+    ('re', 2, 4)
+    ('mor', 0, 3)
+    ('ore', 1, 4)
+    ('more', 0, 4)
+
+    Set *reverse* to ``True`` to yield the same items in the opposite order.
+
+    """
+    r = range(1, len(seq) + 1)
+    if reverse:
+        r = reversed(r)
+    return (
+        (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
+    )
+
+
+class bucket:
+    """Wrap *iterable* and return an object that buckets it into
+    child iterables based on a *key* function.
+
+    >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
+    >>> s = bucket(iterable, key=lambda x: x[0])  # Bucket by 1st character
+    >>> sorted(list(s))  # Get the keys
+    ['a', 'b', 'c']
+    >>> a_iterable = s['a']
+    >>> next(a_iterable)
+    'a1'
+    >>> next(a_iterable)
+    'a2'
+    >>> list(s['b'])
+    ['b1', 'b2', 'b3']
+
+    The original iterable will be advanced and its items will be cached until
+    they are used by the child iterables. This may require significant storage.
+
+    By default, attempting to select a bucket to which no items belong will
+    exhaust the iterable and cache all values.
+    If you specify a *validator* function, selected buckets will instead be
+    checked against it.
+ + >>> from itertools import count + >>> it = count(1, 2) # Infinite sequence of odd numbers + >>> key = lambda x: x % 10 # Bucket by last digit + >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only + >>> s = bucket(it, key=key, validator=validator) + >>> 2 in s + False + >>> list(s[2]) + [] + + """ + + def __init__(self, iterable, key, validator=None): + self._it = iter(iterable) + self._key = key + self._cache = defaultdict(deque) + self._validator = validator or (lambda x: True) + + def __contains__(self, value): + if not self._validator(value): + return False + + try: + item = next(self[value]) + except StopIteration: + return False + else: + self._cache[value].appendleft(item) + + return True + + def _get_values(self, value): + """ + Helper to yield items from the parent iterator that match *value*. + Items that don't match are stored in the local cache as they + are encountered. + """ + while True: + # If we've cached some items that match the target value, emit + # the first one and evict it from the cache. + if self._cache[value]: + yield self._cache[value].popleft() + # Otherwise we need to advance the parent iterator to search for + # a matching item, caching the rest. + else: + while True: + try: + item = next(self._it) + except StopIteration: + return + item_value = self._key(item) + if item_value == value: + yield item + break + elif self._validator(item_value): + self._cache[item_value].append(item) + + def __iter__(self): + for item in self._it: + item_value = self._key(item) + if self._validator(item_value): + self._cache[item_value].append(item) + + yield from self._cache.keys() + + def __getitem__(self, value): + if not self._validator(value): + return iter(()) + + return self._get_values(value) + + +def spy(iterable, n=1): + """Return a 2-tuple with a list containing the first *n* elements of + *iterable*, and an iterator with the same items as *iterable*. + This allows you to "look ahead" at the items in the iterable without + advancing it. + + There is one item in the list by default: + + >>> iterable = 'abcdefg' + >>> head, iterable = spy(iterable) + >>> head + ['a'] + >>> list(iterable) + ['a', 'b', 'c', 'd', 'e', 'f', 'g'] + + You may use unpacking to retrieve items instead of lists: + + >>> (head,), iterable = spy('abcdefg') + >>> head + 'a' + >>> (first, second), iterable = spy('abcdefg', 2) + >>> first + 'a' + >>> second + 'b' + + The number of items requested can be larger than the number of items in + the iterable: + + >>> iterable = [1, 2, 3, 4, 5] + >>> head, iterable = spy(iterable, 10) + >>> head + [1, 2, 3, 4, 5] + >>> list(iterable) + [1, 2, 3, 4, 5] + + """ + it = iter(iterable) + head = take(n, it) + + return head.copy(), chain(head, it) + + +def interleave(*iterables): + """Return a new iterable yielding from each iterable in turn, + until the shortest is exhausted. + + >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) + [1, 4, 6, 2, 5, 7] + + For a version that doesn't terminate after the shortest iterable is + exhausted, see :func:`interleave_longest`. + + """ + return chain.from_iterable(zip(*iterables)) + + +def interleave_longest(*iterables): + """Return a new iterable yielding from each iterable in turn, + skipping any that are exhausted. + + >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) + [1, 4, 6, 2, 5, 7, 3, 8] + + This function produces the same output as :func:`roundrobin`, but may + perform better for some inputs (in particular when the number of iterables + is large). 
+ + """ + i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) + return (x for x in i if x is not _marker) + + +def interleave_evenly(iterables, lengths=None): + """ + Interleave multiple iterables so that their elements are evenly distributed + throughout the output sequence. + + >>> iterables = [1, 2, 3, 4, 5], ['a', 'b'] + >>> list(interleave_evenly(iterables)) + [1, 2, 'a', 3, 4, 'b', 5] + + >>> iterables = [[1, 2, 3], [4, 5], [6, 7, 8]] + >>> list(interleave_evenly(iterables)) + [1, 6, 4, 2, 7, 3, 8, 5] + + This function requires iterables of known length. Iterables without + ``__len__()`` can be used by manually specifying lengths with *lengths*: + + >>> from itertools import combinations, repeat + >>> iterables = [combinations(range(4), 2), ['a', 'b', 'c']] + >>> lengths = [4 * (4 - 1) // 2, 3] + >>> list(interleave_evenly(iterables, lengths=lengths)) + [(0, 1), (0, 2), 'a', (0, 3), (1, 2), 'b', (1, 3), (2, 3), 'c'] + + Based on Bresenham's algorithm. + """ + if lengths is None: + try: + lengths = [len(it) for it in iterables] + except TypeError: + raise ValueError( + 'Iterable lengths could not be determined automatically. ' + 'Specify them with the lengths keyword.' + ) + elif len(iterables) != len(lengths): + raise ValueError('Mismatching number of iterables and lengths.') + + dims = len(lengths) + + # sort iterables by length, descending + lengths_permute = sorted( + range(dims), key=lambda i: lengths[i], reverse=True + ) + lengths_desc = [lengths[i] for i in lengths_permute] + iters_desc = [iter(iterables[i]) for i in lengths_permute] + + # the longest iterable is the primary one (Bresenham: the longest + # distance along an axis) + delta_primary, deltas_secondary = lengths_desc[0], lengths_desc[1:] + iter_primary, iters_secondary = iters_desc[0], iters_desc[1:] + errors = [delta_primary // dims] * len(deltas_secondary) + + to_yield = sum(lengths) + while to_yield: + yield next(iter_primary) + to_yield -= 1 + # update errors for each secondary iterable + errors = [e - delta for e, delta in zip(errors, deltas_secondary)] + + # those iterables for which the error is negative are yielded + # ("diagonal step" in Bresenham) + for i, e in enumerate(errors): + if e < 0: + yield next(iters_secondary[i]) + to_yield -= 1 + errors[i] += delta_primary + + +def collapse(iterable, base_type=None, levels=None): + """Flatten an iterable with multiple levels of nesting (e.g., a list of + lists of tuples) into non-iterable types. + + >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] + >>> list(collapse(iterable)) + [1, 2, 3, 4, 5, 6] + + Binary and text strings are not considered iterable and + will not be collapsed. 
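+
+    For instance, strings at any level of nesting are emitted whole:
+
+    >>> list(collapse(['ab', ('cd', 'ef')]))
+    ['ab', 'cd', 'ef']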
+
+    To avoid collapsing other types, specify *base_type*:
+
+    >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
+    >>> list(collapse(iterable, base_type=tuple))
+    ['ab', ('cd', 'ef'), 'gh', 'ij']
+
+    Specify *levels* to stop flattening after a certain level:
+
+    >>> iterable = [('a', ['b']), ('c', ['d'])]
+    >>> list(collapse(iterable))  # Fully flattened
+    ['a', 'b', 'c', 'd']
+    >>> list(collapse(iterable, levels=1))  # Only one level flattened
+    ['a', ['b'], 'c', ['d']]
+
+    """
+
+    def walk(node, level):
+        if (
+            ((levels is not None) and (level > levels))
+            or isinstance(node, (str, bytes))
+            or ((base_type is not None) and isinstance(node, base_type))
+        ):
+            yield node
+            return
+
+        try:
+            tree = iter(node)
+        except TypeError:
+            yield node
+            return
+        else:
+            for child in tree:
+                yield from walk(child, level + 1)
+
+    yield from walk(iterable, 0)
+
+
+def side_effect(func, iterable, chunk_size=None, before=None, after=None):
+    """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
+    of items) before yielding the item.
+
+    `func` must be a function that takes a single argument. Its return value
+    will be discarded.
+
+    *before* and *after* are optional functions that take no arguments. They
+    will be executed before iteration starts and after it ends, respectively.
+
+    `side_effect` can be used for logging, updating progress bars, or anything
+    that is not functionally "pure."
+
+    Emitting a status message:
+
+    >>> from more_itertools import consume
+    >>> func = lambda item: print('Received {}'.format(item))
+    >>> consume(side_effect(func, range(2)))
+    Received 0
+    Received 1
+
+    Operating on chunks of items:
+
+    >>> pair_sums = []
+    >>> func = lambda chunk: pair_sums.append(sum(chunk))
+    >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
+    [0, 1, 2, 3, 4, 5]
+    >>> list(pair_sums)
+    [1, 5, 9]
+
+    Writing to a file-like object:
+
+    >>> from io import StringIO
+    >>> from more_itertools import consume
+    >>> f = StringIO()
+    >>> func = lambda x: print(x, file=f)
+    >>> before = lambda: print(u'HEADER', file=f)
+    >>> after = f.close
+    >>> it = [u'a', u'b', u'c']
+    >>> consume(side_effect(func, it, before=before, after=after))
+    >>> f.closed
+    True
+
+    """
+    try:
+        if before is not None:
+            before()
+
+        if chunk_size is None:
+            for item in iterable:
+                func(item)
+                yield item
+        else:
+            for chunk in chunked(iterable, chunk_size):
+                func(chunk)
+                yield from chunk
+    finally:
+        if after is not None:
+            after()
+
+
+def sliced(seq, n, strict=False):
+    """Yield slices of length *n* from the sequence *seq*.
+
+    >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
+    [(1, 2, 3), (4, 5, 6)]
+
+    By default, the last yielded slice will have fewer than *n* elements
+    if the length of *seq* is not divisible by *n*:
+
+    >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
+    [(1, 2, 3), (4, 5, 6), (7, 8)]
+
+    If the length of *seq* is not divisible by *n* and *strict* is
+    ``True``, then ``ValueError`` will be raised before the last
+    slice is yielded.
+
+    This function will only work for iterables that support slicing.
+    For non-sliceable iterables, see :func:`chunked`.
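+
+    With *strict* set, the uneven final slice triggers that error as soon as
+    it is reached:
+
+    >>> list(sliced((1, 2, 3, 4), 3, strict=True))
+    Traceback (most recent call last):
+    ...
+    ValueError: seq is not divisible by n.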
+ + """ + iterator = takewhile(len, (seq[i : i + n] for i in count(0, n))) + if strict: + + def ret(): + for _slice in iterator: + if len(_slice) != n: + raise ValueError("seq is not divisible by n.") + yield _slice + + return iter(ret()) + else: + return iterator + + +def split_at(iterable, pred, maxsplit=-1, keep_separator=False): + """Yield lists of items from *iterable*, where each list is delimited by + an item where callable *pred* returns ``True``. + + >>> list(split_at('abcdcba', lambda x: x == 'b')) + [['a'], ['c', 'd', 'c'], ['a']] + + >>> list(split_at(range(10), lambda n: n % 2 == 1)) + [[0], [2], [4], [6], [8], []] + + At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, + then there is no limit on the number of splits: + + >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) + [[0], [2], [4, 5, 6, 7, 8, 9]] + + By default, the delimiting items are not included in the output. + To include them, set *keep_separator* to ``True``. + + >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) + [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] + + """ + if maxsplit == 0: + yield list(iterable) + return + + buf = [] + it = iter(iterable) + for item in it: + if pred(item): + yield buf + if keep_separator: + yield [item] + if maxsplit == 1: + yield list(it) + return + buf = [] + maxsplit -= 1 + else: + buf.append(item) + yield buf + + +def split_before(iterable, pred, maxsplit=-1): + """Yield lists of items from *iterable*, where each list ends just before + an item for which callable *pred* returns ``True``: + + >>> list(split_before('OneTwo', lambda s: s.isupper())) + [['O', 'n', 'e'], ['T', 'w', 'o']] + + >>> list(split_before(range(10), lambda n: n % 3 == 0)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] + + At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, + then there is no limit on the number of splits: + + >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]] + """ + if maxsplit == 0: + yield list(iterable) + return + + buf = [] + it = iter(iterable) + for item in it: + if pred(item) and buf: + yield buf + if maxsplit == 1: + yield [item] + list(it) + return + buf = [] + maxsplit -= 1 + buf.append(item) + if buf: + yield buf + + +def split_after(iterable, pred, maxsplit=-1): + """Yield lists of items from *iterable*, where each list ends with an + item where callable *pred* returns ``True``: + + >>> list(split_after('one1two2', lambda s: s.isdigit())) + [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']] + + >>> list(split_after(range(10), lambda n: n % 3 == 0)) + [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]] + + At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, + then there is no limit on the number of splits: + + >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2)) + [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]] + + """ + if maxsplit == 0: + yield list(iterable) + return + + buf = [] + it = iter(iterable) + for item in it: + buf.append(item) + if pred(item) and buf: + yield buf + if maxsplit == 1: + buf = list(it) + if buf: + yield buf + return + buf = [] + maxsplit -= 1 + if buf: + yield buf + + +def split_when(iterable, pred, maxsplit=-1): + """Split *iterable* into pieces based on the output of *pred*. + *pred* should be a function that takes successive pairs of items and + returns ``True`` if the iterable should be split in between them. 
+
+    For example, to find runs of increasing numbers, split the iterable when
+    element ``i`` is larger than element ``i + 1``:
+
+    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
+    [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
+
+    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
+    then there is no limit on the number of splits:
+
+    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
+    ...                 lambda x, y: x > y, maxsplit=2))
+    [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
+
+    """
+    if maxsplit == 0:
+        yield list(iterable)
+        return
+
+    it = iter(iterable)
+    try:
+        cur_item = next(it)
+    except StopIteration:
+        return
+
+    buf = [cur_item]
+    for next_item in it:
+        if pred(cur_item, next_item):
+            yield buf
+            if maxsplit == 1:
+                yield [next_item] + list(it)
+                return
+            buf = []
+            maxsplit -= 1
+
+        buf.append(next_item)
+        cur_item = next_item
+
+    yield buf
+
+
+def split_into(iterable, sizes):
+    """Yield a list of sequential items from *iterable* of length 'n' for each
+    integer 'n' in *sizes*.
+
+    >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
+    [[1], [2, 3], [4, 5, 6]]
+
+    If the sum of *sizes* is smaller than the length of *iterable*, then the
+    remaining items of *iterable* will not be returned.
+
+    >>> list(split_into([1,2,3,4,5,6], [2,3]))
+    [[1, 2], [3, 4, 5]]
+
+    If the sum of *sizes* is larger than the length of *iterable*, fewer items
+    will be returned in the iteration that overruns *iterable* and further
+    lists will be empty:
+
+    >>> list(split_into([1,2,3,4], [1,2,3,4]))
+    [[1], [2, 3], [4], []]
+
+    When a ``None`` object is encountered in *sizes*, the returned list will
+    contain items up to the end of *iterable* the same way that
+    :func:`itertools.islice` does:
+
+    >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
+    [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
+
+    :func:`split_into` can be useful for grouping a series of items where the
+    sizes of the groups are not uniform. An example would be where in a row
+    from a table, multiple columns represent elements of the same feature
+    (e.g. a point represented by x,y,z) but the format is not the same for
+    all columns.
+    """
+    # convert the iterable argument into an iterator so its contents can
+    # be consumed by islice in case it is a generator
+    it = iter(iterable)
+
+    for size in sizes:
+        if size is None:
+            yield list(it)
+            return
+        else:
+            yield list(islice(it, size))
+
+
+def padded(iterable, fillvalue=None, n=None, next_multiple=False):
+    """Yield the elements from *iterable*, followed by *fillvalue*, such that
+    at least *n* items are emitted.
+
+    >>> list(padded([1, 2, 3], '?', 5))
+    [1, 2, 3, '?', '?']
+
+    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
+    number of items emitted is a multiple of *n*::
+
+    >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
+    [1, 2, 3, 4, None, None]
+
+    If *n* is ``None``, *fillvalue* will be emitted indefinitely.
+
+    """
+    it = iter(iterable)
+    if n is None:
+        yield from chain(it, repeat(fillvalue))
+    elif n < 1:
+        raise ValueError('n must be at least 1')
+    else:
+        item_count = 0
+        for item in it:
+            yield item
+            item_count += 1
+
+        remaining = (n - item_count) % n if next_multiple else n - item_count
+        for _ in range(remaining):
+            yield fillvalue
+
+
+def repeat_each(iterable, n=2):
+    """Repeat each element in *iterable* *n* times.
+
+    >>> list(repeat_each('ABC', 3))
+    ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
+    """
+    return chain.from_iterable(map(repeat, iterable, repeat(n)))
+
+
+def repeat_last(iterable, default=None):
+    """After the *iterable* is exhausted, keep yielding its last element.
+
+    >>> list(islice(repeat_last(range(3)), 5))
+    [0, 1, 2, 2, 2]
+
+    If the iterable is empty, yield *default* forever::
+
+    >>> list(islice(repeat_last(range(0), 42), 5))
+    [42, 42, 42, 42, 42]
+
+    """
+    item = _marker
+    for item in iterable:
+        yield item
+    final = default if item is _marker else item
+    yield from repeat(final)
+
+
+def distribute(n, iterable):
+    """Distribute the items from *iterable* among *n* smaller iterables.
+
+    >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
+    >>> list(group_1)
+    [1, 3, 5]
+    >>> list(group_2)
+    [2, 4, 6]
+
+    If the length of *iterable* is not evenly divisible by *n*, then the
+    length of the returned iterables will not be identical:
+
+    >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
+    >>> [list(c) for c in children]
+    [[1, 4, 7], [2, 5], [3, 6]]
+
+    If the length of *iterable* is smaller than *n*, then the last returned
+    iterables will be empty:
+
+    >>> children = distribute(5, [1, 2, 3])
+    >>> [list(c) for c in children]
+    [[1], [2], [3], [], []]
+
+    This function uses :func:`itertools.tee` and may require significant
+    storage. If you need the order of items in the smaller iterables to match
+    the original iterable, see :func:`divide`.
+
+    """
+    if n < 1:
+        raise ValueError('n must be at least 1')
+
+    children = tee(iterable, n)
+    return [islice(it, index, None, n) for index, it in enumerate(children)]
+
+
+def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
+    """Yield tuples whose elements are offset from *iterable*.
+    The amount by which the `i`-th item in each tuple is offset is given by
+    the `i`-th item in *offsets*.
+
+    >>> list(stagger([0, 1, 2, 3]))
+    [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
+    >>> list(stagger(range(8), offsets=(0, 2, 4)))
+    [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
+
+    By default, the sequence will end when the final element of a tuple is the
+    last item in the iterable. To continue until the first element of a tuple
+    is the last item in the iterable, set *longest* to ``True``::
+
+    >>> list(stagger([0, 1, 2, 3], longest=True))
+    [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
+
+    By default, ``None`` will be used to replace offsets beyond the end of the
+    sequence. Specify *fillvalue* to use some other value.
+
+    """
+    children = tee(iterable, len(offsets))
+
+    return zip_offset(
+        *children, offsets=offsets, longest=longest, fillvalue=fillvalue
+    )
+
+
+def zip_equal(*iterables):
+    """``zip`` the input *iterables* together, but raise
+    ``UnequalIterablesError`` if they aren't all the same length.
+
+    >>> it_1 = range(3)
+    >>> it_2 = iter('abc')
+    >>> list(zip_equal(it_1, it_2))
+    [(0, 'a'), (1, 'b'), (2, 'c')]
+
+    >>> it_1 = range(3)
+    >>> it_2 = iter('abcd')
+    >>> list(zip_equal(it_1, it_2))  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    more_itertools.more.UnequalIterablesError: Iterables have different
+    lengths
+
+    """
+    if hexversion >= 0x30A00A6:
+        warnings.warn(
+            (
+                'zip_equal will be removed in a future version of '
+                'more-itertools. Use the builtin zip function with '
+                'strict=True instead.'
+ ), + DeprecationWarning, + ) + + return _zip_equal(*iterables) + + +def zip_offset(*iterables, offsets, longest=False, fillvalue=None): + """``zip`` the input *iterables* together, but offset the `i`-th iterable + by the `i`-th item in *offsets*. + + >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) + [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] + + This can be used as a lightweight alternative to SciPy or pandas to analyze + data sets in which some series have a lead or lag relationship. + + By default, the sequence will end when the shortest iterable is exhausted. + To continue until the longest iterable is exhausted, set *longest* to + ``True``. + + >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) + [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] + + By default, ``None`` will be used to replace offsets beyond the end of the + sequence. Specify *fillvalue* to use some other value. + + """ + if len(iterables) != len(offsets): + raise ValueError("Number of iterables and offsets didn't match") + + staggered = [] + for it, n in zip(iterables, offsets): + if n < 0: + staggered.append(chain(repeat(fillvalue, -n), it)) + elif n > 0: + staggered.append(islice(it, n, None)) + else: + staggered.append(it) + + if longest: + return zip_longest(*staggered, fillvalue=fillvalue) + + return zip(*staggered) + + +def sort_together(iterables, key_list=(0,), key=None, reverse=False): + """Return the input iterables sorted together, with *key_list* as the + priority for sorting. All iterables are trimmed to the length of the + shortest one. + + This can be used like the sorting function in a spreadsheet. If each + iterable represents a column of data, the key list determines which + columns are used for sorting. + + By default, all iterables are sorted using the ``0``-th iterable:: + + >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] + >>> sort_together(iterables) + [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] + + Set a different key list to sort according to another iterable. + Specifying multiple keys dictates how ties are broken:: + + >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] + >>> sort_together(iterables, key_list=(1, 2)) + [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] + + To sort by a function of the elements of the iterable, pass a *key* + function. Its arguments are the elements of the iterables corresponding to + the key list:: + + >>> names = ('a', 'b', 'c') + >>> lengths = (1, 2, 3) + >>> widths = (5, 2, 1) + >>> def area(length, width): + ... return length * width + >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) + [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] + + Set *reverse* to ``True`` to sort in descending order. 
+ + >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) + [(3, 2, 1), ('a', 'b', 'c')] + + """ + if key is None: + # if there is no key function, the key argument to sorted is an + # itemgetter + key_argument = itemgetter(*key_list) + else: + # if there is a key function, call it with the items at the offsets + # specified by the key function as arguments + key_list = list(key_list) + if len(key_list) == 1: + # if key_list contains a single item, pass the item at that offset + # as the only argument to the key function + key_offset = key_list[0] + key_argument = lambda zipped_items: key(zipped_items[key_offset]) + else: + # if key_list contains multiple items, use itemgetter to return a + # tuple of items, which we pass as *args to the key function + get_key_items = itemgetter(*key_list) + key_argument = lambda zipped_items: key( + *get_key_items(zipped_items) + ) + + return list( + zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) + ) + + +def unzip(iterable): + """The inverse of :func:`zip`, this function disaggregates the elements + of the zipped *iterable*. + + The ``i``-th iterable contains the ``i``-th element from each element + of the zipped iterable. The first element is used to determine the + length of the remaining elements. + + >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + >>> letters, numbers = unzip(iterable) + >>> list(letters) + ['a', 'b', 'c', 'd'] + >>> list(numbers) + [1, 2, 3, 4] + + This is similar to using ``zip(*iterable)``, but it avoids reading + *iterable* into memory. Note, however, that this function uses + :func:`itertools.tee` and thus may require significant storage. + + """ + head, iterable = spy(iter(iterable)) + if not head: + # empty iterable, e.g. zip([], [], []) + return () + # spy returns a one-length iterable as head + head = head[0] + iterables = tee(iterable, len(head)) + + def itemgetter(i): + def getter(obj): + try: + return obj[i] + except IndexError: + # basically if we have an iterable like + # iter([(1, 2, 3), (4, 5), (6,)]) + # the second unzipped iterable would fail at the third tuple + # since it would try to access tup[1] + # same with the third unzipped iterable and the second tuple + # to support these "improperly zipped" iterables, + # we create a custom itemgetter + # which just stops the unzipped iterables + # at first length mismatch + raise StopIteration + + return getter + + return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) + + +def divide(n, iterable): + """Divide the elements from *iterable* into *n* parts, maintaining + order. + + >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) + >>> list(group_1) + [1, 2, 3] + >>> list(group_2) + [4, 5, 6] + + If the length of *iterable* is not evenly divisible by *n*, then the + length of the returned iterables will not be identical: + + >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) + >>> [list(c) for c in children] + [[1, 2, 3], [4, 5], [6, 7]] + + If the length of the iterable is smaller than n, then the last returned + iterables will be empty: + + >>> children = divide(5, [1, 2, 3]) + >>> [list(c) for c in children] + [[1], [2], [3], [], []] + + This function will exhaust the iterable before returning and may require + significant storage. If order is not important, see :func:`distribute`, + which does not first pull the iterable into memory. 
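+
+    Because the input is materialized up front, non-sequence iterables such
+    as generators also work:
+
+    >>> [list(c) for c in divide(2, (x for x in range(6)))]
+    [[0, 1, 2], [3, 4, 5]]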
+ + """ + if n < 1: + raise ValueError('n must be at least 1') + + try: + iterable[:0] + except TypeError: + seq = tuple(iterable) + else: + seq = iterable + + q, r = divmod(len(seq), n) + + ret = [] + stop = 0 + for i in range(1, n + 1): + start = stop + stop += q + 1 if i <= r else q + ret.append(iter(seq[start:stop])) + + return ret + + +def always_iterable(obj, base_type=(str, bytes)): + """If *obj* is iterable, return an iterator over its items:: + + >>> obj = (1, 2, 3) + >>> list(always_iterable(obj)) + [1, 2, 3] + + If *obj* is not iterable, return a one-item iterable containing *obj*:: + + >>> obj = 1 + >>> list(always_iterable(obj)) + [1] + + If *obj* is ``None``, return an empty iterable: + + >>> obj = None + >>> list(always_iterable(None)) + [] + + By default, binary and text strings are not considered iterable:: + + >>> obj = 'foo' + >>> list(always_iterable(obj)) + ['foo'] + + If *base_type* is set, objects for which ``isinstance(obj, base_type)`` + returns ``True`` won't be considered iterable. + + >>> obj = {'a': 1} + >>> list(always_iterable(obj)) # Iterate over the dict's keys + ['a'] + >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit + [{'a': 1}] + + Set *base_type* to ``None`` to avoid any special handling and treat objects + Python considers iterable as iterable: + + >>> obj = 'foo' + >>> list(always_iterable(obj, base_type=None)) + ['f', 'o', 'o'] + """ + if obj is None: + return iter(()) + + if (base_type is not None) and isinstance(obj, base_type): + return iter((obj,)) + + try: + return iter(obj) + except TypeError: + return iter((obj,)) + + +def adjacent(predicate, iterable, distance=1): + """Return an iterable over `(bool, item)` tuples where the `item` is + drawn from *iterable* and the `bool` indicates whether + that item satisfies the *predicate* or is adjacent to an item that does. + + For example, to find whether items are adjacent to a ``3``:: + + >>> list(adjacent(lambda x: x == 3, range(6))) + [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] + + Set *distance* to change what counts as adjacent. For example, to find + whether items are two places away from a ``3``: + + >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) + [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] + + This is useful for contextualizing the results of a search function. + For example, a code comparison tool might want to identify lines that + have changed, but also surrounding lines to give the viewer of the diff + context. + + The predicate function will only be called once for each item in the + iterable. + + See also :func:`groupby_transform`, which can be used with this function + to group ranges of items with the same `bool` value. + + """ + # Allow distance=0 mainly for testing that it reproduces results with map() + if distance < 0: + raise ValueError('distance must be at least 0') + + i1, i2 = tee(iterable) + padding = [False] * distance + selected = chain(padding, map(predicate, i1), padding) + adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) + return zip(adjacent_to_selected, i2) + + +def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): + """An extension of :func:`itertools.groupby` that can apply transformations + to the grouped data. 
+ + * *keyfunc* is a function computing a key value for each item in *iterable* + * *valuefunc* is a function that transforms the individual items from + *iterable* after grouping + * *reducefunc* is a function that transforms each group of items + + >>> iterable = 'aAAbBBcCC' + >>> keyfunc = lambda k: k.upper() + >>> valuefunc = lambda v: v.lower() + >>> reducefunc = lambda g: ''.join(g) + >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) + [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] + + Each optional argument defaults to an identity function if not specified. + + :func:`groupby_transform` is useful when grouping elements of an iterable + using a separate iterable as the key. To do this, :func:`zip` the iterables + and pass a *keyfunc* that extracts the first element and a *valuefunc* + that extracts the second element:: + + >>> from operator import itemgetter + >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] + >>> values = 'abcdefghi' + >>> iterable = zip(keys, values) + >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) + >>> [(k, ''.join(g)) for k, g in grouper] + [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] + + Note that the order of items in the iterable is significant. + Only adjacent items are grouped together, so if you don't want any + duplicate groups, you should sort the iterable by the key function. + + """ + ret = groupby(iterable, keyfunc) + if valuefunc: + ret = ((k, map(valuefunc, g)) for k, g in ret) + if reducefunc: + ret = ((k, reducefunc(g)) for k, g in ret) + + return ret + + +class numeric_range(abc.Sequence, abc.Hashable): + """An extension of the built-in ``range()`` function whose arguments can + be any orderable numeric type. + + With only *stop* specified, *start* defaults to ``0`` and *step* + defaults to ``1``. The output items will match the type of *stop*: + + >>> list(numeric_range(3.5)) + [0.0, 1.0, 2.0, 3.0] + + With only *start* and *stop* specified, *step* defaults to ``1``. The + output items will match the type of *start*: + + >>> from decimal import Decimal + >>> start = Decimal('2.1') + >>> stop = Decimal('5.1') + >>> list(numeric_range(start, stop)) + [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] + + With *start*, *stop*, and *step* specified the output items will match + the type of ``start + step``: + + >>> from fractions import Fraction + >>> start = Fraction(1, 2) # Start at 1/2 + >>> stop = Fraction(5, 2) # End at 5/2 + >>> step = Fraction(1, 2) # Count by 1/2 + >>> list(numeric_range(start, stop, step)) + [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] + + If *step* is zero, ``ValueError`` is raised. Negative steps are supported: + + >>> list(numeric_range(3, -1, -1.0)) + [3.0, 2.0, 1.0, 0.0] + + Be aware of the limitations of floating point numbers; the representation + of the yielded numbers may be surprising. 
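+
+    For instance (an illustrative doctest, not part of the original suite),
+    accumulated floating point error can surface directly in the output:
+
+    >>> list(numeric_range(0, 1, 0.3))
+    [0.0, 0.3, 0.6, 0.8999999999999999]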
+ + ``datetime.datetime`` objects can be used for *start* and *stop*, if *step* + is a ``datetime.timedelta`` object: + + >>> import datetime + >>> start = datetime.datetime(2019, 1, 1) + >>> stop = datetime.datetime(2019, 1, 3) + >>> step = datetime.timedelta(days=1) + >>> items = iter(numeric_range(start, stop, step)) + >>> next(items) + datetime.datetime(2019, 1, 1, 0, 0) + >>> next(items) + datetime.datetime(2019, 1, 2, 0, 0) + + """ + + _EMPTY_HASH = hash(range(0, 0)) + + def __init__(self, *args): + argc = len(args) + if argc == 1: + (self._stop,) = args + self._start = type(self._stop)(0) + self._step = type(self._stop - self._start)(1) + elif argc == 2: + self._start, self._stop = args + self._step = type(self._stop - self._start)(1) + elif argc == 3: + self._start, self._stop, self._step = args + elif argc == 0: + raise TypeError( + 'numeric_range expected at least ' + '1 argument, got {}'.format(argc) + ) + else: + raise TypeError( + 'numeric_range expected at most ' + '3 arguments, got {}'.format(argc) + ) + + self._zero = type(self._step)(0) + if self._step == self._zero: + raise ValueError('numeric_range() arg 3 must not be zero') + self._growing = self._step > self._zero + self._init_len() + + def __bool__(self): + if self._growing: + return self._start < self._stop + else: + return self._start > self._stop + + def __contains__(self, elem): + if self._growing: + if self._start <= elem < self._stop: + return (elem - self._start) % self._step == self._zero + else: + if self._start >= elem > self._stop: + return (self._start - elem) % (-self._step) == self._zero + + return False + + def __eq__(self, other): + if isinstance(other, numeric_range): + empty_self = not bool(self) + empty_other = not bool(other) + if empty_self or empty_other: + return empty_self and empty_other # True if both empty + else: + return ( + self._start == other._start + and self._step == other._step + and self._get_by_index(-1) == other._get_by_index(-1) + ) + else: + return False + + def __getitem__(self, key): + if isinstance(key, int): + return self._get_by_index(key) + elif isinstance(key, slice): + step = self._step if key.step is None else key.step * self._step + + if key.start is None or key.start <= -self._len: + start = self._start + elif key.start >= self._len: + start = self._stop + else: # -self._len < key.start < self._len + start = self._get_by_index(key.start) + + if key.stop is None or key.stop >= self._len: + stop = self._stop + elif key.stop <= -self._len: + stop = self._start + else: # -self._len < key.stop < self._len + stop = self._get_by_index(key.stop) + + return numeric_range(start, stop, step) + else: + raise TypeError( + 'numeric range indices must be ' + 'integers or slices, not {}'.format(type(key).__name__) + ) + + def __hash__(self): + if self: + return hash((self._start, self._get_by_index(-1), self._step)) + else: + return self._EMPTY_HASH + + def __iter__(self): + values = (self._start + (n * self._step) for n in count()) + if self._growing: + return takewhile(partial(gt, self._stop), values) + else: + return takewhile(partial(lt, self._stop), values) + + def __len__(self): + return self._len + + def _init_len(self): + if self._growing: + start = self._start + stop = self._stop + step = self._step + else: + start = self._stop + stop = self._start + step = -self._step + distance = stop - start + if distance <= self._zero: + self._len = 0 + else: # distance > 0 and step > 0: regular euclidean division + q, r = divmod(distance, step) + self._len = int(q) + int(r != 
self._zero) + + def __reduce__(self): + return numeric_range, (self._start, self._stop, self._step) + + def __repr__(self): + if self._step == 1: + return "numeric_range({}, {})".format( + repr(self._start), repr(self._stop) + ) + else: + return "numeric_range({}, {}, {})".format( + repr(self._start), repr(self._stop), repr(self._step) + ) + + def __reversed__(self): + return iter( + numeric_range( + self._get_by_index(-1), self._start - self._step, -self._step + ) + ) + + def count(self, value): + return int(value in self) + + def index(self, value): + if self._growing: + if self._start <= value < self._stop: + q, r = divmod(value - self._start, self._step) + if r == self._zero: + return int(q) + else: + if self._start >= value > self._stop: + q, r = divmod(self._start - value, -self._step) + if r == self._zero: + return int(q) + + raise ValueError("{} is not in numeric range".format(value)) + + def _get_by_index(self, i): + if i < 0: + i += self._len + if i < 0 or i >= self._len: + raise IndexError("numeric range object index out of range") + return self._start + i * self._step + + +def count_cycle(iterable, n=None): + """Cycle through the items from *iterable* up to *n* times, yielding + the number of completed cycles along with each item. If *n* is omitted the + process repeats indefinitely. + + >>> list(count_cycle('AB', 3)) + [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] + + """ + iterable = tuple(iterable) + if not iterable: + return iter(()) + counter = count() if n is None else range(n) + return ((i, item) for i in counter for item in iterable) + + +def mark_ends(iterable): + """Yield 3-tuples of the form ``(is_first, is_last, item)``. + + >>> list(mark_ends('ABC')) + [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] + + Use this when looping over an iterable to take special action on its first + and/or last items: + + >>> iterable = ['Header', 100, 200, 'Footer'] + >>> total = 0 + >>> for is_first, is_last, item in mark_ends(iterable): + ... if is_first: + ... continue # Skip the header + ... if is_last: + ... continue # Skip the footer + ... total += item + >>> print(total) + 300 + """ + it = iter(iterable) + + try: + b = next(it) + except StopIteration: + return + + try: + for i in count(): + a = b + b = next(it) + yield i == 0, False, a + + except StopIteration: + yield i == 0, True, a + + +def locate(iterable, pred=bool, window_size=None): + """Yield the index of each item in *iterable* for which *pred* returns + ``True``. + + *pred* defaults to :func:`bool`, which will select truthy items: + + >>> list(locate([0, 1, 1, 0, 1, 0, 0])) + [1, 2, 4] + + Set *pred* to a custom function to, e.g., find the indexes for a particular + item. + + >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b')) + [1, 3] + + If *window_size* is given, then the *pred* function will be called with + that many items. 
This enables searching for sub-sequences:
+
+    >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+    >>> pred = lambda *args: args == (1, 2, 3)
+    >>> list(locate(iterable, pred=pred, window_size=3))
+    [1, 5, 9]
+
+    Use with :func:`seekable` to find indexes and then retrieve the associated
+    items:
+
+    >>> from itertools import count
+    >>> from more_itertools import seekable
+    >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
+    >>> it = seekable(source)
+    >>> pred = lambda x: x > 100
+    >>> indexes = locate(it, pred=pred)
+    >>> i = next(indexes)
+    >>> it.seek(i)
+    >>> next(it)
+    106
+
+    """
+    if window_size is None:
+        return compress(count(), map(pred, iterable))
+
+    if window_size < 1:
+        raise ValueError('window size must be at least 1')
+
+    it = windowed(iterable, window_size, fillvalue=_marker)
+    return compress(count(), starmap(pred, it))
+
+
+def longest_common_prefix(iterables):
+    """Yield elements of the longest common prefix amongst given *iterables*.
+
+    >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf']))
+    'ab'
+
+    """
+    return (c[0] for c in takewhile(all_equal, zip(*iterables)))
+
+
+def lstrip(iterable, pred):
+    """Yield the items from *iterable*, but strip any from the beginning
+    for which *pred* returns ``True``.
+
+    For example, to remove a set of items from the start of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(lstrip(iterable, pred))
+    [1, 2, None, 3, False, None]
+
+    This function is analogous to :func:`str.lstrip`, and is essentially
+    a wrapper for :func:`itertools.dropwhile`.
+
+    """
+    return dropwhile(pred, iterable)
+
+
+def rstrip(iterable, pred):
+    """Yield the items from *iterable*, but strip any from the end
+    for which *pred* returns ``True``.
+
+    For example, to remove a set of items from the end of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(rstrip(iterable, pred))
+    [None, False, None, 1, 2, None, 3]
+
+    This function is analogous to :func:`str.rstrip`.
+
+    """
+    cache = []
+    cache_append = cache.append
+    cache_clear = cache.clear
+    for x in iterable:
+        if pred(x):
+            cache_append(x)
+        else:
+            yield from cache
+            cache_clear()
+            yield x
+
+
+def strip(iterable, pred):
+    """Yield the items from *iterable*, but strip any from the
+    beginning and end for which *pred* returns ``True``.
+
+    For example, to remove a set of items from both ends of an iterable:
+
+    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
+    >>> pred = lambda x: x in {None, False, ''}
+    >>> list(strip(iterable, pred))
+    [1, 2, None, 3]
+
+    This function is analogous to :func:`str.strip`.
+
+    """
+    return rstrip(lstrip(iterable, pred), pred)
+
+
+class islice_extended:
+    """An extension of :func:`itertools.islice` that supports negative values
+    for *stop*, *start*, and *step*.
+
+    >>> iterable = iter('abcdefgh')
+    >>> list(islice_extended(iterable, -4, -1))
+    ['e', 'f', 'g']
+
+    Slices with negative values require some caching of *iterable*, but this
+    function takes care to minimize the amount of memory required.
+ + For example, you can use a negative step with an infinite iterator: + + >>> from itertools import count + >>> list(islice_extended(count(), 110, 99, -2)) + [110, 108, 106, 104, 102, 100] + + You can also use slice notation directly: + + >>> iterable = map(str, count()) + >>> it = islice_extended(iterable)[10:20:2] + >>> list(it) + ['10', '12', '14', '16', '18'] + + """ + + def __init__(self, iterable, *args): + it = iter(iterable) + if args: + self._iterable = _islice_helper(it, slice(*args)) + else: + self._iterable = it + + def __iter__(self): + return self + + def __next__(self): + return next(self._iterable) + + def __getitem__(self, key): + if isinstance(key, slice): + return islice_extended(_islice_helper(self._iterable, key)) + + raise TypeError('islice_extended.__getitem__ argument must be a slice') + + +def _islice_helper(it, s): + start = s.start + stop = s.stop + if s.step == 0: + raise ValueError('step argument must be a non-zero integer or None.') + step = s.step or 1 + + if step > 0: + start = 0 if (start is None) else start + + if start < 0: + # Consume all but the last -start items + cache = deque(enumerate(it, 1), maxlen=-start) + len_iter = cache[-1][0] if cache else 0 + + # Adjust start to be positive + i = max(len_iter + start, 0) + + # Adjust stop to be positive + if stop is None: + j = len_iter + elif stop >= 0: + j = min(stop, len_iter) + else: + j = max(len_iter + stop, 0) + + # Slice the cache + n = j - i + if n <= 0: + return + + for index, item in islice(cache, 0, n, step): + yield item + elif (stop is not None) and (stop < 0): + # Advance to the start position + next(islice(it, start, start), None) + + # When stop is negative, we have to carry -stop items while + # iterating + cache = deque(islice(it, -stop), maxlen=-stop) + + for index, item in enumerate(it): + cached_item = cache.popleft() + if index % step == 0: + yield cached_item + cache.append(item) + else: + # When both start and stop are positive we have the normal case + yield from islice(it, start, stop, step) + else: + start = -1 if (start is None) else start + + if (stop is not None) and (stop < 0): + # Consume all but the last items + n = -stop - 1 + cache = deque(enumerate(it, 1), maxlen=n) + len_iter = cache[-1][0] if cache else 0 + + # If start and stop are both negative they are comparable and + # we can just slice. Otherwise we can adjust start to be negative + # and then slice. + if start < 0: + i, j = start, stop + else: + i, j = min(start - len_iter, -1), None + + for index, item in list(cache)[i:j:step]: + yield item + else: + # Advance to the stop position + if stop is not None: + m = stop + 1 + next(islice(it, m, m), None) + + # stop is positive, so if start is negative they are not comparable + # and we need the rest of the items. + if start < 0: + i = start + n = None + # stop is None and start is positive, so we just need items up to + # the start index. + elif stop is None: + i = None + n = start + 1 + # Both stop and start are positive, so they are comparable. + else: + i = None + n = start - stop + if n <= 0: + return + + cache = list(islice(it, n)) + + yield from cache[i::step] + + +def always_reversible(iterable): + """An extension of :func:`reversed` that supports all iterables, not + just those which implement the ``Reversible`` or ``Sequence`` protocols. + + >>> print(*always_reversible(x for x in range(3))) + 2 1 0 + + If the iterable is already reversible, this function returns the + result of :func:`reversed()`. 
If the iterable is not reversible,
+    this function will cache the remaining items in the iterable and
+    yield them in reverse order, which may require significant storage.
+    """
+    try:
+        return reversed(iterable)
+    except TypeError:
+        return reversed(list(iterable))
+
+
+def consecutive_groups(iterable, ordering=lambda x: x):
+    """Yield groups of consecutive items using :func:`itertools.groupby`.
+    The *ordering* function determines whether two items are adjacent by
+    returning their position.
+
+    By default, the ordering function is the identity function. This is
+    suitable for finding runs of numbers:
+
+    >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
+    >>> for group in consecutive_groups(iterable):
+    ...     print(list(group))
+    [1]
+    [10, 11, 12]
+    [20]
+    [30, 31, 32, 33]
+    [40]
+
+    For finding runs of adjacent letters, try using the :meth:`index` method
+    of a string of letters:
+
+    >>> from string import ascii_lowercase
+    >>> iterable = 'abcdfgilmnop'
+    >>> ordering = ascii_lowercase.index
+    >>> for group in consecutive_groups(iterable, ordering):
+    ...     print(list(group))
+    ['a', 'b', 'c', 'd']
+    ['f', 'g']
+    ['i']
+    ['l', 'm', 'n', 'o', 'p']
+
+    Each group of consecutive items is an iterator that shares its source with
+    *iterable*. When an output group is advanced, the previous group is
+    no longer available unless its elements are copied (e.g., into a ``list``).
+
+    >>> iterable = [1, 2, 11, 12, 21, 22]
+    >>> saved_groups = []
+    >>> for group in consecutive_groups(iterable):
+    ...     saved_groups.append(list(group))  # Copy group elements
+    >>> saved_groups
+    [[1, 2], [11, 12], [21, 22]]
+
+    """
+    for k, g in groupby(
+        enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
+    ):
+        yield map(itemgetter(1), g)
+
+
+def difference(iterable, func=sub, *, initial=None):
+    """This function is the inverse of :func:`itertools.accumulate`. By default
+    it will compute the first difference of *iterable* using
+    :func:`operator.sub`:
+
+    >>> from itertools import accumulate
+    >>> iterable = accumulate([0, 1, 2, 3, 4])  # produces 0, 1, 3, 6, 10
+    >>> list(difference(iterable))
+    [0, 1, 2, 3, 4]
+
+    *func* defaults to :func:`operator.sub`, but other functions can be
+    specified. They will be applied as follows::
+
+        A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
+
+    For example, to do progressive division:
+
+    >>> iterable = [1, 2, 6, 24, 120]
+    >>> func = lambda x, y: x // y
+    >>> list(difference(iterable, func))
+    [1, 2, 3, 4, 5]
+
+    If the *initial* keyword is set, the first element will be skipped when
+    computing successive differences.
+
+    >>> it = [10, 11, 13, 16]  # from accumulate([1, 2, 3], initial=10)
+    >>> list(difference(it, initial=10))
+    [1, 2, 3]
+
+    """
+    a, b = tee(iterable)
+    try:
+        first = [next(b)]
+    except StopIteration:
+        return iter([])
+
+    if initial is not None:
+        first = []
+
+    return chain(first, map(func, b, a))
+
+
+class SequenceView(Sequence):
+    """Return a read-only view of the sequence object *target*.
+
+    :class:`SequenceView` objects are analogous to Python's built-in
+    "dictionary view" types. They provide a dynamic view of a sequence's items,
+    meaning that when the sequence updates, so does the view.
+
+    >>> seq = ['0', '1', '2']
+    >>> view = SequenceView(seq)
+    >>> view
+    SequenceView(['0', '1', '2'])
+    >>> seq.append('3')
+    >>> view
+    SequenceView(['0', '1', '2', '3'])
+
+    Sequence views support indexing, slicing, and length queries.
They act + like the underlying sequence, except they don't allow assignment: + + >>> view[1] + '1' + >>> view[1:-1] + ['1', '2'] + >>> len(view) + 4 + + Sequence views are useful as an alternative to copying, as they don't + require (much) extra storage. + + """ + + def __init__(self, target): + if not isinstance(target, Sequence): + raise TypeError + self._target = target + + def __getitem__(self, index): + return self._target[index] + + def __len__(self): + return len(self._target) + + def __repr__(self): + return '{}({})'.format(self.__class__.__name__, repr(self._target)) + + +class seekable: + """Wrap an iterator to allow for seeking backward and forward. This + progressively caches the items in the source iterable so they can be + re-visited. + + Call :meth:`seek` with an index to seek to that position in the source + iterable. + + To "reset" an iterator, seek to ``0``: + + >>> from itertools import count + >>> it = seekable((str(n) for n in count())) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> it.seek(0) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> next(it) + '3' + + You can also seek forward: + + >>> it = seekable((str(n) for n in range(20))) + >>> it.seek(10) + >>> next(it) + '10' + >>> it.seek(20) # Seeking past the end of the source isn't a problem + >>> list(it) + [] + >>> it.seek(0) # Resetting works even after hitting the end + >>> next(it), next(it), next(it) + ('0', '1', '2') + + Call :meth:`peek` to look ahead one item without advancing the iterator: + + >>> it = seekable('1234') + >>> it.peek() + '1' + >>> list(it) + ['1', '2', '3', '4'] + >>> it.peek(default='empty') + 'empty' + + Before the iterator is at its end, calling :func:`bool` on it will return + ``True``. After it will return ``False``: + + >>> it = seekable('5678') + >>> bool(it) + True + >>> list(it) + ['5', '6', '7', '8'] + >>> bool(it) + False + + You may view the contents of the cache with the :meth:`elements` method. + That returns a :class:`SequenceView`, a view that updates automatically: + + >>> it = seekable((str(n) for n in range(10))) + >>> next(it), next(it), next(it) + ('0', '1', '2') + >>> elements = it.elements() + >>> elements + SequenceView(['0', '1', '2']) + >>> next(it) + '3' + >>> elements + SequenceView(['0', '1', '2', '3']) + + By default, the cache grows as the source iterable progresses, so beware of + wrapping very large or infinite iterables. Supply *maxlen* to limit the + size of the cache (this of course limits how far back you can seek). 
+ + >>> from itertools import count + >>> it = seekable((str(n) for n in count()), maxlen=2) + >>> next(it), next(it), next(it), next(it) + ('0', '1', '2', '3') + >>> list(it.elements()) + ['2', '3'] + >>> it.seek(0) + >>> next(it), next(it), next(it), next(it) + ('2', '3', '4', '5') + >>> next(it) + '6' + + """ + + def __init__(self, iterable, maxlen=None): + self._source = iter(iterable) + if maxlen is None: + self._cache = [] + else: + self._cache = deque([], maxlen) + self._index = None + + def __iter__(self): + return self + + def __next__(self): + if self._index is not None: + try: + item = self._cache[self._index] + except IndexError: + self._index = None + else: + self._index += 1 + return item + + item = next(self._source) + self._cache.append(item) + return item + + def __bool__(self): + try: + self.peek() + except StopIteration: + return False + return True + + def peek(self, default=_marker): + try: + peeked = next(self) + except StopIteration: + if default is _marker: + raise + return default + if self._index is None: + self._index = len(self._cache) + self._index -= 1 + return peeked + + def elements(self): + return SequenceView(self._cache) + + def seek(self, index): + self._index = index + remainder = index - len(self._cache) + if remainder > 0: + consume(self, remainder) + + +class run_length: + """ + :func:`run_length.encode` compresses an iterable with run-length encoding. + It yields groups of repeated items with the count of how many times they + were repeated: + + >>> uncompressed = 'abbcccdddd' + >>> list(run_length.encode(uncompressed)) + [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + + :func:`run_length.decode` decompresses an iterable that was previously + compressed with run-length encoding. It yields the items of the + decompressed iterable: + + >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] + >>> list(run_length.decode(compressed)) + ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] + + """ + + @staticmethod + def encode(iterable): + return ((k, ilen(g)) for k, g in groupby(iterable)) + + @staticmethod + def decode(iterable): + return chain.from_iterable(repeat(k, n) for k, n in iterable) + + +def exactly_n(iterable, n, predicate=bool): + """Return ``True`` if exactly ``n`` items in the iterable are ``True`` + according to the *predicate* function. + + >>> exactly_n([True, True, False], 2) + True + >>> exactly_n([True, True, False], 1) + False + >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) + True + + The iterable will be advanced until ``n + 1`` truthy items are encountered, + so avoid calling it on infinite iterables. + + """ + return len(take(n + 1, filter(predicate, iterable))) == n + + +def circular_shifts(iterable): + """Return a list of circular shifts of *iterable*. + + >>> circular_shifts(range(4)) + [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] + """ + lst = list(iterable) + return take(len(lst), windowed(cycle(lst), len(lst))) + + +def make_decorator(wrapping_func, result_index=0): + """Return a decorator version of *wrapping_func*, which is a function that + modifies an iterable. *result_index* is the position in that function's + signature where the iterable goes. + + This lets you use itertools on the "production end," i.e. at function + definition. This can augment what the function returns without changing the + function's code. + + For example, to produce a decorator version of :func:`chunked`: + + >>> from more_itertools import chunked + >>> chunker = make_decorator(chunked, result_index=0) + >>> @chunker(3) + ... 
def iter_range(n): + ... return iter(range(n)) + ... + >>> list(iter_range(9)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + + To only allow truthy items to be returned: + + >>> truth_serum = make_decorator(filter, result_index=1) + >>> @truth_serum(bool) + ... def boolean_test(): + ... return [0, 1, '', ' ', False, True] + ... + >>> list(boolean_test()) + [1, ' ', True] + + The :func:`peekable` and :func:`seekable` wrappers make for practical + decorators: + + >>> from more_itertools import peekable + >>> peekable_function = make_decorator(peekable) + >>> @peekable_function() + ... def str_range(*args): + ... return (str(x) for x in range(*args)) + ... + >>> it = str_range(1, 20, 2) + >>> next(it), next(it), next(it) + ('1', '3', '5') + >>> it.peek() + '7' + >>> next(it) + '7' + + """ + + # See https://sites.google.com/site/bbayles/index/decorator_factory for + # notes on how this works. + def decorator(*wrapping_args, **wrapping_kwargs): + def outer_wrapper(f): + def inner_wrapper(*args, **kwargs): + result = f(*args, **kwargs) + wrapping_args_ = list(wrapping_args) + wrapping_args_.insert(result_index, result) + return wrapping_func(*wrapping_args_, **wrapping_kwargs) + + return inner_wrapper + + return outer_wrapper + + return decorator + + +def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): + """Return a dictionary that maps the items in *iterable* to categories + defined by *keyfunc*, transforms them with *valuefunc*, and + then summarizes them by category with *reducefunc*. + + *valuefunc* defaults to the identity function if it is unspecified. + If *reducefunc* is unspecified, no summarization takes place: + + >>> keyfunc = lambda x: x.upper() + >>> result = map_reduce('abbccc', keyfunc) + >>> sorted(result.items()) + [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] + + Specifying *valuefunc* transforms the categorized items: + + >>> keyfunc = lambda x: x.upper() + >>> valuefunc = lambda x: 1 + >>> result = map_reduce('abbccc', keyfunc, valuefunc) + >>> sorted(result.items()) + [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] + + Specifying *reducefunc* summarizes the categorized items: + + >>> keyfunc = lambda x: x.upper() + >>> valuefunc = lambda x: 1 + >>> reducefunc = sum + >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) + >>> sorted(result.items()) + [('A', 1), ('B', 2), ('C', 3)] + + You may want to filter the input iterable before applying the map/reduce + procedure: + + >>> all_items = range(30) + >>> items = [x for x in all_items if 10 <= x <= 20] # Filter + >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 + >>> categories = map_reduce(items, keyfunc=keyfunc) + >>> sorted(categories.items()) + [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] + >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) + >>> sorted(summaries.items()) + [(0, 90), (1, 75)] + + Note that all items in the iterable are gathered into a list before the + summarization step, which may require significant storage. + + The returned object is a :obj:`collections.defaultdict` with the + ``default_factory`` set to ``None``, such that it behaves like a normal + dictionary. 
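+
+    That is, a lookup of a missing key raises ``KeyError`` rather than
+    inserting a new entry (an illustrative doctest, not part of the
+    original suite):
+
+    >>> result = map_reduce('abbccc', lambda x: x.upper())
+    >>> result['Z']
+    Traceback (most recent call last):
+    ...
+    KeyError: 'Z'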
+
+    """
+    valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
+
+    ret = defaultdict(list)
+    for item in iterable:
+        key = keyfunc(item)
+        value = valuefunc(item)
+        ret[key].append(value)
+
+    if reducefunc is not None:
+        for key, value_list in ret.items():
+            ret[key] = reducefunc(value_list)
+
+    ret.default_factory = None
+    return ret
+
+
+def rlocate(iterable, pred=bool, window_size=None):
+    """Yield the index of each item in *iterable* for which *pred* returns
+    ``True``, starting from the right and moving left.
+
+    *pred* defaults to :func:`bool`, which will select truthy items:
+
+    >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))  # Truthy at 1, 2, and 4
+    [4, 2, 1]
+
+    Set *pred* to a custom function to, e.g., find the indexes for a particular
+    item:
+
+    >>> iterable = iter('abcb')
+    >>> pred = lambda x: x == 'b'
+    >>> list(rlocate(iterable, pred))
+    [3, 1]
+
+    If *window_size* is given, then the *pred* function will be called with
+    that many items. This enables searching for sub-sequences:
+
+    >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+    >>> pred = lambda *args: args == (1, 2, 3)
+    >>> list(rlocate(iterable, pred=pred, window_size=3))
+    [9, 5, 1]
+
+    Beware, this function won't return anything for infinite iterables.
+    If *iterable* is reversible, ``rlocate`` will reverse it and search from
+    the right. Otherwise, it will search from the left and return the results
+    in reverse order.
+
+    See :func:`locate` for other example applications.
+
+    """
+    if window_size is None:
+        try:
+            len_iter = len(iterable)
+            return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
+        except TypeError:
+            pass
+
+    return reversed(list(locate(iterable, pred, window_size)))
+
+
+def replace(iterable, pred, substitutes, count=None, window_size=1):
+    """Yield the items from *iterable*, replacing the items for which *pred*
+    returns ``True`` with the items from the iterable *substitutes*.
+
+    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
+    >>> pred = lambda x: x == 0
+    >>> substitutes = (2, 3)
+    >>> list(replace(iterable, pred, substitutes))
+    [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
+
+    If *count* is given, the number of replacements will be limited:
+
+    >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
+    >>> pred = lambda x: x == 0
+    >>> substitutes = [None]
+    >>> list(replace(iterable, pred, substitutes, count=2))
+    [1, 1, None, 1, 1, None, 1, 1, 0]
+
+    Use *window_size* to control the number of items passed as arguments to
+    *pred*. This allows for locating and replacing subsequences.
+
+    >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
+    >>> window_size = 3
+    >>> pred = lambda *args: args == (0, 1, 2)  # 3 items passed to pred
+    >>> substitutes = [3, 4]  # Splice in these items
+    >>> list(replace(iterable, pred, substitutes, window_size=window_size))
+    [3, 4, 5, 3, 4, 5]
+
+    """
+    if window_size < 1:
+        raise ValueError('window_size must be at least 1')
+
+    # Save the substitutes iterable, since it's used more than once
+    substitutes = tuple(substitutes)
+
+    # Add padding such that the number of windows matches the length of the
+    # iterable
+    it = chain(iterable, [_marker] * (window_size - 1))
+    windows = windowed(it, window_size)
+
+    n = 0
+    for w in windows:
+        # If the current window matches our predicate (and we haven't hit
+        # our maximum number of replacements), splice in the substitutes
+        # and then consume the following windows that overlap with this one.
+        # For example, if the iterable is (0, 1, 2, 3, 4...)
+        # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
+        # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
+        if pred(*w):
+            if (count is None) or (n < count):
+                n += 1
+                yield from substitutes
+                consume(windows, window_size - 1)
+                continue
+
+        # If there was no match (or we've reached the replacement limit),
+        # yield the first item from the window.
+        if w and (w[0] is not _marker):
+            yield w[0]
+
+
+def partitions(iterable):
+    """Yield all possible order-preserving partitions of *iterable*.
+
+    >>> iterable = 'abc'
+    >>> for part in partitions(iterable):
+    ...     print([''.join(p) for p in part])
+    ['abc']
+    ['a', 'bc']
+    ['ab', 'c']
+    ['a', 'b', 'c']
+
+    This is unrelated to :func:`partition`.
+
+    """
+    sequence = list(iterable)
+    n = len(sequence)
+    for i in powerset(range(1, n)):
+        yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
+
+
+def set_partitions(iterable, k=None):
+    """
+    Yield the set partitions of *iterable* into *k* parts. Set partitions are
+    not order-preserving.
+
+    >>> iterable = 'abc'
+    >>> for part in set_partitions(iterable, 2):
+    ...     print([''.join(p) for p in part])
+    ['a', 'bc']
+    ['ab', 'c']
+    ['b', 'ac']
+
+    If *k* is not given, every set partition is generated.
+
+    >>> iterable = 'abc'
+    >>> for part in set_partitions(iterable):
+    ...     print([''.join(p) for p in part])
+    ['abc']
+    ['a', 'bc']
+    ['ab', 'c']
+    ['b', 'ac']
+    ['a', 'b', 'c']
+
+    """
+    L = list(iterable)
+    n = len(L)
+    if k is not None:
+        if k < 1:
+            raise ValueError(
+                "Can't partition in a negative or zero number of groups"
+            )
+        elif k > n:
+            return
+
+    def set_partitions_helper(L, k):
+        n = len(L)
+        if k == 1:
+            yield [L]
+        elif n == k:
+            yield [[s] for s in L]
+        else:
+            e, *M = L
+            for p in set_partitions_helper(M, k - 1):
+                yield [[e], *p]
+            for p in set_partitions_helper(M, k):
+                for i in range(len(p)):
+                    yield p[:i] + [[e] + p[i]] + p[i + 1 :]
+
+    if k is None:
+        for k in range(1, n + 1):
+            yield from set_partitions_helper(L, k)
+    else:
+        yield from set_partitions_helper(L, k)
+
+
+class time_limited:
+    """
+    Yield items from *iterable* until *limit_seconds* have passed.
+    If the time limit expires before all items have been yielded, the
+    ``timed_out`` attribute will be set to ``True``.
+
+    >>> from time import sleep
+    >>> def generator():
+    ...     yield 1
+    ...     yield 2
+    ...     sleep(0.2)
+    ...     yield 3
+    >>> iterable = time_limited(0.1, generator())
+    >>> list(iterable)
+    [1, 2]
+    >>> iterable.timed_out
+    True
+
+    Note that the time is checked before each item is yielded, and iteration
+    stops if the time elapsed is greater than *limit_seconds*. If your time
+    limit is 1 second, but it takes 2 seconds to generate the first item from
+    the iterable, the function will run for 2 seconds and not yield anything.
+
+    """
+
+    def __init__(self, limit_seconds, iterable):
+        if limit_seconds < 0:
+            raise ValueError('limit_seconds must be positive')
+        self.limit_seconds = limit_seconds
+        self._iterable = iter(iterable)
+        self._start_time = monotonic()
+        self.timed_out = False
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        item = next(self._iterable)
+        if monotonic() - self._start_time > self.limit_seconds:
+            self.timed_out = True
+            raise StopIteration
+
+        return item
+
+
+def only(iterable, default=None, too_long=None):
+    """If *iterable* has only one item, return it.
+    If it has zero items, return *default*.
+    If it has more than one item, raise the exception given by *too_long*,
+    which is ``ValueError`` by default.
+
+    >>> only([], default='missing')
+    'missing'
+    >>> only([1])
+    1
+    >>> only([1, 2])  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    ValueError: Expected exactly one item in iterable, but got 1, 2,
+    and perhaps more.
+    >>> only([1, 2], too_long=TypeError)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    TypeError
+
+    Note that :func:`only` attempts to advance *iterable* twice to ensure there
+    is only one item. See :func:`spy` or :func:`peekable` to check
+    iterable contents less destructively.
+    """
+    it = iter(iterable)
+    first_value = next(it, default)
+
+    try:
+        second_value = next(it)
+    except StopIteration:
+        pass
+    else:
+        msg = (
+            'Expected exactly one item in iterable, but got {!r}, {!r}, '
+            'and perhaps more.'.format(first_value, second_value)
+        )
+        raise too_long or ValueError(msg)
+
+    return first_value
+
+
+class _IChunk:
+    def __init__(self, iterable, n):
+        self._it = islice(iterable, n)
+        self._cache = deque()
+
+    def fill_cache(self):
+        self._cache.extend(self._it)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        try:
+            return next(self._it)
+        except StopIteration:
+            if self._cache:
+                return self._cache.popleft()
+            else:
+                raise
+
+
+def ichunked(iterable, n):
+    """Break *iterable* into sub-iterables with *n* elements each.
+    :func:`ichunked` is like :func:`chunked`, but it yields iterables
+    instead of lists.
+
+    If the sub-iterables are read in order, the elements of *iterable*
+    won't be stored in memory.
+    If they are read out of order, the unread elements of earlier
+    sub-iterables are cached internally as necessary.
+
+    >>> from itertools import count
+    >>> all_chunks = ichunked(count(), 4)
+    >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
+    >>> list(c_2)  # c_1's elements have been cached; c_3's haven't been
+    [4, 5, 6, 7]
+    >>> list(c_1)
+    [0, 1, 2, 3]
+    >>> list(c_3)
+    [8, 9, 10, 11]
+
+    """
+    source = peekable(iter(iterable))
+    ichunk_marker = object()
+    while True:
+        # Check to see whether we're at the end of the source iterable
+        item = source.peek(ichunk_marker)
+        if item is ichunk_marker:
+            return
+
+        chunk = _IChunk(source, n)
+        yield chunk
+
+        # Advance the source iterable and fill previous chunk's cache
+        chunk.fill_cache()
+
+
+def iequals(*iterables):
+    """Return ``True`` if all given *iterables* are equal to each other,
+    which means that they contain the same elements in the same order.
+
+    The function is useful for comparing iterables of different data types
+    or iterables that do not support equality checks.
+
+    >>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
+    True
+
+    >>> iequals("abc", "acb")
+    False
+
+    Not to be confused with :func:`all_equal`, which checks whether all
+    elements of an iterable are equal to each other.
+
+    """
+    return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))
+
+
+def distinct_combinations(iterable, r):
+    """Yield the distinct combinations of *r* items taken from *iterable*.
+
+    >>> list(distinct_combinations([0, 0, 1], 2))
+    [(0, 0), (0, 1)]
+
+    Equivalent to ``set(combinations(iterable, r))``, except duplicates are
+    not generated and thrown away. For larger input sequences this is much
+    more efficient.
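+
+    Equal items at different input positions don't produce duplicate output
+    (an illustrative doctest, not part of the original suite):
+
+    >>> list(distinct_combinations('aabb', 2))
+    [('a', 'a'), ('a', 'b'), ('b', 'b')]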
+ + """ + if r < 0: + raise ValueError('r must be non-negative') + elif r == 0: + yield () + return + pool = tuple(iterable) + generators = [unique_everseen(enumerate(pool), key=itemgetter(1))] + current_combo = [None] * r + level = 0 + while generators: + try: + cur_idx, p = next(generators[-1]) + except StopIteration: + generators.pop() + level -= 1 + continue + current_combo[level] = p + if level + 1 == r: + yield tuple(current_combo) + else: + generators.append( + unique_everseen( + enumerate(pool[cur_idx + 1 :], cur_idx + 1), + key=itemgetter(1), + ) + ) + level += 1 + + +def filter_except(validator, iterable, *exceptions): + """Yield the items from *iterable* for which the *validator* function does + not raise one of the specified *exceptions*. + + *validator* is called for each item in *iterable*. + It should be a function that accepts one argument and raises an exception + if that item is not valid. + + >>> iterable = ['1', '2', 'three', '4', None] + >>> list(filter_except(int, iterable, ValueError, TypeError)) + ['1', '2', '4'] + + If an exception other than one given by *exceptions* is raised by + *validator*, it is raised like normal. + """ + for item in iterable: + try: + validator(item) + except exceptions: + pass + else: + yield item + + +def map_except(function, iterable, *exceptions): + """Transform each item from *iterable* with *function* and yield the + result, unless *function* raises one of the specified *exceptions*. + + *function* is called to transform each item in *iterable*. + It should accept one argument. + + >>> iterable = ['1', '2', 'three', '4', None] + >>> list(map_except(int, iterable, ValueError, TypeError)) + [1, 2, 4] + + If an exception other than one given by *exceptions* is raised by + *function*, it is raised like normal. + """ + for item in iterable: + try: + yield function(item) + except exceptions: + pass + + +def map_if(iterable, pred, func, func_else=lambda x: x): + """Evaluate each item from *iterable* using *pred*. If the result is + equivalent to ``True``, transform the item with *func* and yield it. + Otherwise, transform the item with *func_else* and yield it. + + *pred*, *func*, and *func_else* should each be functions that accept + one argument. By default, *func_else* is the identity function. + + >>> from math import sqrt + >>> iterable = list(range(-5, 5)) + >>> iterable + [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + >>> list(map_if(iterable, lambda x: x > 3, lambda x: 'toobig')) + [-5, -4, -3, -2, -1, 0, 1, 2, 3, 'toobig'] + >>> list(map_if(iterable, lambda x: x >= 0, + ... lambda x: f'{sqrt(x):.2f}', lambda x: None)) + [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00'] + """ + for item in iterable: + yield func(item) if pred(item) else func_else(item) + + +def _sample_unweighted(iterable, k): + # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li: + # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". + + # Fill up the reservoir (collection of samples) with the first `k` samples + reservoir = take(k, iterable) + + # Generate random number that's the largest in a sample of k U(0,1) numbers + # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic + W = exp(log(random()) / k) + + # The number of elements to skip before changing the reservoir is a random + # number with a geometric distribution. Sample it using random() and logs. 
+    next_index = k + floor(log(random()) / log(1 - W))
+
+    for index, element in enumerate(iterable, k):
+        if index == next_index:
+            reservoir[randrange(k)] = element
+            # The new W is the largest in a sample of k U(0, `old_W`) numbers
+            W *= exp(log(random()) / k)
+            next_index += floor(log(random()) / log(1 - W)) + 1
+
+    return reservoir
+
+
+def _sample_weighted(iterable, k, weights):
+    # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
+    # "Weighted random sampling with a reservoir".
+
+    # Log-transform for numerical stability for weights that are small/large
+    weight_keys = (log(random()) / weight for weight in weights)
+
+    # Fill up the reservoir (collection of samples) with the first `k`
+    # weight-keys and elements, then heapify the list.
+    reservoir = take(k, zip(weight_keys, iterable))
+    heapify(reservoir)
+
+    # The number of jumps before changing the reservoir is a random variable
+    # with an exponential distribution. Sample it using random() and logs.
+    smallest_weight_key, _ = reservoir[0]
+    weights_to_skip = log(random()) / smallest_weight_key
+
+    for weight, element in zip(weights, iterable):
+        if weight >= weights_to_skip:
+            # The notation here is consistent with the paper, but we store
+            # the weight-keys in log-space for better numerical stability.
+            smallest_weight_key, _ = reservoir[0]
+            t_w = exp(weight * smallest_weight_key)
+            r_2 = uniform(t_w, 1)  # generate U(t_w, 1)
+            weight_key = log(r_2) / weight
+            heapreplace(reservoir, (weight_key, element))
+            smallest_weight_key, _ = reservoir[0]
+            weights_to_skip = log(random()) / smallest_weight_key
+        else:
+            weights_to_skip -= weight
+
+    # Equivalent to [element for weight_key, element in sorted(reservoir)]
+    return [heappop(reservoir)[1] for _ in range(k)]
+
+
+def sample(iterable, k, weights=None):
+    """Return a *k*-length list of elements chosen (without replacement)
+    from the *iterable*. Like :func:`random.sample`, but works on iterables
+    of unknown length.
+
+    >>> iterable = range(100)
+    >>> sample(iterable, 5)  # doctest: +SKIP
+    [81, 60, 96, 16, 4]
+
+    An iterable with *weights* may also be given:
+
+    >>> iterable = range(100)
+    >>> weights = (i * i + 1 for i in range(100))
+    >>> sample(iterable, 5, weights=weights)  # doctest: +SKIP
+    [79, 67, 74, 66, 78]
+
+    The algorithm can also be used to generate weighted random permutations.
+    The relative weight of each item determines the probability that it
+    appears late in the permutation.
+
+    >>> data = "abcdefgh"
+    >>> weights = range(1, len(data) + 1)
+    >>> sample(data, k=len(data), weights=weights)  # doctest: +SKIP
+    ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
+    """
+    if k == 0:
+        return []
+
+    iterable = iter(iterable)
+    if weights is None:
+        return _sample_unweighted(iterable, k)
+    else:
+        weights = iter(weights)
+        return _sample_weighted(iterable, k, weights)
+
+
+def is_sorted(iterable, key=None, reverse=False, strict=False):
+    """Returns ``True`` if the items of iterable are in sorted order, and
+    ``False`` otherwise. *key* and *reverse* have the same meaning that they do
+    in the built-in :func:`sorted` function.
+
+    >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
+    True
+    >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
+    False
+
+    If *strict*, tests for strict sorting, that is, returns ``False`` if equal
+    elements are found:
+
+    >>> is_sorted([1, 2, 2])
+    True
+    >>> is_sorted([1, 2, 2], strict=True)
+    False
+
+    The function returns ``False`` after encountering the first out-of-order
+    item.
If there are no out-of-order items, the iterable is exhausted.
+    """
+
+    compare = (le if reverse else ge) if strict else (lt if reverse else gt)
+    it = iterable if key is None else map(key, iterable)
+    return not any(starmap(compare, pairwise(it)))
+
+
+class AbortThread(BaseException):
+    pass
+
+
+class callback_iter:
+    """Convert a function that uses callbacks to an iterator.
+
+    Let *func* be a function that takes a `callback` keyword argument.
+    For example:
+
+    >>> def func(callback=None):
+    ...     for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
+    ...         if callback:
+    ...             callback(i, c)
+    ...     return 4
+
+    Use ``with callback_iter(func)`` to get an iterator over the parameters
+    that are delivered to the callback.
+
+    >>> with callback_iter(func) as it:
+    ...     for args, kwargs in it:
+    ...         print(args)
+    (1, 'a')
+    (2, 'b')
+    (3, 'c')
+
+    The function will be called in a background thread. The ``done`` property
+    indicates whether it has completed execution.
+
+    >>> it.done
+    True
+
+    If it completes successfully, its return value will be available
+    in the ``result`` property.
+
+    >>> it.result
+    4
+
+    Notes:
+
+    * If the function uses some keyword argument besides ``callback``, supply
+      *callback_kwd*.
+    * If it finished executing, but raised an exception, accessing the
+      ``result`` property will raise the same exception.
+    * If it hasn't finished executing, accessing the ``result``
+      property from within the ``with`` block will raise ``RuntimeError``.
+    * If it hasn't finished executing, accessing the ``result`` property from
+      outside the ``with`` block will raise a
+      ``more_itertools.AbortThread`` exception.
+    * Provide *wait_seconds* to adjust how frequently the iterator is polled
+      for output.
+
+    """
+
+    def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
+        self._func = func
+        self._callback_kwd = callback_kwd
+        self._aborted = False
+        self._future = None
+        self._wait_seconds = wait_seconds
+        # Lazily import concurrent.futures
+        self._executor = __import__(
+            "concurrent.futures"
+        ).futures.ThreadPoolExecutor(max_workers=1)
+        self._iterator = self._reader()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self._aborted = True
+        self._executor.shutdown()
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return next(self._iterator)
+
+    @property
+    def done(self):
+        if self._future is None:
+            return False
+        return self._future.done()
+
+    @property
+    def result(self):
+        if not self.done:
+            raise RuntimeError('Function has not yet completed')
+
+        return self._future.result()
+
+    def _reader(self):
+        q = Queue()
+
+        def callback(*args, **kwargs):
+            if self._aborted:
+                raise AbortThread('canceled by user')
+
+            q.put((args, kwargs))
+
+        self._future = self._executor.submit(
+            self._func, **{self._callback_kwd: callback}
+        )
+
+        while True:
+            try:
+                item = q.get(timeout=self._wait_seconds)
+            except Empty:
+                pass
+            else:
+                q.task_done()
+                yield item
+
+            if self._future.done():
+                break
+
+        remaining = []
+        while True:
+            try:
+                item = q.get_nowait()
+            except Empty:
+                break
+            else:
+                q.task_done()
+                remaining.append(item)
+        q.join()
+        yield from remaining
+
+
+def windowed_complete(iterable, n):
+    """
+    Yield ``(beginning, middle, end)`` tuples, where:
+
+    * Each ``middle`` has *n* items from *iterable*
+    * Each ``beginning`` has the items before the ones in ``middle``
+    * Each ``end`` has the items after the ones in ``middle``
+
+    >>> iterable = range(7)
+    >>> n = 3
+    >>> for beginning, middle, end in windowed_complete(iterable, n):
+    ...     print(beginning, middle, end)
+    () (0, 1, 2) (3, 4, 5, 6)
+    (0,) (1, 2, 3) (4, 5, 6)
+    (0, 1) (2, 3, 4) (5, 6)
+    (0, 1, 2) (3, 4, 5) (6,)
+    (0, 1, 2, 3) (4, 5, 6) ()
+
+    Note that *n* must be at least 0 and at most equal to the length of
+    *iterable*.
+
+    This function will exhaust the iterable and may require significant
+    storage.
+    """
+    if n < 0:
+        raise ValueError('n must be >= 0')
+
+    seq = tuple(iterable)
+    size = len(seq)
+
+    if n > size:
+        raise ValueError('n must be <= len(seq)')
+
+    for i in range(size - n + 1):
+        beginning = seq[:i]
+        middle = seq[i : i + n]
+        end = seq[i + n :]
+        yield beginning, middle, end
+
+
+def all_unique(iterable, key=None):
+    """
+    Returns ``True`` if all the elements of *iterable* are unique (no two
+    elements are equal).
+
+    >>> all_unique('ABCB')
+    False
+
+    If a *key* function is specified, it will be used to make comparisons.
+
+    >>> all_unique('ABCb')
+    True
+    >>> all_unique('ABCb', str.lower)
+    False
+
+    The function returns as soon as the first non-unique element is
+    encountered. Iterables with a mix of hashable and unhashable items can
+    be used, but the function will be slower for unhashable items.
+    """
+    seenset = set()
+    seenset_add = seenset.add
+    seenlist = []
+    seenlist_add = seenlist.append
+    for element in map(key, iterable) if key else iterable:
+        try:
+            if element in seenset:
+                return False
+            seenset_add(element)
+        except TypeError:
+            if element in seenlist:
+                return False
+            seenlist_add(element)
+    return True
+
+
+def nth_product(index, *args):
+    """Equivalent to ``list(product(*args))[index]``.
+
+    The products of *args* can be ordered lexicographically.
+    :func:`nth_product` computes the product at sort position *index* without
+    computing the previous products.
+
+    >>> nth_product(8, range(2), range(2), range(2), range(2))
+    (1, 0, 0, 0)
+
+    ``IndexError`` will be raised if the given *index* is invalid.
+    """
+    pools = list(map(tuple, reversed(args)))
+    ns = list(map(len, pools))
+
+    c = reduce(mul, ns)
+
+    if index < 0:
+        index += c
+
+    if not 0 <= index < c:
+        raise IndexError
+
+    result = []
+    for pool, n in zip(pools, ns):
+        result.append(pool[index % n])
+        index //= n
+
+    return tuple(reversed(result))
+
+
+def nth_permutation(iterable, r, index):
+    """Equivalent to ``list(permutations(iterable, r))[index]``.
+
+    The subsequences of *iterable* that are of length *r* where order is
+    important can be ordered lexicographically. :func:`nth_permutation`
+    computes the subsequence at sort position *index* directly, without
+    computing the previous subsequences.
+
+    >>> nth_permutation('ghijk', 2, 5)
+    ('h', 'i')
+
+    ``ValueError`` will be raised if *r* is negative or greater than the length
+    of *iterable*.
+    ``IndexError`` will be raised if the given *index* is invalid.
+    """
+    pool = list(iterable)
+    n = len(pool)
+
+    if r is None or r == n:
+        r, c = n, factorial(n)
+    elif not 0 <= r < n:
+        raise ValueError
+    else:
+        c = factorial(n) // factorial(n - r)
+
+    if index < 0:
+        index += c
+
+    if not 0 <= index < c:
+        raise IndexError
+
+    if c == 0:
+        return tuple()
+
+    result = [0] * r
+    q = index * factorial(n) // c if r < n else index
+    for d in range(1, n + 1):
+        q, i = divmod(q, d)
+        if 0 <= n - d < r:
+            result[n - d] = i
+        if q == 0:
+            break
+
+    return tuple(map(pool.pop, result))
+
+
+def value_chain(*args):
+    """Yield all arguments passed to the function in the same order in which
+    they were passed. If an argument itself is iterable then iterate over its
+    values.
+
+    >>> list(value_chain(1, 2, 3, [4, 5, 6]))
+    [1, 2, 3, 4, 5, 6]
+
+    Binary and text strings are not considered iterable and are emitted
+    as-is:
+
+    >>> list(value_chain('12', '34', ['56', '78']))
+    ['12', '34', '56', '78']
+
+    Multiple levels of nesting are not flattened.
+
+    """
+    for value in args:
+        if isinstance(value, (str, bytes)):
+            yield value
+            continue
+        try:
+            yield from value
+        except TypeError:
+            yield value
+
+
+def product_index(element, *args):
+    """Equivalent to ``list(product(*args)).index(element)``.
+
+    The products of *args* can be ordered lexicographically.
+    :func:`product_index` computes the first index of *element* without
+    computing the previous products.
+
+    >>> product_index([8, 2], range(10), range(5))
+    42
+
+    ``ValueError`` will be raised if the given *element* isn't in the product
+    of *args*.
+    """
+    index = 0
+
+    for x, pool in zip_longest(element, args, fillvalue=_marker):
+        if x is _marker or pool is _marker:
+            raise ValueError('element is not a product of args')
+
+        pool = tuple(pool)
+        index = index * len(pool) + pool.index(x)
+
+    return index
+
+
+def combination_index(element, iterable):
+    """Equivalent to ``list(combinations(iterable, r)).index(element)``.
+
+    The subsequences of *iterable* that are of length *r* can be ordered
+    lexicographically. :func:`combination_index` computes the index of the
+    first *element*, without computing the previous combinations.
+
+    >>> combination_index('adf', 'abcdefg')
+    10
+
+    ``ValueError`` will be raised if the given *element* isn't one of the
+    combinations of *iterable*.
+    """
+    element = enumerate(element)
+    k, y = next(element, (None, None))
+    if k is None:
+        return 0
+
+    indexes = []
+    pool = enumerate(iterable)
+    for n, x in pool:
+        if x == y:
+            indexes.append(n)
+            tmp, y = next(element, (None, None))
+            if tmp is None:
+                break
+            else:
+                k = tmp
+    else:
+        raise ValueError('element is not a combination of iterable')
+
+    n, _ = last(pool, default=(n, None))
+
+    # Python versions below 3.8 don't have math.comb
+    index = 1
+    for i, j in enumerate(reversed(indexes), start=1):
+        j = n - j
+        if i <= j:
+            index += factorial(j) // (factorial(i) * factorial(j - i))
+
+    return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
+
+
+def permutation_index(element, iterable):
+    """Equivalent to ``list(permutations(iterable, r)).index(element)``.
+
+    The subsequences of *iterable* that are of length *r* where order is
+    important can be ordered lexicographically. :func:`permutation_index`
+    computes the index of the first *element* directly, without computing
+    the previous permutations.
+
+    >>> permutation_index([1, 3, 2], range(5))
+    19
+
+    ``ValueError`` will be raised if the given *element* isn't one of the
+    permutations of *iterable*.
+    """
+    index = 0
+    pool = list(iterable)
+    for i, x in zip(range(len(pool), -1, -1), element):
+        r = pool.index(x)
+        index = index * i + r
+        del pool[r]
+
+    return index
+
+
+class countable:
+    """Wrap *iterable* and keep a count of how many items have been consumed.
+ + The ``items_seen`` attribute starts at ``0`` and increments as the iterable + is consumed: + + >>> iterable = map(str, range(10)) + >>> it = countable(iterable) + >>> it.items_seen + 0 + >>> next(it), next(it) + ('0', '1') + >>> list(it) + ['2', '3', '4', '5', '6', '7', '8', '9'] + >>> it.items_seen + 10 + """ + + def __init__(self, iterable): + self._it = iter(iterable) + self.items_seen = 0 + + def __iter__(self): + return self + + def __next__(self): + item = next(self._it) + self.items_seen += 1 + + return item + + +def chunked_even(iterable, n): + """Break *iterable* into lists of approximately length *n*. + Items are distributed such the lengths of the lists differ by at most + 1 item. + + >>> iterable = [1, 2, 3, 4, 5, 6, 7] + >>> n = 3 + >>> list(chunked_even(iterable, n)) # List lengths: 3, 2, 2 + [[1, 2, 3], [4, 5], [6, 7]] + >>> list(chunked(iterable, n)) # List lengths: 3, 3, 1 + [[1, 2, 3], [4, 5, 6], [7]] + + """ + + len_method = getattr(iterable, '__len__', None) + + if len_method is None: + return _chunked_even_online(iterable, n) + else: + return _chunked_even_finite(iterable, len_method(), n) + + +def _chunked_even_online(iterable, n): + buffer = [] + maxbuf = n + (n - 2) * (n - 1) + for x in iterable: + buffer.append(x) + if len(buffer) == maxbuf: + yield buffer[:n] + buffer = buffer[n:] + yield from _chunked_even_finite(buffer, len(buffer), n) + + +def _chunked_even_finite(iterable, N, n): + if N < 1: + return + + # Lists are either size `full_size <= n` or `partial_size = full_size - 1` + q, r = divmod(N, n) + num_lists = q + (1 if r > 0 else 0) + q, r = divmod(N, num_lists) + full_size = q + (1 if r > 0 else 0) + partial_size = full_size - 1 + num_full = N - partial_size * num_lists + num_partial = num_lists - num_full + + buffer = [] + iterator = iter(iterable) + + # Yield num_full lists of full_size + for x in iterator: + buffer.append(x) + if len(buffer) == full_size: + yield buffer + buffer = [] + num_full -= 1 + if num_full <= 0: + break + + # Yield num_partial lists of partial_size + for x in iterator: + buffer.append(x) + if len(buffer) == partial_size: + yield buffer + buffer = [] + num_partial -= 1 + + +def zip_broadcast(*objects, scalar_types=(str, bytes), strict=False): + """A version of :func:`zip` that "broadcasts" any scalar + (i.e., non-iterable) items into output tuples. + + >>> iterable_1 = [1, 2, 3] + >>> iterable_2 = ['a', 'b', 'c'] + >>> scalar = '_' + >>> list(zip_broadcast(iterable_1, iterable_2, scalar)) + [(1, 'a', '_'), (2, 'b', '_'), (3, 'c', '_')] + + The *scalar_types* keyword argument determines what types are considered + scalar. It is set to ``(str, bytes)`` by default. Set it to ``None`` to + treat strings and byte strings as iterable: + + >>> list(zip_broadcast('abc', 0, 'xyz', scalar_types=None)) + [('a', 0, 'x'), ('b', 0, 'y'), ('c', 0, 'z')] + + If the *strict* keyword argument is ``True``, then + ``UnequalIterablesError`` will be raised if any of the iterables have + different lengths. 
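+
+    An added illustrative doctest (not in the upstream docstring), showing
+    the strict behavior:
+
+    >>> list(zip_broadcast([1, 2], [3], '_', strict=True))  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    UnequalIterablesError: Iterables have different lengths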
+    """
+
+    def is_scalar(obj):
+        if scalar_types and isinstance(obj, scalar_types):
+            return True
+        try:
+            iter(obj)
+        except TypeError:
+            return True
+        else:
+            return False
+
+    size = len(objects)
+    if not size:
+        return
+
+    iterables, iterable_positions = [], []
+    scalars, scalar_positions = [], []
+    for i, obj in enumerate(objects):
+        if is_scalar(obj):
+            scalars.append(obj)
+            scalar_positions.append(i)
+        else:
+            iterables.append(iter(obj))
+            iterable_positions.append(i)
+
+    if len(scalars) == size:
+        yield tuple(objects)
+        return
+
+    zipper = _zip_equal if strict else zip
+    for item in zipper(*iterables):
+        new_item = [None] * size
+
+        for i, elem in zip(iterable_positions, item):
+            new_item[i] = elem
+
+        for i, elem in zip(scalar_positions, scalars):
+            new_item[i] = elem
+
+        yield tuple(new_item)
+
+
+def unique_in_window(iterable, n, key=None):
+    """Yield the items from *iterable* that haven't been seen recently.
+    *n* is the size of the lookback window.
+
+    >>> iterable = [0, 1, 0, 2, 3, 0]
+    >>> n = 3
+    >>> list(unique_in_window(iterable, n))
+    [0, 1, 2, 3, 0]
+
+    The *key* function, if provided, will be used to determine uniqueness:
+
+    >>> list(unique_in_window('abAcda', 3, key=lambda x: x.lower()))
+    ['a', 'b', 'c', 'd', 'a']
+
+    The items in *iterable* must be hashable.
+
+    """
+    if n <= 0:
+        raise ValueError('n must be greater than 0')
+
+    window = deque(maxlen=n)
+    uniques = set()
+    use_key = key is not None
+
+    for item in iterable:
+        k = key(item) if use_key else item
+        if k in uniques:
+            continue
+
+        if len(uniques) == n:
+            uniques.discard(window[0])
+
+        uniques.add(k)
+        window.append(k)
+
+        yield item
+
+
+def duplicates_everseen(iterable, key=None):
+    """Yield duplicate elements after their first appearance.
+
+    >>> list(duplicates_everseen('mississippi'))
+    ['s', 'i', 's', 's', 'i', 'p', 'i']
+    >>> list(duplicates_everseen('AaaBbbCccAaa', str.lower))
+    ['a', 'a', 'b', 'b', 'c', 'c', 'A', 'a', 'a']
+
+    This function is analogous to :func:`unique_everseen` and is subject to
+    the same performance considerations.
+
+    """
+    seen_set = set()
+    seen_list = []
+    use_key = key is not None
+
+    for element in iterable:
+        k = key(element) if use_key else element
+        try:
+            if k not in seen_set:
+                seen_set.add(k)
+            else:
+                yield element
+        except TypeError:
+            if k not in seen_list:
+                seen_list.append(k)
+            else:
+                yield element
+
+
+def duplicates_justseen(iterable, key=None):
+    """Yields serially-duplicate elements after their first appearance.
+
+    >>> list(duplicates_justseen('mississippi'))
+    ['s', 's', 'p']
+    >>> list(duplicates_justseen('AaaBbbCccAaa', str.lower))
+    ['a', 'a', 'b', 'b', 'c', 'c', 'a', 'a']
+
+    This function is analogous to :func:`unique_justseen`.
+
+    """
+    return flatten(
+        map(
+            lambda group_tuple: islice_extended(group_tuple[1])[1:],
+            groupby(iterable, key),
+        )
+    )
+
+
+def minmax(iterable_or_value, *others, key=None, default=_marker):
+    """Returns both the smallest and largest items from an iterable
+    or from two or more arguments.
+
+    >>> minmax([3, 1, 5])
+    (1, 5)
+
+    >>> minmax(4, 2, 6)
+    (2, 6)
+
+    If a *key* function is provided, it will be used to transform the input
+    items for comparison.
+
+    >>> minmax([5, 30], key=str)  # '30' sorts before '5'
+    (30, 5)
+
+    If a *default* value is provided, it will be returned if there are no
+    input items.
+
+    >>> minmax([], default=(0, 0))
+    (0, 0)
+
+    Otherwise ``ValueError`` is raised.
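+
+    An added illustrative doctest (not in the upstream docstring): *key* and
+    *default* may be combined:
+
+    >>> minmax([], key=str, default=(0, 0))
+    (0, 0)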
+ + This function is based on the + `recipe `__ by + Raymond Hettinger and takes care to minimize the number of comparisons + performed. + """ + iterable = (iterable_or_value, *others) if others else iterable_or_value + + it = iter(iterable) + + try: + lo = hi = next(it) + except StopIteration as e: + if default is _marker: + raise ValueError( + '`minmax()` argument is an empty iterable. ' + 'Provide a `default` value to suppress this error.' + ) from e + return default + + # Different branches depending on the presence of key. This saves a lot + # of unimportant copies which would slow the "key=None" branch + # significantly down. + if key is None: + for x, y in zip_longest(it, it, fillvalue=lo): + if y < x: + x, y = y, x + if x < lo: + lo = x + if hi < y: + hi = y + + else: + lo_key = hi_key = key(lo) + + for x, y in zip_longest(it, it, fillvalue=lo): + x_key, y_key = key(x), key(y) + + if y_key < x_key: + x, y, x_key, y_key = y, x, y_key, x_key + if x_key < lo_key: + lo, lo_key = x, x_key + if hi_key < y_key: + hi, hi_key = y, y_key + + return lo, hi + + +def constrained_batches( + iterable, max_size, max_count=None, get_len=len, strict=True +): + """Yield batches of items from *iterable* with a combined size limited by + *max_size*. + + >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1'] + >>> list(constrained_batches(iterable, 10)) + [(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')] + + If a *max_count* is supplied, the number of items per batch is also + limited: + + >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1'] + >>> list(constrained_batches(iterable, 10, max_count = 2)) + [(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)] + + If a *get_len* function is supplied, use that instead of :func:`len` to + determine item size. + + If *strict* is ``True``, raise ``ValueError`` if any single item is bigger + than *max_size*. Otherwise, allow single items to exceed *max_size*. + """ + if max_size <= 0: + raise ValueError('maximum size must be greater than zero') + + batch = [] + batch_size = 0 + batch_count = 0 + for item in iterable: + item_len = get_len(item) + if strict and item_len > max_size: + raise ValueError('item size exceeds maximum size') + + reached_count = batch_count == max_count + reached_size = item_len + batch_size > max_size + if batch_count and (reached_size or reached_count): + yield tuple(batch) + batch.clear() + batch_size = 0 + batch_count = 0 + + batch.append(item) + batch_size += item_len + batch_count += 1 + + if batch: + yield tuple(batch) + + +def gray_product(*iterables): + """Like :func:`itertools.product`, but return tuples in an order such + that only one element in the generated tuple changes from one iteration + to the next. + + >>> list(gray_product('AB','CD')) + [('A', 'C'), ('B', 'C'), ('B', 'D'), ('A', 'D')] + + This function consumes all of the input iterables before producing output. + If any of the input iterables have fewer than two items, ``ValueError`` + is raised. + + For information on the algorithm, see + `this section `__ + of Donald Knuth's *The Art of Computer Programming*. + """ + all_iterables = tuple(tuple(x) for x in iterables) + iterable_count = len(all_iterables) + for iterable in all_iterables: + if len(iterable) < 2: + raise ValueError("each iterable must have two or more items") + + # This is based on "Algorithm H" from section 7.2.1.1, page 20. 
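+    # Added illustration (not from the upstream source): for
+    # gray_product('AB', 'CD') the index vector a steps through
+    # 00 -> 10 -> 11 -> 01, so each successive output tuple changes in
+    # exactly one coordinate.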
+ # a holds the indexes of the source iterables for the n-tuple to be yielded + # f is the array of "focus pointers" + # o is the array of "directions" + a = [0] * iterable_count + f = list(range(iterable_count + 1)) + o = [1] * iterable_count + while True: + yield tuple(all_iterables[i][a[i]] for i in range(iterable_count)) + j = f[0] + f[0] = 0 + if j == iterable_count: + break + a[j] = a[j] + o[j] + if a[j] == 0 or a[j] == len(all_iterables[j]) - 1: + o[j] = -o[j] + f[j] = f[j + 1] + f[j + 1] = j + 1 diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/recipes.py b/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/recipes.py new file mode 100644 index 0000000000000000000000000000000000000000..3facc2e3a67be6aff35c826b05661365e7dc02c3 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/more_itertools/recipes.py @@ -0,0 +1,930 @@ +"""Imported from the recipes section of the itertools documentation. + +All functions taken from the recipes section of the itertools library docs +[1]_. +Some backward-compatible usability improvements have been made. + +.. [1] http://docs.python.org/library/itertools.html#recipes + +""" +import math +import operator +import warnings + +from collections import deque +from collections.abc import Sized +from functools import reduce +from itertools import ( + chain, + combinations, + compress, + count, + cycle, + groupby, + islice, + product, + repeat, + starmap, + tee, + zip_longest, +) +from random import randrange, sample, choice +from sys import hexversion + +__all__ = [ + 'all_equal', + 'batched', + 'before_and_after', + 'consume', + 'convolve', + 'dotproduct', + 'first_true', + 'factor', + 'flatten', + 'grouper', + 'iter_except', + 'iter_index', + 'matmul', + 'ncycles', + 'nth', + 'nth_combination', + 'padnone', + 'pad_none', + 'pairwise', + 'partition', + 'polynomial_from_roots', + 'powerset', + 'prepend', + 'quantify', + 'random_combination_with_replacement', + 'random_combination', + 'random_permutation', + 'random_product', + 'repeatfunc', + 'roundrobin', + 'sieve', + 'sliding_window', + 'subslices', + 'tabulate', + 'tail', + 'take', + 'transpose', + 'triplewise', + 'unique_everseen', + 'unique_justseen', +] + +_marker = object() + + +def take(n, iterable): + """Return first *n* items of the iterable as a list. + + >>> take(3, range(10)) + [0, 1, 2] + + If there are fewer than *n* items in the iterable, all of them are + returned. + + >>> take(10, range(3)) + [0, 1, 2] + + """ + return list(islice(iterable, n)) + + +def tabulate(function, start=0): + """Return an iterator over the results of ``func(start)``, + ``func(start + 1)``, ``func(start + 2)``... + + *func* should be a function that accepts one integer argument. + + If *start* is not specified it defaults to 0. It will be incremented each + time the iterator is advanced. + + >>> square = lambda x: x ** 2 + >>> iterator = tabulate(square, -3) + >>> take(4, iterator) + [9, 4, 1, 0] + + """ + return map(function, count(start)) + + +def tail(n, iterable): + """Return an iterator over the last *n* items of *iterable*. + + >>> t = tail(3, 'ABCDEFG') + >>> list(t) + ['E', 'F', 'G'] + + """ + # If the given iterable has a length, then we can use islice to get its + # final elements. Note that if the iterable is not actually Iterable, + # either islice or deque will throw a TypeError. This is why we don't + # check if it is Iterable. 
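+    # Added example (not from the upstream source): tail(3, iter('ABCDEFG'))
+    # has no length available, so it takes the deque branch below and still
+    # yields ['E', 'F', 'G'].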
+    if isinstance(iterable, Sized):
+        yield from islice(iterable, max(0, len(iterable) - n), None)
+    else:
+        yield from iter(deque(iterable, maxlen=n))
+
+
+def consume(iterator, n=None):
+    """Advance *iterator* by *n* steps. If *n* is ``None``, consume it
+    entirely.
+
+    Efficiently exhausts an iterator without returning values. Defaults to
+    consuming the whole iterator, but an optional second argument may be
+    provided to limit consumption.
+
+    >>> i = (x for x in range(10))
+    >>> next(i)
+    0
+    >>> consume(i, 3)
+    >>> next(i)
+    4
+    >>> consume(i)
+    >>> next(i)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    StopIteration
+
+    If the iterator has fewer items remaining than the provided limit, the
+    whole iterator will be consumed.
+
+    >>> i = (x for x in range(3))
+    >>> consume(i, 5)
+    >>> next(i)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    StopIteration
+
+    """
+    # Use functions that consume iterators at C speed.
+    if n is None:
+        # feed the entire iterator into a zero-length deque
+        deque(iterator, maxlen=0)
+    else:
+        # advance to the empty slice starting at position n
+        next(islice(iterator, n, n), None)
+
+
+def nth(iterable, n, default=None):
+    """Returns the nth item or a default value.
+
+    >>> l = range(10)
+    >>> nth(l, 3)
+    3
+    >>> nth(l, 20, "zebra")
+    'zebra'
+
+    """
+    return next(islice(iterable, n, None), default)
+
+
+def all_equal(iterable):
+    """
+    Returns ``True`` if all the elements are equal to each other.
+
+    >>> all_equal('aaaa')
+    True
+    >>> all_equal('aaab')
+    False
+
+    """
+    g = groupby(iterable)
+    return next(g, True) and not next(g, False)
+
+
+def quantify(iterable, pred=bool):
+    """Return how many times the predicate is true.
+
+    >>> quantify([True, False, True])
+    2
+
+    """
+    return sum(map(pred, iterable))
+
+
+def pad_none(iterable):
+    """Returns the sequence of elements and then returns ``None`` indefinitely.
+
+    >>> take(5, pad_none(range(3)))
+    [0, 1, 2, None, None]
+
+    Useful for emulating the behavior of the built-in :func:`map` function.
+
+    See also :func:`padded`.
+
+    """
+    return chain(iterable, repeat(None))
+
+
+padnone = pad_none
+
+
+def ncycles(iterable, n):
+    """Returns the sequence elements *n* times.
+
+    >>> list(ncycles(["a", "b"], 3))
+    ['a', 'b', 'a', 'b', 'a', 'b']
+
+    """
+    return chain.from_iterable(repeat(tuple(iterable), n))
+
+
+def dotproduct(vec1, vec2):
+    """Returns the dot product of the two iterables.
+
+    >>> dotproduct([10, 10], [20, 20])
+    400
+
+    """
+    return sum(map(operator.mul, vec1, vec2))
+
+
+def flatten(listOfLists):
+    """Return an iterator flattening one level of nesting in a list of lists.
+
+    >>> list(flatten([[0, 1], [2, 3]]))
+    [0, 1, 2, 3]
+
+    See also :func:`collapse`, which can flatten multiple levels of nesting.
+
+    """
+    return chain.from_iterable(listOfLists)
+
+
+def repeatfunc(func, times=None, *args):
+    """Call *func* with *args* repeatedly, returning an iterable over the
+    results.
+ + If *times* is specified, the iterable will terminate after that many + repetitions: + + >>> from operator import add + >>> times = 4 + >>> args = 3, 5 + >>> list(repeatfunc(add, times, *args)) + [8, 8, 8, 8] + + If *times* is ``None`` the iterable will not terminate: + + >>> from random import randrange + >>> times = None + >>> args = 1, 11 + >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP + [2, 4, 8, 1, 8, 4] + + """ + if times is None: + return starmap(func, repeat(args)) + return starmap(func, repeat(args, times)) + + +def _pairwise(iterable): + """Returns an iterator of paired items, overlapping, from the original + + >>> take(4, pairwise(count())) + [(0, 1), (1, 2), (2, 3), (3, 4)] + + On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`. + + """ + a, b = tee(iterable) + next(b, None) + yield from zip(a, b) + + +try: + from itertools import pairwise as itertools_pairwise +except ImportError: + pairwise = _pairwise +else: + + def pairwise(iterable): + yield from itertools_pairwise(iterable) + + pairwise.__doc__ = _pairwise.__doc__ + + +class UnequalIterablesError(ValueError): + def __init__(self, details=None): + msg = 'Iterables have different lengths' + if details is not None: + msg += (': index 0 has length {}; index {} has length {}').format( + *details + ) + + super().__init__(msg) + + +def _zip_equal_generator(iterables): + for combo in zip_longest(*iterables, fillvalue=_marker): + for val in combo: + if val is _marker: + raise UnequalIterablesError() + yield combo + + +def _zip_equal(*iterables): + # Check whether the iterables are all the same size. + try: + first_size = len(iterables[0]) + for i, it in enumerate(iterables[1:], 1): + size = len(it) + if size != first_size: + break + else: + # If we didn't break out, we can use the built-in zip. + return zip(*iterables) + + # If we did break out, there was a mismatch. + raise UnequalIterablesError(details=(first_size, i, size)) + # If any one of the iterables didn't have a length, start reading + # them until one runs out. + except TypeError: + return _zip_equal_generator(iterables) + + +def grouper(iterable, n, incomplete='fill', fillvalue=None): + """Group elements from *iterable* into fixed-length groups of length *n*. + + >>> list(grouper('ABCDEF', 3)) + [('A', 'B', 'C'), ('D', 'E', 'F')] + + The keyword arguments *incomplete* and *fillvalue* control what happens for + iterables whose length is not a multiple of *n*. + + When *incomplete* is `'fill'`, the last group will contain instances of + *fillvalue*. + + >>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x')) + [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] + + When *incomplete* is `'ignore'`, the last group will not be emitted. + + >>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x')) + [('A', 'B', 'C'), ('D', 'E', 'F')] + + When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised. + + >>> it = grouper('ABCDEFG', 3, incomplete='strict') + >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + UnequalIterablesError + + """ + args = [iter(iterable)] * n + if incomplete == 'fill': + return zip_longest(*args, fillvalue=fillvalue) + if incomplete == 'strict': + return _zip_equal(*args) + if incomplete == 'ignore': + return zip(*args) + else: + raise ValueError('Expected fill, strict, or ignore') + + +def roundrobin(*iterables): + """Yields an item from each iterable, alternating between them. 
+ + >>> list(roundrobin('ABC', 'D', 'EF')) + ['A', 'D', 'E', 'B', 'F', 'C'] + + This function produces the same output as :func:`interleave_longest`, but + may perform better for some inputs (in particular when the number of + iterables is small). + + """ + # Recipe credited to George Sakkis + pending = len(iterables) + nexts = cycle(iter(it).__next__ for it in iterables) + while pending: + try: + for next in nexts: + yield next() + except StopIteration: + pending -= 1 + nexts = cycle(islice(nexts, pending)) + + +def partition(pred, iterable): + """ + Returns a 2-tuple of iterables derived from the input iterable. + The first yields the items that have ``pred(item) == False``. + The second yields the items that have ``pred(item) == True``. + + >>> is_odd = lambda x: x % 2 != 0 + >>> iterable = range(10) + >>> even_items, odd_items = partition(is_odd, iterable) + >>> list(even_items), list(odd_items) + ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]) + + If *pred* is None, :func:`bool` is used. + + >>> iterable = [0, 1, False, True, '', ' '] + >>> false_items, true_items = partition(None, iterable) + >>> list(false_items), list(true_items) + ([0, False, ''], [1, True, ' ']) + + """ + if pred is None: + pred = bool + + evaluations = ((pred(x), x) for x in iterable) + t1, t2 = tee(evaluations) + return ( + (x for (cond, x) in t1 if not cond), + (x for (cond, x) in t2 if cond), + ) + + +def powerset(iterable): + """Yields all possible subsets of the iterable. + + >>> list(powerset([1, 2, 3])) + [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] + + :func:`powerset` will operate on iterables that aren't :class:`set` + instances, so repeated elements in the input will produce repeated elements + in the output. Use :func:`unique_everseen` on the input to avoid generating + duplicates: + + >>> seq = [1, 1, 0] + >>> list(powerset(seq)) + [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)] + >>> from more_itertools import unique_everseen + >>> list(powerset(unique_everseen(seq))) + [(), (1,), (0,), (1, 0)] + + """ + s = list(iterable) + return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) + + +def unique_everseen(iterable, key=None): + """ + Yield unique elements, preserving order. + + >>> list(unique_everseen('AAAABBBCCDAABBB')) + ['A', 'B', 'C', 'D'] + >>> list(unique_everseen('ABBCcAD', str.lower)) + ['A', 'B', 'C', 'D'] + + Sequences with a mix of hashable and unhashable items can be used. + The function will be slower (i.e., `O(n^2)`) for unhashable items. + + Remember that ``list`` objects are unhashable - you can use the *key* + parameter to transform the list to a tuple (which is hashable) to + avoid a slowdown. + + >>> iterable = ([1, 2], [2, 3], [1, 2]) + >>> list(unique_everseen(iterable)) # Slow + [[1, 2], [2, 3]] + >>> list(unique_everseen(iterable, key=tuple)) # Faster + [[1, 2], [2, 3]] + + Similary, you may want to convert unhashable ``set`` objects with + ``key=frozenset``. For ``dict`` objects, + ``key=lambda x: frozenset(x.items())`` can be used. 
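+
+    An added illustrative doctest (not in the upstream docstring):
+
+    >>> list(unique_everseen([{'a': 1}, {'a': 1}], key=lambda d: frozenset(d.items())))
+    [{'a': 1}]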
+
+    """
+    seenset = set()
+    seenset_add = seenset.add
+    seenlist = []
+    seenlist_add = seenlist.append
+    use_key = key is not None
+
+    for element in iterable:
+        k = key(element) if use_key else element
+        try:
+            if k not in seenset:
+                seenset_add(k)
+                yield element
+        except TypeError:
+            if k not in seenlist:
+                seenlist_add(k)
+                yield element
+
+
+def unique_justseen(iterable, key=None):
+    """Yields elements in order, ignoring serial duplicates.
+
+    >>> list(unique_justseen('AAAABBBCCDAABBB'))
+    ['A', 'B', 'C', 'D', 'A', 'B']
+    >>> list(unique_justseen('ABBCcAD', str.lower))
+    ['A', 'B', 'C', 'A', 'D']
+
+    """
+    return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
+
+
+def iter_except(func, exception, first=None):
+    """Yields results from a function repeatedly until an exception is raised.
+
+    Converts a call-until-exception interface to an iterator interface.
+    Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
+    to end the loop.
+
+    >>> l = [0, 1, 2]
+    >>> list(iter_except(l.pop, IndexError))
+    [2, 1, 0]
+
+    Multiple exceptions can be specified as a stopping condition:
+
+    >>> l = [1, 2, 3, '...', 4, 5, 6]
+    >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+    [7, 6, 5]
+    >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+    [4, 3, 2]
+    >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
+    []
+
+    """
+    try:
+        if first is not None:
+            yield first()
+        while 1:
+            yield func()
+    except exception:
+        pass
+
+
+def first_true(iterable, default=None, pred=None):
+    """
+    Returns the first true value in the iterable.
+
+    If no true value is found, returns *default*.
+
+    If *pred* is not None, returns the first item for which
+    ``pred(item) == True``.
+
+    >>> first_true(range(10))
+    1
+    >>> first_true(range(10), pred=lambda x: x > 5)
+    6
+    >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
+    'missing'
+
+    """
+    return next(filter(pred, iterable), default)
+
+
+def random_product(*args, repeat=1):
+    """Draw an item at random from each of the input iterables.
+
+    >>> random_product('abc', range(4), 'XYZ')  # doctest:+SKIP
+    ('c', 3, 'Z')
+
+    If *repeat* is provided as a keyword argument, that many items will be
+    drawn from each iterable.
+
+    >>> random_product('abcd', range(4), repeat=2)  # doctest:+SKIP
+    ('a', 2, 'd', 3)
+
+    This is equivalent to taking a random selection from
+    ``itertools.product(*args, **kwargs)``.
+
+    """
+    pools = [tuple(pool) for pool in args] * repeat
+    return tuple(choice(pool) for pool in pools)
+
+
+def random_permutation(iterable, r=None):
+    """Return a random *r* length permutation of the elements in *iterable*.
+
+    If *r* is not specified or is ``None``, then *r* defaults to the length of
+    *iterable*.
+
+    >>> random_permutation(range(5))  # doctest:+SKIP
+    (3, 4, 0, 1, 2)
+
+    This is equivalent to taking a random selection from
+    ``itertools.permutations(iterable, r)``.
+
+    """
+    pool = tuple(iterable)
+    r = len(pool) if r is None else r
+    return tuple(sample(pool, r))
+
+
+def random_combination(iterable, r):
+    """Return a random *r* length subsequence of the elements in *iterable*.
+
+    >>> random_combination(range(5), 3)  # doctest:+SKIP
+    (2, 3, 4)
+
+    This is equivalent to taking a random selection from
+    ``itertools.combinations(iterable, r)``.
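+
+    An added note (not upstream): because the sampled indices are sorted, the
+    output preserves the order of *iterable*, so this case is deterministic:
+
+    >>> random_combination(range(5), 5)
+    (0, 1, 2, 3, 4)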
+ + """ + pool = tuple(iterable) + n = len(pool) + indices = sorted(sample(range(n), r)) + return tuple(pool[i] for i in indices) + + +def random_combination_with_replacement(iterable, r): + """Return a random *r* length subsequence of elements in *iterable*, + allowing individual elements to be repeated. + + >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP + (0, 0, 1, 2, 2) + + This equivalent to taking a random selection from + ``itertools.combinations_with_replacement(iterable, r)``. + + """ + pool = tuple(iterable) + n = len(pool) + indices = sorted(randrange(n) for i in range(r)) + return tuple(pool[i] for i in indices) + + +def nth_combination(iterable, r, index): + """Equivalent to ``list(combinations(iterable, r))[index]``. + + The subsequences of *iterable* that are of length *r* can be ordered + lexicographically. :func:`nth_combination` computes the subsequence at + sort position *index* directly, without computing the previous + subsequences. + + >>> nth_combination(range(5), 3, 5) + (0, 3, 4) + + ``ValueError`` will be raised If *r* is negative or greater than the length + of *iterable*. + ``IndexError`` will be raised if the given *index* is invalid. + """ + pool = tuple(iterable) + n = len(pool) + if (r < 0) or (r > n): + raise ValueError + + c = 1 + k = min(r, n - r) + for i in range(1, k + 1): + c = c * (n - k + i) // i + + if index < 0: + index += c + + if (index < 0) or (index >= c): + raise IndexError + + result = [] + while r: + c, n, r = c * r // n, n - 1, r - 1 + while index >= c: + index -= c + c, n = c * (n - r) // n, n - 1 + result.append(pool[-1 - n]) + + return tuple(result) + + +def prepend(value, iterator): + """Yield *value*, followed by the elements in *iterator*. + + >>> value = '0' + >>> iterator = ['1', '2', '3'] + >>> list(prepend(value, iterator)) + ['0', '1', '2', '3'] + + To prepend multiple values, see :func:`itertools.chain` + or :func:`value_chain`. + + """ + return chain([value], iterator) + + +def convolve(signal, kernel): + """Convolve the iterable *signal* with the iterable *kernel*. + + >>> signal = (1, 2, 3, 4, 5) + >>> kernel = [3, 2, 1] + >>> list(convolve(signal, kernel)) + [3, 8, 14, 20, 26, 14, 5] + + Note: the input arguments are not interchangeable, as the *kernel* + is immediately consumed and stored. + + """ + kernel = tuple(kernel)[::-1] + n = len(kernel) + window = deque([0], maxlen=n) * n + for x in chain(signal, repeat(0, n - 1)): + window.append(x) + yield sum(map(operator.mul, kernel, window)) + + +def before_and_after(predicate, it): + """A variant of :func:`takewhile` that allows complete access to the + remainder of the iterator. + + >>> it = iter('ABCdEfGhI') + >>> all_upper, remainder = before_and_after(str.isupper, it) + >>> ''.join(all_upper) + 'ABC' + >>> ''.join(remainder) # takewhile() would lose the 'd' + 'dEfGhI' + + Note that the first iterator must be fully consumed before the second + iterator can generate valid results. + """ + it = iter(it) + transition = [] + + def true_iterator(): + for elem in it: + if predicate(elem): + yield elem + else: + transition.append(elem) + return + + # Note: this is different from itertools recipes to allow nesting + # before_and_after remainders into before_and_after again. See tests + # for an example. + remainder_iterator = chain(transition, it) + + return true_iterator(), remainder_iterator + + +def triplewise(iterable): + """Return overlapping triplets from *iterable*. 
+ + >>> list(triplewise('ABCDE')) + [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')] + + """ + for (a, _), (b, c) in pairwise(pairwise(iterable)): + yield a, b, c + + +def sliding_window(iterable, n): + """Return a sliding window of width *n* over *iterable*. + + >>> list(sliding_window(range(6), 4)) + [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)] + + If *iterable* has fewer than *n* items, then nothing is yielded: + + >>> list(sliding_window(range(3), 4)) + [] + + For a variant with more features, see :func:`windowed`. + """ + it = iter(iterable) + window = deque(islice(it, n), maxlen=n) + if len(window) == n: + yield tuple(window) + for x in it: + window.append(x) + yield tuple(window) + + +def subslices(iterable): + """Return all contiguous non-empty subslices of *iterable*. + + >>> list(subslices('ABC')) + [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']] + + This is similar to :func:`substrings`, but emits items in a different + order. + """ + seq = list(iterable) + slices = starmap(slice, combinations(range(len(seq) + 1), 2)) + return map(operator.getitem, repeat(seq), slices) + + +def polynomial_from_roots(roots): + """Compute a polynomial's coefficients from its roots. + + >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3) + >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60 + [1, -4, -17, 60] + """ + # Use math.prod for Python 3.8+, + prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1)) + roots = list(map(operator.neg, roots)) + return [ + sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1) + ] + + +def iter_index(iterable, value, start=0): + """Yield the index of each place in *iterable* that *value* occurs, + beginning with index *start*. + + See :func:`locate` for a more general means of finding the indexes + associated with particular values. + + >>> list(iter_index('AABCADEAF', 'A')) + [0, 1, 4, 7] + """ + try: + seq_index = iterable.index + except AttributeError: + # Slow path for general iterables + it = islice(iterable, start, None) + for i, element in enumerate(it, start): + if element is value or element == value: + yield i + else: + # Fast path for sequences + i = start - 1 + try: + while True: + i = seq_index(value, i + 1) + yield i + except ValueError: + pass + + +def sieve(n): + """Yield the primes less than n. + + >>> list(sieve(30)) + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + """ + isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x))) + data = bytearray((0, 1)) * (n // 2) + data[:3] = 0, 0, 0 + limit = isqrt(n) + 1 + for p in compress(range(limit), data): + data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p))) + data[2] = 1 + return iter_index(data, 1) if n > 2 else iter([]) + + +def batched(iterable, n): + """Batch data into lists of length *n*. The last batch may be shorter. + + >>> list(batched('ABCDEFG', 3)) + [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] + + This recipe is from the ``itertools`` docs. This library also provides + :func:`chunked`, which has a different implementation. + """ + if hexversion >= 0x30C00A0: # Python 3.12.0a0 + warnings.warn( + ( + 'batched will be removed in a future version of ' + 'more-itertools. Use the standard library ' + 'itertools.batched function instead' + ), + DeprecationWarning, + ) + + it = iter(iterable) + while True: + batch = list(islice(it, n)) + if not batch: + break + yield batch + + +def transpose(it): + """Swap the rows and columns of the input. 
+ + >>> list(transpose([(1, 2, 3), (11, 22, 33)])) + [(1, 11), (2, 22), (3, 33)] + + The caller should ensure that the dimensions of the input are compatible. + """ + # TODO: when 3.9 goes end-of-life, add stric=True to this. + return zip(*it) + + +def matmul(m1, m2): + """Multiply two matrices. + >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)])) + [[49, 80], [41, 60]] + + The caller should ensure that the dimensions of the input matrices are + compatible with each other. + """ + n = len(m2[0]) + return batched(starmap(dotproduct, product(m1, transpose(m2))), n) + + +def factor(n): + """Yield the prime factors of n. + >>> list(factor(360)) + [2, 2, 2, 3, 3, 5] + """ + isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x))) + for prime in sieve(isqrt(n) + 1): + while True: + quotient, remainder = divmod(n, prime) + if remainder: + break + yield prime + n = quotient + if n == 1: + return + if n >= 2: + yield n diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/__init__.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..13cadc7f04d4c24afb829c9fc44367ac06a3c61d --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/__init__.py @@ -0,0 +1,15 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "23.1" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014-2019 %s" % __author__ diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_elffile.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_elffile.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb19b30bb53c18f38a9ef02dd7c4478670fb962 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_elffile.py @@ -0,0 +1,108 @@ +""" +ELF file parser. + +This provides a class ``ELFFile`` that parses an ELF executable in a similar +interface to ``ZipFile``. Only the read interface is implemented. + +Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca +ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html +""" + +import enum +import os +import struct +from typing import IO, Optional, Tuple + + +class ELFInvalid(ValueError): + pass + + +class EIClass(enum.IntEnum): + C32 = 1 + C64 = 2 + + +class EIData(enum.IntEnum): + Lsb = 1 + Msb = 2 + + +class EMachine(enum.IntEnum): + I386 = 3 + S390 = 22 + Arm = 40 + X8664 = 62 + AArc64 = 183 + + +class ELFFile: + """ + Representation of an ELF executable. + """ + + def __init__(self, f: IO[bytes]) -> None: + self._f = f + + try: + ident = self._read("16B") + except struct.error: + raise ELFInvalid("unable to parse identification") + magic = bytes(ident[:4]) + if magic != b"\x7fELF": + raise ELFInvalid(f"invalid magic: {magic!r}") + + self.capacity = ident[4] # Format for program header (bitness). + self.encoding = ident[5] # Data structure encoding (endianness). + + try: + # e_fmt: Format for program header. + # p_fmt: Format for section header. + # p_idx: Indexes to find p_type, p_offset, and p_filesz. 
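+            # Added note (not upstream): in each struct format below, a
+            # leading "<" selects little-endian and ">" big-endian decoding;
+            # "I" is a 4-byte and "Q" an 8-byte unsigned field.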
+            e_fmt, self._p_fmt, self._p_idx = {
+                (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)),  # 32-bit LSB.
+                (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
+                (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)),  # 64-bit LSB.
+                (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
+            }[(self.capacity, self.encoding)]
+        except KeyError:
+            raise ELFInvalid(
+                f"unrecognized capacity ({self.capacity}) or "
+                f"encoding ({self.encoding})"
+            )
+
+        try:
+            (
+                _,
+                self.machine,  # Architecture type.
+                _,
+                _,
+                self._e_phoff,  # Offset of program header.
+                _,
+                self.flags,  # Processor-specific flags.
+                _,
+                self._e_phentsize,  # Size of section.
+                self._e_phnum,  # Number of sections.
+            ) = self._read(e_fmt)
+        except struct.error as e:
+            raise ELFInvalid("unable to parse machine and section information") from e
+
+    def _read(self, fmt: str) -> Tuple[int, ...]:
+        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
+
+    @property
+    def interpreter(self) -> Optional[str]:
+        """
+        The path recorded in the ``PT_INTERP`` section header.
+        """
+        for index in range(self._e_phnum):
+            self._f.seek(self._e_phoff + self._e_phentsize * index)
+            try:
+                data = self._read(self._p_fmt)
+            except struct.error:
+                continue
+            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
+                continue
+            self._f.seek(data[self._p_idx[1]])
+            return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
+        return None
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_manylinux.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_manylinux.py
new file mode 100644
index 0000000000000000000000000000000000000000..449c655be65a948f7b2476302a46c35d9e7605ac
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_manylinux.py
@@ -0,0 +1,240 @@
+import collections
+import contextlib
+import functools
+import os
+import re
+import sys
+import warnings
+from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple
+
+from ._elffile import EIClass, EIData, ELFFile, EMachine
+
+EF_ARM_ABIMASK = 0xFF000000
+EF_ARM_ABI_VER5 = 0x05000000
+EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+
+# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
+# as the type for `path` until then.
+@contextlib.contextmanager
+def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
+    try:
+        with open(path, "rb") as f:
+            yield ELFFile(f)
+    except (OSError, TypeError, ValueError):
+        yield None
+
+
+def _is_linux_armhf(executable: str) -> bool:
+    # hard-float ABI can be detected from the ELF header of the running
+    # process
+    # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+    with _parse_elf(executable) as f:
+        return (
+            f is not None
+            and f.capacity == EIClass.C32
+            and f.encoding == EIData.Lsb
+            and f.machine == EMachine.Arm
+            and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
+            and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
+        )
+
+
+def _is_linux_i686(executable: str) -> bool:
+    with _parse_elf(executable) as f:
+        return (
+            f is not None
+            and f.capacity == EIClass.C32
+            and f.encoding == EIData.Lsb
+            and f.machine == EMachine.I386
+        )
+
+
+def _have_compatible_abi(executable: str, arch: str) -> bool:
+    if arch == "armv7l":
+        return _is_linux_armhf(executable)
+    if arch == "i686":
+        return _is_linux_i686(executable)
+    return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
+
+
+# If glibc ever changes its major version, we need to know what the last
+# minor version was, so we can build the complete list of all versions.
+# For now, guess what the highest minor version might be, assume it will
+# be 50 for testing.
Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> Optional[str]: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. + # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # Should be a string like "glibc 2.17". + version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.rsplit() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> Optional[str]: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +def _glibc_version_string() -> Optional[str]: + """Returns glibc version string, or None if not using glibc.""" + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _parse_glibc_version(version_str: str) -> Tuple[int, int]: + """Parse glibc version. + + We use a regexp instead of str.split because we want to discard any + random junk that might come after the minor version -- this might happen + in patched/forked versions of glibc (e.g. Linaro's version of glibc + uses version strings like "2.20-2014.11"). See gh-3588. 
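+
+    Added illustrative doctests (not in the upstream docstring):
+
+    >>> _parse_glibc_version("2.17")
+    (2, 17)
+    >>> _parse_glibc_version("2.20-2014.11")
+    (2, 20)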
+    """
+    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+    if not m:
+        warnings.warn(
+            f"Expected glibc version with 2 components major.minor,"
+            f" got: {version_str}",
+            RuntimeWarning,
+        )
+        return -1, -1
+    return int(m.group("major")), int(m.group("minor"))
+
+
+@functools.lru_cache()
+def _get_glibc_version() -> Tuple[int, int]:
+    version_str = _glibc_version_string()
+    if version_str is None:
+        return (-1, -1)
+    return _parse_glibc_version(version_str)
+
+
+# From PEP 513, PEP 600
+def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
+    sys_glibc = _get_glibc_version()
+    if sys_glibc < version:
+        return False
+    # Check for presence of _manylinux module.
+    try:
+        import _manylinux  # noqa
+    except ImportError:
+        return True
+    if hasattr(_manylinux, "manylinux_compatible"):
+        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+        if result is not None:
+            return bool(result)
+        return True
+    if version == _GLibCVersion(2, 5):
+        if hasattr(_manylinux, "manylinux1_compatible"):
+            return bool(_manylinux.manylinux1_compatible)
+    if version == _GLibCVersion(2, 12):
+        if hasattr(_manylinux, "manylinux2010_compatible"):
+            return bool(_manylinux.manylinux2010_compatible)
+    if version == _GLibCVersion(2, 17):
+        if hasattr(_manylinux, "manylinux2014_compatible"):
+            return bool(_manylinux.manylinux2014_compatible)
+    return True
+
+
+_LEGACY_MANYLINUX_MAP = {
+    # CentOS 7 w/ glibc 2.17 (PEP 599)
+    (2, 17): "manylinux2014",
+    # CentOS 6 w/ glibc 2.12 (PEP 571)
+    (2, 12): "manylinux2010",
+    # CentOS 5 w/ glibc 2.5 (PEP 513)
+    (2, 5): "manylinux1",
+}
+
+
+def platform_tags(linux: str, arch: str) -> Iterator[str]:
+    if not _have_compatible_abi(sys.executable, arch):
+        return
+    # Oldest glibc to be supported regardless of architecture is (2, 17).
+    too_old_glibc2 = _GLibCVersion(2, 16)
+    if arch in {"x86_64", "i686"}:
+        # On x86/i686 also oldest glibc to be supported is (2, 5).
+        too_old_glibc2 = _GLibCVersion(2, 4)
+    current_glibc = _GLibCVersion(*_get_glibc_version())
+    glibc_max_list = [current_glibc]
+    # We can assume compatibility across glibc major versions.
+    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+    #
+    # Build a list of maximum glibc versions so that we can
+    # output the canonical list of all glibc from current_glibc
+    # down to too_old_glibc2, including all intermediary versions.
+    for glibc_major in range(current_glibc.major - 1, 1, -1):
+        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
+        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
+    for glibc_max in glibc_max_list:
+        if glibc_max.major == too_old_glibc2.major:
+            min_minor = too_old_glibc2.minor
+        else:
+            # For other glibc major versions oldest supported is (x, 0).
+            min_minor = -1
+        for glibc_minor in range(glibc_max.minor, min_minor, -1):
+            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+            tag = "manylinux_{}_{}".format(*glibc_version)
+            if _is_compatible(tag, arch, glibc_version):
+                yield linux.replace("linux", tag)
+            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
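+            # Added example (not upstream): with glibc 2.17 on x86_64 this
+            # yields "manylinux2014_x86_64" right after
+            # "manylinux_2_17_x86_64".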
+ if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(legacy_tag, arch, glibc_version): + yield linux.replace("linux", legacy_tag) diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_musllinux.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_musllinux.py new file mode 100644 index 0000000000000000000000000000000000000000..706ba600a93c1b72594d96d3026daaa1998935b6 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_musllinux.py @@ -0,0 +1,80 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. +""" + +import functools +import re +import subprocess +import sys +from typing import Iterator, NamedTuple, Optional + +from ._elffile import ELFFile + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> Optional[_MuslVersion]: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache() +def _get_musl_version(executable: str) -> Optional[_MuslVersion]: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + try: + with open(executable, "rb") as f: + ld = ELFFile(f).interpreter + except (OSError, TypeError, ValueError): + return None + if ld is None or "musl" not in ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(arch: str) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param arch: Should be the part of platform tag after the ``linux_`` + prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a + prerequisite for the current platform to be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. + return + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_parser.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..5a18b758fe0065416a92b9047dc9c392a3de2c4f --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_parser.py @@ -0,0 +1,353 @@ +"""Handwritten parser of dependency specifiers. + +The docstring for each __parse_* function contains ENBF-inspired grammar representing +the implementation. 
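+
+For example (added illustration, not upstream), parsing the requirement
+string ``name[extra] >= 1.0 ; python_version < "3.8"`` yields the package
+name, one extra, a version specifier, and a marker list.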
+""" + +import ast +from typing import Any, List, NamedTuple, Optional, Tuple, Union + +from ._tokenizer import DEFAULT_RULES, Tokenizer + + +class Node: + def __init__(self, value: str) -> None: + self.value = value + + def __str__(self) -> str: + return self.value + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +MarkerVar = Union[Variable, Value] +MarkerItem = Tuple[MarkerVar, Op, MarkerVar] +# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]] +# MarkerList = List[Union["MarkerList", MarkerAtom, str]] +# mypy does not support recursive type definition +# https://github.com/python/mypy/issues/731 +MarkerAtom = Any +MarkerList = List[Any] + + +class ParsedRequirement(NamedTuple): + name: str + url: str + extras: List[str] + specifier: str + marker: Optional[MarkerList] + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for dependency specifier +# -------------------------------------------------------------------------------------- +def parse_requirement(source: str) -> ParsedRequirement: + return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement: + """ + requirement = WS? IDENTIFIER WS? extras WS? requirement_details + """ + tokenizer.consume("WS") + + name_token = tokenizer.expect( + "IDENTIFIER", expected="package name at the start of dependency specifier" + ) + name = name_token.text + tokenizer.consume("WS") + + extras = _parse_extras(tokenizer) + tokenizer.consume("WS") + + url, specifier, marker = _parse_requirement_details(tokenizer) + tokenizer.expect("END", expected="end of dependency specifier") + + return ParsedRequirement(name, url, extras, specifier, marker) + + +def _parse_requirement_details( + tokenizer: Tokenizer, +) -> Tuple[str, str, Optional[MarkerList]]: + """ + requirement_details = AT URL (WS requirement_marker?)? + | specifier WS? (requirement_marker)? + """ + + specifier = "" + url = "" + marker = None + + if tokenizer.check("AT"): + tokenizer.read() + tokenizer.consume("WS") + + url_start = tokenizer.position + url = tokenizer.expect("URL", expected="URL after @").text + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + tokenizer.expect("WS", expected="whitespace after URL") + + # The input might end after whitespace. + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, span_start=url_start, after="URL and whitespace" + ) + else: + specifier_start = tokenizer.position + specifier = _parse_specifier(tokenizer) + tokenizer.consume("WS") + + if tokenizer.check("END", peek=True): + return (url, specifier, marker) + + marker = _parse_requirement_marker( + tokenizer, + span_start=specifier_start, + after=( + "version specifier" + if specifier + else "name and no valid version specifier" + ), + ) + + return (url, specifier, marker) + + +def _parse_requirement_marker( + tokenizer: Tokenizer, *, span_start: int, after: str +) -> MarkerList: + """ + requirement_marker = SEMICOLON marker WS? 
+ """ + + if not tokenizer.check("SEMICOLON"): + tokenizer.raise_syntax_error( + f"Expected end or semicolon (after {after})", + span_start=span_start, + ) + tokenizer.read() + + marker = _parse_marker(tokenizer) + tokenizer.consume("WS") + + return marker + + +def _parse_extras(tokenizer: Tokenizer) -> List[str]: + """ + extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)? + """ + if not tokenizer.check("LEFT_BRACKET", peek=True): + return [] + + with tokenizer.enclosing_tokens( + "LEFT_BRACKET", + "RIGHT_BRACKET", + around="extras", + ): + tokenizer.consume("WS") + extras = _parse_extras_list(tokenizer) + tokenizer.consume("WS") + + return extras + + +def _parse_extras_list(tokenizer: Tokenizer) -> List[str]: + """ + extras_list = identifier (wsp* ',' wsp* identifier)* + """ + extras: List[str] = [] + + if not tokenizer.check("IDENTIFIER"): + return extras + + extras.append(tokenizer.read().text) + + while True: + tokenizer.consume("WS") + if tokenizer.check("IDENTIFIER", peek=True): + tokenizer.raise_syntax_error("Expected comma between extra names") + elif not tokenizer.check("COMMA"): + break + + tokenizer.read() + tokenizer.consume("WS") + + extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma") + extras.append(extra_token.text) + + return extras + + +def _parse_specifier(tokenizer: Tokenizer) -> str: + """ + specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS + | WS? version_many WS? + """ + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="version specifier", + ): + tokenizer.consume("WS") + parsed_specifiers = _parse_version_many(tokenizer) + tokenizer.consume("WS") + + return parsed_specifiers + + +def _parse_version_many(tokenizer: Tokenizer) -> str: + """ + version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)? + """ + parsed_specifiers = "" + while tokenizer.check("SPECIFIER"): + span_start = tokenizer.position + parsed_specifiers += tokenizer.read().text + if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True): + tokenizer.raise_syntax_error( + ".* suffix can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position + 1, + ) + if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True): + tokenizer.raise_syntax_error( + "Local version label can only be used with `==` or `!=` operators", + span_start=span_start, + span_end=tokenizer.position, + ) + tokenizer.consume("WS") + if not tokenizer.check("COMMA"): + break + parsed_specifiers += tokenizer.read().text + tokenizer.consume("WS") + + return parsed_specifiers + + +# -------------------------------------------------------------------------------------- +# Recursive descent parser for marker expression +# -------------------------------------------------------------------------------------- +def parse_marker(source: str) -> MarkerList: + return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES)) + + +def _parse_marker(tokenizer: Tokenizer) -> MarkerList: + """ + marker = marker_atom (BOOLOP marker_atom)+ + """ + expression = [_parse_marker_atom(tokenizer)] + while tokenizer.check("BOOLOP"): + token = tokenizer.read() + expr_right = _parse_marker_atom(tokenizer) + expression.extend((token.text, expr_right)) + return expression + + +def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom: + """ + marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS? + | WS? marker_item WS? 
+ """ + + tokenizer.consume("WS") + if tokenizer.check("LEFT_PARENTHESIS", peek=True): + with tokenizer.enclosing_tokens( + "LEFT_PARENTHESIS", + "RIGHT_PARENTHESIS", + around="marker expression", + ): + tokenizer.consume("WS") + marker: MarkerAtom = _parse_marker(tokenizer) + tokenizer.consume("WS") + else: + marker = _parse_marker_item(tokenizer) + tokenizer.consume("WS") + return marker + + +def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem: + """ + marker_item = WS? marker_var WS? marker_op WS? marker_var WS? + """ + tokenizer.consume("WS") + marker_var_left = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + marker_op = _parse_marker_op(tokenizer) + tokenizer.consume("WS") + marker_var_right = _parse_marker_var(tokenizer) + tokenizer.consume("WS") + return (marker_var_left, marker_op, marker_var_right) + + +def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar: + """ + marker_var = VARIABLE | QUOTED_STRING + """ + if tokenizer.check("VARIABLE"): + return process_env_var(tokenizer.read().text.replace(".", "_")) + elif tokenizer.check("QUOTED_STRING"): + return process_python_str(tokenizer.read().text) + else: + tokenizer.raise_syntax_error( + message="Expected a marker variable or quoted string" + ) + + +def process_env_var(env_var: str) -> Variable: + if ( + env_var == "platform_python_implementation" + or env_var == "python_implementation" + ): + return Variable("platform_python_implementation") + else: + return Variable(env_var) + + +def process_python_str(python_str: str) -> Value: + value = ast.literal_eval(python_str) + return Value(str(value)) + + +def _parse_marker_op(tokenizer: Tokenizer) -> Op: + """ + marker_op = IN | NOT IN | OP + """ + if tokenizer.check("IN"): + tokenizer.read() + return Op("in") + elif tokenizer.check("NOT"): + tokenizer.read() + tokenizer.expect("WS", expected="whitespace after 'not'") + tokenizer.expect("IN", expected="'in' after 'not'") + return Op("not in") + elif tokenizer.check("OP"): + return Op(tokenizer.read().text) + else: + return tokenizer.raise_syntax_error( + "Expected marker operator, one of " + "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + ) diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_structures.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..90a6465f9682c886363eea5327dac64bf623a6ff --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
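+
+# Added note (not upstream): the two sentinel singletons below, Infinity and
+# NegativeInfinity, compare greater/less than any other object and are used
+# as extreme values when ordering version components.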
+ + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_tokenizer.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..dd0d648d49a7c1a62d25ce5c9107aa448a8a22d1 --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/_tokenizer.py @@ -0,0 +1,192 @@ +import contextlib +import re +from dataclasses import dataclass +from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union + +from .specifiers import Specifier + + +@dataclass +class Token: + name: str + text: str + position: int + + +class ParserSyntaxError(Exception): + """The provided source text could not be parsed correctly.""" + + def __init__( + self, + message: str, + *, + source: str, + span: Tuple[int, int], + ) -> None: + self.span = span + self.message = message + self.source = source + + super().__init__() + + def __str__(self) -> str: + marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" + return "\n ".join([self.message, self.source, marker]) + + +DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = { + "LEFT_PARENTHESIS": r"\(", + "RIGHT_PARENTHESIS": r"\)", + "LEFT_BRACKET": r"\[", + "RIGHT_BRACKET": r"\]", + "SEMICOLON": r";", + "COMMA": r",", + "QUOTED_STRING": re.compile( + r""" + ( + ('[^']*') + | + ("[^"]*") + ) + """, + re.VERBOSE, + ), + "OP": r"(===|==|~=|!=|<=|>=|<|>)", + "BOOLOP": r"\b(or|and)\b", + "IN": r"\bin\b", + "NOT": r"\bnot\b", + "VARIABLE": re.compile( + r""" + \b( + python_version + |python_full_version + |os[._]name + |sys[._]platform + |platform_(release|system) + |platform[._](version|machine|python_implementation) + |python_implementation + |implementation_(name|version) + |extra + )\b + """, + re.VERBOSE, + ), + "SPECIFIER": re.compile( + Specifier._operator_regex_str + Specifier._version_regex_str, + re.VERBOSE | re.IGNORECASE, + ), + "AT": r"\@", + "URL": r"[^ \t]+", + "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", + "VERSION_PREFIX_TRAIL": r"\.\*", + "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", + "WS": r"[ \t]+", + "END": r"$", +} + + +class Tokenizer: + """Context-sensitive token parsing. + + Provides methods to examine the input stream to check whether the next token + matches. 
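+
+    A usage sketch (illustrative):
+
+        tokenizer = Tokenizer("foo>=1.0", rules=DEFAULT_RULES)
+        tokenizer.check("IDENTIFIER")   # True -- loads the 'foo' token
+        tokenizer.read().text           # 'foo'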
+ """ + + def __init__( + self, + source: str, + *, + rules: "Dict[str, Union[str, re.Pattern[str]]]", + ) -> None: + self.source = source + self.rules: Dict[str, re.Pattern[str]] = { + name: re.compile(pattern) for name, pattern in rules.items() + } + self.next_token: Optional[Token] = None + self.position = 0 + + def consume(self, name: str) -> None: + """Move beyond provided token name, if at current position.""" + if self.check(name): + self.read() + + def check(self, name: str, *, peek: bool = False) -> bool: + """Check whether the next token has the provided name. + + By default, if the check succeeds, the token *must* be read before + another check. If `peek` is set to `True`, the token is not loaded and + would need to be checked again. + """ + assert ( + self.next_token is None + ), f"Cannot check for {name!r}, already have {self.next_token!r}" + assert name in self.rules, f"Unknown token name: {name!r}" + + expression = self.rules[name] + + match = expression.match(self.source, self.position) + if match is None: + return False + if not peek: + self.next_token = Token(name, match[0], self.position) + return True + + def expect(self, name: str, *, expected: str) -> Token: + """Expect a certain token name next, failing with a syntax error otherwise. + + The token is *not* read. + """ + if not self.check(name): + raise self.raise_syntax_error(f"Expected {expected}") + return self.read() + + def read(self) -> Token: + """Consume the next token and return it.""" + token = self.next_token + assert token is not None + + self.position += len(token.text) + self.next_token = None + + return token + + def raise_syntax_error( + self, + message: str, + *, + span_start: Optional[int] = None, + span_end: Optional[int] = None, + ) -> NoReturn: + """Raise ParserSyntaxError at the given position.""" + span = ( + self.position if span_start is None else span_start, + self.position if span_end is None else span_end, + ) + raise ParserSyntaxError( + message, + source=self.source, + span=span, + ) + + @contextlib.contextmanager + def enclosing_tokens( + self, open_token: str, close_token: str, *, around: str + ) -> Iterator[None]: + if self.check(open_token): + open_position = self.position + self.read() + else: + open_position = None + + yield + + if open_position is None: + return + + if not self.check(close_token): + self.raise_syntax_error( + f"Expected matching {close_token} for {open_token}, after {around}", + span_start=open_position, + ) + + self.read() diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/markers.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/markers.py new file mode 100644 index 0000000000000000000000000000000000000000..8b98fca7233be6dd9324cd2b6d71b6a8ac91a6cb --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/markers.py @@ -0,0 +1,252 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+
+import operator
+import os
+import platform
+import sys
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from ._parser import (
+    MarkerAtom,
+    MarkerList,
+    Op,
+    Value,
+    Variable,
+    parse_marker as _parse_marker,
+)
+from ._tokenizer import ParserSyntaxError
+from .specifiers import InvalidSpecifier, Specifier
+from .utils import canonicalize_name
+
+__all__ = [
+    "InvalidMarker",
+    "UndefinedComparison",
+    "UndefinedEnvironmentName",
+    "Marker",
+    "default_environment",
+]
+
+Operator = Callable[[str, str], bool]
+
+
+class InvalidMarker(ValueError):
+    """
+    An invalid marker was found, users should refer to PEP 508.
+    """
+
+
+class UndefinedComparison(ValueError):
+    """
+    An invalid operation was attempted on a value that doesn't support it.
+    """
+
+
+class UndefinedEnvironmentName(ValueError):
+    """
+    A name was attempted to be used that does not exist inside of the
+    environment.
+    """
+
+
+def _normalize_extra_values(results: Any) -> Any:
+    """
+    Normalize extra values.
+    """
+    if isinstance(results[0], tuple):
+        lhs, op, rhs = results[0]
+        if isinstance(lhs, Variable) and lhs.value == "extra":
+            normalized_extra = canonicalize_name(rhs.value)
+            rhs = Value(normalized_extra)
+        elif isinstance(rhs, Variable) and rhs.value == "extra":
+            normalized_extra = canonicalize_name(lhs.value)
+            lhs = Value(normalized_extra)
+        results[0] = lhs, op, rhs
+    return results
+
+
+def _format_marker(
+    marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
+) -> str:
+
+    assert isinstance(marker, (list, tuple, str))
+
+    # Sometimes we have a structure like [[...]] which is a single item list
+    # where the single item is itself its own list. In that case we want to
+    # skip the rest of this function so that we don't get extraneous () on
+    # the outside.
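+    # (For example -- an illustrative note, not upstream text --
+    # [[(<Variable('os_name')>, <Op('==')>, <Value('posix')>)]] should format
+    # as 'os_name == "posix"' rather than '(os_name == "posix")'.)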
+ if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: Dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs, prereleases=True) + + oper: Optional[Operator] = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +def _normalize(*values: str, key: str) -> Tuple[str, ...]: + # PEP 685 – Comparison of extra names for optional distribution dependencies + # https://peps.python.org/pep-0685/ + # > When comparing extra names, tools MUST normalize the names being + # > compared using the semantics outlined in PEP 503 for names + if key == "extra": + return tuple(canonicalize_name(v) for v in values) + + # other environment markers don't have such standards + return values + + +def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool: + groups: List[List[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + environment_key = lhs.value + lhs_value = environment[environment_key] + rhs_value = rhs.value + else: + lhs_value = lhs.value + environment_key = rhs.value + rhs_value = environment[environment_key] + + lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info: "sys._version_info") -> str: + version = "{0.major}.{0.minor}.{0.micro}".format(info) + kind = info.releaselevel + if kind != "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment() -> Dict[str, str]: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": ".".join(platform.python_version_tuple()[:2]), + "sys_platform": sys.platform, + } + + +class Marker: + def __init__(self, marker: str) -> None: + # Note: We create a Marker object without calling this constructor in + # packaging.requirements.Requirement. If any additional logic is + # added here, make sure to mirror/adapt Requirement. 
+        try:
+            self._markers = _normalize_extra_values(_parse_marker(marker))
+            # The attribute `_markers` can be described in terms of a recursive type:
+            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
+            #
+            # For example, the following expression:
+            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
+            #
+            # is parsed into:
+            # [
+            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
+            #     'and',
+            #     [
+            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
+            #         'or',
+            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
+            #     ]
+            # ]
+        except ParserSyntaxError as e:
+            raise InvalidMarker(str(e)) from e
+
+    def __str__(self) -> str:
+        return _format_marker(self._markers)
+
+    def __repr__(self) -> str:
+        return f"<Marker('{self}')>"
+
+    def __hash__(self) -> int:
+        return hash((self.__class__.__name__, str(self)))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Marker):
+            return NotImplemented
+
+        return str(self) == str(other)
+
+    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = default_environment()
+        current_environment["extra"] = ""
+        if environment is not None:
+            current_environment.update(environment)
+            # The API used to allow setting extra to None. We need to handle this
+            # case for backwards compatibility.
+            if current_environment["extra"] is None:
+                current_environment["extra"] = ""
+
+        return _evaluate_markers(self._markers, current_environment)
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/metadata.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..e76a60c395eb62d5f05d7248cf67210cdd10740d
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/metadata.py
@@ -0,0 +1,408 @@
+import email.feedparser
+import email.header
+import email.message
+import email.parser
+import email.policy
+import sys
+import typing
+from typing import Dict, List, Optional, Tuple, Union, cast
+
+if sys.version_info >= (3, 8):  # pragma: no cover
+    from typing import TypedDict
+else:  # pragma: no cover
+    if typing.TYPE_CHECKING:
+        from typing_extensions import TypedDict
+    else:
+        try:
+            from typing_extensions import TypedDict
+        except ImportError:
+
+            class TypedDict:
+                def __init_subclass__(*_args, **_kwargs):
+                    pass
+
+
+# The RawMetadata class attempts to make as few assumptions about the underlying
+# serialization formats as possible. The idea is that as long as a serialization
+# format offers some very basic primitives in *some* way, then we can support
+# serializing to and from that format.
+class RawMetadata(TypedDict, total=False):
+    """A dictionary of raw core metadata.
+
+    Each field in core metadata maps to a key of this dictionary (when data is
+    provided). The key is lower-case and underscores are used instead of dashes
+    compared to the equivalent core metadata field. Any core metadata field that
+    can be specified multiple times or can hold multiple values in a single
+    field has a key with a plural name.
+
+    Core metadata fields that can be specified multiple times are stored as a
+    list or dict depending on which is appropriate for the field. Any fields
+    which hold multiple values in a single field are stored as a list.
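+
+    For example (an illustrative note): the single-use core metadata field
+    ``Description-Content-Type`` maps to the key ``description_content_type``,
+    while the repeatable field ``Classifier`` maps to the list-valued key
+    ``classifiers``.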
+ + """ + + # Metadata 1.0 - PEP 241 + metadata_version: str + name: str + version: str + platforms: List[str] + summary: str + description: str + keywords: List[str] + home_page: str + author: str + author_email: str + license: str + + # Metadata 1.1 - PEP 314 + supported_platforms: List[str] + download_url: str + classifiers: List[str] + requires: List[str] + provides: List[str] + obsoletes: List[str] + + # Metadata 1.2 - PEP 345 + maintainer: str + maintainer_email: str + requires_dist: List[str] + provides_dist: List[str] + obsoletes_dist: List[str] + requires_python: str + requires_external: List[str] + project_urls: Dict[str, str] + + # Metadata 2.0 + # PEP 426 attempted to completely revamp the metadata format + # but got stuck without ever being able to build consensus on + # it and ultimately ended up withdrawn. + # + # However, a number of tools had started emiting METADATA with + # `2.0` Metadata-Version, so for historical reasons, this version + # was skipped. + + # Metadata 2.1 - PEP 566 + description_content_type: str + provides_extra: List[str] + + # Metadata 2.2 - PEP 643 + dynamic: List[str] + + # Metadata 2.3 - PEP 685 + # No new fields were added in PEP 685, just some edge case were + # tightened up to provide better interoptability. + + +_STRING_FIELDS = { + "author", + "author_email", + "description", + "description_content_type", + "download_url", + "home_page", + "license", + "maintainer", + "maintainer_email", + "metadata_version", + "name", + "requires_python", + "summary", + "version", +} + +_LIST_STRING_FIELDS = { + "classifiers", + "dynamic", + "obsoletes", + "obsoletes_dist", + "platforms", + "provides", + "provides_dist", + "provides_extra", + "requires", + "requires_dist", + "requires_external", + "supported_platforms", +} + + +def _parse_keywords(data: str) -> List[str]: + """Split a string of comma-separate keyboards into a list of keywords.""" + return [k.strip() for k in data.split(",")] + + +def _parse_project_urls(data: List[str]) -> Dict[str, str]: + """Parse a list of label/URL string pairings separated by a comma.""" + urls = {} + for pair in data: + # Our logic is slightly tricky here as we want to try and do + # *something* reasonable with malformed data. + # + # The main thing that we have to worry about, is data that does + # not have a ',' at all to split the label from the Value. There + # isn't a singular right answer here, and we will fail validation + # later on (if the caller is validating) so it doesn't *really* + # matter, but since the missing value has to be an empty str + # and our return value is dict[str, str], if we let the key + # be the missing value, then they'd have multiple '' values that + # overwrite each other in a accumulating dict. + # + # The other potentional issue is that it's possible to have the + # same label multiple times in the metadata, with no solid "right" + # answer with what to do in that case. As such, we'll do the only + # thing we can, which is treat the field as unparseable and add it + # to our list of unparsed fields. + parts = [p.strip() for p in pair.split(",", 1)] + parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items + + # TODO: The spec doesn't say anything about if the keys should be + # considered case sensitive or not... logically they should + # be case-preserving and case-insensitive, but doing that + # would open up more cases where we might have duplicate + # entries. 
+ label, url = parts + if label in urls: + # The label already exists in our set of urls, so this field + # is unparseable, and we can just add the whole thing to our + # unparseable data and stop processing it. + raise KeyError("duplicate labels in project urls") + urls[label] = url + + return urls + + +def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str: + """Get the body of the message.""" + # If our source is a str, then our caller has managed encodings for us, + # and we don't need to deal with it. + if isinstance(source, str): + payload: str = msg.get_payload() + return payload + # If our source is a bytes, then we're managing the encoding and we need + # to deal with it. + else: + bpayload: bytes = msg.get_payload(decode=True) + try: + return bpayload.decode("utf8", "strict") + except UnicodeDecodeError: + raise ValueError("payload in an invalid encoding") + + +# The various parse_FORMAT functions here are intended to be as lenient as +# possible in their parsing, while still returning a correctly typed +# RawMetadata. +# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with. +# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} + + +def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]: + """Parse a distribution's metadata. + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. Finally, + any fields that are expected to appear only once but are repeated are + included in this dict. 
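+
+    A usage sketch (illustrative, with a made-up project name)::
+
+        raw, unparsed = parse_email("Metadata-Version: 2.1\nName: sampleproject")
+        raw["metadata_version"]  # '2.1'
+        raw["name"]              # 'sampleproject'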
+ + """ + raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {} + unparsed: Dict[str, List[str]] = {} + + if isinstance(data, str): + parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) + else: + parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) + + # We have to wrap parsed.keys() in a set, because in the case of multiple + # values for a key (a list), the key will appear multiple times in the + # list of keys, but we're avoiding that by using get_all(). + for name in frozenset(parsed.keys()): + # Header names in RFC are case insensitive, so we'll normalize to all + # lower case to make comparisons easier. + name = name.lower() + + # We use get_all() here, even for fields that aren't multiple use, + # because otherwise someone could have e.g. two Name fields, and we + # would just silently ignore it rather than doing something about it. + headers = parsed.get_all(name) + + # The way the email module works when parsing bytes is that it + # unconditionally decodes the bytes as ascii using the surrogateescape + # handler. When you pull that data back out (such as with get_all() ), + # it looks to see if the str has any surrogate escapes, and if it does + # it wraps it in a Header object instead of returning the string. + # + # As such, we'll look for those Header objects, and fix up the encoding. + value = [] + # Flag if we have run into any issues processing the headers, thus + # signalling that the data belongs in 'unparsed'. + valid_encoding = True + for h in headers: + # It's unclear if this can return more types than just a Header or + # a str, so we'll just assert here to make sure. + assert isinstance(h, (email.header.Header, str)) + + # If it's a header object, we need to do our little dance to get + # the real data out of it. In cases where there is invalid data + # we're going to end up with mojibake, but there's no obvious, good + # way around that without reimplementing parts of the Header object + # ourselves. + # + # That should be fine since, if mojibacked happens, this key is + # going into the unparsed dict anyways. + if isinstance(h, email.header.Header): + # The Header object stores it's data as chunks, and each chunk + # can be independently encoded, so we'll need to check each + # of them. + chunks: List[Tuple[bytes, Optional[str]]] = [] + for bin, encoding in email.header.decode_header(h): + try: + bin.decode("utf8", "strict") + except UnicodeDecodeError: + # Enable mojibake. + encoding = "latin1" + valid_encoding = False + else: + encoding = "utf8" + chunks.append((bin, encoding)) + + # Turn our chunks back into a Header object, then let that + # Header object do the right thing to turn them into a + # string for us. + value.append(str(email.header.make_header(chunks))) + # This is already a string, so just add it. + else: + value.append(h) + + # We've processed all of our values to get them into a list of str, + # but we may have mojibake data, in which case this is an unparsed + # field. + if not valid_encoding: + unparsed[name] = value + continue + + raw_name = _EMAIL_TO_RAW_MAPPING.get(name) + if raw_name is None: + # This is a bit of a weird situation, we've encountered a key that + # we don't know what it means, so we don't know whether it's meant + # to be a list or not. + # + # Since we can't really tell one way or another, we'll just leave it + # as a list, even though it may be a single item list, because that's + # what makes the most sense for email headers. 
+            unparsed[name] = value
+            continue
+
+        # If this is one of our string fields, then we'll check to see if our
+        # value is a list of a single item. If it is then we'll assume that
+        # it was emitted as a single string, and unwrap the str from inside
+        # the list.
+        #
+        # If it's any other kind of data, then we haven't the faintest clue
+        # what we should parse it as, and we have to just add it to our list
+        # of unparsed stuff.
+        if raw_name in _STRING_FIELDS and len(value) == 1:
+            raw[raw_name] = value[0]
+        # If this is one of our list of string fields, then we can just assign
+        # the value, since email *only* has strings, and our get_all() call
+        # above ensures that this is a list.
+        elif raw_name in _LIST_STRING_FIELDS:
+            raw[raw_name] = value
+        # Special Case: Keywords
+        # The keywords field is implemented in the metadata spec as a str,
+        # but it conceptually is a list of strings, and is serialized using
+        # ", ".join(keywords), so we'll do some light data massaging to turn
+        # this into what it logically is.
+        elif raw_name == "keywords" and len(value) == 1:
+            raw[raw_name] = _parse_keywords(value[0])
+        # Special Case: Project-URL
+        # The project urls is implemented in the metadata spec as a list of
+        # specially-formatted strings that represent a key and a value, which
+        # is fundamentally a mapping, however the email format doesn't support
+        # mappings in a sane way, so it was crammed into a list of strings
+        # instead.
+        #
+        # We will do a little light data massaging to turn this into a map as
+        # it logically should be.
+        elif raw_name == "project_urls":
+            try:
+                raw[raw_name] = _parse_project_urls(value)
+            except KeyError:
+                unparsed[name] = value
+        # Nothing that we've done has managed to parse this, so it'll just
+        # throw it in our unparseable data and move on.
+        else:
+            unparsed[name] = value
+
+    # We need to support getting the Description from the message payload in
+    # addition to getting it from the headers. This does mean, though, there
+    # is the possibility of it being set both ways, in which case we put both
+    # in 'unparsed' since we don't know which is right.
+    try:
+        payload = _get_payload(parsed, data)
+    except ValueError:
+        unparsed.setdefault("description", []).append(
+            parsed.get_payload(decode=isinstance(data, bytes))
+        )
+    else:
+        if payload:
+            # Check to see if we've already got a description, if so then both
+            # it, and this body move to unparseable.
+            if "description" in raw:
+                description_header = cast(str, raw.pop("description"))
+                unparsed.setdefault("description", []).extend(
+                    [description_header, payload]
+                )
+            elif "description" in unparsed:
+                unparsed["description"].append(payload)
+            else:
+                raw["description"] = payload
+
+    # We need to cast our `raw` to a metadata, because a TypedDict only supports
+    # literal key names, but we're computing our key names on purpose. The way
+    # this function is implemented, though, means our `TypedDict` can only have
+    # valid key names.
+    return cast(RawMetadata, raw), unparsed
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/requirements.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/requirements.py
new file mode 100644
index 0000000000000000000000000000000000000000..f34bfa85c8029f514f81f829fc24020e78b3786e
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/requirements.py
@@ -0,0 +1,95 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import urllib.parse
+from typing import Any, List, Optional, Set
+
+from ._parser import parse_requirement as _parse_requirement
+from ._tokenizer import ParserSyntaxError
+from .markers import Marker, _normalize_extra_values
+from .specifiers import SpecifierSet
+
+
+class InvalidRequirement(ValueError):
+    """
+    An invalid requirement was found, users should refer to PEP 508.
+    """
+
+
+class Requirement:
+    """Parse a requirement.
+
+    Parse a given requirement string into its parts, such as name, specifier,
+    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+    string.
+    """
+
+    # TODO: Can we test whether something is contained within a requirement?
+    #       If so how do we do that? Do we need to test against the _name_ of
+    #       the thing as well as the version? What about the markers?
+    # TODO: Can we normalize the name and extra name?
+
+    def __init__(self, requirement_string: str) -> None:
+        try:
+            parsed = _parse_requirement(requirement_string)
+        except ParserSyntaxError as e:
+            raise InvalidRequirement(str(e)) from e
+
+        self.name: str = parsed.name
+        if parsed.url:
+            parsed_url = urllib.parse.urlparse(parsed.url)
+            if parsed_url.scheme == "file":
+                if urllib.parse.urlunparse(parsed_url) != parsed.url:
+                    raise InvalidRequirement("Invalid URL given")
+            elif not (parsed_url.scheme and parsed_url.netloc) or (
+                not parsed_url.scheme and not parsed_url.netloc
+            ):
+                raise InvalidRequirement(f"Invalid URL: {parsed.url}")
+            self.url: Optional[str] = parsed.url
+        else:
+            self.url = None
+        self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
+        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
+        self.marker: Optional[Marker] = None
+        if parsed.marker is not None:
+            self.marker = Marker.__new__(Marker)
+            self.marker._markers = _normalize_extra_values(parsed.marker)
+
+    def __str__(self) -> str:
+        parts: List[str] = [self.name]
+
+        if self.extras:
+            formatted_extras = ",".join(sorted(self.extras))
+            parts.append(f"[{formatted_extras}]")
+
+        if self.specifier:
+            parts.append(str(self.specifier))
+
+        if self.url:
+            parts.append(f"@ {self.url}")
+            if self.marker:
+                parts.append(" ")
+
+        if self.marker:
+            parts.append(f"; {self.marker}")
+
+        return "".join(parts)
+
+    def __repr__(self) -> str:
+        return f"<Requirement('{str(self)}')>"
+
+    def __hash__(self) -> int:
+        return hash((self.__class__.__name__, str(self)))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Requirement):
+            return NotImplemented
+
+        return (
+            self.name == other.name
+            and self.extras == other.extras
+            and self.specifier == other.specifier
+            and self.url == other.url
+            and self.marker == other.marker
+        )
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba8fe37b7f7fd0f1e46666e3644b6394dcaff644
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py
@@ -0,0 +1,1008 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
+    from packaging.version import Version
+"""
+
+import abc
+import itertools
+import re
+from typing import (
+    Callable,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+)
+
+from .utils import canonicalize_version
+from .version import Version
+
+UnparsedVersion = Union[Version, str]
+UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
+CallableOperator = Callable[[Version, str], bool]
+
+
+def _coerce_version(version: UnparsedVersion) -> Version:
+    if not isinstance(version, Version):
+        version = Version(version)
+    return version
+
+
+class InvalidSpecifier(ValueError):
+    """
+    Raised when attempting to create a :class:`Specifier` with a specifier
+    string that is invalid.
+
+    >>> Specifier("lolwat")
+    Traceback (most recent call last):
+        ...
+    packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
+    """
+
+
+class BaseSpecifier(metaclass=abc.ABCMeta):
+    @abc.abstractmethod
+    def __str__(self) -> str:
+        """
+        Returns the str representation of this Specifier-like object. This
+        should be representative of the Specifier itself.
+        """
+
+    @abc.abstractmethod
+    def __hash__(self) -> int:
+        """
+        Returns a hash value for this Specifier-like object.
+        """
+
+    @abc.abstractmethod
+    def __eq__(self, other: object) -> bool:
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are equal.
+
+        :param other: The other object to check against.
+        """
+
+    @property
+    @abc.abstractmethod
+    def prereleases(self) -> Optional[bool]:
+        """Whether or not pre-releases as a whole are allowed.
+
+        This can be set to either ``True`` or ``False`` to explicitly enable or disable
+        prereleases or it can be set to ``None`` (the default) to use default semantics.
+        """
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        """Setter for :attr:`prereleases`.
+
+        :param value: The value to set.
+        """
+
+    @abc.abstractmethod
+    def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
+        """
+        Determines if the given item is contained within this specifier.
+        """
+
+    @abc.abstractmethod
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """
+        Takes an iterable of items and filters them so that only items which
+        are contained within this specifier are allowed in it.
+        """
+
+
+class Specifier(BaseSpecifier):
+    """This class abstracts handling of version specifiers.
+
+    .. tip::
+
+        It is generally not required to instantiate this manually. You should
+        instead prefer to work with :class:`SpecifierSet`, which can parse
+        comma-separated version specifiers (which is what package metadata contains).
+    """
+
+    _operator_regex_str = r"""
+        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+        """
+    _version_regex_str = r"""
+        (?P<version>
+            (?:
+                # The identity operators allow for an escape hatch that will
+                # do an exact string match of the version you wish to install.
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s;)]*  # The arbitrary version can be just about anything,
+                          # we match everything except for whitespace, a
+                          # semi-colon for marker support, and a closing paren
+                          # since versions can be enclosed in them.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)  # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?  # epoch
+                [0-9]+(?:\.[0-9]+)*  # release
+
+                # You cannot use a wild card and a pre-release, post-release, a dev or
+                # local version together so group them with a | and make them optional.
+                (?:
+                    \.\*  # Wild card syntax of .*
+                    |
+                    (?:  # pre release
+                        [-_\.]?
+                        (alpha|beta|preview|pre|a|b|c|rc)
+                        [-_\.]?
+                        [0-9]*
+                    )?
+                    (?:  # post release
+                        (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                    )?
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)?  # local
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)  # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?  # epoch
+                [0-9]+(?:\.[0-9]+)+  # release (We have a + instead of a *)
+                (?:  # pre release
+                    [-_\.]?
+                    (alpha|beta|preview|pre|a|b|c|rc)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:  # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a sub set of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+                (?<!==|!=|~=)  # We have special cases for these
+                               # operators so we want to make sure they
+                               # don't appear before
+
+                \s*
+                v?
+                (?:[0-9]+!)?  # epoch
+                [0-9]+(?:\.[0-9]+)*  # release
+                (?:  # pre release
+                    [-_\.]?
+                    (alpha|beta|preview|pre|a|b|c|rc)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:  # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
+            )
+        )
+        """
+
+    _regex = re.compile(
+        r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    _operators = {
+        "~=": "compatible",
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+        "===": "arbitrary",
+    }
+
+    def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+        """Initialize a Specifier instance.
+
+        :param spec:
+            The string representation of a specifier which will be parsed and
+            normalized before use.
+        :param prereleases:
+            This tells the specifier if it should accept prerelease versions if
+            applicable or not. The default of ``None`` will autodetect it from the
+            given specifiers.
+        :raises InvalidSpecifier:
+            If the given specifier is invalid (i.e. bad syntax).
+        """
+        match = self._regex.search(spec)
+        if not match:
+            raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
+
+        self._spec: Tuple[str, str] = (
+            match.group("operator").strip(),
+            match.group("version").strip(),
+        )
+
+        # Store whether or not this Specifier should accept prereleases
+        self._prereleases = prereleases
+
+    # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
+    @property  # type: ignore[override]
+    def prereleases(self) -> bool:
+        # If there is an explicit prereleases set for this, then we'll just
+        # blindly use that.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # Look at all of our specifiers and determine if they are inclusive
+        # operators, and if they are if they are including an explicit
+        # prerelease.
+        operator, version = self._spec
+        if operator in ["==", ">=", "<=", "~=", "==="]:
+            # The == specifier can include a trailing .*, if it does we
+            # want to remove before parsing.
+            if operator == "==" and version.endswith(".*"):
+                version = version[:-2]
+
+            # Parse the version, and if it is a pre-release then this
+            # specifier allows pre-releases.
+            if Version(version).is_prerelease:
+                return True
+
+        return False
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+    @property
+    def operator(self) -> str:
+        """The operator of this specifier.
+
+        >>> Specifier("==1.2.3").operator
+        '=='
+        """
+        return self._spec[0]
+
+    @property
+    def version(self) -> str:
+        """The version of this specifier.
+
+        >>> Specifier("==1.2.3").version
+        '1.2.3'
+        """
+        return self._spec[1]
+
+    def __repr__(self) -> str:
+        """A representation of the Specifier that shows all internal state.
+
+        >>> Specifier('>=1.0.0')
+        <Specifier('>=1.0.0')>
+        >>> Specifier('>=1.0.0', prereleases=False)
+        <Specifier('>=1.0.0', prereleases=False)>
+        >>> Specifier('>=1.0.0', prereleases=True)
+        <Specifier('>=1.0.0', prereleases=True)>
+        """
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the Specifier that can be round-tripped.
+
+        >>> str(Specifier('>=1.0.0'))
+        '>=1.0.0'
+        >>> str(Specifier('>=1.0.0', prereleases=False))
+        '>=1.0.0'
+        """
+        return "{}{}".format(*self._spec)
+
+    @property
+    def _canonical_spec(self) -> Tuple[str, str]:
+        canonical_version = canonicalize_version(
+            self._spec[1],
+            strip_trailing_zero=(self._spec[0] != "~="),
+        )
+        return self._spec[0], canonical_version
+
+    def __hash__(self) -> int:
+        return hash(self._canonical_spec)
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two Specifier-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
+        True
+        >>> (Specifier("==1.2.3", prereleases=False) ==
+        ...  Specifier("==1.2.3", prereleases=True))
+        True
+        >>> Specifier("==1.2.3") == "==1.2.3"
+        True
+        >>> Specifier("==1.2.3") == Specifier("==1.2.4")
+        False
+        >>> Specifier("==1.2.3") == Specifier("~=1.2.3")
+        False
+        """
+        if isinstance(other, str):
+            try:
+                other = self.__class__(str(other))
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._canonical_spec == other._canonical_spec
+
+    def _get_operator(self, op: str) -> CallableOperator:
+        operator_callable: CallableOperator = getattr(
+            self, f"_compare_{self._operators[op]}"
+        )
+        return operator_callable
+
+    def _compare_compatible(self, prospective: Version, spec: str) -> bool:
+
+        # Compatible releases have an equivalent combination of >= and ==. That
+        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+        # implement this in terms of the other specifiers instead of
+        # implementing it ourselves. The only thing we need to do is construct
+        # the other specifiers.
+
+        # We want everything but the last item in the version, but we want to
+        # ignore suffix segments.
+        prefix = ".".join(
+            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
+        )
+
+        # Add the prefix notation to the end of our string
+        prefix += ".*"
+
+        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+            prospective, prefix
+        )
+
+    def _compare_equal(self, prospective: Version, spec: str) -> bool:
+
+        # We need special logic to handle prefix matching
+        if spec.endswith(".*"):
+            # In the case of prefix matching we want to ignore the local segment.
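+            # (For example -- an illustrative note, not upstream text --
+            # Specifier("==1.2.*").contains("1.2.3+local1") is True, because
+            # the "+local1" part is dropped before the prefix comparison below.)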
+            normalized_prospective = canonicalize_version(
+                prospective.public, strip_trailing_zero=False
+            )
+            # Get the normalized version string ignoring the trailing .*
+            normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
+            # Split the spec out by dots, and pretend that there is an implicit
+            # dot in between a release segment and a pre-release segment.
+            split_spec = _version_split(normalized_spec)
+
+            # Split the prospective version out by dots, and pretend that there
+            # is an implicit dot in between a release segment and a pre-release
+            # segment.
+            split_prospective = _version_split(normalized_prospective)
+
+            # 0-pad the prospective version before shortening it to get the correct
+            # shortened version.
+            padded_prospective, _ = _pad_version(split_prospective, split_spec)
+
+            # Shorten the prospective version to be the same length as the spec
+            # so that we can determine if the specifier is a prefix of the
+            # prospective version or not.
+            shortened_prospective = padded_prospective[: len(split_spec)]
+
+            return shortened_prospective == split_spec
+        else:
+            # Convert our spec string into a Version
+            spec_version = Version(spec)
+
+            # If the specifier does not have a local segment, then we want to
+            # act as if the prospective version also does not have a local
+            # segment.
+            if not spec_version.local:
+                prospective = Version(prospective.public)
+
+            return prospective == spec_version
+
+    def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
+        return not self._compare_equal(prospective, spec)
+
+    def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
+
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) <= Version(spec)
+
+    def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
+
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) >= Version(spec)
+
+    def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
+
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is less than the spec
+        # version. If it's not, we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective < spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a pre-release version, we do not accept pre-release
+        # versions for the version mentioned in the specifier (e.g. <3.1 should
+        # not match 3.1.dev0, but should match 3.0.dev0).
+        if not spec.is_prerelease and prospective.is_prerelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that the prospective version is both
+        # less than the spec version *and* it's not a pre-release of the same
+        # version in the spec.
+        return True
+
+    def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
+
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is greater than the spec
+        # version.
+        # If it's not, we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective > spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+        if not spec.is_postrelease and prospective.is_postrelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # Ensure that we do not allow a local version of the version mentioned
+        # in the specifier, which is technically greater than, to match.
+        if prospective.local is not None:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that the prospective version is both
+        # greater than the spec version *and* it's not a post-release or local
+        # version of the same version in the spec.
+        return True
+
+    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
+        return str(prospective).lower() == str(spec).lower()
+
+    def __contains__(self, item: Union[str, Version]) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in Specifier(">=1.2.3")
+        True
+        >>> Version("1.2.3") in Specifier(">=1.2.3")
+        True
+        >>> "1.0.0" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(
+        self, item: UnparsedVersion, prereleases: Optional[bool] = None
+    ) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this Specifier. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> Specifier(">=1.2.3").contains("1.2.3")
+        True
+        >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
+        True
+        >>> Specifier(">=1.2.3").contains("1.0.0")
+        False
+        >>> Specifier(">=1.2.3").contains("1.3.0a1")
+        False
+        >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
+        True
+        >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
+        True
+        """
+
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version, this allows us to have a shortcut for
+        # "2.0" in Specifier(">=2")
+        normalized_item = _coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not, if we do not support prereleases then we can short circuit
+        # logic if this version is a prerelease.
+        if normalized_item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        operator_callable: CallableOperator = self._get_operator(self.operator)
+        return operator_callable(normalized_item, self.version)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable that match the specifier.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(Specifier().contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
+        ['1.2.3', '1.3', <Version('1.4')>]
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
+        ['1.5a1']
+        >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        """
+
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = _coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if parsed_version.is_prerelease and not (
+                    prereleases or self.prereleases
+                ):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version: str) -> List[str]:
+    result: List[str] = []
+    for item in version.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _is_not_suffix(segment: str) -> bool:
+    return not any(
+        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+    )
+
+
+def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]) :])
+    right_split.append(right[len(right_split[0]) :])
+
+    # Insert our padding
+    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+    return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
+
+
+class SpecifierSet(BaseSpecifier):
+    """This class abstracts handling of a set of version specifiers.
+
+    It can be passed a single specifier (``>=3.0``), a comma-separated list of
+    specifiers (``>=3.0,!=3.1``), or no specifier at all.
+    """
+
+    def __init__(
+        self, specifiers: str = "", prereleases: Optional[bool] = None
+    ) -> None:
+        """Initialize a SpecifierSet instance.
+
+        :param specifiers:
+            The string representation of a specifier or a comma-separated list of
+            specifiers which will be parsed and normalized before use.
+        :param prereleases:
+            This tells the SpecifierSet if it should accept prerelease versions if
+            applicable or not. The default of ``None`` will autodetect it from the
+            given specifiers.
+
+        :raises InvalidSpecifier:
+            If the given ``specifiers`` are not parseable, then this exception will
+            be raised.
+        """
+
+        # Split on `,` to break each individual specifier into its own item, and
+        # strip each item to remove leading/trailing whitespace.
+        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Parse each individual specifier, attempting first to make it a
+        # Specifier.
+        parsed: Set[Specifier] = set()
+        for specifier in split_specifiers:
+            parsed.add(Specifier(specifier))
+
+        # Turn our parsed specifiers into a frozen set and save them for later.
+        self._specs = frozenset(parsed)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    @property
+    def prereleases(self) -> Optional[bool]:
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+    def __repr__(self) -> str:
+        """A representation of the specifier set that shows all internal state.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> SpecifierSet('>=1.0.0,!=2.0.0')
+        <SpecifierSet('!=2.0.0,>=1.0.0')>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
+        """
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<SpecifierSet({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the specifier set that can be round-tripped.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
+        '!=1.0.1,>=1.0.0'
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
+        '!=1.0.1,>=1.0.0'
+        """
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self) -> int:
+        return hash(self._specs)
+
+    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+        """Return a SpecifierSet which is a combination of the two sets.
+
+        :param other: The other object to combine with.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        """
+        if isinstance(other, str):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two SpecifierSet-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
+        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
+        False
+        """
+        if isinstance(other, (str, Specifier)):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __len__(self) -> int:
+        """Returns the number of specifiers in this specifier set."""
+        return len(self._specs)
+
+    def __iter__(self) -> Iterator[Specifier]:
+        """
+        Returns an iterator over all the underlying :class:`Specifier` instances
+        in this specifier set.
+
+        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
+        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
+        """
+        return iter(self._specs)
+
+    def __contains__(self, item: UnparsedVersion) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(
+        self,
+        item: UnparsedVersion,
+        prereleases: Optional[bool] = None,
+        installed: Optional[bool] = None,
+    ) -> bool:
+        """Return whether or not the item is contained in this SpecifierSet.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this SpecifierSet. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
+        True
+        """
+        # Ensure that our item is a Version instance.
+        if not isinstance(item, Version):
+            item = Version(item)
+
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        #       like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
+        if not prereleases and item.is_prerelease:
+            return False
+
+        if installed and item.is_prerelease:
+            item = Version(item.base_version)
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        #       will always return True, this is an explicit design decision.
+        return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifiers in this set.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
+        ['1.3', <Version('1.4')>]
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
+        []
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+
+        An "empty" SpecifierSet will filter items based on the presence of prerelease
+        versions in the set.
+ + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"])) + ['1.3'] + >>> list(SpecifierSet("").filter(["1.5a1"])) + ['1.5a1'] + >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"])) + ['1.3', '1.5a1'] + >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True)) + ['1.3', '1.5a1'] + """ + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iter(iterable) + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases. + else: + filtered: List[UnparsedVersionVar] = [] + found_prereleases: List[UnparsedVersionVar] = [] + + for item in iterable: + parsed_version = _coerce_version(item) + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return iter(found_prereleases) + + return iter(filtered) diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/tags.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/tags.py new file mode 100644 index 0000000000000000000000000000000000000000..76d243414d00f54a8973359cf553123e9bd1760e --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/tags.py @@ -0,0 +1,546 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import logging +import platform +import subprocess +import sys +import sysconfig +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Dict, + FrozenSet, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +from . import _manylinux, _musllinux + +logger = logging.getLogger(__name__) + +PythonVersion = Sequence[int] +MacVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: Dict[str, str] = { + "python": "py", # Generic. + "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = sys.maxsize <= 2**32 + + +class Tag: + """ + A representation of the tag triple for a wheel. + + Instances are considered immutable and thus are hashable. Equality checking + is also supported. + """ + + __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] + + def __init__(self, interpreter: str, abi: str, platform: str) -> None: + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + # The __hash__ of every single element in a Set[Tag] will be evaluated each time + # that a set calls its `.disjoint()` method, which may be called hundreds of + # times when scanning a page of links for packages with tags matching that + # Set[Tag]. 
+        # Pre-computing the value here produces significant speedups for
+        # downstream consumers.
+        self._hash = hash((self._interpreter, self._abi, self._platform))
+
+    @property
+    def interpreter(self) -> str:
+        return self._interpreter
+
+    @property
+    def abi(self) -> str:
+        return self._abi
+
+    @property
+    def platform(self) -> str:
+        return self._platform
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, Tag):
+            return NotImplemented
+
+        return (
+            (self._hash == other._hash)  # Short-circuit ASAP for perf reasons.
+            and (self._platform == other._platform)
+            and (self._abi == other._abi)
+            and (self._interpreter == other._interpreter)
+        )
+
+    def __hash__(self) -> int:
+        return self._hash
+
+    def __str__(self) -> str:
+        return f"{self._interpreter}-{self._abi}-{self._platform}"
+
+    def __repr__(self) -> str:
+        return f"<{self} @ {id(self)}>"
+
+
+def parse_tag(tag: str) -> FrozenSet[Tag]:
+    """
+    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+    Returning a set is required due to the possibility that the tag is a
+    compressed tag set.
+    """
+    tags = set()
+    interpreters, abis, platforms = tag.split("-")
+    for interpreter in interpreters.split("."):
+        for abi in abis.split("."):
+            for platform_ in platforms.split("."):
+                tags.add(Tag(interpreter, abi, platform_))
+    return frozenset(tags)
+
+
+def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
+    value: Union[int, str, None] = sysconfig.get_config_var(name)
+    if value is None and warn:
+        logger.debug(
+            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+        )
+    return value
+
+
+def _normalize_string(string: str) -> str:
+    return string.replace(".", "_").replace("-", "_").replace(" ", "_")
+
+
+def _abi3_applies(python_version: PythonVersion) -> bool:
+    """
+    Determine if the Python version supports abi3.
+
+    PEP 384 was first implemented in Python 3.2.
+    """
+    return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
+    py_version = tuple(py_version)  # To allow for version comparison.
+    abis = []
+    version = _version_nodot(py_version[:2])
+    debug = pymalloc = ucs4 = ""
+    with_debug = _get_config_var("Py_DEBUG", warn)
+    has_refcount = hasattr(sys, "gettotalrefcount")
+    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+    # extension modules is the best option.
+    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+    if with_debug or (with_debug is None and (has_refcount or has_ext)):
+        debug = "d"
+    if py_version < (3, 8):
+        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+        if with_pymalloc or with_pymalloc is None:
+            pymalloc = "m"
+        if py_version < (3, 3):
+            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+            if unicode_size == 4 or (
+                unicode_size is None and sys.maxunicode == 0x10FFFF
+            ):
+                ucs4 = "u"
+    elif debug:
+        # Debug builds can also load "normal" extension modules.
+        # We can also assume no UCS-4 or pymalloc requirement.
+        abis.append(f"cp{version}")
+    abis.insert(
+        0,
+        "cp{version}{debug}{pymalloc}{ucs4}".format(
+            version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+        ),
+    )
+    return abis
+
+
+def cpython_tags(
+    python_version: Optional[PythonVersion] = None,
+    abis: Optional[Iterable[str]] = None,
+    platforms: Optional[Iterable[str]] = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a CPython interpreter.
+
+    The tags consist of:
+    - cp<python_version>-<abi>-<platform>
+    - cp<python_version>-abi3-<platform>
+    - cp<python_version>-none-<platform>
+    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
+
+    If python_version only specifies a major version then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+    their normal position and not at the beginning.
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+
+    interpreter = f"cp{_version_nodot(python_version[:2])}"
+
+    if abis is None:
+        if len(python_version) > 1:
+            abis = _cpython_abis(python_version, warn)
+        else:
+            abis = []
+    abis = list(abis)
+    # 'abi3' and 'none' are explicitly handled later.
+    for explicit_abi in ("abi3", "none"):
+        try:
+            abis.remove(explicit_abi)
+        except ValueError:
+            pass
+
+    platforms = list(platforms or platform_tags())
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+    if _abi3_applies(python_version):
+        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
+    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
+
+    if _abi3_applies(python_version):
+        for minor_version in range(python_version[1] - 1, 1, -1):
+            for platform_ in platforms:
+                interpreter = "cp{version}".format(
+                    version=_version_nodot((python_version[0], minor_version))
+                )
+                yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi() -> List[str]:
+    """
+    Return the ABI tag based on EXT_SUFFIX.
+    """
+    # The following are examples of `EXT_SUFFIX`.
+    # We want to keep the parts which are related to the ABI and remove the
+    # parts which are related to the platform:
+    # - linux:   '.cpython-310-x86_64-linux-gnu.so' => cp310
+    # - mac:     '.cpython-310-darwin.so'           => cp310
+    # - win:     '.cp310-win_amd64.pyd'             => cp310
+    # - win:     '.pyd'                             => cp37 (uses _cpython_abis())
+    # - pypy:    '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
+    # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
+    #            => graalpy_38_native
+
+    ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
+    if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
+        raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
+    parts = ext_suffix.split(".")
+    if len(parts) < 3:
+        # CPython3.7 and earlier uses ".pyd" on Windows.
+        return _cpython_abis(sys.version_info[:2])
+    soabi = parts[1]
+    if soabi.startswith("cpython"):
+        # non-windows
+        abi = "cp" + soabi.split("-")[1]
+    elif soabi.startswith("cp"):
+        # windows
+        abi = soabi.split("-")[0]
+    elif soabi.startswith("pypy"):
+        abi = "-".join(soabi.split("-")[:2])
+    elif soabi.startswith("graalpy"):
+        abi = "-".join(soabi.split("-")[:3])
+    elif soabi:
+        # pyston, ironpython, others?
+        abi = soabi
+    else:
+        return []
+    return [_normalize_string(abi)]
+
+
+def generic_tags(
+    interpreter: Optional[str] = None,
+    abis: Optional[Iterable[str]] = None,
+    platforms: Optional[Iterable[str]] = None,
+    *,
+    warn: bool = False,
+) -> Iterator[Tag]:
+    """
+    Yields the tags for a generic interpreter.
+
+    The tags consist of:
+    - <interpreter>-<abi>-<platform>
+
+    The "none" ABI will be added if it was not explicitly provided.
+    """
+    if not interpreter:
+        interp_name = interpreter_name()
+        interp_version = interpreter_version(warn=warn)
+        interpreter = "".join([interp_name, interp_version])
+    if abis is None:
+        abis = _generic_abi()
+    else:
+        abis = list(abis)
+    platforms = list(platforms or platform_tags())
+    if "none" not in abis:
+        abis.append("none")
+    for abi in abis:
+        for platform_ in platforms:
+            yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+    """
+    Yields Python versions in descending order.
+
+    After the latest version, the major-only version will be yielded, and then
+    all previous versions of that major version.
+    """
+    if len(py_version) > 1:
+        yield f"py{_version_nodot(py_version[:2])}"
+    yield f"py{py_version[0]}"
+    if len(py_version) > 1:
+        for minor in range(py_version[1] - 1, -1, -1):
+            yield f"py{_version_nodot((py_version[0], minor))}"
+
+
+def compatible_tags(
+    python_version: Optional[PythonVersion] = None,
+    interpreter: Optional[str] = None,
+    platforms: Optional[Iterable[str]] = None,
+) -> Iterator[Tag]:
+    """
+    Yields the sequence of tags that are compatible with a specific version of Python.
+
+    The tags consist of:
+    - py*-none-<platform>
+    - <interpreter>-none-any  # ... if `interpreter` is provided.
+    - py*-none-any
+    """
+    if not python_version:
+        python_version = sys.version_info[:2]
+    platforms = list(platforms or platform_tags())
+    for version in _py_interpreter_range(python_version):
+        for platform_ in platforms:
+            yield Tag(version, "none", platform_)
+    if interpreter:
+        yield Tag(interpreter, "none", "any")
+    for version in _py_interpreter_range(python_version):
+        yield Tag(version, "none", "any")
+
+
+def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
+    if not is_32bit:
+        return arch
+
+    if arch.startswith("ppc"):
+        return "ppc"
+
+    return "i386"
+
+
+def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
+    formats = [cpu_arch]
+    if cpu_arch == "x86_64":
+        if version < (10, 4):
+            return []
+        formats.extend(["intel", "fat64", "fat32"])
+
+    elif cpu_arch == "i386":
+        if version < (10, 4):
+            return []
+        formats.extend(["intel", "fat32", "fat"])
+
+    elif cpu_arch == "ppc64":
+        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+        if version > (10, 5) or version < (10, 4):
+            return []
+        formats.append("fat64")
+
+    elif cpu_arch == "ppc":
+        if version > (10, 6):
+            return []
+        formats.extend(["fat32", "fat"])
+
+    if cpu_arch in {"arm64", "x86_64"}:
+        formats.append("universal2")
+
+    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
+        formats.append("universal")
+
+    return formats
+
+
+def mac_platforms(
+    version: Optional[MacVersion] = None, arch: Optional[str] = None
+) -> Iterator[str]:
+    """
+    Yields the platform tags for a macOS system.
+
+    The `version` parameter is a two-item tuple specifying the macOS version to
+    generate platform tags for. The `arch` parameter is the CPU architecture to
+    generate platform tags for. Both parameters default to the appropriate value
+    for the current system.
+    """
+    version_str, _, cpu_arch = platform.mac_ver()
+    if version is None:
+        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+        if version == (10, 16):
+            # When built against an older macOS SDK, Python will report macOS 10.16
+            # instead of the real version.
+ version_str = subprocess.run( + [ + sys.executable, + "-sS", + "-c", + "import platform; print(platform.mac_ver()[0])", + ], + check=True, + env={"SYSTEM_VERSION_COMPAT": "0"}, + stdout=subprocess.PIPE, + universal_newlines=True, + ).stdout + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + else: + version = version + if arch is None: + arch = _mac_arch(cpu_arch) + else: + arch = arch + + if (10, 0) <= version and version < (11, 0): + # Prior to Mac OS 11, each yearly release of Mac OS bumped the + # "minor" version number. The major version was always 10. + for minor_version in range(version[1], -1, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=10, minor=minor_version, binary_format=binary_format + ) + + if version >= (11, 0): + # Starting with Mac OS 11, each yearly release bumps the major version + # number. The minor versions are now the midyear updates. + for major_version in range(version[0], 10, -1): + compat_version = major_version, 0 + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=major_version, minor=0, binary_format=binary_format + ) + + if version >= (11, 0): + # Mac OS 11 on x86_64 is compatible with binaries from previous releases. + # Arm64 support was introduced in 11.0, so no Arm binaries from previous + # releases exist. + # + # However, the "universal2" binary format can have a + # macOS version earlier than 11.0 when the x86_64 part of the binary supports + # that version of macOS. + if arch == "x86_64": + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + else: + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_format = "universal2" + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + + +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) + if is_32bit: + if linux == "linux_x86_64": + linux = "linux_i686" + elif linux == "linux_aarch64": + linux = "linux_armv7l" + _, arch = linux.split("_", 1) + yield from _manylinux.platform_tags(linux, arch) + yield from _musllinux.platform_tags(arch) + yield linux + + +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) + + +def platform_tags() -> Iterator[str]: + """ + Provides the platform tags for this installation. + """ + if platform.system() == "Darwin": + return mac_platforms() + elif platform.system() == "Linux": + return _linux_platforms() + else: + return _generic_platforms() + + +def interpreter_name() -> str: + """ + Returns the name of the running interpreter. + + Some implementations have a reserved, two-letter abbreviation which will + be returned when appropriate. + """ + name = sys.implementation.name + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def interpreter_version(*, warn: bool = False) -> str: + """ + Returns the version of the running interpreter. 
+ """ + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version: PythonVersion) -> str: + return "".join(map(str, version)) + + +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: + """ + Returns the sequence of tag triples for the running interpreter. + + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + + interp_name = interpreter_name() + if interp_name == "cp": + yield from cpython_tags(warn=warn) + else: + yield from generic_tags() + + if interp_name == "pp": + interp = "pp3" + elif interp_name == "cp": + interp = "cp" + interpreter_version(warn=warn) + else: + interp = None + yield from compatible_tags(interpreter=interp) diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/utils.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..33c613b749a49d6035c0e549389e92c3d68a83ad --- /dev/null +++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/utils.py @@ -0,0 +1,141 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +from typing import FrozenSet, NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +_canonicalize_regex = re.compile(r"[-_.]+") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str) -> NormalizedName: + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def canonicalize_version( + version: Union[Version, str], *, strip_trailing_zero: bool = True +) -> str: + """ + This is very similar to Version.__str__, but has one subtle difference + with the way it handles the release segment. 
+    """
+    if isinstance(version, str):
+        try:
+            parsed = Version(version)
+        except InvalidVersion:
+            # Legacy versions cannot be normalized
+            return version
+    else:
+        parsed = version
+
+    parts = []
+
+    # Epoch
+    if parsed.epoch != 0:
+        parts.append(f"{parsed.epoch}!")
+
+    # Release segment
+    release_segment = ".".join(str(x) for x in parsed.release)
+    if strip_trailing_zero:
+        # NB: This strips trailing '.0's to normalize
+        release_segment = re.sub(r"(\.0)+$", "", release_segment)
+    parts.append(release_segment)
+
+    # Pre-release
+    if parsed.pre is not None:
+        parts.append("".join(str(x) for x in parsed.pre))
+
+    # Post-release
+    if parsed.post is not None:
+        parts.append(f".post{parsed.post}")
+
+    # Development release
+    if parsed.dev is not None:
+        parts.append(f".dev{parsed.dev}")
+
+    # Local version segment
+    if parsed.local is not None:
+        parts.append(f"+{parsed.local}")
+
+    return "".join(parts)
+
+
+def parse_wheel_filename(
+    filename: str,
+) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
+    if not filename.endswith(".whl"):
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (extension must be '.whl'): {filename}"
+        )
+
+    filename = filename[:-4]
+    dashes = filename.count("-")
+    if dashes not in (4, 5):
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (wrong number of parts): {filename}"
+        )
+
+    parts = filename.split("-", dashes - 2)
+    name_part = parts[0]
+    # See PEP 427 for the rules on escaping the project name
+    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
+        raise InvalidWheelFilename(f"Invalid project name: {filename}")
+    name = canonicalize_name(name_part)
+    version = Version(parts[1])
+    if dashes == 5:
+        build_part = parts[2]
+        build_match = _build_tag_regex.match(build_part)
+        if build_match is None:
+            raise InvalidWheelFilename(
+                f"Invalid build number: {build_part} in '{filename}'"
+            )
+        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
+    else:
+        build = ()
+    tags = parse_tag(parts[-1])
+    return (name, version, build, tags)
+
+
+def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
+    if filename.endswith(".tar.gz"):
+        file_stem = filename[: -len(".tar.gz")]
+    elif filename.endswith(".zip"):
+        file_stem = filename[: -len(".zip")]
+    else:
+        raise InvalidSdistFilename(
+            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
+            f" {filename}"
+        )
+
+    # We are requiring a PEP 440 version, which cannot contain dashes,
+    # so we split on the last dash.
+    name_part, sep, version_part = file_stem.rpartition("-")
+    if not sep:
+        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
+
+    name = canonicalize_name(name_part)
+    version = Version(version_part)
+    return (name, version)
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/version.py b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..b30e8cbf84f2a441ca87aef2ab1a0fed18caeddc
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/packaging/version.py
@@ -0,0 +1,564 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from packaging.version import parse, Version
+"""
+
+import collections
+import itertools
+import re
+from typing import Any, Callable, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
+
+InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+SubLocalType = Union[InfiniteTypes, int, str]
+LocalType = Union[
+    NegativeInfinityType,
+    Tuple[
+        Union[
+            SubLocalType,
+            Tuple[SubLocalType, str],
+            Tuple[NegativeInfinityType, SubLocalType],
+        ],
+        ...,
+    ],
+]
+CmpKey = Tuple[
+    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+]
+VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
+
+_Version = collections.namedtuple(
+    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version: str) -> "Version":
+    """Parse the given version string.
+
+    >>> parse('1.0.dev1')
+    <Version('1.0.dev1')>
+
+    :param version: The version string to parse.
+    :raises InvalidVersion: When the version string is not a valid version.
+    """
+    return Version(version)
+
+
+class InvalidVersion(ValueError):
+    """Raised when a version string is not a valid version.
+
+    >>> Version("invalid")
+    Traceback (most recent call last):
+        ...
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
+    """
+
+
+class _BaseVersion:
+    _key: Tuple[Any, ...]
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+_VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f""
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
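+    # For instance (illustration only): parsing normalizes alternate spellings,
+    # so str(Version("1.0.0-1")) returns the canonical "1.0.0.post1" rather
+    # than echoing the raw input.
+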
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
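+
+
+# For instance (illustration only), the normalization above maps:
+#   _parse_letter_version("alpha", None) -> ("a", 0)
+#   _parse_letter_version("rev", "4")    -> ("post", 4)
+#   _parse_letter_version(None, "1")     -> ("post", 1)   # implicit post, e.g. "1.0-1"
+#   _parse_letter_version(None, None)    -> None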
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then take the rest, re-reverse it
+    # back into the correct order, and use that tuple as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
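+
+
+# Net effect of the key construction above (illustration only):
+#   Version("1.0.dev0") < Version("1.0a0") < Version("1.0") < Version("1.0.post0")
+# and, because trailing zeros are stripped, Version("1.0") == Version("1.0.0").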
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/platformdirs/android.py b/.venv/Lib/site-packages/pkg_resources/_vendor/platformdirs/android.py
new file mode 100644
index 0000000000000000000000000000000000000000..eda80935123cb5db7e18d7fb82fe5f71991d7af8
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/platformdirs/android.py
@@ -0,0 +1,120 @@
+from __future__ import annotations
+
+import os
+import re
+import sys
+from functools import lru_cache
+from typing import cast
+
+from .api import PlatformDirsABC
+
+
+class Android(PlatformDirsABC):
+    """
+    Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
+    `appname <platformdirs.api.PlatformDirsABC.appname>` and
+    `version <platformdirs.api.PlatformDirsABC.version>`.
+    """
+
+    @property
+    def user_data_dir(self) -> str:
+        """:return: data directory tied to the user, e.g. ``/data/user///files/``"""
+        return self._append_app_name_and_version(cast(str, _android_folder()), "files")
+
+    @property
+    def site_data_dir(self) -> str:
+        """:return: data directory shared by users, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def user_config_dir(self) -> str:
+        """
+        :return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
+        """
+        return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
+
+    @property
+    def site_config_dir(self) -> str:
+        """:return: config directory shared by the users, same as `user_config_dir`"""
+        return self.user_config_dir
+
+    @property
+    def user_cache_dir(self) -> str:
+        """:return: cache directory tied to the user, e.g. e.g. ``/data/user///cache/``"""
+        return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
+
+    @property
+    def user_state_dir(self) -> str:
+        """:return: state directory tied to the user, same as `user_data_dir`"""
+        return self.user_data_dir
+
+    @property
+    def user_log_dir(self) -> str:
+        """
+        :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
+          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
+        """
+        path = self.user_cache_dir
+        if self.opinion:
+            path = os.path.join(path, "log")
+        return path
+
+    @property
+    def user_documents_dir(self) -> str:
+        """
+        :return: documents directory tied to the user, e.g. ``/storage/emulated/0/Documents``
+        """
+        return _android_documents_folder()
+
+    @property
+    def user_runtime_dir(self) -> str:
+        """
+        :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
+          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
+        """
+        path = self.user_cache_dir
+        if self.opinion:
+            path = os.path.join(path, "tmp")
+        return path
+
+
+@lru_cache(maxsize=1)
+def _android_folder() -> str | None:
+    """:return: base folder for the Android OS or None if cannot be found"""
+    try:
+        # First try to get path to android app via pyjnius
+        from jnius import autoclass
+
+        Context = autoclass("android.content.Context")  # noqa: N806
+        result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
+    except Exception:
+        # If that fails, look for an Android-app-looking path on sys.path
+        pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
+        for path in sys.path:
+            if pattern.match(path):
+                result = path.split("/files")[0]
+                break
+        else:
+            result = None
+    return result
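+
+
+# For instance (illustration only), the sys.path fallback above would accept an
+# entry such as "/data/user/0/com.example.app/files" and return the base folder
+# "/data/user/0/com.example.app" (the package name here is hypothetical).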
+
+
+@lru_cache(maxsize=1)
+def _android_documents_folder() -> str:
+    """:return: documents folder for the Android OS"""
+    # Get directories with pyjnius
+    try:
+        from jnius import autoclass
+
+        Context = autoclass("android.content.Context")  # noqa: N806
+        Environment = autoclass("android.os.Environment")  # noqa: N806
+        documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
+    except Exception:
+        documents_dir = "/storage/emulated/0/Documents"
+
+    return documents_dir
+
+
+__all__ = [
+    "Android",
+]
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/typing_extensions.py b/.venv/Lib/site-packages/pkg_resources/_vendor/typing_extensions.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef42417c208e93c55d704728d3e88dfe46250d92
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/typing_extensions.py
@@ -0,0 +1,2209 @@
+import abc
+import collections
+import collections.abc
+import functools
+import operator
+import sys
+import types as _types
+import typing
+
+
+__all__ = [
+    # Super-special typing primitives.
+    'Any',
+    'ClassVar',
+    'Concatenate',
+    'Final',
+    'LiteralString',
+    'ParamSpec',
+    'ParamSpecArgs',
+    'ParamSpecKwargs',
+    'Self',
+    'Type',
+    'TypeVar',
+    'TypeVarTuple',
+    'Unpack',
+
+    # ABCs (from collections.abc).
+    'Awaitable',
+    'AsyncIterator',
+    'AsyncIterable',
+    'Coroutine',
+    'AsyncGenerator',
+    'AsyncContextManager',
+    'ChainMap',
+
+    # Concrete collection types.
+    'ContextManager',
+    'Counter',
+    'Deque',
+    'DefaultDict',
+    'NamedTuple',
+    'OrderedDict',
+    'TypedDict',
+
+    # Structural checks, a.k.a. protocols.
+    'SupportsIndex',
+
+    # One-off things.
+    'Annotated',
+    'assert_never',
+    'assert_type',
+    'clear_overloads',
+    'dataclass_transform',
+    'get_overloads',
+    'final',
+    'get_args',
+    'get_origin',
+    'get_type_hints',
+    'IntVar',
+    'is_typeddict',
+    'Literal',
+    'NewType',
+    'overload',
+    'override',
+    'Protocol',
+    'reveal_type',
+    'runtime',
+    'runtime_checkable',
+    'Text',
+    'TypeAlias',
+    'TypeGuard',
+    'TYPE_CHECKING',
+    'Never',
+    'NoReturn',
+    'Required',
+    'NotRequired',
+]
+
+# for backward compatibility
+PEP_560 = True
+GenericMeta = type
+
+# The functions below are modified copies of typing internal helpers.
+# They are needed by _ProtocolMeta and they provide support for PEP 646.
+
+_marker = object()
+
+
+def _check_generic(cls, parameters, elen=_marker):
+    """Check correct count for parameters of a generic cls (internal helper).
+    This gives a nice error message in case of count mismatch.
+    """
+    if not elen:
+        raise TypeError(f"{cls} is not a generic class")
+    if elen is _marker:
+        if not hasattr(cls, "__parameters__") or not cls.__parameters__:
+            raise TypeError(f"{cls} is not a generic class")
+        elen = len(cls.__parameters__)
+    alen = len(parameters)
+    if alen != elen:
+        if hasattr(cls, "__parameters__"):
+            parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
+            num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
+            if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
+                return
+        raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
+                        f" actual {alen}, expected {elen}")
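+
+
+# For instance (illustration only): for a generic class C with two parameters,
+# subscripting it with a single argument ends up here with alen=1, elen=2 and
+# raises roughly "TypeError: Too few parameters for C; actual 1, expected 2".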
+
+
+if sys.version_info >= (3, 10):
+    def _should_collect_from_parameters(t):
+        return isinstance(
+            t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
+        )
+elif sys.version_info >= (3, 9):
+    def _should_collect_from_parameters(t):
+        return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
+else:
+    def _should_collect_from_parameters(t):
+        return isinstance(t, typing._GenericAlias) and not t._special
+
+
+def _collect_type_vars(types, typevar_types=None):
+    """Collect all type variable contained in types in order of
+    first appearance (lexicographic order). For example::
+
+        _collect_type_vars((T, List[S, T])) == (T, S)
+    """
+    if typevar_types is None:
+        typevar_types = typing.TypeVar
+    tvars = []
+    for t in types:
+        if (
+            isinstance(t, typevar_types) and
+            t not in tvars and
+            not _is_unpack(t)
+        ):
+            tvars.append(t)
+        if _should_collect_from_parameters(t):
+            tvars.extend([t for t in t.__parameters__ if t not in tvars])
+    return tuple(tvars)
+
+
+NoReturn = typing.NoReturn
+
+# Some unconstrained type variables.  These are used by the container types.
+# (These are not for export.)
+T = typing.TypeVar('T')  # Any type.
+KT = typing.TypeVar('KT')  # Key type.
+VT = typing.TypeVar('VT')  # Value type.
+T_co = typing.TypeVar('T_co', covariant=True)  # Any type covariant containers.
+T_contra = typing.TypeVar('T_contra', contravariant=True)  # Ditto contravariant.
+
+
+if sys.version_info >= (3, 11):
+    from typing import Any
+else:
+
+    class _AnyMeta(type):
+        def __instancecheck__(self, obj):
+            if self is Any:
+                raise TypeError("typing_extensions.Any cannot be used with isinstance()")
+            return super().__instancecheck__(obj)
+
+        def __repr__(self):
+            if self is Any:
+                return "typing_extensions.Any"
+            return super().__repr__()
+
+    class Any(metaclass=_AnyMeta):
+        """Special type indicating an unconstrained type.
+        - Any is compatible with every type.
+        - Any is assumed to have all methods.
+        - All values are assumed to be instances of Any.
+        Note that all the above statements are true from the point of view of
+        static type checkers. At runtime, Any should not be used with instance
+        checks.
+        """
+        def __new__(cls, *args, **kwargs):
+            if cls is Any:
+                raise TypeError("Any cannot be instantiated")
+            return super().__new__(cls, *args, **kwargs)
+
+
+ClassVar = typing.ClassVar
+
+# On older versions of typing there is an internal class named "Final".
+# 3.8+
+if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
+    Final = typing.Final
+# 3.7
+else:
+    class _FinalForm(typing._SpecialForm, _root=True):
+
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+        def __getitem__(self, parameters):
+            item = typing._type_check(parameters,
+                                      f'{self._name} accepts only a single type.')
+            return typing._GenericAlias(self, (item,))
+
+    Final = _FinalForm('Final',
+                       doc="""A special typing construct to indicate that a name
+                       cannot be re-assigned or overridden in a subclass.
+                       For example:
+
+                           MAX_SIZE: Final = 9000
+                           MAX_SIZE += 1  # Error reported by type checker
+
+                           class Connection:
+                               TIMEOUT: Final[int] = 10
+                           class FastConnector(Connection):
+                               TIMEOUT = 1  # Error reported by type checker
+
+                       There is no runtime checking of these properties.""")
+
+if sys.version_info >= (3, 11):
+    final = typing.final
+else:
+    # @final exists in 3.8+, but we backport it for all versions
+    # before 3.11 to keep support for the __final__ attribute.
+    # See https://bugs.python.org/issue46342
+    def final(f):
+        """This decorator can be used to indicate to type checkers that
+        the decorated method cannot be overridden, and decorated class
+        cannot be subclassed. For example:
+
+            class Base:
+                @final
+                def done(self) -> None:
+                    ...
+            class Sub(Base):
+                def done(self) -> None:  # Error reported by type checker
+                    ...
+            @final
+            class Leaf:
+                ...
+            class Other(Leaf):  # Error reported by type checker
+                ...
+
+        There is no runtime checking of these properties. The decorator
+        sets the ``__final__`` attribute to ``True`` on the decorated object
+        to allow runtime introspection.
+        """
+        try:
+            f.__final__ = True
+        except (AttributeError, TypeError):
+            # Skip the attribute silently if it is not writable.
+            # AttributeError happens if the object has __slots__ or a
+            # read-only property, TypeError if it's a builtin class.
+            pass
+        return f
+
+
+def IntVar(name):
+    return typing.TypeVar(name)
+
+
+# 3.8+:
+if hasattr(typing, 'Literal'):
+    Literal = typing.Literal
+# 3.7:
+else:
+    class _LiteralForm(typing._SpecialForm, _root=True):
+
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+        def __getitem__(self, parameters):
+            return typing._GenericAlias(self, parameters)
+
+    Literal = _LiteralForm('Literal',
+                           doc="""A type that can be used to indicate to type checkers
+                           that the corresponding value has a value literally equivalent
+                           to the provided parameter. For example:
+
+                               var: Literal[4] = 4
+
+                           The type checker understands that 'var' is literally equal to
+                           the value 4 and no other value.
+
+                           Literal[...] cannot be subclassed. There is no runtime
+                           checking verifying that the parameter is actually a value
+                           instead of a type.""")
+
+
+_overload_dummy = typing._overload_dummy  # noqa
+
+
+if hasattr(typing, "get_overloads"):  # 3.11+
+    overload = typing.overload
+    get_overloads = typing.get_overloads
+    clear_overloads = typing.clear_overloads
+else:
+    # {module: {qualname: {firstlineno: func}}}
+    _overload_registry = collections.defaultdict(
+        functools.partial(collections.defaultdict, dict)
+    )
+
+    def overload(func):
+        """Decorator for overloaded functions/methods.
+
+        In a stub file, place two or more stub definitions for the same
+        function in a row, each decorated with @overload.  For example:
+
+        @overload
+        def utf8(value: None) -> None: ...
+        @overload
+        def utf8(value: bytes) -> bytes: ...
+        @overload
+        def utf8(value: str) -> bytes: ...
+
+        In a non-stub file (i.e. a regular .py file), do the same but
+        follow it with an implementation.  The implementation should *not*
+        be decorated with @overload.  For example:
+
+        @overload
+        def utf8(value: None) -> None: ...
+        @overload
+        def utf8(value: bytes) -> bytes: ...
+        @overload
+        def utf8(value: str) -> bytes: ...
+        def utf8(value):
+            # implementation goes here
+
+        The overloads for a function can be retrieved at runtime using the
+        get_overloads() function.
+        """
+        # classmethod and staticmethod
+        f = getattr(func, "__func__", func)
+        try:
+            _overload_registry[f.__module__][f.__qualname__][
+                f.__code__.co_firstlineno
+            ] = func
+        except AttributeError:
+            # Not a normal function; ignore.
+            pass
+        return _overload_dummy
+
+    def get_overloads(func):
+        """Return all defined overloads for *func* as a sequence."""
+        # classmethod and staticmethod
+        f = getattr(func, "__func__", func)
+        if f.__module__ not in _overload_registry:
+            return []
+        mod_dict = _overload_registry[f.__module__]
+        if f.__qualname__ not in mod_dict:
+            return []
+        return list(mod_dict[f.__qualname__].values())
+
+    def clear_overloads():
+        """Clear all overloads in the registry."""
+        _overload_registry.clear()
+
+
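+# Hedged sketch of the overload registry backport, as comments only (`utf8`
+# mirrors the docstring example above):
+#
+#     @overload
+#     def utf8(value: None) -> None: ...
+#     @overload
+#     def utf8(value: str) -> bytes: ...
+#     def utf8(value):
+#         return None if value is None else value.encode()
+#
+#     len(get_overloads(utf8))  # => 2; clear_overloads() empties the registry
+
+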
+# This is not a real generic class.  Don't use outside annotations.
+Type = typing.Type
+
+# Various ABCs mimicking those in collections.abc.
+# A few are simply re-exported for completeness.
+
+
+Awaitable = typing.Awaitable
+Coroutine = typing.Coroutine
+AsyncIterable = typing.AsyncIterable
+AsyncIterator = typing.AsyncIterator
+Deque = typing.Deque
+ContextManager = typing.ContextManager
+AsyncContextManager = typing.AsyncContextManager
+DefaultDict = typing.DefaultDict
+
+# 3.7.2+
+if hasattr(typing, 'OrderedDict'):
+    OrderedDict = typing.OrderedDict
+# 3.7.0-3.7.2
+else:
+    OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
+
+Counter = typing.Counter
+ChainMap = typing.ChainMap
+AsyncGenerator = typing.AsyncGenerator
+NewType = typing.NewType
+Text = typing.Text
+TYPE_CHECKING = typing.TYPE_CHECKING
+
+
+_PROTO_WHITELIST = ['Callable', 'Awaitable',
+                    'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
+                    'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
+                    'ContextManager', 'AsyncContextManager']
+
+
+def _get_protocol_attrs(cls):
+    attrs = set()
+    for base in cls.__mro__[:-1]:  # without object
+        if base.__name__ in ('Protocol', 'Generic'):
+            continue
+        annotations = getattr(base, '__annotations__', {})
+        for attr in list(base.__dict__.keys()) + list(annotations.keys()):
+            if (not attr.startswith('_abc_') and attr not in (
+                    '__abstractmethods__', '__annotations__', '__weakref__',
+                    '_is_protocol', '_is_runtime_protocol', '__dict__',
+                    '__args__', '__slots__',
+                    '__next_in_mro__', '__parameters__', '__origin__',
+                    '__orig_bases__', '__extra__', '__tree_hash__',
+                    '__doc__', '__subclasshook__', '__init__', '__new__',
+                    '__module__', '_MutableMapping__marker', '_gorg')):
+                attrs.add(attr)
+    return attrs
+
+
+def _is_callable_members_only(cls):
+    return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
+
+
+def _maybe_adjust_parameters(cls):
+    """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
+
+    The contents of this function are very similar
+    to logic found in typing.Generic.__init_subclass__
+    on the CPython main branch.
+    """
+    tvars = []
+    if '__orig_bases__' in cls.__dict__:
+        tvars = typing._collect_type_vars(cls.__orig_bases__)
+        # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
+        # If found, tvars must be a subset of it.
+        # If not found, tvars is it.
+        # Also check for and reject plain Generic,
+        # and reject multiple Generic[...] and/or Protocol[...].
+        gvars = None
+        for base in cls.__orig_bases__:
+            if (isinstance(base, typing._GenericAlias) and
+                    base.__origin__ in (typing.Generic, Protocol)):
+                # for error messages
+                the_base = base.__origin__.__name__
+                if gvars is not None:
+                    raise TypeError(
+                        "Cannot inherit from Generic[...]"
+                        " and/or Protocol[...] multiple types.")
+                gvars = base.__parameters__
+        if gvars is None:
+            gvars = tvars
+        else:
+            tvarset = set(tvars)
+            gvarset = set(gvars)
+            if not tvarset <= gvarset:
+                s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
+                s_args = ', '.join(str(g) for g in gvars)
+                raise TypeError(f"Some type variables ({s_vars}) are"
+                                f" not listed in {the_base}[{s_args}]")
+            tvars = gvars
+    cls.__parameters__ = tuple(tvars)
+
+
+# 3.8+
+if hasattr(typing, 'Protocol'):
+    Protocol = typing.Protocol
+# 3.7
+else:
+
+    def _no_init(self, *args, **kwargs):
+        if type(self)._is_protocol:
+            raise TypeError('Protocols cannot be instantiated')
+
+    class _ProtocolMeta(abc.ABCMeta):  # noqa: B024
+        # This metaclass is a bit unfortunate and exists only because of the lack
+        # of __instancehook__.
+        def __instancecheck__(cls, instance):
+            # We need this method for situations where attributes are
+            # assigned in __init__.
+            if ((not getattr(cls, '_is_protocol', False) or
+                 _is_callable_members_only(cls)) and
+                    issubclass(instance.__class__, cls)):
+                return True
+            if cls._is_protocol:
+                if all(hasattr(instance, attr) and
+                       (not callable(getattr(cls, attr, None)) or
+                        getattr(instance, attr) is not None)
+                       for attr in _get_protocol_attrs(cls)):
+                    return True
+            return super().__instancecheck__(instance)
+
+    class Protocol(metaclass=_ProtocolMeta):
+        # There is quite a lot of overlapping code with typing.Generic.
+        # Unfortunately it is hard to avoid this while these live in two different
+        # modules. The duplicated code will be removed when Protocol is moved to typing.
+        """Base class for protocol classes. Protocol classes are defined as::
+
+            class Proto(Protocol):
+                def meth(self) -> int:
+                    ...
+
+        Such classes are primarily used with static type checkers that recognize
+        structural subtyping (static duck-typing), for example::
+
+            class C:
+                def meth(self) -> int:
+                    return 0
+
+            def func(x: Proto) -> int:
+                return x.meth()
+
+            func(C())  # Passes static type check
+
+        See PEP 544 for details. Protocol classes decorated with
+        @typing_extensions.runtime act as simple-minded runtime protocols that check
+        only the presence of given attributes, ignoring their type signatures.
+
+        Protocol classes can be generic; they are defined as::
+
+            class GenProto(Protocol[T]):
+                def meth(self) -> T:
+                    ...
+        """
+        __slots__ = ()
+        _is_protocol = True
+
+        def __new__(cls, *args, **kwds):
+            if cls is Protocol:
+                raise TypeError("Type Protocol cannot be instantiated; "
+                                "it can only be used as a base class")
+            return super().__new__(cls)
+
+        @typing._tp_cache
+        def __class_getitem__(cls, params):
+            if not isinstance(params, tuple):
+                params = (params,)
+            if not params and cls is not typing.Tuple:
+                raise TypeError(
+                    f"Parameter list to {cls.__qualname__}[...] cannot be empty")
+            msg = "Parameters to generic types must be types."
+            params = tuple(typing._type_check(p, msg) for p in params)  # noqa
+            if cls is Protocol:
+                # Generic can only be subscripted with unique type variables.
+                if not all(isinstance(p, typing.TypeVar) for p in params):
+                    i = 0
+                    while isinstance(params[i], typing.TypeVar):
+                        i += 1
+                    raise TypeError(
+                        "Parameters to Protocol[...] must all be type variables."
+                        f" Parameter {i + 1} is {params[i]}")
+                if len(set(params)) != len(params):
+                    raise TypeError(
+                        "Parameters to Protocol[...] must all be unique")
+            else:
+                # Subscripting a regular Generic subclass.
+                _check_generic(cls, params, len(cls.__parameters__))
+            return typing._GenericAlias(cls, params)
+
+        def __init_subclass__(cls, *args, **kwargs):
+            if '__orig_bases__' in cls.__dict__:
+                error = typing.Generic in cls.__orig_bases__
+            else:
+                error = typing.Generic in cls.__bases__
+            if error:
+                raise TypeError("Cannot inherit from plain Generic")
+            _maybe_adjust_parameters(cls)
+
+            # Determine if this is a protocol or a concrete subclass.
+            if not cls.__dict__.get('_is_protocol', None):
+                cls._is_protocol = any(b is Protocol for b in cls.__bases__)
+
+            # Set (or override) the protocol subclass hook.
+            def _proto_hook(other):
+                if not cls.__dict__.get('_is_protocol', None):
+                    return NotImplemented
+                if not getattr(cls, '_is_runtime_protocol', False):
+                    if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
+                        return NotImplemented
+                    raise TypeError("Instance and class checks can only be used with"
+                                    " @runtime protocols")
+                if not _is_callable_members_only(cls):
+                    if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
+                        return NotImplemented
+                    raise TypeError("Protocols with non-method members"
+                                    " don't support issubclass()")
+                if not isinstance(other, type):
+                    # Same error as for issubclass(1, int)
+                    raise TypeError('issubclass() arg 1 must be a class')
+                for attr in _get_protocol_attrs(cls):
+                    for base in other.__mro__:
+                        if attr in base.__dict__:
+                            if base.__dict__[attr] is None:
+                                return NotImplemented
+                            break
+                        annotations = getattr(base, '__annotations__', {})
+                        if (isinstance(annotations, typing.Mapping) and
+                                attr in annotations and
+                                isinstance(other, _ProtocolMeta) and
+                                other._is_protocol):
+                            break
+                    else:
+                        return NotImplemented
+                return True
+            if '__subclasshook__' not in cls.__dict__:
+                cls.__subclasshook__ = _proto_hook
+
+            # We have nothing more to do for non-protocols.
+            if not cls._is_protocol:
+                return
+
+            # Check consistency of bases.
+            for base in cls.__bases__:
+                if not (base in (object, typing.Generic) or
+                        base.__module__ == 'collections.abc' and
+                        base.__name__ in _PROTO_WHITELIST or
+                        isinstance(base, _ProtocolMeta) and base._is_protocol):
+                    raise TypeError('Protocols can only inherit from other'
+                                    f' protocols, got {repr(base)}')
+            cls.__init__ = _no_init
+
+
+# 3.8+
+if hasattr(typing, 'runtime_checkable'):
+    runtime_checkable = typing.runtime_checkable
+# 3.7
+else:
+    def runtime_checkable(cls):
+        """Mark a protocol class as a runtime protocol, so that it
+        can be used with isinstance() and issubclass(). Raise TypeError
+        if applied to a non-protocol class.
+
+        This allows a simple-minded structural check very similar to the
+        one-offs in collections.abc such as Hashable.
+        """
+        if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
+            raise TypeError('@runtime_checkable can be only applied to protocol classes,'
+                            f' got {cls!r}')
+        cls._is_runtime_protocol = True
+        return cls
+
+
+# Exists for backwards compatibility.
+runtime = runtime_checkable
+
+
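+# Hedged sketch combining the Protocol and runtime_checkable backports, as
+# comments only (`Closable` is a hypothetical protocol):
+#
+#     @runtime_checkable
+#     class Closable(Protocol):
+#         def close(self) -> None: ...
+#
+#     import io
+#     isinstance(io.StringIO(), Closable)  # => True; checks attribute presence,
+#                                          # not the method's signature
+
+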
+# 3.8+
+if hasattr(typing, 'SupportsIndex'):
+    SupportsIndex = typing.SupportsIndex
+# 3.7
+else:
+    @runtime_checkable
+    class SupportsIndex(Protocol):
+        __slots__ = ()
+
+        @abc.abstractmethod
+        def __index__(self) -> int:
+            pass
+
+
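+# Hedged sketch, as comments only: any object defining __index__ satisfies
+# SupportsIndex, which is what sequence indexing requires.
+#
+#     isinstance(3, SupportsIndex)     # => True; int defines __index__
+#     isinstance('3', SupportsIndex)   # => False
+
+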
+if hasattr(typing, "Required"):
+    # The standard library TypedDict in Python 3.8 does not store runtime information
+    # about which (if any) keys are optional.  See https://bugs.python.org/issue38834
+    # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
+    # keyword with old-style TypedDict().  See https://bugs.python.org/issue42059
+    # The standard library TypedDict below Python 3.11 does not store runtime
+    # information about optional and required keys when using Required or NotRequired.
+    # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
+    TypedDict = typing.TypedDict
+    _TypedDictMeta = typing._TypedDictMeta
+    is_typeddict = typing.is_typeddict
+else:
+    def _check_fails(cls, other):
+        try:
+            if sys._getframe(1).f_globals['__name__'] not in ['abc',
+                                                              'functools',
+                                                              'typing']:
+                # Typed dicts are only for static structural subtyping.
+                raise TypeError('TypedDict does not support instance and class checks')
+        except (AttributeError, ValueError):
+            pass
+        return False
+
+    def _dict_new(*args, **kwargs):
+        if not args:
+            raise TypeError('TypedDict.__new__(): not enough arguments')
+        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
+        return dict(*args, **kwargs)
+
+    _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
+
+    def _typeddict_new(*args, total=True, **kwargs):
+        if not args:
+            raise TypeError('TypedDict.__new__(): not enough arguments')
+        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
+        if args:
+            typename, args = args[0], args[1:]  # allow the "_typename" keyword to be passed
+        elif '_typename' in kwargs:
+            typename = kwargs.pop('_typename')
+            import warnings
+            warnings.warn("Passing '_typename' as keyword argument is deprecated",
+                          DeprecationWarning, stacklevel=2)
+        else:
+            raise TypeError("TypedDict.__new__() missing 1 required positional "
+                            "argument: '_typename'")
+        if args:
+            try:
+                fields, = args  # allow the "_fields" keyword to be passed
+            except ValueError:
+                raise TypeError('TypedDict.__new__() takes from 2 to 3 '
+                                f'positional arguments but {len(args) + 2} '
+                                'were given')
+        elif '_fields' in kwargs and len(kwargs) == 1:
+            fields = kwargs.pop('_fields')
+            import warnings
+            warnings.warn("Passing '_fields' as keyword argument is deprecated",
+                          DeprecationWarning, stacklevel=2)
+        else:
+            fields = None
+
+        if fields is None:
+            fields = kwargs
+        elif kwargs:
+            raise TypeError("TypedDict takes either a dict or keyword arguments,"
+                            " but not both")
+
+        ns = {'__annotations__': dict(fields)}
+        try:
+            # Setting correct module is necessary to make typed dict classes pickleable.
+            ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
+        except (AttributeError, ValueError):
+            pass
+
+        return _TypedDictMeta(typename, (), ns, total=total)
+
+    _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
+                                         ' /, *, total=True, **kwargs)')
+
+    class _TypedDictMeta(type):
+        def __init__(cls, name, bases, ns, total=True):
+            super().__init__(name, bases, ns)
+
+        def __new__(cls, name, bases, ns, total=True):
+            # Create new typed dict class object.
+            # This method is called directly when TypedDict is subclassed,
+            # or via _typeddict_new when TypedDict is instantiated. This way
+            # TypedDict supports all three syntaxes described in its docstring.
+            # Subclasses and instances of TypedDict return actual dictionaries
+            # via _dict_new.
+            ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
+            # Don't insert typing.Generic into __bases__ here,
+            # or Generic.__init_subclass__ will raise TypeError
+            # in the super().__new__() call.
+            # Instead, monkey-patch __bases__ onto the class after it's been created.
+            tp_dict = super().__new__(cls, name, (dict,), ns)
+
+            if any(issubclass(base, typing.Generic) for base in bases):
+                tp_dict.__bases__ = (typing.Generic, dict)
+                _maybe_adjust_parameters(tp_dict)
+
+            annotations = {}
+            own_annotations = ns.get('__annotations__', {})
+            msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
+            own_annotations = {
+                n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
+            }
+            required_keys = set()
+            optional_keys = set()
+
+            for base in bases:
+                annotations.update(base.__dict__.get('__annotations__', {}))
+                required_keys.update(base.__dict__.get('__required_keys__', ()))
+                optional_keys.update(base.__dict__.get('__optional_keys__', ()))
+
+            annotations.update(own_annotations)
+            for annotation_key, annotation_type in own_annotations.items():
+                annotation_origin = get_origin(annotation_type)
+                if annotation_origin is Annotated:
+                    annotation_args = get_args(annotation_type)
+                    if annotation_args:
+                        annotation_type = annotation_args[0]
+                        annotation_origin = get_origin(annotation_type)
+
+                if annotation_origin is Required:
+                    required_keys.add(annotation_key)
+                elif annotation_origin is NotRequired:
+                    optional_keys.add(annotation_key)
+                elif total:
+                    required_keys.add(annotation_key)
+                else:
+                    optional_keys.add(annotation_key)
+
+            tp_dict.__annotations__ = annotations
+            tp_dict.__required_keys__ = frozenset(required_keys)
+            tp_dict.__optional_keys__ = frozenset(optional_keys)
+            if not hasattr(tp_dict, '__total__'):
+                tp_dict.__total__ = total
+            return tp_dict
+
+        __instancecheck__ = __subclasscheck__ = _check_fails
+
+    TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
+    TypedDict.__module__ = __name__
+    TypedDict.__doc__ = \
+        """A simple typed name space. At runtime it is equivalent to a plain dict.
+
+        TypedDict creates a dictionary type that expects all of its
+        instances to have a certain set of keys, with each key
+        associated with a value of a consistent type. This expectation
+        is not checked at runtime but is only enforced by type checkers.
+        Usage::
+
+            class Point2D(TypedDict):
+                x: int
+                y: int
+                label: str
+
+            a: Point2D = {'x': 1, 'y': 2, 'label': 'good'}  # OK
+            b: Point2D = {'z': 3, 'label': 'bad'}           # Fails type check
+
+            assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+        The type info can be accessed via the Point2D.__annotations__ dict, and
+        the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+        TypedDict supports two additional equivalent forms::
+
+            Point2D = TypedDict('Point2D', x=int, y=int, label=str)
+            Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+        The class syntax is only supported in Python 3.6+, while the two other
+        syntax forms work for Python 2.7 and 3.2+.
+        """
+
+    if hasattr(typing, "_TypedDictMeta"):
+        _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
+    else:
+        _TYPEDDICT_TYPES = (_TypedDictMeta,)
+
+    def is_typeddict(tp):
+        """Check if an annotation is a TypedDict class
+
+        For example::
+            class Film(TypedDict):
+                title: str
+                year: int
+
+            is_typeddict(Film)  # => True
+            is_typeddict(Union[list, str])  # => False
+        """
+        return isinstance(tp, tuple(_TYPEDDICT_TYPES))
+
+
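+# Hedged sketch of the runtime introspection this backport preserves, as
+# comments only (`Movie` is a hypothetical TypedDict):
+#
+#     class Movie(TypedDict, total=False):
+#         title: Required[str]
+#         year: int
+#
+#     Movie.__required_keys__  # => frozenset({'title'})
+#     Movie.__optional_keys__  # => frozenset({'year'})
+#     is_typeddict(Movie)      # => True
+
+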
+if hasattr(typing, "assert_type"):
+    assert_type = typing.assert_type
+
+else:
+    def assert_type(__val, __typ):
+        """Assert (to the type checker) that the value is of the given type.
+
+        When the type checker encounters a call to assert_type(), it
+        emits an error if the value is not of the specified type::
+
+            def greet(name: str) -> None:
+                assert_type(name, str)  # ok
+                assert_type(name, int)  # type checker error
+
+        At runtime this returns the first argument unchanged and otherwise
+        does nothing.
+        """
+        return __val
+
+
+if hasattr(typing, "Required"):
+    get_type_hints = typing.get_type_hints
+else:
+    import functools
+    import types
+
+    # replaces _strip_annotations()
+    def _strip_extras(t):
+        """Strips Annotated, Required and NotRequired from a given type."""
+        if isinstance(t, _AnnotatedAlias):
+            return _strip_extras(t.__origin__)
+        if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
+            return _strip_extras(t.__args__[0])
+        if isinstance(t, typing._GenericAlias):
+            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+            if stripped_args == t.__args__:
+                return t
+            return t.copy_with(stripped_args)
+        if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
+            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+            if stripped_args == t.__args__:
+                return t
+            return types.GenericAlias(t.__origin__, stripped_args)
+        if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
+            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+            if stripped_args == t.__args__:
+                return t
+            return functools.reduce(operator.or_, stripped_args)
+
+        return t
+
+    def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
+        """Return type hints for an object.
+
+        This is often the same as obj.__annotations__, but it handles
+        forward references encoded as string literals, adds Optional[t] if a
+        default value equal to None is set and recursively replaces all
+        'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
+        (unless 'include_extras=True').
+
+        The argument may be a module, class, method, or function. The annotations
+        are returned as a dictionary. For classes, annotations include also
+        inherited members.
+
+        TypeError is raised if the argument is not of a type that can contain
+        annotations, and an empty dictionary is returned if no annotations are
+        present.
+
+        BEWARE -- the behavior of globalns and localns is counterintuitive
+        (unless you are familiar with how eval() and exec() work).  The
+        search order is locals first, then globals.
+
+        - If no dict arguments are passed, an attempt is made to use the
+          globals from obj (or the respective module's globals for classes),
+          and these are also used as the locals.  If the object does not appear
+          to have globals, an empty dictionary is used.
+
+        - If one dict argument is passed, it is used for both globals and
+          locals.
+
+        - If two dict arguments are passed, they specify globals and
+          locals, respectively.
+        """
+        if hasattr(typing, "Annotated"):
+            hint = typing.get_type_hints(
+                obj, globalns=globalns, localns=localns, include_extras=True
+            )
+        else:
+            hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
+        if include_extras:
+            return hint
+        return {k: _strip_extras(t) for k, t in hint.items()}
+
+
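+# Hedged sketch of the include_extras behaviour, as comments only (`Movie` is
+# a hypothetical TypedDict):
+#
+#     class Movie(TypedDict, total=False):
+#         title: Required[str]
+#
+#     get_type_hints(Movie)                       # => {'title': str}
+#     get_type_hints(Movie, include_extras=True)  # => {'title': Required[str]}
+
+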
+# Python 3.9+ has PEP 593 (Annotated)
+if hasattr(typing, 'Annotated'):
+    Annotated = typing.Annotated
+    # Not exported and not a public API, but needed for get_origin() and get_args()
+    # to work.
+    _AnnotatedAlias = typing._AnnotatedAlias
+# 3.7-3.8
+else:
+    class _AnnotatedAlias(typing._GenericAlias, _root=True):
+        """Runtime representation of an annotated type.
+
+        At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
+        with extra annotations. The alias behaves like a normal typing alias,
+        instantiating is the same as instantiating the underlying type, binding
+        it to types is also the same.
+        """
+        def __init__(self, origin, metadata):
+            if isinstance(origin, _AnnotatedAlias):
+                metadata = origin.__metadata__ + metadata
+                origin = origin.__origin__
+            super().__init__(origin, origin)
+            self.__metadata__ = metadata
+
+        def copy_with(self, params):
+            assert len(params) == 1
+            new_type = params[0]
+            return _AnnotatedAlias(new_type, self.__metadata__)
+
+        def __repr__(self):
+            return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
+                    f"{', '.join(repr(a) for a in self.__metadata__)}]")
+
+        def __reduce__(self):
+            return operator.getitem, (
+                Annotated, (self.__origin__,) + self.__metadata__
+            )
+
+        def __eq__(self, other):
+            if not isinstance(other, _AnnotatedAlias):
+                return NotImplemented
+            if self.__origin__ != other.__origin__:
+                return False
+            return self.__metadata__ == other.__metadata__
+
+        def __hash__(self):
+            return hash((self.__origin__, self.__metadata__))
+
+    class Annotated:
+        """Add context specific metadata to a type.
+
+        Example: Annotated[int, runtime_check.Unsigned] indicates to the
+        hypothetical runtime_check module that this type is an unsigned int.
+        Every other consumer of this type can ignore this metadata and treat
+        this type as int.
+
+        The first argument to Annotated must be a valid type (and will be in
+        the __origin__ field), the remaining arguments are kept as a tuple in
+        the __metadata__ field.
+
+        Details:
+
+        - It's an error to call `Annotated` with less than two arguments.
+        - Nested Annotated are flattened::
+
+            Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+        - Instantiating an annotated type is equivalent to instantiating the
+        underlying type::
+
+            Annotated[C, Ann1](5) == C(5)
+
+        - Annotated can be used as a generic type alias::
+
+            Optimized = Annotated[T, runtime.Optimize()]
+            Optimized[int] == Annotated[int, runtime.Optimize()]
+
+            OptimizedList = Annotated[List[T], runtime.Optimize()]
+            OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+        """
+
+        __slots__ = ()
+
+        def __new__(cls, *args, **kwargs):
+            raise TypeError("Type Annotated cannot be instantiated.")
+
+        @typing._tp_cache
+        def __class_getitem__(cls, params):
+            if not isinstance(params, tuple) or len(params) < 2:
+                raise TypeError("Annotated[...] should be used "
+                                "with at least two arguments (a type and an "
+                                "annotation).")
+            allowed_special_forms = (ClassVar, Final)
+            if get_origin(params[0]) in allowed_special_forms:
+                origin = params[0]
+            else:
+                msg = "Annotated[t, ...]: t must be a type."
+                origin = typing._type_check(params[0], msg)
+            metadata = tuple(params[1:])
+            return _AnnotatedAlias(origin, metadata)
+
+        def __init_subclass__(cls, *args, **kwargs):
+            raise TypeError(
+                f"Cannot subclass {cls.__module__}.Annotated"
+            )
+
+# Python 3.8 has get_origin() and get_args() but those implementations aren't
+# Annotated-aware, so we can't use those. Python 3.9's versions don't support
+# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
+if sys.version_info[:2] >= (3, 10):
+    get_origin = typing.get_origin
+    get_args = typing.get_args
+# 3.7-3.9
+else:
+    try:
+        # 3.9+
+        from typing import _BaseGenericAlias
+    except ImportError:
+        _BaseGenericAlias = typing._GenericAlias
+    try:
+        # 3.9+
+        from typing import GenericAlias as _typing_GenericAlias
+    except ImportError:
+        _typing_GenericAlias = typing._GenericAlias
+
+    def get_origin(tp):
+        """Get the unsubscripted version of a type.
+
+        This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
+        and Annotated. Return None for unsupported types. Examples::
+
+            get_origin(Literal[42]) is Literal
+            get_origin(int) is None
+            get_origin(ClassVar[int]) is ClassVar
+            get_origin(Generic) is Generic
+            get_origin(Generic[T]) is Generic
+            get_origin(Union[T, int]) is Union
+            get_origin(List[Tuple[T, T]][int]) == list
+            get_origin(P.args) is P
+        """
+        if isinstance(tp, _AnnotatedAlias):
+            return Annotated
+        if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
+                           ParamSpecArgs, ParamSpecKwargs)):
+            return tp.__origin__
+        if tp is typing.Generic:
+            return typing.Generic
+        return None
+
+    def get_args(tp):
+        """Get type arguments with all substitutions performed.
+
+        For unions, basic simplifications used by Union constructor are performed.
+        Examples::
+            get_args(Dict[str, int]) == (str, int)
+            get_args(int) == ()
+            get_args(Union[int, Union[T, int], str][int]) == (int, str)
+            get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
+            get_args(Callable[[], T][int]) == ([], int)
+        """
+        if isinstance(tp, _AnnotatedAlias):
+            return (tp.__origin__,) + tp.__metadata__
+        if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
+            if getattr(tp, "_special", False):
+                return ()
+            res = tp.__args__
+            if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
+                res = (list(res[:-1]), res[-1])
+            return res
+        return ()
+
+
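+# Hedged sketch of the Annotated-aware introspection backports, as comments
+# only:
+#
+#     get_origin(Annotated[int, 'meta'])     # => Annotated
+#     get_args(Annotated[int, 'meta'])       # => (int, 'meta')
+#     get_origin(typing.List[int])           # => list
+#     get_args(typing.Callable[[int], str])  # => ([int], str)
+
+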
+# 3.10+
+if hasattr(typing, 'TypeAlias'):
+    TypeAlias = typing.TypeAlias
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+    class _TypeAliasForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+    @_TypeAliasForm
+    def TypeAlias(self, parameters):
+        """Special marker indicating that an assignment should
+        be recognized as a proper type alias definition by type
+        checkers.
+
+        For example::
+
+            Predicate: TypeAlias = Callable[..., bool]
+
+        It's invalid when used anywhere except as in the example above.
+        """
+        raise TypeError(f"{self} is not subscriptable")
+# 3.7-3.8
+else:
+    class _TypeAliasForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+    TypeAlias = _TypeAliasForm('TypeAlias',
+                               doc="""Special marker indicating that an assignment should
+                               be recognized as a proper type alias definition by type
+                               checkers.
+
+                               For example::
+
+                                   Predicate: TypeAlias = Callable[..., bool]
+
+                               It's invalid when used anywhere except as in the example
+                               above.""")
+
+
+class _DefaultMixin:
+    """Mixin for TypeVarLike defaults."""
+
+    __slots__ = ()
+
+    def __init__(self, default):
+        if isinstance(default, (tuple, list)):
+            self.__default__ = tuple((typing._type_check(d, "Default must be a type")
+                                      for d in default))
+        elif default:
+            self.__default__ = typing._type_check(default, "Default must be a type")
+        else:
+            self.__default__ = None
+
+
+# Add default and infer_variance parameters from PEP 696 and 695
+class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
+    """Type variable."""
+
+    __module__ = 'typing'
+
+    def __init__(self, name, *constraints, bound=None,
+                 covariant=False, contravariant=False,
+                 default=None, infer_variance=False):
+        super().__init__(name, *constraints, bound=bound, covariant=covariant,
+                         contravariant=contravariant)
+        _DefaultMixin.__init__(self, default)
+        self.__infer_variance__ = infer_variance
+
+        # for pickling:
+        try:
+            def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+        except (AttributeError, ValueError):
+            def_mod = None
+        if def_mod != 'typing_extensions':
+            self.__module__ = def_mod
+
+
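+# Hedged sketch of the PEP 696 additions, as comments only:
+#
+#     T = TypeVar('T', default=int)
+#     T.__default__          # => int, stored by _DefaultMixin
+#     T.__infer_variance__   # => False
+
+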
+# Python 3.10+ has PEP 612
+if hasattr(typing, 'ParamSpecArgs'):
+    ParamSpecArgs = typing.ParamSpecArgs
+    ParamSpecKwargs = typing.ParamSpecKwargs
+# 3.7-3.9
+else:
+    class _Immutable:
+        """Mixin to indicate that object should not be copied."""
+        __slots__ = ()
+
+        def __copy__(self):
+            return self
+
+        def __deepcopy__(self, memo):
+            return self
+
+    class ParamSpecArgs(_Immutable):
+        """The args for a ParamSpec object.
+
+        Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
+
+        ParamSpecArgs objects have a reference back to their ParamSpec:
+
+        P.args.__origin__ is P
+
+        This type is meant for runtime introspection and has no special meaning to
+        static type checkers.
+        """
+        def __init__(self, origin):
+            self.__origin__ = origin
+
+        def __repr__(self):
+            return f"{self.__origin__.__name__}.args"
+
+        def __eq__(self, other):
+            if not isinstance(other, ParamSpecArgs):
+                return NotImplemented
+            return self.__origin__ == other.__origin__
+
+    class ParamSpecKwargs(_Immutable):
+        """The kwargs for a ParamSpec object.
+
+        Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
+
+        ParamSpecKwargs objects have a reference back to their ParamSpec:
+
+        P.kwargs.__origin__ is P
+
+        This type is meant for runtime introspection and has no special meaning to
+        static type checkers.
+        """
+        def __init__(self, origin):
+            self.__origin__ = origin
+
+        def __repr__(self):
+            return f"{self.__origin__.__name__}.kwargs"
+
+        def __eq__(self, other):
+            if not isinstance(other, ParamSpecKwargs):
+                return NotImplemented
+            return self.__origin__ == other.__origin__
+
+# 3.10+
+if hasattr(typing, 'ParamSpec'):
+
+    # Add default Parameter - PEP 696
+    class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True):
+        """Parameter specification variable."""
+
+        __module__ = 'typing'
+
+        def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
+                     default=None):
+            super().__init__(name, bound=bound, covariant=covariant,
+                             contravariant=contravariant)
+            _DefaultMixin.__init__(self, default)
+
+            # for pickling:
+            try:
+                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+            except (AttributeError, ValueError):
+                def_mod = None
+            if def_mod != 'typing_extensions':
+                self.__module__ = def_mod
+
+# 3.7-3.9
+else:
+
+    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+    class ParamSpec(list, _DefaultMixin):
+        """Parameter specification variable.
+
+        Usage::
+
+           P = ParamSpec('P')
+
+        Parameter specification variables exist primarily for the benefit of static
+        type checkers.  They are used to forward the parameter types of one
+        callable to another callable, a pattern commonly found in higher order
+        functions and decorators.  They are only valid when used in ``Concatenate``,
+        or as the first argument to ``Callable``. In Python 3.10 and higher,
+        they are also supported in user-defined Generics at runtime.
+        See class Generic for more information on generic types.  An
+        example for annotating a decorator::
+
+           T = TypeVar('T')
+           P = ParamSpec('P')
+
+           def add_logging(f: Callable[P, T]) -> Callable[P, T]:
+               '''A type-safe decorator to add logging to a function.'''
+               def inner(*args: P.args, **kwargs: P.kwargs) -> T:
+                   logging.info(f'{f.__name__} was called')
+                   return f(*args, **kwargs)
+               return inner
+
+           @add_logging
+           def add_two(x: float, y: float) -> float:
+               '''Add two numbers together.'''
+               return x + y
+
+        Parameter specification variables defined with covariant=True or
+        contravariant=True can be used to declare covariant or contravariant
+        generic types.  These keyword arguments are valid, but their actual semantics
+        are yet to be decided.  See PEP 612 for details.
+
+        Parameter specification variables can be introspected. e.g.:
+
+           P.__name__ == 'P'
+           P.__bound__ == None
+           P.__covariant__ == False
+           P.__contravariant__ == False
+
+        Note that only parameter specification variables defined in global scope can
+        be pickled.
+        """
+
+        # Trick Generic __parameters__.
+        __class__ = typing.TypeVar
+
+        @property
+        def args(self):
+            return ParamSpecArgs(self)
+
+        @property
+        def kwargs(self):
+            return ParamSpecKwargs(self)
+
+        def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
+                     default=None):
+            super().__init__([self])
+            self.__name__ = name
+            self.__covariant__ = bool(covariant)
+            self.__contravariant__ = bool(contravariant)
+            if bound:
+                self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
+            else:
+                self.__bound__ = None
+            _DefaultMixin.__init__(self, default)
+
+            # for pickling:
+            try:
+                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+            except (AttributeError, ValueError):
+                def_mod = None
+            if def_mod != 'typing_extensions':
+                self.__module__ = def_mod
+
+        def __repr__(self):
+            if self.__covariant__:
+                prefix = '+'
+            elif self.__contravariant__:
+                prefix = '-'
+            else:
+                prefix = '~'
+            return prefix + self.__name__
+
+        def __hash__(self):
+            return object.__hash__(self)
+
+        def __eq__(self, other):
+            return self is other
+
+        def __reduce__(self):
+            return self.__name__
+
+        # Hack to get typing._type_check to pass.
+        def __call__(self, *args, **kwargs):
+            pass
+
+
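+# Hedged sketch of ParamSpec introspection, valid on either branch above, as
+# comments only:
+#
+#     P = ParamSpec('P')
+#     P.args.__origin__ is P    # => True
+#     P.kwargs.__origin__ is P  # => True
+
+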
+# 3.7-3.9
+if not hasattr(typing, 'Concatenate'):
+    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+    class _ConcatenateGenericAlias(list):
+
+        # Trick Generic into looking into this for __parameters__.
+        __class__ = typing._GenericAlias
+
+        # Flag in 3.8.
+        _special = False
+
+        def __init__(self, origin, args):
+            super().__init__(args)
+            self.__origin__ = origin
+            self.__args__ = args
+
+        def __repr__(self):
+            _type_repr = typing._type_repr
+            return (f'{_type_repr(self.__origin__)}'
+                    f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
+
+        def __hash__(self):
+            return hash((self.__origin__, self.__args__))
+
+        # Hack to get typing._type_check to pass in Generic.
+        def __call__(self, *args, **kwargs):
+            pass
+
+        @property
+        def __parameters__(self):
+            return tuple(
+                tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
+            )
+
+
+# 3.7-3.9
+@typing._tp_cache
+def _concatenate_getitem(self, parameters):
+    if parameters == ():
+        raise TypeError("Cannot take a Concatenate of no types.")
+    if not isinstance(parameters, tuple):
+        parameters = (parameters,)
+    if not isinstance(parameters[-1], ParamSpec):
+        raise TypeError("The last parameter to Concatenate should be a "
+                        "ParamSpec variable.")
+    msg = "Concatenate[arg, ...]: each arg must be a type."
+    parameters = tuple(typing._type_check(p, msg) for p in parameters)
+    return _ConcatenateGenericAlias(self, parameters)
+
+
+# 3.10+
+if hasattr(typing, 'Concatenate'):
+    Concatenate = typing.Concatenate
+    _ConcatenateGenericAlias = typing._ConcatenateGenericAlias  # noqa
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+    @_TypeAliasForm
+    def Concatenate(self, parameters):
+        """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+        higher order function which adds, removes or transforms parameters of a
+        callable.
+
+        For example::
+
+           Callable[Concatenate[int, P], int]
+
+        See PEP 612 for detailed information.
+        """
+        return _concatenate_getitem(self, parameters)
+# 3.7-8
+else:
+    class _ConcatenateForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+        def __getitem__(self, parameters):
+            return _concatenate_getitem(self, parameters)
+
+    Concatenate = _ConcatenateForm(
+        'Concatenate',
+        doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+        higher order function which adds, removes or transforms parameters of a
+        callable.
+
+        For example::
+
+           Callable[Concatenate[int, P], int]
+
+        See PEP 612 for detailed information.
+        """)
+
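+# Hedged sketch of Concatenate with a ParamSpec, as comments only (`with_lock`
+# and the Lock argument are hypothetical):
+#
+#     P = ParamSpec('P')
+#     R = TypeVar('R')
+#
+#     def with_lock(f: Callable[Concatenate[Lock, P], R]) -> Callable[P, R]:
+#         ...  # supplies the Lock itself, forwarding the remaining params as P
+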
+# 3.10+
+if hasattr(typing, 'TypeGuard'):
+    TypeGuard = typing.TypeGuard
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+    class _TypeGuardForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+    @_TypeGuardForm
+    def TypeGuard(self, parameters):
+        """Special typing form used to annotate the return type of a user-defined
+        type guard function.  ``TypeGuard`` only accepts a single type argument.
+        At runtime, functions marked this way should return a boolean.
+
+        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+        type checkers to determine a more precise type of an expression within a
+        program's code flow.  Usually type narrowing is done by analyzing
+        conditional code flow and applying the narrowing to a block of code.  The
+        conditional expression here is sometimes referred to as a "type guard".
+
+        Sometimes it would be convenient to use a user-defined boolean function
+        as a type guard.  Such a function should use ``TypeGuard[...]`` as its
+        return type to alert static type checkers to this intention.
+
+        Using  ``-> TypeGuard`` tells the static type checker that for a given
+        function:
+
+        1. The return value is a boolean.
+        2. If the return value is ``True``, the type of its argument
+        is the type inside ``TypeGuard``.
+
+        For example::
+
+            def is_str(val: Union[str, float]):
+                # "isinstance" type guard
+                if isinstance(val, str):
+                    # Type of ``val`` is narrowed to ``str``
+                    ...
+                else:
+                    # Else, type of ``val`` is narrowed to ``float``.
+                    ...
+
+        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+        form of ``TypeA`` (it can even be a wider form) and this may lead to
+        type-unsafe results.  The main reason is to allow for things like
+        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+        a subtype of the former, since ``List`` is invariant.  The responsibility of
+        writing type-safe type guards is left to the user.
+
+        ``TypeGuard`` also works with type variables.  For more information, see
+        PEP 647 (User-Defined Type Guards).
+        """
+        item = typing._type_check(parameters, f'{self} accepts only a single type.')
+        return typing._GenericAlias(self, (item,))
+# 3.7-3.8
+else:
+    class _TypeGuardForm(typing._SpecialForm, _root=True):
+
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+        def __getitem__(self, parameters):
+            item = typing._type_check(parameters,
+                                      f'{self._name} accepts only a single type')
+            return typing._GenericAlias(self, (item,))
+
+    TypeGuard = _TypeGuardForm(
+        'TypeGuard',
+        doc="""Special typing form used to annotate the return type of a user-defined
+        type guard function.  ``TypeGuard`` only accepts a single type argument.
+        At runtime, functions marked this way should return a boolean.
+
+        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+        type checkers to determine a more precise type of an expression within a
+        program's code flow.  Usually type narrowing is done by analyzing
+        conditional code flow and applying the narrowing to a block of code.  The
+        conditional expression here is sometimes referred to as a "type guard".
+
+        Sometimes it would be convenient to use a user-defined boolean function
+        as a type guard.  Such a function should use ``TypeGuard[...]`` as its
+        return type to alert static type checkers to this intention.
+
+        Using  ``-> TypeGuard`` tells the static type checker that for a given
+        function:
+
+        1. The return value is a boolean.
+        2. If the return value is ``True``, the type of its argument
+        is the type inside ``TypeGuard``.
+
+        For example::
+
+            def is_str(val: Union[str, float]):
+                # "isinstance" type guard
+                if isinstance(val, str):
+                    # Type of ``val`` is narrowed to ``str``
+                    ...
+                else:
+                    # Else, type of ``val`` is narrowed to ``float``.
+                    ...
+
+        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+        form of ``TypeA`` (it can even be a wider form) and this may lead to
+        type-unsafe results.  The main reason is to allow for things like
+        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+        a subtype of the former, since ``List`` is invariant.  The responsibility of
+        writing type-safe type guards is left to the user.
+
+        ``TypeGuard`` also works with type variables.  For more information, see
+        PEP 647 (User-Defined Type Guards).
+        """)
+
+
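+# Hedged sketch of a user-defined type guard, as comments only (mirrors the
+# PEP 647 List[str] example):
+#
+#     def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
+#         return all(isinstance(x, str) for x in val)
+#
+#     # Inside `if is_str_list(xs):`, a checker narrows xs to List[str].
+
+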
+# Vendored from cpython typing._SpecialForm
+class _SpecialForm(typing._Final, _root=True):
+    __slots__ = ('_name', '__doc__', '_getitem')
+
+    def __init__(self, getitem):
+        self._getitem = getitem
+        self._name = getitem.__name__
+        self.__doc__ = getitem.__doc__
+
+    def __getattr__(self, item):
+        if item in {'__name__', '__qualname__'}:
+            return self._name
+
+        raise AttributeError(item)
+
+    def __mro_entries__(self, bases):
+        raise TypeError(f"Cannot subclass {self!r}")
+
+    def __repr__(self):
+        return f'typing_extensions.{self._name}'
+
+    def __reduce__(self):
+        return self._name
+
+    def __call__(self, *args, **kwds):
+        raise TypeError(f"Cannot instantiate {self!r}")
+
+    def __or__(self, other):
+        return typing.Union[self, other]
+
+    def __ror__(self, other):
+        return typing.Union[other, self]
+
+    def __instancecheck__(self, obj):
+        raise TypeError(f"{self} cannot be used with isinstance()")
+
+    def __subclasscheck__(self, cls):
+        raise TypeError(f"{self} cannot be used with issubclass()")
+
+    @typing._tp_cache
+    def __getitem__(self, parameters):
+        return self._getitem(self, parameters)
+
+
+if hasattr(typing, "LiteralString"):
+    LiteralString = typing.LiteralString
+else:
+    @_SpecialForm
+    def LiteralString(self, params):
+        """Represents an arbitrary literal string.
+
+        Example::
+
+          from typing_extensions import LiteralString
+
+          def query(sql: LiteralString) -> ...:
+              ...
+
+          query("SELECT * FROM table")  # ok
+          query(f"SELECT * FROM {input()}")  # not ok
+
+        See PEP 675 for details.
+
+        """
+        raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, "Self"):
+    Self = typing.Self
+else:
+    @_SpecialForm
+    def Self(self, params):
+        """Used to spell the type of "self" in classes.
+
+        Example::
+
+          from typing_extensions import Self
+
+          class ReturnsSelf:
+              def parse(self, data: bytes) -> Self:
+                  ...
+                  return self
+
+        """
+
+        raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, "Never"):
+    Never = typing.Never
+else:
+    @_SpecialForm
+    def Never(self, params):
+        """The bottom type, a type that has no members.
+
+        This can be used to define a function that should never be
+        called, or a function that never returns::
+
+            from typing_extensions import Never
+
+            def never_call_me(arg: Never) -> None:
+                pass
+
+            def int_or_str(arg: int | str) -> None:
+                never_call_me(arg)  # type checker error
+                match arg:
+                    case int():
+                        print("It's an int")
+                    case str():
+                        print("It's a str")
+                    case _:
+                        never_call_me(arg)  # ok, arg is of type Never
+
+        """
+
+        raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, 'Required'):
+    Required = typing.Required
+    NotRequired = typing.NotRequired
+elif sys.version_info[:2] >= (3, 9):
+    class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+    @_ExtensionsSpecialForm
+    def Required(self, parameters):
+        """A special typing construct to mark a key of a total=False TypedDict
+        as required. For example:
+
+            class Movie(TypedDict, total=False):
+                title: Required[str]
+                year: int
+
+            m = Movie(
+                title='The Matrix',  # typechecker error if key is omitted
+                year=1999,
+            )
+
+        There is no runtime checking that a required key is actually provided
+        when instantiating a related TypedDict.
+        """
+        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+        return typing._GenericAlias(self, (item,))
+
+    @_ExtensionsSpecialForm
+    def NotRequired(self, parameters):
+        """A special typing construct to mark a key of a TypedDict as
+        potentially missing. For example:
+
+            class Movie(TypedDict):
+                title: str
+                year: NotRequired[int]
+
+            m = Movie(
+                title='The Matrix',  # typechecker error if key is omitted
+                year=1999,
+            )
+        """
+        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+        return typing._GenericAlias(self, (item,))
+
+else:
+    class _RequiredForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+        def __getitem__(self, parameters):
+            item = typing._type_check(parameters,
+                                      f'{self._name} accepts only a single type.')
+            return typing._GenericAlias(self, (item,))
+
+    Required = _RequiredForm(
+        'Required',
+        doc="""A special typing construct to mark a key of a total=False TypedDict
+        as required. For example:
+
+            class Movie(TypedDict, total=False):
+                title: Required[str]
+                year: int
+
+            m = Movie(
+                title='The Matrix',  # typechecker error if key is omitted
+                year=1999,
+            )
+
+        There is no runtime checking that a required key is actually provided
+        when instantiating a related TypedDict.
+        """)
+    NotRequired = _RequiredForm(
+        'NotRequired',
+        doc="""A special typing construct to mark a key of a TypedDict as
+        potentially missing. For example:
+
+            class Movie(TypedDict):
+                title: str
+                year: NotRequired[int]
+
+            m = Movie(
+                title='The Matrix',  # typechecker error if key is omitted
+                year=1999,
+            )
+        """)
+
+
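+# Hedged sketch contrasting the two markers, as comments only (`Movie` is a
+# hypothetical TypedDict):
+#
+#     class Movie(TypedDict):
+#         title: str                # required, as total defaults to True
+#         year: NotRequired[int]    # may be omitted despite total=True
+
+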
+if hasattr(typing, "Unpack"):  # 3.11+
+    Unpack = typing.Unpack
+elif sys.version_info[:2] >= (3, 9):
+    class _UnpackSpecialForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+    class _UnpackAlias(typing._GenericAlias, _root=True):
+        __class__ = typing.TypeVar
+
+    @_UnpackSpecialForm
+    def Unpack(self, parameters):
+        """A special typing construct to unpack a variadic type. For example:
+
+            Shape = TypeVarTuple('Shape')
+            Batch = NewType('Batch', int)
+
+            def add_batch_axis(
+                x: Array[Unpack[Shape]]
+            ) -> Array[Batch, Unpack[Shape]]: ...
+
+        """
+        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+        return _UnpackAlias(self, (item,))
+
+    def _is_unpack(obj):
+        return isinstance(obj, _UnpackAlias)
+
+else:
+    class _UnpackAlias(typing._GenericAlias, _root=True):
+        __class__ = typing.TypeVar
+
+    class _UnpackForm(typing._SpecialForm, _root=True):
+        def __repr__(self):
+            return 'typing_extensions.' + self._name
+
+        def __getitem__(self, parameters):
+            item = typing._type_check(parameters,
+                                      f'{self._name} accepts only a single type.')
+            return _UnpackAlias(self, (item,))
+
+    Unpack = _UnpackForm(
+        'Unpack',
+        doc="""A special typing construct to unpack a variadic type. For example:
+
+            Shape = TypeVarTuple('Shape')
+            Batch = NewType('Batch', int)
+
+            def add_batch_axis(
+                x: Array[Unpack[Shape]]
+            ) -> Array[Batch, Unpack[Shape]]: ...
+
+        """)
+
+    def _is_unpack(obj):
+        return isinstance(obj, _UnpackAlias)
+
+
+if hasattr(typing, "TypeVarTuple"):  # 3.11+
+
+    # Add default Parameter - PEP 696
+    class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
+        """Type variable tuple."""
+
+        def __init__(self, name, *, default=None):
+            super().__init__(name)
+            _DefaultMixin.__init__(self, default)
+
+            # for pickling:
+            try:
+                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+            except (AttributeError, ValueError):
+                def_mod = None
+            if def_mod != 'typing_extensions':
+                self.__module__ = def_mod
+
+else:
+    class TypeVarTuple(_DefaultMixin):
+        """Type variable tuple.
+
+        Usage::
+
+            Ts = TypeVarTuple('Ts')
+
+        In the same way that a normal type variable is a stand-in for a single
+        type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
+        type such as ``Tuple[int, str]``.
+
+        Type variable tuples can be used in ``Generic`` declarations.
+        Consider the following example::
+
+            class Array(Generic[*Ts]): ...
+
+        The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
+        where ``T1`` and ``T2`` are type variables. To use these type variables
+        as type parameters of ``Array``, we must *unpack* the type variable tuple using
+        the star operator: ``*Ts``. The signature of ``Array`` then behaves
+        as if we had simply written ``class Array(Generic[T1, T2]): ...``.
+        In contrast to ``Generic[T1, T2]``, however, ``Generic[*Ts]`` allows
+        us to parameterise the class with an *arbitrary* number of type parameters.
+
+        Type variable tuples can be used anywhere a normal ``TypeVar`` can.
+        This includes class definitions, as shown above, as well as function
+        signatures and variable annotations::
+
+            class Array(Generic[*Ts]):
+
+                def __init__(self, shape: Tuple[*Ts]):
+                    self._shape: Tuple[*Ts] = shape
+
+                def get_shape(self) -> Tuple[*Ts]:
+                    return self._shape
+
+            shape = (Height(480), Width(640))
+            x: Array[Height, Width] = Array(shape)
+            y = abs(x)  # Inferred type is Array[Height, Width]
+            z = x + x   #        ...    is Array[Height, Width]
+            x.get_shape()  #     ...    is tuple[Height, Width]
+
+        """
+
+        # Trick Generic __parameters__.
+        __class__ = typing.TypeVar
+
+        def __iter__(self):
+            yield self.__unpacked__
+
+        def __init__(self, name, *, default=None):
+            self.__name__ = name
+            _DefaultMixin.__init__(self, default)
+
+            # for pickling:
+            try:
+                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+            except (AttributeError, ValueError):
+                def_mod = None
+            if def_mod != 'typing_extensions':
+                self.__module__ = def_mod
+
+            self.__unpacked__ = Unpack[self]
+
+        def __repr__(self):
+            return self.__name__
+
+        def __hash__(self):
+            return object.__hash__(self)
+
+        def __eq__(self, other):
+            return self is other
+
+        def __reduce__(self):
+            return self.__name__
+
+        def __init_subclass__(self, *args, **kwds):
+            if '_root' not in kwds:
+                raise TypeError("Cannot subclass special typing classes")
+
+
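+# A minimal usage sketch (hypothetical, illustrative only): on Pythons without
+# native ``*Ts`` syntax, the backported objects are combined via ``Unpack``
+# subscription, which the monkey patching further below makes acceptable to
+# ``Generic``::
+#
+#     Ts = TypeVarTuple('Ts')
+#
+#     class Array(Generic[Unpack[Ts]]):
+#         def __init__(self, *shape: Unpack[Ts]) -> None:
+#             self.shape = shape
+#
+#     Array(480, 640)  # a checker may infer Array[int, int]
+
+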
+if hasattr(typing, "reveal_type"):
+    reveal_type = typing.reveal_type
+else:
+    def reveal_type(__obj: T) -> T:
+        """Reveal the inferred type of a variable.
+
+        When a static type checker encounters a call to ``reveal_type()``,
+        it will emit the inferred type of the argument::
+
+            x: int = 1
+            reveal_type(x)
+
+        Running a static type checker (e.g., ``mypy``) on this example
+        will produce output similar to 'Revealed type is "builtins.int"'.
+
+        At runtime, the function prints the runtime type of the
+        argument and returns it unchanged.
+
+        """
+        print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
+        return __obj
+
+
+if hasattr(typing, "assert_never"):
+    assert_never = typing.assert_never
+else:
+    def assert_never(__arg: Never) -> Never:
+        """Assert to the type checker that a line of code is unreachable.
+
+        Example::
+
+            def int_or_str(arg: int | str) -> None:
+                match arg:
+                    case int():
+                        print("It's an int")
+                    case str():
+                        print("It's a str")
+                    case _:
+                        assert_never(arg)
+
+        If a type checker finds that a call to assert_never() is
+        reachable, it will emit an error.
+
+        At runtime, this raises an ``AssertionError`` when called.
+
+        """
+        raise AssertionError("Expected code to be unreachable")
+
+
+if hasattr(typing, 'dataclass_transform'):
+    dataclass_transform = typing.dataclass_transform
+else:
+    def dataclass_transform(
+        *,
+        eq_default: bool = True,
+        order_default: bool = False,
+        kw_only_default: bool = False,
+        field_specifiers: typing.Tuple[
+            typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
+            ...
+        ] = (),
+        **kwargs: typing.Any,
+    ) -> typing.Callable[[T], T]:
+        """Decorator that marks a function, class, or metaclass as providing
+        dataclass-like behavior.
+
+        Example:
+
+            from typing_extensions import dataclass_transform
+
+            _T = TypeVar("_T")
+
+            # Used on a decorator function
+            @dataclass_transform()
+            def create_model(cls: type[_T]) -> type[_T]:
+                ...
+                return cls
+
+            @create_model
+            class CustomerModel:
+                id: int
+                name: str
+
+            # Used on a base class
+            @dataclass_transform()
+            class ModelBase: ...
+
+            class CustomerModel(ModelBase):
+                id: int
+                name: str
+
+            # Used on a metaclass
+            @dataclass_transform()
+            class ModelMeta(type): ...
+
+            class ModelBase(metaclass=ModelMeta): ...
+
+            class CustomerModel(ModelBase):
+                id: int
+                name: str
+
+        Each of the ``CustomerModel`` classes defined in this example will now
+        behave similarly to a dataclass created with the ``@dataclasses.dataclass``
+        decorator. For example, the type checker will synthesize an ``__init__``
+        method.
+
+        The arguments to this decorator can be used to customize this behavior:
+        - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
+          True or False if it is omitted by the caller.
+        - ``order_default`` indicates whether the ``order`` parameter is
+          assumed to be True or False if it is omitted by the caller.
+        - ``kw_only_default`` indicates whether the ``kw_only`` parameter is
+          assumed to be True or False if it is omitted by the caller.
+        - ``field_specifiers`` specifies a static list of supported classes
+          or functions that describe fields, similar to ``dataclasses.field()``.
+
+        At runtime, this decorator records its arguments in the
+        ``__dataclass_transform__`` attribute on the decorated object.
+
+        See PEP 681 for details.
+
+        """
+        def decorator(cls_or_fn):
+            cls_or_fn.__dataclass_transform__ = {
+                "eq_default": eq_default,
+                "order_default": order_default,
+                "kw_only_default": kw_only_default,
+                "field_specifiers": field_specifiers,
+                "kwargs": kwargs,
+            }
+            return cls_or_fn
+        return decorator
+
+
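+# A hedged runtime sketch (illustrative only): beyond recording its arguments,
+# the decorator has no runtime behavior::
+#
+#     @dataclass_transform(kw_only_default=True)
+#     class ModelBase: ...
+#
+#     ModelBase.__dataclass_transform__['kw_only_default']  # -> True
+
+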
+if hasattr(typing, "override"):
+    override = typing.override
+else:
+    _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
+
+    def override(__arg: _F) -> _F:
+        """Indicate that a method is intended to override a method in a base class.
+
+        Usage:
+
+            class Base:
+                def method(self) -> None:
+                    pass
+
+            class Child(Base):
+                @override
+                def method(self) -> None:
+                    super().method()
+
+        When this decorator is applied to a method, the type checker will
+        validate that it overrides a method with the same name on a base class.
+        This helps prevent bugs that may occur when a base class is changed
+        without an equivalent change to a child class.
+
+        See PEP 698 for details.
+
+        """
+        return __arg
+
+
+# We have to do some monkey patching to deal with the dual nature of
+# Unpack/TypeVarTuple:
+# - We want Unpack to be a kind of TypeVar so it gets accepted in
+#   Generic[Unpack[Ts]]
+# - We want it to *not* be treated as a TypeVar for the purposes of
+#   counting generic parameters, so that when we subscript a generic,
+#   the runtime doesn't try to substitute the Unpack with the subscripted type.
+if not hasattr(typing, "TypeVarTuple"):
+    typing._collect_type_vars = _collect_type_vars
+    typing._check_generic = _check_generic
+
+
+# Backport typing.NamedTuple as it exists in Python 3.11.
+# In 3.11, support for defining generic `NamedTuple`s was added.
+# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
+if sys.version_info >= (3, 11):
+    NamedTuple = typing.NamedTuple
+else:
+    def _caller():
+        try:
+            return sys._getframe(2).f_globals.get('__name__', '__main__')
+        except (AttributeError, ValueError):  # For platforms without _getframe()
+            return None
+
+    def _make_nmtuple(name, types, module, defaults=()):
+        fields = [n for n, t in types]
+        annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
+                       for n, t in types}
+        nm_tpl = collections.namedtuple(name, fields,
+                                        defaults=defaults, module=module)
+        nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
+        # The `_field_types` attribute was removed in 3.9;
+        # in earlier versions, it is the same as the `__annotations__` attribute
+        if sys.version_info < (3, 9):
+            nm_tpl._field_types = annotations
+        return nm_tpl
+
+    _prohibited_namedtuple_fields = typing._prohibited
+    _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
+
+    class _NamedTupleMeta(type):
+        def __new__(cls, typename, bases, ns):
+            assert _NamedTuple in bases
+            for base in bases:
+                if base is not _NamedTuple and base is not typing.Generic:
+                    raise TypeError(
+                        'can only inherit from a NamedTuple type and Generic')
+            bases = tuple(tuple if base is _NamedTuple else base for base in bases)
+            types = ns.get('__annotations__', {})
+            default_names = []
+            for field_name in types:
+                if field_name in ns:
+                    default_names.append(field_name)
+                elif default_names:
+                    raise TypeError(f"Non-default namedtuple field {field_name} "
+                                    f"cannot follow default field"
+                                    f"{'s' if len(default_names) > 1 else ''} "
+                                    f"{', '.join(default_names)}")
+            nm_tpl = _make_nmtuple(
+                typename, types.items(),
+                defaults=[ns[n] for n in default_names],
+                module=ns['__module__']
+            )
+            nm_tpl.__bases__ = bases
+            if typing.Generic in bases:
+                class_getitem = typing.Generic.__class_getitem__.__func__
+                nm_tpl.__class_getitem__ = classmethod(class_getitem)
+            # update from user namespace without overriding special namedtuple attributes
+            for key in ns:
+                if key in _prohibited_namedtuple_fields:
+                    raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
+                elif key not in _special_namedtuple_fields and key not in nm_tpl._fields:
+                    setattr(nm_tpl, key, ns[key])
+            if typing.Generic in bases:
+                nm_tpl.__init_subclass__()
+            return nm_tpl
+
+    def NamedTuple(__typename, __fields=None, **kwargs):
+        if __fields is None:
+            __fields = kwargs.items()
+        elif kwargs:
+            raise TypeError("Either list of fields or keywords"
+                            " can be provided to NamedTuple, not both")
+        return _make_nmtuple(__typename, __fields, module=_caller())
+
+    NamedTuple.__doc__ = typing.NamedTuple.__doc__
+    _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
+
+    # On 3.8+, alter the signature so that it matches typing.NamedTuple.
+    # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
+    # so just leave the signature as it is on 3.7.
+    if sys.version_info >= (3, 8):
+        NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
+
+    def _namedtuple_mro_entries(bases):
+        assert NamedTuple in bases
+        return (_NamedTuple,)
+
+    NamedTuple.__mro_entries__ = _namedtuple_mro_entries
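+
+    # A hedged usage sketch (illustrative only): with this backport, generic
+    # NamedTuple definitions work on Python < 3.11::
+    #
+    #     T = TypeVar('T')
+    #
+    #     class Box(NamedTuple, Generic[T]):
+    #         content: T
+    #
+    #     Box[int](content=1)  # subscription via Generic.__class_getitem__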
diff --git a/.venv/Lib/site-packages/pkg_resources/_vendor/zipp.py b/.venv/Lib/site-packages/pkg_resources/_vendor/zipp.py
new file mode 100644
index 0000000000000000000000000000000000000000..26b723c1fd3e25740e0268b8c9b50905c58c3d4a
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/_vendor/zipp.py
@@ -0,0 +1,329 @@
+import io
+import posixpath
+import zipfile
+import itertools
+import contextlib
+import sys
+import pathlib
+
+if sys.version_info < (3, 7):
+    from collections import OrderedDict
+else:
+    OrderedDict = dict
+
+
+__all__ = ['Path']
+
+
+def _parents(path):
+    """
+    Given a path with elements separated by
+    posixpath.sep, generate all parents of that path.
+
+    >>> list(_parents('b/d'))
+    ['b']
+    >>> list(_parents('/b/d/'))
+    ['/b']
+    >>> list(_parents('b/d/f/'))
+    ['b/d', 'b']
+    >>> list(_parents('b'))
+    []
+    >>> list(_parents(''))
+    []
+    """
+    return itertools.islice(_ancestry(path), 1, None)
+
+
+def _ancestry(path):
+    """
+    Given a path with elements separated by
+    posixpath.sep, generate all elements of that path
+
+    >>> list(_ancestry('b/d'))
+    ['b/d', 'b']
+    >>> list(_ancestry('/b/d/'))
+    ['/b/d', '/b']
+    >>> list(_ancestry('b/d/f/'))
+    ['b/d/f', 'b/d', 'b']
+    >>> list(_ancestry('b'))
+    ['b']
+    >>> list(_ancestry(''))
+    []
+    """
+    path = path.rstrip(posixpath.sep)
+    while path and path != posixpath.sep:
+        yield path
+        path, tail = posixpath.split(path)
+
+
+_dedupe = OrderedDict.fromkeys
+"""Deduplicate an iterable in original order"""
+
+
+def _difference(minuend, subtrahend):
+    """
+    Return items in minuend not in subtrahend, retaining order
+    with O(1) lookup.
+    """
+    return itertools.filterfalse(set(subtrahend).__contains__, minuend)
+
+
+class CompleteDirs(zipfile.ZipFile):
+    """
+    A ZipFile subclass that ensures that implied directories
+    are always included in the namelist.
+    """
+
+    @staticmethod
+    def _implied_dirs(names):
+        parents = itertools.chain.from_iterable(map(_parents, names))
+        as_dirs = (p + posixpath.sep for p in parents)
+        return _dedupe(_difference(as_dirs, names))
+
+    def namelist(self):
+        names = super(CompleteDirs, self).namelist()
+        return names + list(self._implied_dirs(names))
+
+    def _name_set(self):
+        return set(self.namelist())
+
+    def resolve_dir(self, name):
+        """
+        If the name represents a directory, return that name
+        as a directory (with the trailing slash).
+        """
+        names = self._name_set()
+        dirname = name + '/'
+        dir_match = name not in names and dirname in names
+        return dirname if dir_match else name
+
+    @classmethod
+    def make(cls, source):
+        """
+        Given a source (filename or zipfile), return an
+        appropriate CompleteDirs subclass.
+        """
+        if isinstance(source, CompleteDirs):
+            return source
+
+        if not isinstance(source, zipfile.ZipFile):
+            return cls(_pathlib_compat(source))
+
+        # Only allow for FastLookup when supplied zipfile is read-only
+        if 'r' not in source.mode:
+            cls = CompleteDirs
+
+        source.__class__ = cls
+        return source
+
+
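+# Hedged illustration: for an archive that stores only 'b/c.txt', the subclass
+# also reports the implied directory entry::
+#
+#     zf = zipfile.ZipFile(io.BytesIO(data))  # 'data': hypothetical archive bytes
+#     cd = CompleteDirs.make(zf)
+#     cd.namelist()  # -> ['b/c.txt', 'b/']
+
+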
+class FastLookup(CompleteDirs):
+    """
+    ZipFile subclass to ensure implicit
+    dirs exist and are resolved rapidly.
+    """
+
+    def namelist(self):
+        with contextlib.suppress(AttributeError):
+            return self.__names
+        self.__names = super(FastLookup, self).namelist()
+        return self.__names
+
+    def _name_set(self):
+        with contextlib.suppress(AttributeError):
+            return self.__lookup
+        self.__lookup = super(FastLookup, self)._name_set()
+        return self.__lookup
+
+
+def _pathlib_compat(path):
+    """
+    For path-like objects, convert to a filename for compatibility
+    on Python 3.6.1 and earlier.
+    """
+    try:
+        return path.__fspath__()
+    except AttributeError:
+        return str(path)
+
+
+class Path:
+    """
+    A pathlib-compatible interface for zip files.
+
+    Consider a zip file with this structure::
+
+        .
+        ├── a.txt
+        └── b
+            ├── c.txt
+            └── d
+                └── e.txt
+
+    >>> data = io.BytesIO()
+    >>> zf = zipfile.ZipFile(data, 'w')
+    >>> zf.writestr('a.txt', 'content of a')
+    >>> zf.writestr('b/c.txt', 'content of c')
+    >>> zf.writestr('b/d/e.txt', 'content of e')
+    >>> zf.filename = 'mem/abcde.zip'
+
+    Path accepts the zipfile object itself or a filename
+
+    >>> root = Path(zf)
+
+    From there, several path operations are available.
+
+    Directory iteration (including the zip file itself):
+
+    >>> a, b = root.iterdir()
+    >>> a
+    Path('mem/abcde.zip', 'a.txt')
+    >>> b
+    Path('mem/abcde.zip', 'b/')
+
+    name property:
+
+    >>> b.name
+    'b'
+
+    join with divide operator:
+
+    >>> c = b / 'c.txt'
+    >>> c
+    Path('mem/abcde.zip', 'b/c.txt')
+    >>> c.name
+    'c.txt'
+
+    Read text:
+
+    >>> c.read_text()
+    'content of c'
+
+    existence:
+
+    >>> c.exists()
+    True
+    >>> (b / 'missing.txt').exists()
+    False
+
+    Coercion to string:
+
+    >>> import os
+    >>> str(c).replace(os.sep, posixpath.sep)
+    'mem/abcde.zip/b/c.txt'
+
+    At the root, ``name``, ``filename``, and ``parent``
+    resolve to the zipfile. Note these attributes are not
+    valid and will raise a ``ValueError`` if the zipfile
+    has no filename.
+
+    >>> root.name
+    'abcde.zip'
+    >>> str(root.filename).replace(os.sep, posixpath.sep)
+    'mem/abcde.zip'
+    >>> str(root.parent)
+    'mem'
+    """
+
+    __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
+
+    def __init__(self, root, at=""):
+        """
+        Construct a Path from a ZipFile or filename.
+
+        Note: When the source is an existing ZipFile object,
+        its type (__class__) will be mutated to a
+        specialized type. If the caller wishes to retain the
+        original type, the caller should either create a
+        separate ZipFile object or pass a filename.
+        """
+        self.root = FastLookup.make(root)
+        self.at = at
+
+    def open(self, mode='r', *args, pwd=None, **kwargs):
+        """
+        Open this entry as text or binary following the semantics
+        of ``pathlib.Path.open()`` by passing arguments through
+        to io.TextIOWrapper().
+        """
+        if self.is_dir():
+            raise IsADirectoryError(self)
+        zip_mode = mode[0]
+        if not self.exists() and zip_mode == 'r':
+            raise FileNotFoundError(self)
+        stream = self.root.open(self.at, zip_mode, pwd=pwd)
+        if 'b' in mode:
+            if args or kwargs:
+                raise ValueError("encoding args invalid for binary operation")
+            return stream
+        return io.TextIOWrapper(stream, *args, **kwargs)
+
+    @property
+    def name(self):
+        return pathlib.Path(self.at).name or self.filename.name
+
+    @property
+    def suffix(self):
+        return pathlib.Path(self.at).suffix or self.filename.suffix
+
+    @property
+    def suffixes(self):
+        return pathlib.Path(self.at).suffixes or self.filename.suffixes
+
+    @property
+    def stem(self):
+        return pathlib.Path(self.at).stem or self.filename.stem
+
+    @property
+    def filename(self):
+        return pathlib.Path(self.root.filename).joinpath(self.at)
+
+    def read_text(self, *args, **kwargs):
+        with self.open('r', *args, **kwargs) as strm:
+            return strm.read()
+
+    def read_bytes(self):
+        with self.open('rb') as strm:
+            return strm.read()
+
+    def _is_child(self, path):
+        return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
+
+    def _next(self, at):
+        return self.__class__(self.root, at)
+
+    def is_dir(self):
+        return not self.at or self.at.endswith("/")
+
+    def is_file(self):
+        return self.exists() and not self.is_dir()
+
+    def exists(self):
+        return self.at in self.root._name_set()
+
+    def iterdir(self):
+        if not self.is_dir():
+            raise ValueError("Can't listdir a file")
+        subs = map(self._next, self.root.namelist())
+        return filter(self._is_child, subs)
+
+    def __str__(self):
+        return posixpath.join(self.root.filename, self.at)
+
+    def __repr__(self):
+        return self.__repr.format(self=self)
+
+    def joinpath(self, *other):
+        next = posixpath.join(self.at, *map(_pathlib_compat, other))
+        return self._next(self.root.resolve_dir(next))
+
+    __truediv__ = joinpath
+
+    @property
+    def parent(self):
+        if not self.at:
+            return self.filename.parent
+        parent_at = posixpath.dirname(self.at.rstrip('/'))
+        if parent_at:
+            parent_at += '/'
+        return self._next(parent_at)
diff --git a/.venv/Lib/site-packages/pkg_resources/extern/__init__.py b/.venv/Lib/site-packages/pkg_resources/extern/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..948bcc6094d13684a39abd17b9519b2b78ca2813
--- /dev/null
+++ b/.venv/Lib/site-packages/pkg_resources/extern/__init__.py
@@ -0,0 +1,80 @@
+import importlib.util
+import sys
+
+
+class VendorImporter:
+    """
+    A PEP 302 meta path importer for finding optionally-vendored
+    or otherwise naturally-installed packages from root_name.
+    """
+
+    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+        self.root_name = root_name
+        self.vendored_names = set(vendored_names)
+        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+    @property
+    def search_path(self):
+        """
+        Search first in the vendor package, then fall back to the natural package.
+        """
+        yield self.vendor_pkg + '.'
+        yield ''
+
+    def _module_matches_namespace(self, fullname):
+        """Figure out if the target module is vendored."""
+        root, base, target = fullname.partition(self.root_name + '.')
+        return not root and any(map(target.startswith, self.vendored_names))
+
+    def load_module(self, fullname):
+        """
+        Iterate over the search path to locate and load fullname.
+        """
+        root, base, target = fullname.partition(self.root_name + '.')
+        for prefix in self.search_path:
+            try:
+                extant = prefix + target
+                __import__(extant)
+                mod = sys.modules[extant]
+                sys.modules[fullname] = mod
+                return mod
+            except ImportError:
+                pass
+        else:
+            raise ImportError(
+                "The '{target}' package is required; "
+                "normally this is bundled with this package so if you get "
+                "this warning, consult the packager of your "
+                "distribution.".format(**locals())
+            )
+
+    def create_module(self, spec):
+        return self.load_module(spec.name)
+
+    def exec_module(self, module):
+        pass
+
+    def find_spec(self, fullname, path=None, target=None):
+        """Return a module spec for vendored names."""
+        return (
+            importlib.util.spec_from_loader(fullname, self)
+            if self._module_matches_namespace(fullname)
+            else None
+        )
+
+    def install(self):
+        """
+        Install this importer into sys.meta_path if not already present.
+        """
+        if self not in sys.meta_path:
+            sys.meta_path.append(self)
+
+
+names = (
+    'packaging',
+    'platformdirs',
+    'jaraco',
+    'importlib_resources',
+    'more_itertools',
+)
+VendorImporter(__name__, names).install()
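+
+# A hedged sketch of the effect: once installed, an import such as
+#
+#     from pkg_resources.extern import packaging
+#
+# resolves to ``pkg_resources._vendor.packaging`` when the vendored copy is
+# present, and otherwise falls back to a top-level ``packaging`` installation.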
diff --git a/.venv/Lib/site-packages/setuptools/__init__.py b/.venv/Lib/site-packages/setuptools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..563ca1c4baf53ed4a30008fb76e7de455afc7f96
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/__init__.py
@@ -0,0 +1,266 @@
+"""Extensions to the 'distutils' for large or complex distributions"""
+
+import functools
+import os
+import re
+
+import _distutils_hack.override  # noqa: F401
+import distutils.core
+from distutils.errors import DistutilsOptionError
+from distutils.util import convert_path as _convert_path
+
+from . import logging, monkey
+from . import version as _version_module
+from .depends import Require
+from .discovery import PackageFinder, PEP420PackageFinder
+from .dist import Distribution
+from .extension import Extension
+from .warnings import SetuptoolsDeprecationWarning
+
+__all__ = [
+    'setup',
+    'Distribution',
+    'Command',
+    'Extension',
+    'Require',
+    'SetuptoolsDeprecationWarning',
+    'find_packages',
+    'find_namespace_packages',
+]
+
+__version__ = _version_module.__version__
+
+bootstrap_install_from = None
+
+
+find_packages = PackageFinder.find
+find_namespace_packages = PEP420PackageFinder.find
+
+
+def _install_setup_requires(attrs):
+    # Note: do not use `setuptools.Distribution` directly, as
+    # our PEP 517 backend patches `distutils.core.Distribution`.
+    class MinimalDistribution(distutils.core.Distribution):
+        """
+        A minimal version of a distribution for supporting the
+        fetch_build_eggs interface.
+        """
+
+        def __init__(self, attrs):
+            _incl = 'dependency_links', 'setup_requires'
+            filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}
+            super().__init__(filtered)
+            # Prevent accidentally triggering discovery with incomplete set of attrs
+            self.set_defaults._disable()
+
+        def _get_project_config_files(self, filenames=None):
+            """Ignore ``pyproject.toml``, they are not related to setup_requires"""
+            try:
+                cfg, toml = super()._split_standard_project_metadata(filenames)
+                return cfg, ()
+            except Exception:
+                return filenames, ()
+
+        def finalize_options(self):
+            """
+            Disable finalize_options to avoid building the working set.
+            Ref #2158.
+            """
+
+    dist = MinimalDistribution(attrs)
+
+    # Honor setup.cfg's options.
+    dist.parse_config_files(ignore_option_errors=True)
+    if dist.setup_requires:
+        _fetch_build_eggs(dist)
+
+
+def _fetch_build_eggs(dist):
+    try:
+        dist.fetch_build_eggs(dist.setup_requires)
+    except Exception as ex:
+        msg = """
+        It is possible a package already installed in your system
+        contains a version that is invalid according to PEP 440.
+        You can try `pip install --use-pep517` as a workaround for this problem,
+        or rely on a new virtual environment.
+
+        If the problem refers to a package that is not installed yet,
+        please contact that package's maintainers or distributors.
+        """
+        if "InvalidVersion" in ex.__class__.__name__:
+            if hasattr(ex, "add_note"):
+                ex.add_note(msg)  # PEP 678
+            else:
+                dist.announce(f"\n{msg}\n")
+        raise
+
+
+def setup(**attrs):
+    # Make sure we have any requirements needed to interpret 'attrs'.
+    logging.configure()
+    _install_setup_requires(attrs)
+    return distutils.core.setup(**attrs)
+
+
+setup.__doc__ = distutils.core.setup.__doc__
+
+
+_Command = monkey.get_unpatched(distutils.core.Command)
+
+
+class Command(_Command):
+    """
+    Setuptools internal actions are organized using a *command design pattern*.
+    This means that each action (or group of closely related actions) executed during
+    the build should be implemented as a ``Command`` subclass.
+
+    These commands are abstractions and do not necessarily correspond to a command that
+    can (or should) be executed via a terminal, in a CLI fashion (although historically
+    they would).
+
+    When creating a new command from scratch, custom defined classes **SHOULD** inherit
+    from ``setuptools.Command`` and implement a few mandatory methods.
+    These mandatory methods are listed below:
+
+    .. method:: initialize_options(self)
+
+        Set (or reset) all options/attributes/caches used by the command
+        to their default values. Note that these values may be overwritten during
+        the build.
+
+    .. method:: finalize_options(self)
+
+        Set final values for all options/attributes used by the command.
+        Most of the time, each option/attribute/cache should only be set if it does not
+        have any value yet (e.g. ``if self.attr is None: self.attr = val``).
+
+    .. method:: run(self)
+
+        Execute the actions intended by the command.
+        (Side effects **SHOULD** only take place when ``run`` is executed,
+        for example, creating new files or writing to the terminal output).
+
+    A useful analogy for command classes is to think of them as subroutines with local
+    variables called "options".  The options are "declared" in ``initialize_options()``
+    and "defined" (given their final values, aka "finalized") in ``finalize_options()``,
+    both of which must be defined by every command class. The "body" of the subroutine
+    (where it does all the work) is the ``run()`` method.
+    Between ``initialize_options()`` and ``finalize_options()``, ``setuptools`` may set
+    the values for options/attributes based on user's input (or circumstance),
+    which means that the implementation should be careful to not overwrite values in
+    ``finalize_options`` unless necessary.
+
+    Please note that other commands (or other parts of setuptools) may also overwrite
+    the values of the command's options/attributes multiple times during the build
+    process.
+    Therefore it is important to consistently implement ``initialize_options()`` and
+    ``finalize_options()``. For example, all derived attributes (or attributes that
+    depend on the value of other attributes) **SHOULD** be recomputed in
+    ``finalize_options``.
+
+    When overwriting existing commands, custom defined classes **MUST** abide by the
+    same APIs implemented by the original class. They also **SHOULD** inherit from the
+    original class.
+    """
+
+    command_consumes_arguments = False
+
+    def __init__(self, dist, **kw):
+        """
+        Construct the command for dist, updating
+        vars(self) with any keyword parameters.
+        """
+        super().__init__(dist)
+        vars(self).update(kw)
+
+    def _ensure_stringlike(self, option, what, default=None):
+        val = getattr(self, option)
+        if val is None:
+            setattr(self, option, default)
+            return default
+        elif not isinstance(val, str):
+            raise DistutilsOptionError(
+                "'%s' must be a %s (got `%s`)" % (option, what, val)
+            )
+        return val
+
+    def ensure_string_list(self, option):
+        r"""Ensure that 'option' is a list of strings.  If 'option' is
+        currently a string, we split it either on /,\s*/ or /\s+/, so
+        "foo bar baz", "foo,bar,baz", and "foo,   bar baz" all become
+        ["foo", "bar", "baz"].
+
+        ..
+           TODO: This method seems to be similar to the one in ``distutils.cmd``
+           Probably it is just here for backward compatibility with old Python versions?
+
+        :meta private:
+        """
+        val = getattr(self, option)
+        if val is None:
+            return
+        elif isinstance(val, str):
+            setattr(self, option, re.split(r',\s*|\s+', val))
+        else:
+            if isinstance(val, list):
+                ok = all(isinstance(v, str) for v in val)
+            else:
+                ok = False
+            if not ok:
+                raise DistutilsOptionError(
+                    "'%s' must be a list of strings (got %r)" % (option, val)
+                )
+
+    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
+        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
+        vars(cmd).update(kw)
+        return cmd
+
+
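+# A minimal custom-command sketch (hypothetical names, illustration only),
+# following the contract described in the docstring above::
+#
+#     class clean_docs(Command):
+#         description = "remove generated documentation"
+#
+#         def initialize_options(self):
+#             self.build_dir = None  # declared (and reset) here ...
+#
+#         def finalize_options(self):
+#             if self.build_dir is None:  # ... given its final value here
+#                 self.build_dir = 'docs/_build'
+#
+#         def run(self):
+#             ...  # side effects happen only in run()
+
+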
+def _find_all_simple(path):
+    """
+    Find all files under 'path'
+    """
+    results = (
+        os.path.join(base, file)
+        for base, dirs, files in os.walk(path, followlinks=True)
+        for file in files
+    )
+    return filter(os.path.isfile, results)
+
+
+def findall(dir=os.curdir):
+    """
+    Find all files under 'dir' and return the list of full filenames.
+    Unless dir is '.', return full filenames with dir prepended.
+    """
+    files = _find_all_simple(dir)
+    if dir == os.curdir:
+        make_rel = functools.partial(os.path.relpath, start=dir)
+        files = map(make_rel, files)
+    return list(files)
+
+
+@functools.wraps(_convert_path)
+def convert_path(pathname):
+    SetuptoolsDeprecationWarning.emit(
+        "Access to implementation detail",
+        """
+        The function `convert_path` is not provided by setuptools itself,
+        and therefore not part of the public API.
+
+        Its direct usage by 3rd-party packages is considered improper and the function
+        may be removed in the future.
+        """,
+        due_date=(2023, 12, 13),  # initial deprecation 2022-03-25, see #3201
+    )
+    return _convert_path(pathname)
+
+
+class sic(str):
+    """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
+
+
+# Apply monkey patches
+monkey.patch_all()
diff --git a/.venv/Lib/site-packages/setuptools/_core_metadata.py b/.venv/Lib/site-packages/setuptools/_core_metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1d41c66804da741de6a4f28b02ea9cb058f9c72
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/_core_metadata.py
@@ -0,0 +1,258 @@
+"""
+Handling of Core Metadata for Python packages (including reading and writing).
+
+See: https://packaging.python.org/en/latest/specifications/core-metadata/
+"""
+import os
+import stat
+import textwrap
+from email import message_from_file
+from email.message import Message
+from tempfile import NamedTemporaryFile
+from typing import Optional, List
+
+from distutils.util import rfc822_escape
+
+from . import _normalization
+from .extern.packaging.markers import Marker
+from .extern.packaging.requirements import Requirement
+from .extern.packaging.version import Version
+from .warnings import SetuptoolsDeprecationWarning
+
+
+def get_metadata_version(self):
+    mv = getattr(self, 'metadata_version', None)
+    if mv is None:
+        mv = Version('2.1')
+        self.metadata_version = mv
+    return mv
+
+
+def rfc822_unescape(content: str) -> str:
+    """Reverse RFC-822 escaping by removing leading whitespaces from content."""
+    lines = content.splitlines()
+    if len(lines) == 1:
+        return lines[0].lstrip()
+    return '\n'.join((lines[0].lstrip(), textwrap.dedent('\n'.join(lines[1:]))))
+
+
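+# Hedged round-trip illustration with distutils' escaping counterpart::
+#
+#     rfc822_unescape(rfc822_escape('line1\nline2'))  # -> 'line1\nline2'
+
+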
+def _read_field_from_msg(msg: Message, field: str) -> Optional[str]:
+    """Read Message header field."""
+    value = msg[field]
+    if value == 'UNKNOWN':
+        return None
+    return value
+
+
+def _read_field_unescaped_from_msg(msg: Message, field: str) -> Optional[str]:
+    """Read Message header field and apply rfc822_unescape."""
+    value = _read_field_from_msg(msg, field)
+    if value is None:
+        return value
+    return rfc822_unescape(value)
+
+
+def _read_list_from_msg(msg: Message, field: str) -> Optional[List[str]]:
+    """Read Message header field and return all results as list."""
+    values = msg.get_all(field, None)
+    if values == []:
+        return None
+    return values
+
+
+def _read_payload_from_msg(msg: Message) -> Optional[str]:
+    value = msg.get_payload().strip()
+    if value == 'UNKNOWN' or not value:
+        return None
+    return value
+
+
+def read_pkg_file(self, file):
+    """Reads the metadata values from a file object."""
+    msg = message_from_file(file)
+
+    self.metadata_version = Version(msg['metadata-version'])
+    self.name = _read_field_from_msg(msg, 'name')
+    self.version = _read_field_from_msg(msg, 'version')
+    self.description = _read_field_from_msg(msg, 'summary')
+    # we are filling author only.
+    self.author = _read_field_from_msg(msg, 'author')
+    self.maintainer = None
+    self.author_email = _read_field_from_msg(msg, 'author-email')
+    self.maintainer_email = None
+    self.url = _read_field_from_msg(msg, 'home-page')
+    self.download_url = _read_field_from_msg(msg, 'download-url')
+    self.license = _read_field_unescaped_from_msg(msg, 'license')
+
+    self.long_description = _read_field_unescaped_from_msg(msg, 'description')
+    if self.long_description is None and self.metadata_version >= Version('2.1'):
+        self.long_description = _read_payload_from_msg(msg)
+    self.description = _read_field_from_msg(msg, 'summary')
+
+    if 'keywords' in msg:
+        self.keywords = _read_field_from_msg(msg, 'keywords').split(',')
+
+    self.platforms = _read_list_from_msg(msg, 'platform')
+    self.classifiers = _read_list_from_msg(msg, 'classifier')
+
+    # PEP 314 - these fields only exist in 1.1
+    if self.metadata_version == Version('1.1'):
+        self.requires = _read_list_from_msg(msg, 'requires')
+        self.provides = _read_list_from_msg(msg, 'provides')
+        self.obsoletes = _read_list_from_msg(msg, 'obsoletes')
+    else:
+        self.requires = None
+        self.provides = None
+        self.obsoletes = None
+
+    self.license_files = _read_list_from_msg(msg, 'license-file')
+
+
+def single_line(val):
+    """
+    Quick and dirty validation for Summary pypa/setuptools#1390.
+    """
+    if '\n' in val:
+        # TODO: Replace with `raise ValueError("newlines not allowed")`
+        # after reviewing #2893.
+        msg = "newlines are not allowed in `summary` and will break in the future"
+        SetuptoolsDeprecationWarning.emit("Invalid config.", msg)
+        # due_date is undefined. Controversial change; there was a lot of pushback.
+        val = val.strip().split('\n')[0]
+    return val
+
+
+def write_pkg_info(self, base_dir):
+    """Write the PKG-INFO file into the release tree."""
+    temp = ""
+    final = os.path.join(base_dir, 'PKG-INFO')
+    try:
+        # Use a temporary file while writing to avoid race conditions
+        # (e.g. `importlib.metadata` reading `.egg-info/PKG-INFO`):
+        with NamedTemporaryFile("w", encoding="utf-8", dir=base_dir, delete=False) as f:
+            temp = f.name
+            self.write_pkg_file(f)
+        permissions = stat.S_IMODE(os.lstat(temp).st_mode)
+        os.chmod(temp, permissions | stat.S_IRGRP | stat.S_IROTH)
+        os.replace(temp, final)  # atomic operation.
+    finally:
+        if temp and os.path.exists(temp):
+            os.remove(temp)
+
+
+# Based on Python 3.5 version
+def write_pkg_file(self, file):  # noqa: C901  # is too complex (14)  # FIXME
+    """Write the PKG-INFO format data to a file object."""
+    version = self.get_metadata_version()
+
+    def write_field(key, value):
+        file.write("%s: %s\n" % (key, value))
+
+    write_field('Metadata-Version', str(version))
+    write_field('Name', self.get_name())
+    write_field('Version', self.get_version())
+
+    summary = self.get_description()
+    if summary:
+        write_field('Summary', single_line(summary))
+
+    optional_fields = (
+        ('Home-page', 'url'),
+        ('Download-URL', 'download_url'),
+        ('Author', 'author'),
+        ('Author-email', 'author_email'),
+        ('Maintainer', 'maintainer'),
+        ('Maintainer-email', 'maintainer_email'),
+    )
+
+    for field, attr in optional_fields:
+        attr_val = getattr(self, attr, None)
+        if attr_val is not None:
+            write_field(field, attr_val)
+
+    license = self.get_license()
+    if license:
+        write_field('License', rfc822_escape(license))
+
+    for project_url in self.project_urls.items():
+        write_field('Project-URL', '%s, %s' % project_url)
+
+    keywords = ','.join(self.get_keywords())
+    if keywords:
+        write_field('Keywords', keywords)
+
+    platforms = self.get_platforms() or []
+    for platform in platforms:
+        write_field('Platform', platform)
+
+    self._write_list(file, 'Classifier', self.get_classifiers())
+
+    # PEP 314
+    self._write_list(file, 'Requires', self.get_requires())
+    self._write_list(file, 'Provides', self.get_provides())
+    self._write_list(file, 'Obsoletes', self.get_obsoletes())
+
+    # Setuptools specific for PEP 345
+    if hasattr(self, 'python_requires'):
+        write_field('Requires-Python', self.python_requires)
+
+    # PEP 566
+    if self.long_description_content_type:
+        write_field('Description-Content-Type', self.long_description_content_type)
+
+    self._write_list(file, 'License-File', self.license_files or [])
+    _write_requirements(self, file)
+
+    long_description = self.get_long_description()
+    if long_description:
+        file.write("\n%s" % long_description)
+        if not long_description.endswith("\n"):
+            file.write("\n")
+
+
+def _write_requirements(self, file):
+    for req in self._normalized_install_requires:
+        file.write(f"Requires-Dist: {req}\n")
+
+    processed_extras = {}
+    for augmented_extra, reqs in self._normalized_extras_require.items():
+        # Historically, setuptools allows "augmented extras": `:`
+        unsafe_extra, _, condition = augmented_extra.partition(":")
+        unsafe_extra = unsafe_extra.strip()
+        extra = _normalization.safe_extra(unsafe_extra)
+
+        if extra:
+            _write_provides_extra(file, processed_extras, extra, unsafe_extra)
+        for req in reqs:
+            r = _include_extra(req, extra, condition.strip())
+            file.write(f"Requires-Dist: {r}\n")
+
+    return processed_extras
+
+
+def _include_extra(req: str, extra: str, condition: str) -> Requirement:
+    r = Requirement(req)
+    parts = (
+        f"({r.marker})" if r.marker else None,
+        f"({condition})" if condition else None,
+        f"extra == {extra!r}" if extra else None,
+    )
+    r.marker = Marker(" and ".join(x for x in parts if x))
+    return r
+
+
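+# Hedged illustration of the marker combination above (the exact spelling of
+# the resulting marker is delegated to ``packaging``)::
+#
+#     r = _include_extra("pkg>=1.0; os_name == 'nt'", 'dev',
+#                        'python_version >= "3.8"')
+#     # r.marker joins all three clauses with "and":
+#     #   (os_name == 'nt') and (python_version >= "3.8") and extra == 'dev'
+
+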
+def _write_provides_extra(file, processed_extras, safe, unsafe):
+    previous = processed_extras.get(safe)
+    if previous == unsafe:
+        SetuptoolsDeprecationWarning.emit(
+            'Ambiguity during "extra" normalization for dependencies.',
+            f"""
+            {previous!r} and {unsafe!r} normalize to the same value:\n
+                {safe!r}\n
+            In future versions, setuptools might halt the build process.
+            """,
+            see_url="https://peps.python.org/pep-0685/",
+        )
+    else:
+        processed_extras[safe] = unsafe
+        file.write(f"Provides-Extra: {safe}\n")
diff --git a/.venv/Lib/site-packages/setuptools/_entry_points.py b/.venv/Lib/site-packages/setuptools/_entry_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..747a69067e3b1a5762536cff972b773ec3cf573b
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/_entry_points.py
@@ -0,0 +1,88 @@
+import functools
+import operator
+import itertools
+
+from .errors import OptionError
+from .extern.jaraco.text import yield_lines
+from .extern.jaraco.functools import pass_none
+from ._importlib import metadata
+from ._itertools import ensure_unique
+from .extern.more_itertools import consume
+
+
+def ensure_valid(ep):
+    """
+    Exercise one of the dynamic properties to trigger
+    the pattern match.
+    """
+    try:
+        ep.extras
+    except AttributeError as ex:
+        msg = (
+            f"Problems to parse {ep}.\nPlease ensure entry-point follows the spec: "
+            "https://packaging.python.org/en/latest/specifications/entry-points/"
+        )
+        raise OptionError(msg) from ex
+
+
+def load_group(value, group):
+    """
+    Given a value of an entry point or series of entry points,
+    return each as an EntryPoint.
+    """
+    # normalize to a single sequence of lines
+    lines = yield_lines(value)
+    text = f'[{group}]\n' + '\n'.join(lines)
+    return metadata.EntryPoints._from_text(text)
+
+
+def by_group_and_name(ep):
+    return ep.group, ep.name
+
+
+def validate(eps: metadata.EntryPoints):
+    """
+    Ensure entry points are unique by group and name and validate each.
+    """
+    consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))
+    return eps
+
+
+@functools.singledispatch
+def load(eps):
+    """
+    Given a Distribution.entry_points, produce EntryPoints.
+    """
+    groups = itertools.chain.from_iterable(
+        load_group(value, group) for group, value in eps.items()
+    )
+    return validate(metadata.EntryPoints(groups))
+
+
+@load.register(str)
+def _(eps):
+    r"""
+    >>> ep, = load('[console_scripts]\nfoo=bar')
+    >>> ep.group
+    'console_scripts'
+    >>> ep.name
+    'foo'
+    >>> ep.value
+    'bar'
+    """
+    return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))
+
+
+load.register(type(None), lambda x: x)
+
+
+@pass_none
+def render(eps: metadata.EntryPoints):
+    by_group = operator.attrgetter('group')
+    groups = itertools.groupby(sorted(eps, key=by_group), by_group)
+
+    return '\n'.join(f'[{group}]\n{render_items(items)}\n' for group, items in groups)
+
+
+def render_items(eps):
+    return '\n'.join(f'{ep.name} = {ep.value}' for ep in sorted(eps))
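+
+
+# Hedged illustration: ``render(load('[console_scripts]\nfoo=bar'))`` yields
+# the INI-style text stored in ``entry_points.txt``::
+#
+#     [console_scripts]
+#     foo = bar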
diff --git a/.venv/Lib/site-packages/setuptools/_imp.py b/.venv/Lib/site-packages/setuptools/_imp.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d4ead0eb036be85c7681c74ef933969de0a6ceb
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/_imp.py
@@ -0,0 +1,88 @@
+"""
+Re-implementation of find_module and get_frozen_object
+from the deprecated imp module.
+"""
+
+import os
+import importlib.util
+import importlib.machinery
+
+from importlib.util import module_from_spec
+
+
+PY_SOURCE = 1
+PY_COMPILED = 2
+C_EXTENSION = 3
+C_BUILTIN = 6
+PY_FROZEN = 7
+
+
+def find_spec(module, paths):
+    finder = (
+        importlib.machinery.PathFinder().find_spec
+        if isinstance(paths, list)
+        else importlib.util.find_spec
+    )
+    return finder(module, paths)
+
+
+def find_module(module, paths=None):
+    """Just like 'imp.find_module()', but with package support"""
+    spec = find_spec(module, paths)
+    if spec is None:
+        raise ImportError("Can't find %s" % module)
+    if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
+        spec = importlib.util.spec_from_loader('__init__.py', spec.loader)
+
+    kind = -1
+    file = None
+    static = isinstance(spec.loader, type)
+    if (
+        spec.origin == 'frozen'
+        or static
+        and issubclass(spec.loader, importlib.machinery.FrozenImporter)
+    ):
+        kind = PY_FROZEN
+        path = None  # imp compatibility
+        suffix = mode = ''  # imp compatibility
+    elif (
+        spec.origin == 'built-in'
+        or static
+        and issubclass(spec.loader, importlib.machinery.BuiltinImporter)
+    ):
+        kind = C_BUILTIN
+        path = None  # imp compatibility
+        suffix = mode = ''  # imp compatibility
+    elif spec.has_location:
+        path = spec.origin
+        suffix = os.path.splitext(path)[1]
+        mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'
+
+        if suffix in importlib.machinery.SOURCE_SUFFIXES:
+            kind = PY_SOURCE
+        elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
+            kind = PY_COMPILED
+        elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
+            kind = C_EXTENSION
+
+        if kind in {PY_SOURCE, PY_COMPILED}:
+            file = open(path, mode)
+    else:
+        path = None
+        suffix = mode = ''
+
+    return file, path, (suffix, mode, kind)
+
+
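+# A hedged usage sketch mirroring the old ``imp.find_module`` contract::
+#
+#     file, path, (suffix, mode, kind) = find_module('os')
+#     # kind == PY_SOURCE for a plain .py module; the caller is responsible
+#     # for closing ``file`` when one is returned.
+
+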
+def get_frozen_object(module, paths=None):
+    spec = find_spec(module, paths)
+    if not spec:
+        raise ImportError("Can't find %s" % module)
+    return spec.loader.get_code(module)
+
+
+def get_module(module, paths, info):
+    spec = find_spec(module, paths)
+    if not spec:
+        raise ImportError("Can't find %s" % module)
+    return module_from_spec(spec)
diff --git a/.venv/Lib/site-packages/setuptools/_importlib.py b/.venv/Lib/site-packages/setuptools/_importlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd2b01e2b59b0d8798aab0df27ee4efbd4f87331
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/_importlib.py
@@ -0,0 +1,51 @@
+import sys
+
+
+def disable_importlib_metadata_finder(metadata):
+    """
+    Ensure importlib_metadata doesn't provide older, incompatible
+    Distributions.
+
+    Workaround for #3102.
+    """
+    try:
+        import importlib_metadata
+    except ImportError:
+        return
+    except AttributeError:
+        from .warnings import SetuptoolsWarning
+
+        SetuptoolsWarning.emit(
+            "Incompatibility problem.",
+            """
+            `importlib-metadata` version is incompatible with `setuptools`.
+            This problem is likely to be solved by installing an updated version of
+            `importlib-metadata`.
+            """,
+            see_url="https://github.com/python/importlib_metadata/issues/396",
+        )  # Ensure a descriptive message is shown.
+        raise  # This exception can be suppressed by _distutils_hack
+
+    if importlib_metadata is metadata:
+        return
+    to_remove = [
+        ob
+        for ob in sys.meta_path
+        if isinstance(ob, importlib_metadata.MetadataPathFinder)
+    ]
+    for item in to_remove:
+        sys.meta_path.remove(item)
+
+
+if sys.version_info < (3, 10):
+    from setuptools.extern import importlib_metadata as metadata
+
+    disable_importlib_metadata_finder(metadata)
+else:
+    import importlib.metadata as metadata  # noqa: F401
+
+
+if sys.version_info < (3, 9):
+    from setuptools.extern import importlib_resources as resources
+else:
+    import importlib.resources as resources  # noqa: F401
diff --git a/.venv/Lib/site-packages/setuptools/_itertools.py b/.venv/Lib/site-packages/setuptools/_itertools.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8bf6d210aec669b6b948942eda1db953e8725fa
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/_itertools.py
@@ -0,0 +1,23 @@
+from setuptools.extern.more_itertools import consume  # noqa: F401
+
+
+# copied from jaraco.itertools 6.1
+def ensure_unique(iterable, key=lambda x: x):
+    """
+    Wrap an iterable to raise a ValueError if non-unique values are encountered.
+
+    >>> list(ensure_unique('abc'))
+    ['a', 'b', 'c']
+    >>> consume(ensure_unique('abca'))
+    Traceback (most recent call last):
+    ...
+    ValueError: Duplicate element 'a' encountered.
+    """
+    seen = set()
+    seen_add = seen.add
+    for element in iterable:
+        k = key(element)
+        if k in seen:
+            raise ValueError(f"Duplicate element {element!r} encountered.")
+        seen_add(k)
+        yield element
diff --git a/.venv/Lib/site-packages/setuptools/_normalization.py b/.venv/Lib/site-packages/setuptools/_normalization.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e94e662ef86bbb7dd72a34ec8737a2b7ca6406d
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/_normalization.py
@@ -0,0 +1,125 @@
+"""
+Helpers for normalization as expected in wheel/sdist/module file names
+and core metadata
+"""
+import re
+from pathlib import Path
+from typing import Union
+
+from .extern import packaging
+from .warnings import SetuptoolsDeprecationWarning
+
+_Path = Union[str, Path]
+
+# https://packaging.python.org/en/latest/specifications/core-metadata/#name
+_VALID_NAME = re.compile(r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.I)
+_UNSAFE_NAME_CHARS = re.compile(r"[^A-Z0-9.]+", re.I)
+_NON_ALPHANUMERIC = re.compile(r"[^A-Z0-9]+", re.I)
+
+
+def safe_identifier(name: str) -> str:
+    """Make a string safe to be used as Python identifier.
+    >>> safe_identifier("12abc")
+    '_12abc'
+    >>> safe_identifier("__editable__.myns.pkg-78.9.3_local")
+    '__editable___myns_pkg_78_9_3_local'
+    """
+    safe = re.sub(r'\W|^(?=\d)', '_', name)
+    assert safe.isidentifier()
+    return safe
+
+
+def safe_name(component: str) -> str:
+    """Escape a component used as a project name according to Core Metadata.
+    >>> safe_name("hello world")
+    'hello-world'
+    >>> safe_name("hello?world")
+    'hello-world'
+    """
+    # See pkg_resources.safe_name
+    return _UNSAFE_NAME_CHARS.sub("-", component)
+
+
+def safe_version(version: str) -> str:
+    """Convert an arbitrary string into a valid version string.
+    >>> safe_version("1988 12 25")
+    '1988.12.25'
+    >>> safe_version("v0.2.1")
+    '0.2.1'
+    >>> safe_version("v0.2?beta")
+    '0.2b0'
+    >>> safe_version("v0.2 beta")
+    '0.2b0'
+    >>> safe_version("ubuntu lts")
+    Traceback (most recent call last):
+    ...
+    setuptools.extern.packaging.version.InvalidVersion: Invalid version: 'ubuntu.lts'
+    """
+    v = version.replace(' ', '.')
+    try:
+        return str(packaging.version.Version(v))
+    except packaging.version.InvalidVersion:
+        attempt = _UNSAFE_NAME_CHARS.sub("-", v)
+        return str(packaging.version.Version(attempt))
+
+
+def best_effort_version(version: str) -> str:
+    """Convert an arbitrary string into a version-like string.
+    >>> best_effort_version("v0.2 beta")
+    '0.2b0'
+
+    >>> import warnings
+    >>> warnings.simplefilter("ignore", category=SetuptoolsDeprecationWarning)
+    >>> best_effort_version("ubuntu lts")
+    'ubuntu.lts'
+    """
+    # See pkg_resources.safe_version
+    try:
+        return safe_version(version)
+    except packaging.version.InvalidVersion:
+        SetuptoolsDeprecationWarning.emit(
+            f"Invalid version: {version!r}.",
+            f"""
+            Version {version!r} is not valid according to PEP 440.
+
+            Please make sure to specify a valid version for your package.
+            Also note that future releases of setuptools may halt the build process
+            if an invalid version is given.
+            """,
+            see_url="https://peps.python.org/pep-0440/",
+            due_date=(2023, 9, 26),  # See setuptools/dist _validate_version
+        )
+        v = version.replace(' ', '.')
+        return safe_name(v)
+
+
+def safe_extra(extra: str) -> str:
+    """Normalize extra name according to PEP 685
+    >>> safe_extra("_FrIeNdLy-._.-bArD")
+    'friendly-bard'
+    >>> safe_extra("FrIeNdLy-._.-bArD__._-")
+    'friendly-bard'
+    """
+    return _NON_ALPHANUMERIC.sub("-", extra).strip("-").lower()
+
+
+def filename_component(value: str) -> str:
+    """Normalize each component of a filename (e.g. distribution/version part of wheel)
+    Note: ``value`` needs to be already normalized.
+    >>> filename_component("my-pkg")
+    'my_pkg'
+    """
+    return value.replace("-", "_").strip("_")
+
+
+def safer_name(value: str) -> str:
+    """Like ``safe_name`` but can be used as filename component for wheel"""
+    # See bdist_wheel.safer_name
+    return filename_component(safe_name(value))
+
+
+def safer_best_effort_version(value: str) -> str:
+    """Like ``best_effort_version`` but can be used as filename component for wheel"""
+    # See bdist_wheel.safer_version
+    # TODO: Replace with only safe_version in the future (no need for best effort)
+    return filename_component(best_effort_version(value))
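+
+
+# An illustrative sketch of how these helpers compose when deriving a wheel
+# filename stem (``_wheel_stem`` is a hypothetical helper, not part of this
+# module):
+#
+#     def _wheel_stem(name, version):
+#         return f"{safer_name(name)}-{safer_best_effort_version(version)}"
+#
+#     _wheel_stem("my-pkg", "v0.2 beta")  # -> 'my_pkg-0.2b0'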
diff --git a/.venv/Lib/site-packages/setuptools/_path.py b/.venv/Lib/site-packages/setuptools/_path.py
new file mode 100644
index 0000000000000000000000000000000000000000..b99d9dadcfc3789629aa803d3256cd22d2873c29
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/_path.py
@@ -0,0 +1,37 @@
+import os
+import sys
+from typing import Union
+
+_Path = Union[str, os.PathLike]
+
+
+def ensure_directory(path):
+    """Ensure that the parent directory of `path` exists"""
+    dirname = os.path.dirname(path)
+    os.makedirs(dirname, exist_ok=True)
+
+
+def same_path(p1: _Path, p2: _Path) -> bool:
+    """Differs from os.path.samefile because it does not require paths to exist.
+    Purely string based (no comparison between i-nodes).
+    >>> same_path("a/b", "./a/b")
+    True
+    >>> same_path("a/b", "a/./b")
+    True
+    >>> same_path("a/b", "././a/b")
+    True
+    >>> same_path("a/b", "./a/b/c/..")
+    True
+    >>> same_path("a/b", "../a/b/c")
+    False
+    >>> same_path("a", "a/b")
+    False
+    """
+    return normpath(p1) == normpath(p2)
+
+
+def normpath(filename: _Path) -> str:
+    """Normalize a file/dir name for comparison purposes."""
+    # See pkg_resources.normalize_path for notes about cygwin
+    file = os.path.abspath(filename) if sys.platform == 'cygwin' else filename
+    return os.path.normcase(os.path.realpath(os.path.normpath(file)))
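+
+
+# Note that ``os.path.normcase`` makes the comparison case-insensitive on
+# platforms with case-insensitive filesystems, e.g. on Windows (a sketch):
+#
+#     same_path("C:/Tools/pkg", "c:/tools/PKG")  # -> True on Windows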
diff --git a/.venv/Lib/site-packages/setuptools/_reqs.py b/.venv/Lib/site-packages/setuptools/_reqs.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d5b927fd8728592fa8eb11891b1a6b1379c4199
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/_reqs.py
@@ -0,0 +1,33 @@
+from typing import Callable, Iterable, Iterator, TypeVar, Union, overload
+
+import setuptools.extern.jaraco.text as text
+from setuptools.extern.packaging.requirements import Requirement
+
+_T = TypeVar("_T")
+_StrOrIter = Union[str, Iterable[str]]
+
+
+def parse_strings(strs: _StrOrIter) -> Iterator[str]:
+    """
+    Yield requirement strings for each specification in `strs`.
+
+    `strs` must be a string, or a (possibly-nested) iterable thereof.
+    """
+    return text.join_continuation(map(text.drop_comment, text.yield_lines(strs)))
+
+
+@overload
+def parse(strs: _StrOrIter) -> Iterator[Requirement]:
+    ...
+
+
+@overload
+def parse(strs: _StrOrIter, parser: Callable[[str], _T]) -> Iterator[_T]:
+    ...
+
+
+def parse(strs, parser=Requirement):
+    """
+    Replacement for ``pkg_resources.parse_requirements`` that uses ``packaging``.
+    """
+    return map(parser, parse_strings(strs))
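+
+
+# A brief usage sketch (the requirement strings are illustrative):
+#
+#     reqs = list(parse(["packaging>=23.1  # comment", "wheel"]))
+#     [r.name for r in reqs]  # -> ['packaging', 'wheel']
+#
+# ``parse_strings`` strips the comment and joins backslash continuations
+# before each entry is handed to ``packaging.requirements.Requirement``.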
diff --git a/.venv/Lib/site-packages/setuptools/cli-32.exe b/.venv/Lib/site-packages/setuptools/cli-32.exe
new file mode 100644
index 0000000000000000000000000000000000000000..65c3cd99cc7433f271a5b9387abdd1ddb949d1a6
Binary files /dev/null and b/.venv/Lib/site-packages/setuptools/cli-32.exe differ
diff --git a/.venv/Lib/site-packages/setuptools/cli-64.exe b/.venv/Lib/site-packages/setuptools/cli-64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..3ea50eebfe3f0113b231a318cc1ad6e238afd60d
Binary files /dev/null and b/.venv/Lib/site-packages/setuptools/cli-64.exe differ
diff --git a/.venv/Lib/site-packages/setuptools/cli-arm64.exe b/.venv/Lib/site-packages/setuptools/cli-arm64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..da96455a07a0bad4cde5dc5626544325f82c722b
Binary files /dev/null and b/.venv/Lib/site-packages/setuptools/cli-arm64.exe differ
diff --git a/.venv/Lib/site-packages/setuptools/cli.exe b/.venv/Lib/site-packages/setuptools/cli.exe
new file mode 100644
index 0000000000000000000000000000000000000000..65c3cd99cc7433f271a5b9387abdd1ddb949d1a6
Binary files /dev/null and b/.venv/Lib/site-packages/setuptools/cli.exe differ
diff --git a/.venv/Lib/site-packages/setuptools/command/alias.py b/.venv/Lib/site-packages/setuptools/command/alias.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7b4d5456a7fa18d31d8ab4e9ef120b42e61ef13
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/alias.py
@@ -0,0 +1,78 @@
+from distutils.errors import DistutilsOptionError
+
+from setuptools.command.setopt import edit_config, option_base, config_file
+
+
+def shquote(arg):
+    """Quote an argument for later parsing by shlex.split()"""
+    for c in '"', "'", "\\", "#":
+        if c in arg:
+            return repr(arg)
+    if arg.split() != [arg]:
+        return repr(arg)
+    return arg
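+
+# For example (a sketch): shquote("plain") -> 'plain', while
+# shquote("hello world") -> "'hello world'", so that a later
+# shlex.split() recovers the original single argument.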
+
+
+class alias(option_base):
+    """Define a shortcut that invokes one or more commands"""
+
+    description = "define a shortcut to invoke one or more commands"
+    command_consumes_arguments = True
+
+    user_options = [
+        ('remove', 'r', 'remove (unset) the alias'),
+    ] + option_base.user_options
+
+    boolean_options = option_base.boolean_options + ['remove']
+
+    def initialize_options(self):
+        option_base.initialize_options(self)
+        self.args = None
+        self.remove = None
+
+    def finalize_options(self):
+        option_base.finalize_options(self)
+        if self.remove and len(self.args) != 1:
+            raise DistutilsOptionError(
+                "Must specify exactly one argument (the alias name) when "
+                "using --remove"
+            )
+
+    def run(self):
+        aliases = self.distribution.get_option_dict('aliases')
+
+        if not self.args:
+            print("Command Aliases")
+            print("---------------")
+            for alias in aliases:
+                print("setup.py alias", format_alias(alias, aliases))
+            return
+
+        elif len(self.args) == 1:
+            (alias,) = self.args
+            if self.remove:
+                command = None
+            elif alias in aliases:
+                print("setup.py alias", format_alias(alias, aliases))
+                return
+            else:
+                print("No alias definition found for %r" % alias)
+                return
+        else:
+            alias = self.args[0]
+            command = ' '.join(map(shquote, self.args[1:]))
+
+        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
+
+
+def format_alias(name, aliases):
+    source, command = aliases[name]
+    if source == config_file('global'):
+        source = '--global-config '
+    elif source == config_file('user'):
+        source = '--user-config '
+    elif source == config_file('local'):
+        source = ''
+    else:
+        source = '--filename=%r' % source
+    return source + name + ' ' + command
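+
+
+# For reference, aliases live in a config file's ``[aliases]`` section; an
+# illustrative ``setup.cfg`` entry (the command line is hypothetical):
+#
+#     [aliases]
+#     release = egg_info -Db '' sdist bdist_egg
+#
+# after which ``setup.py release`` expands to the aliased commands.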
diff --git a/.venv/Lib/site-packages/setuptools/command/bdist_egg.py b/.venv/Lib/site-packages/setuptools/command/bdist_egg.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdece56bc9d256e895acbd2b01d624e47a4729dd
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/bdist_egg.py
@@ -0,0 +1,464 @@
+"""setuptools.command.bdist_egg
+
+Build .egg distributions"""
+
+from distutils.dir_util import remove_tree, mkpath
+from distutils import log
+from types import CodeType
+import sys
+import os
+import re
+import textwrap
+import marshal
+
+from setuptools.extension import Library
+from setuptools import Command
+from .._path import ensure_directory
+
+from sysconfig import get_path, get_python_version
+
+
+def _get_purelib():
+    return get_path("purelib")
+
+
+def strip_module(filename):
+    if '.' in filename:
+        filename = os.path.splitext(filename)[0]
+    if filename.endswith('module'):
+        filename = filename[:-6]
+    return filename
+
+
+def sorted_walk(dir):
+    """Do os.walk in a reproducible way,
+    independent of nondeterministic filesystem readdir order
+    """
+    for base, dirs, files in os.walk(dir):
+        dirs.sort()
+        files.sort()
+        yield base, dirs, files
+
+
+def write_stub(resource, pyfile):
+    _stub_template = textwrap.dedent(
+        """
+        def __bootstrap__():
+            global __bootstrap__, __loader__, __file__
+            import sys, pkg_resources, importlib.util
+            __file__ = pkg_resources.resource_filename(__name__, %r)
+            __loader__ = None; del __bootstrap__, __loader__
+            spec = importlib.util.spec_from_file_location(__name__,__file__)
+            mod = importlib.util.module_from_spec(spec)
+            spec.loader.exec_module(mod)
+        __bootstrap__()
+        """
+    ).lstrip()
+    with open(pyfile, 'w') as f:
+        f.write(_stub_template % resource)
+
+
+class bdist_egg(Command):
+    description = "create an \"egg\" distribution"
+
+    user_options = [
+        ('bdist-dir=', 'b', "temporary directory for creating the distribution"),
+        (
+            'plat-name=',
+            'p',
+            "platform name to embed in generated filenames "
+            "(by default uses `pkg_resources.get_build_platform()`)",
+        ),
+        ('exclude-source-files', None, "remove all .py files from the generated egg"),
+        (
+            'keep-temp',
+            'k',
+            "keep the pseudo-installation tree around after "
+            + "creating the distribution archive",
+        ),
+        ('dist-dir=', 'd', "directory to put final built distributions in"),
+        ('skip-build', None, "skip rebuilding everything (for testing/debugging)"),
+    ]
+
+    boolean_options = ['keep-temp', 'skip-build', 'exclude-source-files']
+
+    def initialize_options(self):
+        self.bdist_dir = None
+        self.plat_name = None
+        self.keep_temp = 0
+        self.dist_dir = None
+        self.skip_build = 0
+        self.egg_output = None
+        self.exclude_source_files = None
+
+    def finalize_options(self):
+        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
+        self.egg_info = ei_cmd.egg_info
+
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'egg')
+
+        if self.plat_name is None:
+            from pkg_resources import get_build_platform
+
+            self.plat_name = get_build_platform()
+
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+        if self.egg_output is None:
+            # Compute filename of the output egg
+            basename = ei_cmd._get_egg_basename(
+                py_version=get_python_version(),
+                platform=self.distribution.has_ext_modules() and self.plat_name,
+            )
+
+            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
+
+    def do_install_data(self):
+        # Hack for packages that install data to install's --install-lib
+        self.get_finalized_command('install').install_lib = self.bdist_dir
+
+        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
+        old, self.distribution.data_files = self.distribution.data_files, []
+
+        for item in old:
+            if isinstance(item, tuple) and len(item) == 2:
+                if os.path.isabs(item[0]):
+                    realpath = os.path.realpath(item[0])
+                    normalized = os.path.normcase(realpath)
+                    if normalized == site_packages or normalized.startswith(
+                        site_packages + os.sep
+                    ):
+                        item = realpath[len(site_packages) + 1 :], item[1]
+                        # XXX else: raise ???
+            self.distribution.data_files.append(item)
+
+        try:
+            log.info("installing package data to %s", self.bdist_dir)
+            self.call_command('install_data', force=0, root=None)
+        finally:
+            self.distribution.data_files = old
+
+    def get_outputs(self):
+        return [self.egg_output]
+
+    def call_command(self, cmdname, **kw):
+        """Invoke reinitialized command `cmdname` with keyword args"""
+        for dirname in INSTALL_DIRECTORY_ATTRS:
+            kw.setdefault(dirname, self.bdist_dir)
+        kw.setdefault('skip_build', self.skip_build)
+        kw.setdefault('dry_run', self.dry_run)
+        cmd = self.reinitialize_command(cmdname, **kw)
+        self.run_command(cmdname)
+        return cmd
+
+    def run(self):  # noqa: C901  # is too complex (14)  # FIXME
+        # Generate metadata first
+        self.run_command("egg_info")
+        # We run install_lib before install_data, because some data hacks
+        # pull their data path from the install_lib command.
+        log.info("installing library code to %s", self.bdist_dir)
+        instcmd = self.get_finalized_command('install')
+        old_root = instcmd.root
+        instcmd.root = None
+        if self.distribution.has_c_libraries() and not self.skip_build:
+            self.run_command('build_clib')
+        cmd = self.call_command('install_lib', warn_dir=0)
+        instcmd.root = old_root
+
+        all_outputs, ext_outputs = self.get_ext_outputs()
+        self.stubs = []
+        to_compile = []
+        for p, ext_name in enumerate(ext_outputs):
+            filename, ext = os.path.splitext(ext_name)
+            pyfile = os.path.join(self.bdist_dir, strip_module(filename) + '.py')
+            self.stubs.append(pyfile)
+            log.info("creating stub loader for %s", ext_name)
+            if not self.dry_run:
+                write_stub(os.path.basename(ext_name), pyfile)
+            to_compile.append(pyfile)
+            ext_outputs[p] = ext_name.replace(os.sep, '/')
+
+        if to_compile:
+            cmd.byte_compile(to_compile)
+        if self.distribution.data_files:
+            self.do_install_data()
+
+        # Make the EGG-INFO directory
+        archive_root = self.bdist_dir
+        egg_info = os.path.join(archive_root, 'EGG-INFO')
+        self.mkpath(egg_info)
+        if self.distribution.scripts:
+            script_dir = os.path.join(egg_info, 'scripts')
+            log.info("installing scripts to %s", script_dir)
+            self.call_command('install_scripts', install_dir=script_dir, no_ep=1)
+
+        self.copy_metadata_to(egg_info)
+        native_libs = os.path.join(egg_info, "native_libs.txt")
+        if all_outputs:
+            log.info("writing %s", native_libs)
+            if not self.dry_run:
+                ensure_directory(native_libs)
+                libs_file = open(native_libs, 'wt')
+                libs_file.write('\n'.join(all_outputs))
+                libs_file.write('\n')
+                libs_file.close()
+        elif os.path.isfile(native_libs):
+            log.info("removing %s", native_libs)
+            if not self.dry_run:
+                os.unlink(native_libs)
+
+        write_safety_flag(os.path.join(archive_root, 'EGG-INFO'), self.zip_safe())
+
+        if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
+            log.warn(
+                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
+                "Use the install_requires/extras_require setup() args instead."
+            )
+
+        if self.exclude_source_files:
+            self.zap_pyfiles()
+
+        # Make the archive
+        make_zipfile(
+            self.egg_output,
+            archive_root,
+            verbose=self.verbose,
+            dry_run=self.dry_run,
+            mode=self.gen_header(),
+        )
+        if not self.keep_temp:
+            remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+        # Add to 'Distribution.dist_files' so that the "upload" command works
+        getattr(self.distribution, 'dist_files', []).append(
+            ('bdist_egg', get_python_version(), self.egg_output)
+        )
+
+    def zap_pyfiles(self):
+        log.info("Removing .py files from temporary directory")
+        for base, dirs, files in walk_egg(self.bdist_dir):
+            for name in files:
+                path = os.path.join(base, name)
+
+                if name.endswith('.py'):
+                    log.debug("Deleting %s", path)
+                    os.unlink(path)
+
+                if base.endswith('__pycache__'):
+                    path_old = path
+
+                    pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
+                    m = re.match(pattern, name)
+                    path_new = os.path.join(base, os.pardir, m.group('name') + '.pyc')
+                    log.info("Renaming file from [%s] to [%s]" % (path_old, path_new))
+                    try:
+                        os.remove(path_new)
+                    except OSError:
+                        pass
+                    os.rename(path_old, path_new)
+
+    def zip_safe(self):
+        safe = getattr(self.distribution, 'zip_safe', None)
+        if safe is not None:
+            return safe
+        log.warn("zip_safe flag not set; analyzing archive contents...")
+        return analyze_egg(self.bdist_dir, self.stubs)
+
+    def gen_header(self):
+        return 'w'
+
+    def copy_metadata_to(self, target_dir):
+        "Copy metadata (egg info) to the target_dir"
+        # normalize the path (so that a forward-slash in egg_info will
+        # match using startswith below)
+        norm_egg_info = os.path.normpath(self.egg_info)
+        prefix = os.path.join(norm_egg_info, '')
+        for path in self.ei_cmd.filelist.files:
+            if path.startswith(prefix):
+                target = os.path.join(target_dir, path[len(prefix) :])
+                ensure_directory(target)
+                self.copy_file(path, target)
+
+    def get_ext_outputs(self):
+        """Get a list of relative paths to C extensions in the output distro"""
+
+        all_outputs = []
+        ext_outputs = []
+
+        paths = {self.bdist_dir: ''}
+        for base, dirs, files in sorted_walk(self.bdist_dir):
+            for filename in files:
+                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
+                    all_outputs.append(paths[base] + filename)
+            for filename in dirs:
+                paths[os.path.join(base, filename)] = paths[base] + filename + '/'
+
+        if self.distribution.has_ext_modules():
+            build_cmd = self.get_finalized_command('build_ext')
+            for ext in build_cmd.extensions:
+                if isinstance(ext, Library):
+                    continue
+                fullname = build_cmd.get_ext_fullname(ext.name)
+                filename = build_cmd.get_ext_filename(fullname)
+                if not os.path.basename(filename).startswith('dl-'):
+                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
+                        ext_outputs.append(filename)
+
+        return all_outputs, ext_outputs
+
+
+NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
+
+
+def walk_egg(egg_dir):
+    """Walk an unpacked egg's contents, skipping the metadata directory"""
+    walker = sorted_walk(egg_dir)
+    base, dirs, files = next(walker)
+    if 'EGG-INFO' in dirs:
+        dirs.remove('EGG-INFO')
+    yield base, dirs, files
+    for bdf in walker:
+        yield bdf
+
+
+def analyze_egg(egg_dir, stubs):
+    # check for existing flag in EGG-INFO
+    for flag, fn in safety_flags.items():
+        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
+            return flag
+    if not can_scan():
+        return False
+    safe = True
+    for base, dirs, files in walk_egg(egg_dir):
+        for name in files:
+            if name.endswith('.py') or name.endswith('.pyw'):
+                continue
+            elif name.endswith('.pyc') or name.endswith('.pyo'):
+                # always scan, even if we already know we're not safe
+                safe = scan_module(egg_dir, base, name, stubs) and safe
+    return safe
+
+
+def write_safety_flag(egg_dir, safe):
+    # Write or remove zip safety flag file(s)
+    for flag, fn in safety_flags.items():
+        fn = os.path.join(egg_dir, fn)
+        if os.path.exists(fn):
+            if safe is None or bool(safe) != flag:
+                os.unlink(fn)
+        elif safe is not None and bool(safe) == flag:
+            f = open(fn, 'wt')
+            f.write('\n')
+            f.close()
+
+
+safety_flags = {
+    True: 'zip-safe',
+    False: 'not-zip-safe',
+}
+
+
+def scan_module(egg_dir, base, name, stubs):
+    """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+    filename = os.path.join(base, name)
+    if filename[:-1] in stubs:
+        return True  # Extension module
+    pkg = base[len(egg_dir) + 1 :].replace(os.sep, '.')
+    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
+    if sys.version_info < (3, 7):
+        skip = 12  # skip magic & date & file size
+    else:
+        skip = 16  # skip magic & reserved? & date & file size
+    f = open(filename, 'rb')
+    f.read(skip)
+    code = marshal.load(f)
+    f.close()
+    safe = True
+    symbols = dict.fromkeys(iter_symbols(code))
+    for bad in ['__file__', '__path__']:
+        if bad in symbols:
+            log.warn("%s: module references %s", module, bad)
+            safe = False
+    if 'inspect' in symbols:
+        for bad in [
+            'getsource',
+            'getabsfile',
+            'getsourcefile',
+            'getfile',
+            'getsourcelines',
+            'findsource',
+            'getcomments',
+            'getframeinfo',
+            'getinnerframes',
+            'getouterframes',
+            'stack',
+            'trace',
+        ]:
+            if bad in symbols:
+                log.warn("%s: module MAY be using inspect.%s", module, bad)
+                safe = False
+    return safe
+
+
+def iter_symbols(code):
+    """Yield names and strings used by `code` and its nested code objects"""
+    for name in code.co_names:
+        yield name
+    for const in code.co_consts:
+        if isinstance(const, str):
+            yield const
+        elif isinstance(const, CodeType):
+            for name in iter_symbols(const):
+                yield name
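+
+
+# How the symbol scan sees a module, as a sketch (the compiled source is
+# illustrative):
+#
+#     code = compile("import inspect\nframes = inspect.stack()", "<mod>", "exec")
+#     set(iter_symbols(code))  # includes 'inspect' and 'stack', which
+#                              # scan_module() would flag as possibly unsafe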
+
+
+def can_scan():
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        # CPython, PyPy, etc.
+        return True
+    log.warn("Unable to analyze compiled code on this platform.")
+    log.warn(
+        "Please ask the author to include a 'zip_safe'"
+        " setting (either True or False) in the package's setup.py"
+    )
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = ['install_lib', 'install_dir', 'install_data', 'install_base']
+
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True, mode='w'):
+    """Create a zip file from all the files under 'base_dir'.  The output
+    zip file will be named 'base_dir' + ".zip".  Uses either the "zipfile"
+    Python module (if available) or the InfoZIP "zip" utility (if installed
+    and found on the default search path).  If neither tool is available,
+    raises DistutilsExecError.  Returns the name of the output zip file.
+    """
+    import zipfile
+
+    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
+
+    def visit(z, dirname, names):
+        for name in names:
+            path = os.path.normpath(os.path.join(dirname, name))
+            if os.path.isfile(path):
+                p = path[len(base_dir) + 1 :]
+                if not dry_run:
+                    z.write(path, p)
+                log.debug("adding '%s'", p)
+
+    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
+    if not dry_run:
+        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
+        for dirname, dirs, files in sorted_walk(base_dir):
+            visit(z, dirname, files)
+        z.close()
+    else:
+        for dirname, dirs, files in sorted_walk(base_dir):
+            visit(None, dirname, files)
+    return zip_filename
diff --git a/.venv/Lib/site-packages/setuptools/command/bdist_rpm.py b/.venv/Lib/site-packages/setuptools/command/bdist_rpm.py
new file mode 100644
index 0000000000000000000000000000000000000000..30b7c2338533ec805cbfd82875c8ef311175e098
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/bdist_rpm.py
@@ -0,0 +1,40 @@
+import distutils.command.bdist_rpm as orig
+
+from ..warnings import SetuptoolsDeprecationWarning
+
+
+class bdist_rpm(orig.bdist_rpm):
+    """
+    Override the default bdist_rpm behavior to do the following:
+
+    1. Run egg_info to ensure the name and version are properly calculated.
+    2. Always run 'install' using --single-version-externally-managed to
+       disable eggs in RPM distributions.
+    """
+
+    def run(self):
+        SetuptoolsDeprecationWarning.emit(
+            "Deprecated command",
+            """
+            bdist_rpm is deprecated and will be removed in a future version.
+            Use bdist_wheel (wheel packages) instead.
+            """,
+            see_url="https://github.com/pypa/setuptools/issues/1988",
+            due_date=(2023, 10, 30),  # Deprecation introduced in 22 Oct 2021.
+        )
+
+        # ensure distro name is up-to-date
+        self.run_command('egg_info')
+
+        orig.bdist_rpm.run(self)
+
+    def _make_spec_file(self):
+        spec = orig.bdist_rpm._make_spec_file(self)
+        spec = [
+            line.replace(
+                "setup.py install ",
+                "setup.py install --single-version-externally-managed ",
+            ).replace("%setup", "%setup -n %{name}-%{unmangled_version}")
+            for line in spec
+        ]
+        return spec
diff --git a/.venv/Lib/site-packages/setuptools/command/build.py b/.venv/Lib/site-packages/setuptools/command/build.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f1d688e1797bb506139def5c6833afae8a62bf3
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/build.py
@@ -0,0 +1,149 @@
+import sys
+from typing import TYPE_CHECKING, List, Dict
+from distutils.command.build import build as _build
+
+from ..warnings import SetuptoolsDeprecationWarning
+
+if sys.version_info >= (3, 8):
+    from typing import Protocol
+elif TYPE_CHECKING:
+    from typing_extensions import Protocol
+else:
+    from abc import ABC as Protocol
+
+
+_ORIGINAL_SUBCOMMANDS = {"build_py", "build_clib", "build_ext", "build_scripts"}
+
+
+class build(_build):
+    # copy to avoid sharing the object with parent class
+    sub_commands = _build.sub_commands[:]
+
+    def get_sub_commands(self):
+        subcommands = {cmd[0] for cmd in _build.sub_commands}
+        if subcommands - _ORIGINAL_SUBCOMMANDS:
+            SetuptoolsDeprecationWarning.emit(
+                "Direct usage of `distutils` commands",
+                """
+                It seems that you are using `distutils.command.build` to add
+                new subcommands. Using `distutils` directly is considered deprecated;
+                please use `setuptools.command.build` instead.
+                """,
+                due_date=(2023, 12, 13),  # Warning introduced in 13 Jun 2022.
+                see_url="https://peps.python.org/pep-0632/",
+            )
+            self.sub_commands = _build.sub_commands
+        return super().get_sub_commands()
+
+
+class SubCommand(Protocol):
+    """In order to support editable installations (see :pep:`660`) all
+    build subcommands **SHOULD** implement this protocol. They also **MUST** inherit
+    from ``setuptools.Command``.
+
+    When creating an :pep:`editable wheel <660>`, ``setuptools`` will try to evaluate
+    custom ``build`` subcommands using the following procedure:
+
+    1. ``setuptools`` will set the ``editable_mode`` attribute to ``True``
+    2. ``setuptools`` will execute the ``run()`` command.
+
+       .. important::
+          Subcommands **SHOULD** take advantage of ``editable_mode=True`` to adapt
+          their behaviour or perform optimisations.
+
+          For example, if a subcommand doesn't need to generate an extra file and
+          all it does is to copy a source file into the build directory,
+          ``run()`` **SHOULD** simply "early return".
+
+          Similarly, if the subcommand creates files that would be placed alongside
+          Python files in the final distribution, during an editable install
+          the command **SHOULD** generate these files "in place" (i.e. write them to
+          the original source directory, instead of using the build directory).
+          Note that ``get_output_mapping()`` should reflect that and include mappings
+          for "in place" builds accordingly.
+
+    3. ``setuptools`` will use any knowledge it can derive from the return values of
+       ``get_outputs()`` and ``get_output_mapping()`` to create an editable wheel.
+       When relevant, ``setuptools`` **MAY** attempt to use file links based on the value
+       of ``get_output_mapping()``. Alternatively, ``setuptools`` **MAY** attempt to use
+       :doc:`import hooks <python:reference/import>` to redirect any attempt to import
+       to the directory with the original source code and other files built in place.
+
+    Please note that custom sub-commands **SHOULD NOT** rely on ``run()`` being
+    executed (or not) to provide correct return values for ``get_outputs()``,
+    ``get_output_mapping()`` or ``get_source_files()``. The ``get_*`` methods should
+    work independently of ``run()``.
+    """
+
+    editable_mode: bool = False
+    """Boolean flag that will be set to ``True`` when setuptools is used for an
+    editable installation (see :pep:`660`).
+    Implementations **SHOULD** explicitly set the default value of this attribute to
+    ``False``.
+    When subcommands run, they can use this flag to perform optimizations or change
+    their behaviour accordingly.
+    """
+
+    build_lib: str
+    """String representing the directory where the build artifacts should be stored,
+    e.g. ``build/lib``.
+    For example, if a distribution wants to provide a Python module named ``pkg.mod``,
+    then a corresponding file should be written to ``{build_lib}/package/module.py``.
+    A way of thinking about this is that the files saved under ``build_lib``
+    would be eventually copied to one of the directories in :obj:`site.PREFIXES`
+    upon installation.
+
+    A command that produces platform-independent files (e.g. compiling text templates
+    into Python functions) **CAN** initialize ``build_lib`` by copying its value from
+    the ``build_py`` command. On the other hand, a command that produces
+    platform-specific files **CAN** initialize ``build_lib`` by copying its value from
+    the ``build_ext`` command. In general this is done inside the ``finalize_options``
+    method with the help of the ``set_undefined_options`` command::
+
+        def finalize_options(self):
+            self.set_undefined_options("build_py", ("build_lib", "build_lib"))
+            ...
+    """
+
+    def initialize_options(self):
+        """(Required by the original :class:`setuptools.Command` interface)"""
+
+    def finalize_options(self):
+        """(Required by the original :class:`setuptools.Command` interface)"""
+
+    def run(self):
+        """(Required by the original :class:`setuptools.Command` interface)"""
+
+    def get_source_files(self) -> List[str]:
+        """
+        Return a list of all files that are used by the command to create the expected
+        outputs.
+        For example, if your build command transpiles Java files into Python, you should
+        list here all the Java files.
+        The primary purpose of this function is to help populating the ``sdist``
+        with all the files necessary to build the distribution.
+        All files should be strings relative to the project root directory.
+        """
+
+    def get_outputs(self) -> List[str]:
+        """
+        Return a list of files intended for distribution as they would have been
+        produced by the build.
+        These files should be strings in the form of
+        ``"{build_lib}/destination/file/path"``.
+
+        .. note::
+           The return value of ``get_outputs()`` should include all files used as keys
+           in ``get_output_mapping()`` plus files that are generated during the build
+           and don't correspond to any source file already present in the project.
+        """
+
+    def get_output_mapping(self) -> Dict[str, str]:
+        """
+        Return a mapping from destination files as they would be produced by the
+        build (dict keys) to the respective existing (source) files (dict values).
+        Existing (source) files should be represented as strings relative to the project
+        root directory.
+        Destination files should be strings in the form of
+        ``"{build_lib}/destination/file/path"``.
+        """
diff --git a/.venv/Lib/site-packages/setuptools/command/build_clib.py b/.venv/Lib/site-packages/setuptools/command/build_clib.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f4229b2763603cf7350eb50560c0a5506ed54b4
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/build_clib.py
@@ -0,0 +1,99 @@
+import distutils.command.build_clib as orig
+from distutils.errors import DistutilsSetupError
+from distutils import log
+from setuptools.dep_util import newer_pairwise_group
+
+
+class build_clib(orig.build_clib):
+    """
+    Override the default build_clib behaviour to do the following:
+
+    1. Implement a rudimentary timestamp-based dependency system
+       so 'compile()' doesn't run every time.
+    2. Add more keys to the 'build_info' dictionary:
+        * obj_deps - specify dependencies for each object compiled.
+                     this should be a dictionary mapping a key
+                     with the source filename to a list of
+                     dependencies. Use an empty string for global
+                     dependencies.
+        * cflags   - specify a list of additional flags to pass to
+                     the compiler.
+    """
+
+    def build_libraries(self, libraries):
+        for lib_name, build_info in libraries:
+            sources = build_info.get('sources')
+            if sources is None or not isinstance(sources, (list, tuple)):
+                raise DistutilsSetupError(
+                    "in 'libraries' option (library '%s'), "
+                    "'sources' must be present and must be "
+                    "a list of source filenames" % lib_name
+                )
+            sources = sorted(list(sources))
+
+            log.info("building '%s' library", lib_name)
+
+            # Make sure everything is the correct type.
+            # obj_deps should be a dictionary of keys as sources
+            # and a list/tuple of files that are its dependencies.
+            obj_deps = build_info.get('obj_deps', dict())
+            if not isinstance(obj_deps, dict):
+                raise DistutilsSetupError(
+                    "in 'libraries' option (library '%s'), "
+                    "'obj_deps' must be a dictionary of "
+                    "type 'source: list'" % lib_name
+                )
+            dependencies = []
+
+            # Get the global dependencies that are specified by the '' key.
+            # These will go into every source's dependency list.
+            global_deps = obj_deps.get('', list())
+            if not isinstance(global_deps, (list, tuple)):
+                raise DistutilsSetupError(
+                    "in 'libraries' option (library '%s'), "
+                    "'obj_deps' must be a dictionary of "
+                    "type 'source: list'" % lib_name
+                )
+
+            # Build the list to be used by newer_pairwise_group
+            # each source will be auto-added to its dependencies.
+            for source in sources:
+                src_deps = [source]
+                src_deps.extend(global_deps)
+                extra_deps = obj_deps.get(source, list())
+                if not isinstance(extra_deps, (list, tuple)):
+                    raise DistutilsSetupError(
+                        "in 'libraries' option (library '%s'), "
+                        "'obj_deps' must be a dictionary of "
+                        "type 'source: list'" % lib_name
+                    )
+                src_deps.extend(extra_deps)
+                dependencies.append(src_deps)
+
+            expected_objects = self.compiler.object_filenames(
+                sources,
+                output_dir=self.build_temp,
+            )
+
+            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
+                # First, compile the source code to object files in the library
+                # directory.  (This should probably change to putting object
+                # files in a temporary build directory.)
+                macros = build_info.get('macros')
+                include_dirs = build_info.get('include_dirs')
+                cflags = build_info.get('cflags')
+                self.compiler.compile(
+                    sources,
+                    output_dir=self.build_temp,
+                    macros=macros,
+                    include_dirs=include_dirs,
+                    extra_postargs=cflags,
+                    debug=self.debug,
+                )
+
+            # Now "link" the object files together into a static library.
+            # (On Unix at least, this isn't really linking -- it just
+            # builds an archive.  Whatever.)
+            self.compiler.create_static_lib(
+                expected_objects, lib_name, output_dir=self.build_clib, debug=self.debug
+            )
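+
+
+# An illustrative ``libraries`` entry exercising the extra keys described
+# above (all paths and flags are hypothetical):
+#
+#     libraries=[
+#         ("mylib", {
+#             "sources": ["src/a.c", "src/b.c"],
+#             "obj_deps": {
+#                 "": ["src/common.h"],    # global dependency for every source
+#                 "src/a.c": ["src/a.h"],  # extra dependency for one source
+#             },
+#             "cflags": ["-O2"],
+#         }),
+#     ]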
diff --git a/.venv/Lib/site-packages/setuptools/command/build_ext.py b/.venv/Lib/site-packages/setuptools/command/build_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a80781cf4e2c7e01a4658938140390145d5e097
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/build_ext.py
@@ -0,0 +1,456 @@
+import os
+import sys
+import itertools
+from importlib.machinery import EXTENSION_SUFFIXES
+from importlib.util import cache_from_source as _compiled_file_name
+from typing import Dict, Iterator, List, Tuple
+from pathlib import Path
+
+from distutils.command.build_ext import build_ext as _du_build_ext
+from distutils.ccompiler import new_compiler
+from distutils.sysconfig import customize_compiler, get_config_var
+from distutils import log
+
+from setuptools.errors import BaseError
+from setuptools.extension import Extension, Library
+
+try:
+    # Attempt to use Cython for building extensions, if available
+    from Cython.Distutils.build_ext import build_ext as _build_ext
+
+    # Additionally, assert that the compiler module will load
+    # also. Ref #1229.
+    __import__('Cython.Compiler.Main')
+except ImportError:
+    _build_ext = _du_build_ext
+
+# make sure _config_vars is initialized
+get_config_var("LDSHARED")
+from distutils.sysconfig import _config_vars as _CONFIG_VARS  # noqa
+
+
+def _customize_compiler_for_shlib(compiler):
+    if sys.platform == "darwin":
+        # building .dylib requires additional compiler flags on OSX; here we
+        # temporarily substitute the pyconfig.h variables so that distutils'
+        # 'customize_compiler' uses them before we build the shared libraries.
+        tmp = _CONFIG_VARS.copy()
+        try:
+            # XXX Help!  I don't have any idea whether these are right...
+            _CONFIG_VARS[
+                'LDSHARED'
+            ] = "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
+            _CONFIG_VARS['CCSHARED'] = " -dynamiclib"
+            _CONFIG_VARS['SO'] = ".dylib"
+            customize_compiler(compiler)
+        finally:
+            _CONFIG_VARS.clear()
+            _CONFIG_VARS.update(tmp)
+    else:
+        customize_compiler(compiler)
+
+
+have_rtld = False
+use_stubs = False
+libtype = 'shared'
+
+if sys.platform == "darwin":
+    use_stubs = True
+elif os.name != 'nt':
+    try:
+        import dl
+
+        use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
+    except ImportError:
+        pass
+
+
+def if_dl(s):
+    return s if have_rtld else ''
+
+
+def get_abi3_suffix():
+    """Return the file extension for an abi3-compliant Extension()"""
+    for suffix in EXTENSION_SUFFIXES:
+        if '.abi3' in suffix:  # Unix
+            return suffix
+        elif suffix == '.pyd':  # Windows
+            return suffix
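+
+
+# On CPython for Linux, for instance, EXTENSION_SUFFIXES typically looks like
+# ['.cpython-311-x86_64-linux-gnu.so', '.abi3.so', '.so'], in which case
+# get_abi3_suffix() returns '.abi3.so' (the exact values vary per build).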
+
+
+class build_ext(_build_ext):
+    editable_mode: bool = False
+    inplace: bool = False
+
+    def run(self):
+        """Build extensions in build directory, then copy if --inplace"""
+        old_inplace, self.inplace = self.inplace, 0
+        _build_ext.run(self)
+        self.inplace = old_inplace
+        if old_inplace:
+            self.copy_extensions_to_source()
+
+    def _get_inplace_equivalent(self, build_py, ext: Extension) -> Tuple[str, str]:
+        fullname = self.get_ext_fullname(ext.name)
+        filename = self.get_ext_filename(fullname)
+        modpath = fullname.split('.')
+        package = '.'.join(modpath[:-1])
+        package_dir = build_py.get_package_dir(package)
+        inplace_file = os.path.join(package_dir, os.path.basename(filename))
+        regular_file = os.path.join(self.build_lib, filename)
+        return (inplace_file, regular_file)
+
+    def copy_extensions_to_source(self):
+        build_py = self.get_finalized_command('build_py')
+        for ext in self.extensions:
+            inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext)
+
+            # Always copy, even if source is older than destination, to ensure
+            # that the right extensions for the current Python/platform are
+            # used.
+            if os.path.exists(regular_file) or not ext.optional:
+                self.copy_file(regular_file, inplace_file, level=self.verbose)
+
+            if ext._needs_stub:
+                inplace_stub = self._get_equivalent_stub(ext, inplace_file)
+                self._write_stub_file(inplace_stub, ext, compile=True)
+                # Always compile stub and remove the original (leave the cache behind)
+                # (this behaviour was observed in previous iterations of the code)
+
+    def _get_equivalent_stub(self, ext: Extension, output_file: str) -> str:
+        dir_ = os.path.dirname(output_file)
+        _, _, name = ext.name.rpartition(".")
+        return f"{os.path.join(dir_, name)}.py"
+
+    def _get_output_mapping(self) -> Iterator[Tuple[str, str]]:
+        if not self.inplace:
+            return
+
+        build_py = self.get_finalized_command('build_py')
+        opt = self.get_finalized_command('install_lib').optimize or ""
+
+        for ext in self.extensions:
+            inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext)
+            yield (regular_file, inplace_file)
+
+            if ext._needs_stub:
+                # This version of `build_ext` always builds artifacts in another dir,
+                # when "inplace=True" is given it just copies them back.
+                # This is done in the `copy_extensions_to_source` function, which
+                # always compiles stub files via `_compile_and_remove_stub`.
+                # At the end of the process, a `.pyc` stub file is created without the
+                # corresponding `.py`.
+
+                inplace_stub = self._get_equivalent_stub(ext, inplace_file)
+                regular_stub = self._get_equivalent_stub(ext, regular_file)
+                inplace_cache = _compiled_file_name(inplace_stub, optimization=opt)
+                output_cache = _compiled_file_name(regular_stub, optimization=opt)
+                yield (output_cache, inplace_cache)
+
+    def get_ext_filename(self, fullname):
+        so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX')
+        if so_ext:
+            filename = os.path.join(*fullname.split('.')) + so_ext
+        else:
+            filename = _build_ext.get_ext_filename(self, fullname)
+            so_ext = get_config_var('EXT_SUFFIX')
+
+        if fullname in self.ext_map:
+            ext = self.ext_map[fullname]
+            use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix()
+            if use_abi3:
+                filename = filename[: -len(so_ext)]
+                so_ext = get_abi3_suffix()
+                filename = filename + so_ext
+            if isinstance(ext, Library):
+                fn, ext = os.path.splitext(filename)
+                return self.shlib_compiler.library_filename(fn, libtype)
+            elif use_stubs and ext._links_to_dynamic:
+                d, fn = os.path.split(filename)
+                return os.path.join(d, 'dl-' + fn)
+        return filename
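+
+    # The SETUPTOOLS_EXT_SUFFIX override above is mainly useful when
+    # cross-compiling, e.g. (the suffix value is illustrative):
+    #
+    #     SETUPTOOLS_EXT_SUFFIX=".cpython-311-aarch64-linux-gnu.so" \
+    #         python setup.py build_ext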
+
+    def initialize_options(self):
+        _build_ext.initialize_options(self)
+        self.shlib_compiler = None
+        self.shlibs = []
+        self.ext_map = {}
+        self.editable_mode = False
+
+    def finalize_options(self):
+        _build_ext.finalize_options(self)
+        self.extensions = self.extensions or []
+        self.check_extensions_list(self.extensions)
+        self.shlibs = [ext for ext in self.extensions if isinstance(ext, Library)]
+        if self.shlibs:
+            self.setup_shlib_compiler()
+        for ext in self.extensions:
+            ext._full_name = self.get_ext_fullname(ext.name)
+        for ext in self.extensions:
+            fullname = ext._full_name
+            self.ext_map[fullname] = ext
+
+            # distutils 3.1 will also ask for module names
+            # XXX what to do with conflicts?
+            self.ext_map[fullname.split('.')[-1]] = ext
+
+            ltd = self.shlibs and self.links_to_dynamic(ext) or False
+            ns = ltd and use_stubs and not isinstance(ext, Library)
+            ext._links_to_dynamic = ltd
+            ext._needs_stub = ns
+            filename = ext._file_name = self.get_ext_filename(fullname)
+            libdir = os.path.dirname(os.path.join(self.build_lib, filename))
+            if ltd and libdir not in ext.library_dirs:
+                ext.library_dirs.append(libdir)
+            if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
+                ext.runtime_library_dirs.append(os.curdir)
+
+        if self.editable_mode:
+            self.inplace = True
+
+    def setup_shlib_compiler(self):
+        compiler = self.shlib_compiler = new_compiler(
+            compiler=self.compiler, dry_run=self.dry_run, force=self.force
+        )
+        _customize_compiler_for_shlib(compiler)
+
+        if self.include_dirs is not None:
+            compiler.set_include_dirs(self.include_dirs)
+        if self.define is not None:
+            # 'define' option is a list of (name,value) tuples
+            for name, value in self.define:
+                compiler.define_macro(name, value)
+        if self.undef is not None:
+            for macro in self.undef:
+                compiler.undefine_macro(macro)
+        if self.libraries is not None:
+            compiler.set_libraries(self.libraries)
+        if self.library_dirs is not None:
+            compiler.set_library_dirs(self.library_dirs)
+        if self.rpath is not None:
+            compiler.set_runtime_library_dirs(self.rpath)
+        if self.link_objects is not None:
+            compiler.set_link_objects(self.link_objects)
+
+        # hack so distutils' build_extension() builds a library instead
+        compiler.link_shared_object = link_shared_object.__get__(compiler)
+
+    def get_export_symbols(self, ext):
+        if isinstance(ext, Library):
+            return ext.export_symbols
+        return _build_ext.get_export_symbols(self, ext)
+
+    def build_extension(self, ext):
+        ext._convert_pyx_sources_to_lang()
+        _compiler = self.compiler
+        try:
+            if isinstance(ext, Library):
+                self.compiler = self.shlib_compiler
+            _build_ext.build_extension(self, ext)
+            if ext._needs_stub:
+                build_lib = self.get_finalized_command('build_py').build_lib
+                self.write_stub(build_lib, ext)
+        finally:
+            self.compiler = _compiler
+
+    def links_to_dynamic(self, ext):
+        """Return true if 'ext' links to a dynamic lib in the same package"""
+        # XXX this should check to ensure the lib is actually being built
+        # XXX as dynamic, and not just using a locally-found version or a
+        # XXX static-compiled version
+        libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
+        pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
+        return any(pkg + libname in libnames for libname in ext.libraries)
+
+    def get_source_files(self) -> List[str]:
+        return [*_build_ext.get_source_files(self), *self._get_internal_depends()]
+
+    def _get_internal_depends(self) -> Iterator[str]:
+        """Yield ``ext.depends`` that are contained by the project directory"""
+        project_root = Path(self.distribution.src_root or os.curdir).resolve()
+        depends = (dep for ext in self.extensions for dep in ext.depends)
+
+        def skip(orig_path: str, reason: str) -> None:
+            log.info(
+                "dependency %s won't be automatically "
+                "included in the manifest: the path %s",
+                orig_path,
+                reason,
+            )
+
+        for dep in depends:
+            path = Path(dep)
+
+            if path.is_absolute():
+                skip(dep, "must be relative")
+                continue
+
+            if ".." in path.parts:
+                skip(dep, "can't have `..` segments")
+                continue
+
+            try:
+                resolved = (project_root / path).resolve(strict=True)
+            except OSError:
+                skip(dep, "doesn't exist")
+                continue
+
+            try:
+                resolved.relative_to(project_root)
+            except ValueError:
+                skip(dep, "must be inside the project root")
+                continue
+
+            yield path.as_posix()
+
+    def get_outputs(self) -> List[str]:
+        if self.inplace:
+            return list(self.get_output_mapping().keys())
+        return sorted(_build_ext.get_outputs(self) + self.__get_stubs_outputs())
+
+    def get_output_mapping(self) -> Dict[str, str]:
+        """See :class:`setuptools.commands.build.SubCommand`"""
+        mapping = self._get_output_mapping()
+        return dict(sorted(mapping, key=lambda x: x[0]))
+
+    def __get_stubs_outputs(self):
+        # assemble the base name for each extension that needs a stub
+        ns_ext_bases = (
+            os.path.join(self.build_lib, *ext._full_name.split('.'))
+            for ext in self.extensions
+            if ext._needs_stub
+        )
+        # pair each base with the extension
+        pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
+        return list(base + fnext for base, fnext in pairs)
+
+    def __get_output_extensions(self):
+        yield '.py'
+        yield '.pyc'
+        if self.get_finalized_command('build_py').optimize:
+            yield '.pyo'
+
+    def write_stub(self, output_dir, ext, compile=False):
+        stub_file = os.path.join(output_dir, *ext._full_name.split('.')) + '.py'
+        self._write_stub_file(stub_file, ext, compile)
+
+    def _write_stub_file(self, stub_file: str, ext: Extension, compile=False):
+        log.info("writing stub loader for %s to %s", ext._full_name, stub_file)
+        if compile and os.path.exists(stub_file):
+            raise BaseError(stub_file + " already exists! Please delete.")
+        if not self.dry_run:
+            f = open(stub_file, 'w')
+            f.write(
+                '\n'.join(
+                    [
+                        "def __bootstrap__():",
+                        "   global __bootstrap__, __file__, __loader__",
+                        "   import sys, os, pkg_resources, importlib.util"
+                        + if_dl(", dl"),
+                        "   __file__ = pkg_resources.resource_filename"
+                        "(__name__,%r)" % os.path.basename(ext._file_name),
+                        "   del __bootstrap__",
+                        "   if '__loader__' in globals():",
+                        "       del __loader__",
+                        if_dl("   old_flags = sys.getdlopenflags()"),
+                        "   old_dir = os.getcwd()",
+                        "   try:",
+                        "     os.chdir(os.path.dirname(__file__))",
+                        if_dl("     sys.setdlopenflags(dl.RTLD_NOW)"),
+                        "     spec = importlib.util.spec_from_file_location(",
+                        "                __name__, __file__)",
+                        "     mod = importlib.util.module_from_spec(spec)",
+                        "     spec.loader.exec_module(mod)",
+                        "   finally:",
+                        if_dl("     sys.setdlopenflags(old_flags)"),
+                        "     os.chdir(old_dir)",
+                        "__bootstrap__()",
+                        "",  # terminal \n
+                    ]
+                )
+            )
+            f.close()
+        if compile:
+            self._compile_and_remove_stub(stub_file)
+
+    def _compile_and_remove_stub(self, stub_file: str):
+        from distutils.util import byte_compile
+
+        byte_compile([stub_file], optimize=0, force=True, dry_run=self.dry_run)
+        optimize = self.get_finalized_command('install_lib').optimize
+        if optimize > 0:
+            byte_compile(
+                [stub_file], optimize=optimize, force=True, dry_run=self.dry_run
+            )
+        if os.path.exists(stub_file) and not self.dry_run:
+            os.unlink(stub_file)
+
+
+if use_stubs or os.name == 'nt':
+    # Build shared libraries
+    #
+    def link_shared_object(
+        self,
+        objects,
+        output_libname,
+        output_dir=None,
+        libraries=None,
+        library_dirs=None,
+        runtime_library_dirs=None,
+        export_symbols=None,
+        debug=0,
+        extra_preargs=None,
+        extra_postargs=None,
+        build_temp=None,
+        target_lang=None,
+    ):
+        self.link(
+            self.SHARED_LIBRARY,
+            objects,
+            output_libname,
+            output_dir,
+            libraries,
+            library_dirs,
+            runtime_library_dirs,
+            export_symbols,
+            debug,
+            extra_preargs,
+            extra_postargs,
+            build_temp,
+            target_lang,
+        )
+
+else:
+    # Build static libraries everywhere else
+    libtype = 'static'
+
+    def link_shared_object(
+        self,
+        objects,
+        output_libname,
+        output_dir=None,
+        libraries=None,
+        library_dirs=None,
+        runtime_library_dirs=None,
+        export_symbols=None,
+        debug=0,
+        extra_preargs=None,
+        extra_postargs=None,
+        build_temp=None,
+        target_lang=None,
+    ):
+        # XXX we need to either disallow these attrs on Library instances,
+        # or warn/abort here if set, or something...
+        # libraries=None, library_dirs=None, runtime_library_dirs=None,
+        # export_symbols=None, extra_preargs=None, extra_postargs=None,
+        # build_temp=None
+
+        assert output_dir is None  # distutils build_ext doesn't pass this
+        output_dir, filename = os.path.split(output_libname)
+        basename, ext = os.path.splitext(filename)
+        if self.library_filename("x").startswith('lib'):
+            # strip 'lib' prefix; this is kludgy if some platform uses
+            # a different prefix
+            basename = basename[3:]
+
+        self.create_static_lib(objects, basename, output_dir, debug, target_lang)
diff --git a/.venv/Lib/site-packages/setuptools/command/build_py.py b/.venv/Lib/site-packages/setuptools/command/build_py.py
new file mode 100644
index 0000000000000000000000000000000000000000..5709eb6d8c72fb57f1bc6994dda0110b96e9da45
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/build_py.py
@@ -0,0 +1,389 @@
+from functools import partial
+from glob import glob
+from distutils.util import convert_path
+import distutils.command.build_py as orig
+import os
+import fnmatch
+import textwrap
+import io
+import distutils.errors
+import itertools
+import stat
+from pathlib import Path
+from typing import Dict, Iterable, Iterator, List, Optional, Tuple
+
+from ..extern.more_itertools import unique_everseen
+from ..warnings import SetuptoolsDeprecationWarning
+
+
+def make_writable(target):
+    os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
+
+
+class build_py(orig.build_py):
+    """Enhanced 'build_py' command that includes data files with packages
+
+    The data files are specified via a 'package_data' argument to 'setup()'.
+    See 'setuptools.dist.Distribution' for more details.
+
+    Also, this version of the 'build_py' command allows you to specify both
+    'py_modules' and 'packages' in the same setup operation.
+    """
+
+    editable_mode: bool = False
+    existing_egg_info_dir: Optional[str] = None  #: Private API, internal use only.
+
+    def finalize_options(self):
+        orig.build_py.finalize_options(self)
+        self.package_data = self.distribution.package_data
+        self.exclude_package_data = self.distribution.exclude_package_data or {}
+        if 'data_files' in self.__dict__:
+            del self.__dict__['data_files']
+        self.__updated_files = []
+
+    def copy_file(
+        self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1
+    ):
+        # Overwrite base class to allow using links
+        if link:
+            infile = str(Path(infile).resolve())
+            outfile = str(Path(outfile).resolve())
+        return super().copy_file(
+            infile, outfile, preserve_mode, preserve_times, link, level
+        )
+
+    def run(self):
+        """Build modules, packages, and copy data files to build directory"""
+        if not (self.py_modules or self.packages) or self.editable_mode:
+            return
+
+        if self.py_modules:
+            self.build_modules()
+
+        if self.packages:
+            self.build_packages()
+            self.build_package_data()
+
+        # Only compile actual .py files, using our base class' idea of what our
+        # output files are.
+        self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
+
+    def __getattr__(self, attr):
+        "lazily compute data files"
+        if attr == 'data_files':
+            self.data_files = self._get_data_files()
+            return self.data_files
+        return orig.build_py.__getattr__(self, attr)
+
+    def build_module(self, module, module_file, package):
+        outfile, copied = orig.build_py.build_module(self, module, module_file, package)
+        if copied:
+            self.__updated_files.append(outfile)
+        return outfile, copied
+
+    def _get_data_files(self):
+        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+        self.analyze_manifest()
+        return list(map(self._get_pkg_data_files, self.packages or ()))
+
+    def get_data_files_without_manifest(self):
+        """
+        Generate list of ``(package,src_dir,build_dir,filenames)`` tuples,
+        but without triggering any attempt to analyze or build the manifest.
+        """
+        # Prevent possible errors from an unset `manifest_files`
+        # (which would otherwise be set by `analyze_manifest`)
+        self.__dict__.setdefault('manifest_files', {})
+        return list(map(self._get_pkg_data_files, self.packages or ()))
+
+    def _get_pkg_data_files(self, package):
+        # Locate package source directory
+        src_dir = self.get_package_dir(package)
+
+        # Compute package build directory
+        build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+        # Strip directory from globbed filenames
+        filenames = [
+            os.path.relpath(file, src_dir)
+            for file in self.find_data_files(package, src_dir)
+        ]
+        return package, src_dir, build_dir, filenames
+
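+    # Rough shape of the tuple returned above, for a hypothetical package
+    # 'mypkg' kept under 'src/mypkg' with a single data file:
+    #
+    #     ('mypkg', 'src/mypkg', 'build/lib/mypkg', ['data/defaults.json'])
+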
+    def find_data_files(self, package, src_dir):
+        """Return filenames for package's data files in 'src_dir'"""
+        patterns = self._get_platform_patterns(
+            self.package_data,
+            package,
+            src_dir,
+        )
+        globs_expanded = map(partial(glob, recursive=True), patterns)
+        # flatten the expanded globs into an iterable of matches
+        globs_matches = itertools.chain.from_iterable(globs_expanded)
+        glob_files = filter(os.path.isfile, globs_matches)
+        files = itertools.chain(
+            self.manifest_files.get(package, []),
+            glob_files,
+        )
+        return self.exclude_data_files(package, src_dir, files)
+
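+    # Sketch of the resolution above, assuming (hypothetically)
+    # package_data={'mypkg': ['data/*.json']} and src_dir='src/mypkg':
+    #
+    #     patterns   -> ['src/mypkg/data/*.json']
+    #     glob_files -> ['src/mypkg/data/defaults.json']
+    #
+    # plus any manifest entries, minus whatever exclude_data_files() drops.
+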
+    def get_outputs(self, include_bytecode=1) -> List[str]:
+        """See :class:`setuptools.commands.build.SubCommand`"""
+        if self.editable_mode:
+            return list(self.get_output_mapping().keys())
+        return super().get_outputs(include_bytecode)
+
+    def get_output_mapping(self) -> Dict[str, str]:
+        """See :class:`setuptools.commands.build.SubCommand`"""
+        mapping = itertools.chain(
+            self._get_package_data_output_mapping(),
+            self._get_module_mapping(),
+        )
+        return dict(sorted(mapping, key=lambda x: x[0]))
+
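+    # Hypothetical example of the (dest, src) mapping produced above for a
+    # one-module package with one data file:
+    #
+    #     {'build/lib/mypkg/__init__.py': 'src/mypkg/__init__.py',
+    #      'build/lib/mypkg/data/defaults.json': 'src/mypkg/data/defaults.json'}
+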
+    def _get_module_mapping(self) -> Iterator[Tuple[str, str]]:
+        """Iterate over all modules producing (dest, src) pairs."""
+        for package, module, module_file in self.find_all_modules():
+            package = package.split('.')
+            filename = self.get_module_outfile(self.build_lib, package, module)
+            yield (filename, module_file)
+
+    def _get_package_data_output_mapping(self) -> Iterator[Tuple[str, str]]:
+        """Iterate over package data producing (dest, src) pairs."""
+        for package, src_dir, build_dir, filenames in self.data_files:
+            for filename in filenames:
+                target = os.path.join(build_dir, filename)
+                srcfile = os.path.join(src_dir, filename)
+                yield (target, srcfile)
+
+    def build_package_data(self):
+        """Copy data files into build directory"""
+        for target, srcfile in self._get_package_data_output_mapping():
+            self.mkpath(os.path.dirname(target))
+            _outf, _copied = self.copy_file(srcfile, target)
+            make_writable(target)
+
+    def analyze_manifest(self):
+        self.manifest_files = mf = {}
+        if not self.distribution.include_package_data:
+            return
+        src_dirs = {}
+        for package in self.packages or ():
+            # Locate package source directory
+            src_dirs[assert_relative(self.get_package_dir(package))] = package
+
+        if (
+            getattr(self, 'existing_egg_info_dir', None)
+            and Path(self.existing_egg_info_dir, "SOURCES.txt").exists()
+        ):
+            egg_info_dir = self.existing_egg_info_dir
+            manifest = Path(egg_info_dir, "SOURCES.txt")
+            files = manifest.read_text(encoding="utf-8").splitlines()
+        else:
+            self.run_command('egg_info')
+            ei_cmd = self.get_finalized_command('egg_info')
+            egg_info_dir = ei_cmd.egg_info
+            files = ei_cmd.filelist.files
+
+        check = _IncludePackageDataAbuse()
+        for path in self._filter_build_files(files, egg_info_dir):
+            d, f = os.path.split(assert_relative(path))
+            prev = None
+            oldf = f
+            while d and d != prev and d not in src_dirs:
+                prev = d
+                d, df = os.path.split(d)
+                f = os.path.join(df, f)
+            if d in src_dirs:
+                if f == oldf:
+                    if check.is_module(f):
+                        continue  # it's a module, not data
+                else:
+                    importable = check.importable_subpackage(src_dirs[d], f)
+                    if importable:
+                        check.warn(importable)
+                mf.setdefault(src_dirs[d], []).append(path)
+
+    def _filter_build_files(self, files: Iterable[str], egg_info: str) -> Iterator[str]:
+        """
+        ``build_meta`` may try to create egg_info outside of the project directory,
+        and this can be problematic for certain plugins (reported in issue #3500).
+
+        Extensions might also include, among their sources, files created in
+        the ``build_lib`` and ``build_temp`` directories.
+
+        This function filters such invalid files out.
+        """
+        build = self.get_finalized_command("build")
+        build_dirs = (egg_info, self.build_lib, build.build_temp, build.build_base)
+        norm_dirs = [os.path.normpath(p) for p in build_dirs if p]
+
+        for file in files:
+            norm_path = os.path.normpath(file)
+            if not os.path.isabs(file) or all(d not in norm_path for d in norm_dirs):
+                yield file
+
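+    # Illustration of the rule above (paths hypothetical): a relative entry
+    # like 'mypkg/data.json' is always kept, while an absolute path such as
+    # '/tmp/proj/build/lib/mypkg/data.json' is dropped because it falls under
+    # one of the normalized build directories.
+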
+    def get_data_files(self):
+        pass  # Lazily compute data files in _get_data_files() function.
+
+    def check_package(self, package, package_dir):
+        """Check namespace packages' __init__ for declare_namespace"""
+        try:
+            return self.packages_checked[package]
+        except KeyError:
+            pass
+
+        init_py = orig.build_py.check_package(self, package, package_dir)
+        self.packages_checked[package] = init_py
+
+        if not init_py or not self.distribution.namespace_packages:
+            return init_py
+
+        for pkg in self.distribution.namespace_packages:
+            if pkg == package or pkg.startswith(package + '.'):
+                break
+        else:
+            return init_py
+
+        with io.open(init_py, 'rb') as f:
+            contents = f.read()
+        if b'declare_namespace' not in contents:
+            raise distutils.errors.DistutilsError(
+                "Namespace package problem: %s is a namespace package, but "
+                "its\n__init__.py does not call declare_namespace()! Please "
+                'fix it.\n(See the setuptools manual under '
+                '"Namespace Packages" for details.)\n"' % (package,)
+            )
+        return init_py
+
+    def initialize_options(self):
+        self.packages_checked = {}
+        orig.build_py.initialize_options(self)
+        self.editable_mode = False
+        self.existing_egg_info_dir = None
+
+    def get_package_dir(self, package):
+        res = orig.build_py.get_package_dir(self, package)
+        if self.distribution.src_root is not None:
+            return os.path.join(self.distribution.src_root, res)
+        return res
+
+    def exclude_data_files(self, package, src_dir, files):
+        """Filter filenames for package's data files in 'src_dir'"""
+        files = list(files)
+        patterns = self._get_platform_patterns(
+            self.exclude_package_data,
+            package,
+            src_dir,
+        )
+        match_groups = (fnmatch.filter(files, pattern) for pattern in patterns)
+        # flatten the groups of matches into an iterable of matches
+        matches = itertools.chain.from_iterable(match_groups)
+        bad = set(matches)
+        keepers = (fn for fn in files if fn not in bad)
+        # ditch dupes
+        return list(unique_everseen(keepers))
+
+    @staticmethod
+    def _get_platform_patterns(spec, package, src_dir):
+        """
+        yield platform-specific path patterns (suitable for glob
+        or fn_match) from a glob-based spec (such as
+        self.package_data or self.exclude_package_data)
+        matching package in src_dir.
+        """
+        raw_patterns = itertools.chain(
+            spec.get('', []),
+            spec.get(package, []),
+        )
+        return (
+            # Each pattern has to be converted to a platform-specific path
+            os.path.join(src_dir, convert_path(pattern))
+            for pattern in raw_patterns
+        )
+
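+    # Worked example (POSIX separators assumed, values hypothetical): with
+    #     spec = {'': ['*.txt'], 'mypkg': ['data/*.json']}
+    # _get_platform_patterns(spec, 'mypkg', 'src/mypkg') yields
+    # 'src/mypkg/*.txt' and 'src/mypkg/data/*.json'.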
+
+def assert_relative(path):
+    if not os.path.isabs(path):
+        return path
+    from distutils.errors import DistutilsSetupError
+
+    msg = (
+        textwrap.dedent(
+            """
+        Error: setup script specifies an absolute path:
+
+            %s
+
+        setup() arguments must *always* be /-separated paths relative to the
+        setup.py directory, *never* absolute paths.
+        """
+        ).lstrip()
+        % path
+    )
+    raise DistutilsSetupError(msg)
+
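+# Quick illustration of the helper above (paths hypothetical):
+# assert_relative('mypkg/data.txt') returns the path unchanged, while
+# assert_relative('/abs/mypkg/data.txt') raises DistutilsSetupError with the
+# message built from the template.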
+
+class _IncludePackageDataAbuse:
+    """Inform users that package or module is included as 'data file'"""
+
+    class _Warning(SetuptoolsDeprecationWarning):
+        _SUMMARY = """
+        Package {importable!r} is absent from the `packages` configuration.
+        """
+
+        _DETAILS = """
+        ############################
+        # Package would be ignored #
+        ############################
+        Python recognizes {importable!r} as an importable package[^1],
+        but it is absent from setuptools' `packages` configuration.
+
+        This leads to an ambiguous overall configuration. If you want to distribute this
+        package, please make sure that {importable!r} is explicitly added
+        to the `packages` configuration field.
+
+        Alternatively, you can also rely on setuptools' discovery methods
+        (for example by using `find_namespace_packages(...)`/`find_namespace:`
+        instead of `find_packages(...)`/`find:`).
+
+        You can read more about "package discovery" on setuptools documentation page:
+
+        - https://setuptools.pypa.io/en/latest/userguide/package_discovery.html
+
+        If you don't want {importable!r} to be distributed and are
+        already explicitly excluding {importable!r} via
+        `find_namespace_packages(...)/find_namespace` or `find_packages(...)/find`,
+        you can try to use `exclude_package_data`, or `include-package-data=False` in
+        combination with a more fine-grained `package-data` configuration.
+
+        You can read more about "package data files" on setuptools documentation page:
+
+        - https://setuptools.pypa.io/en/latest/userguide/datafiles.html
+
+
+        [^1]: For Python, any directory (with suitable naming) can be imported,
+              even if it does not contain any `.py` files.
+              On the other hand, there is currently no concept of a package data
+              directory; all directories are treated like packages.
+        """
+        # _DUE_DATE: still not defined as this is particularly controversial.
+        # Warning initially introduced in May 2022. See issue #3340 for discussion.
+
+    def __init__(self):
+        self._already_warned = set()
+
+    def is_module(self, file):
+        return file.endswith(".py") and file[: -len(".py")].isidentifier()
+
+    def importable_subpackage(self, parent, file):
+        pkg = Path(file).parent
+        parts = list(itertools.takewhile(str.isidentifier, pkg.parts))
+        if parts:
+            return ".".join([parent, *parts])
+        return None
+
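+    # Examples of the two checks above (file names hypothetical):
+    #
+    #     is_module('module.py')                          -> True
+    #     is_module('data.txt')                           -> False
+    #     importable_subpackage('mypkg', 'sub/data.txt')  -> 'mypkg.sub'
+    #     importable_subpackage('mypkg', 'data.txt')      -> None
+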
+    def warn(self, importable):
+        if importable not in self._already_warned:
+            self._Warning.emit(importable=importable)
+            self._already_warned.add(importable)
diff --git a/.venv/Lib/site-packages/setuptools/command/develop.py b/.venv/Lib/site-packages/setuptools/command/develop.py
new file mode 100644
index 0000000000000000000000000000000000000000..27bcf9eb5bca0f0eda0c64166110884c37f08a3a
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/develop.py
@@ -0,0 +1,188 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import glob
+import io
+
+from setuptools.command.easy_install import easy_install
+from setuptools import _path
+from setuptools import namespaces
+import setuptools
+
+
+class develop(namespaces.DevelopInstaller, easy_install):
+    """Set up package for development"""
+
+    description = "install package in 'development mode'"
+
+    user_options = easy_install.user_options + [
+        ("uninstall", "u", "Uninstall this source package"),
+        ("egg-path=", None, "Set the path to be used in the .egg-link file"),
+    ]
+
+    boolean_options = easy_install.boolean_options + ['uninstall']
+
+    command_consumes_arguments = False  # override base
+
+    def run(self):
+        if self.uninstall:
+            self.multi_version = True
+            self.uninstall_link()
+            self.uninstall_namespaces()
+        else:
+            self.install_for_development()
+        self.warn_deprecated_options()
+
+    def initialize_options(self):
+        self.uninstall = None
+        self.egg_path = None
+        easy_install.initialize_options(self)
+        self.setup_path = None
+        self.always_copy_from = '.'  # always copy eggs installed in curdir
+
+    def finalize_options(self):
+        import pkg_resources
+
+        ei = self.get_finalized_command("egg_info")
+        self.args = [ei.egg_name]
+
+        easy_install.finalize_options(self)
+        self.expand_basedirs()
+        self.expand_dirs()
+        # pick up setup-dir .egg files only: no .egg-info
+        self.package_index.scan(glob.glob('*.egg'))
+
+        egg_link_fn = ei.egg_name + '.egg-link'
+        self.egg_link = os.path.join(self.install_dir, egg_link_fn)
+        self.egg_base = ei.egg_base
+        if self.egg_path is None:
+            self.egg_path = os.path.abspath(ei.egg_base)
+
+        target = _path.normpath(self.egg_base)
+        egg_path = _path.normpath(os.path.join(self.install_dir, self.egg_path))
+        if egg_path != target:
+            raise DistutilsOptionError(
+                "--egg-path must be a relative path from the install"
+                " directory to " + target
+            )
+
+        # Make a distribution for the package's source
+        self.dist = pkg_resources.Distribution(
+            target,
+            pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
+            project_name=ei.egg_name,
+        )
+
+        self.setup_path = self._resolve_setup_path(
+            self.egg_base,
+            self.install_dir,
+            self.egg_path,
+        )
+
+    @staticmethod
+    def _resolve_setup_path(egg_base, install_dir, egg_path):
+        """
+        Generate a path from egg_base back to '.', where the setup
+        script resides, and ensure that this path, resolved from
+        $install_dir/$egg_path, points back to the setup directory.
+        """
+        path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
+        if path_to_setup != os.curdir:
+            path_to_setup = '../' * (path_to_setup.count('/') + 1)
+        resolved = _path.normpath(os.path.join(install_dir, egg_path, path_to_setup))
+        curdir = _path.normpath(os.curdir)
+        if resolved != curdir:
+            raise DistutilsOptionError(
+                "Can't get a consistent path to setup script from"
+                " installation directory",
+                resolved,
+                curdir,
+            )
+        return path_to_setup
+
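+    # Worked example (hypothetical values, POSIX separators): with
+    # egg_base='src', path_to_setup becomes '../' ('src' contains no '/',
+    # so '../' * 1); the function then checks that install_dir/egg_path/../
+    # normalizes back to the current directory before returning it.
+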
+    def install_for_development(self):
+        self.run_command('egg_info')
+
+        # Build extensions in-place
+        self.reinitialize_command('build_ext', inplace=1)
+        self.run_command('build_ext')
+
+        if setuptools.bootstrap_install_from:
+            self.easy_install(setuptools.bootstrap_install_from)
+            setuptools.bootstrap_install_from = None
+
+        self.install_namespaces()
+
+        # create an .egg-link in the installation dir, pointing to our egg
+        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
+        if not self.dry_run:
+            with open(self.egg_link, "w") as f:
+                f.write(self.egg_path + "\n" + self.setup_path)
+        # postprocess the installed distro, fixing up .pth, installing scripts,
+        # and handling requirements
+        self.process_distribution(None, self.dist, not self.no_deps)
+
+    def uninstall_link(self):
+        if os.path.exists(self.egg_link):
+            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
+            egg_link_file = open(self.egg_link)
+            contents = [line.rstrip() for line in egg_link_file]
+            egg_link_file.close()
+            if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
+                log.warn("Link points to %s: uninstall aborted", contents)
+                return
+            if not self.dry_run:
+                os.unlink(self.egg_link)
+        if not self.dry_run:
+            self.update_pth(self.dist)  # remove any .pth link to us
+        if self.distribution.scripts:
+            # XXX should also check for entry point scripts!
+            log.warn("Note: you must uninstall or replace scripts manually!")
+
+    def install_egg_scripts(self, dist):
+        if dist is not self.dist:
+            # Installing a dependency, so fall back to normal behavior
+            return easy_install.install_egg_scripts(self, dist)
+
+        # create wrapper scripts in the script dir, pointing to dist.scripts
+
+        # new-style...
+        self.install_wrapper_scripts(dist)
+
+        # ...and old-style
+        for script_name in self.distribution.scripts or []:
+            script_path = os.path.abspath(convert_path(script_name))
+            script_name = os.path.basename(script_path)
+            with io.open(script_path) as strm:
+                script_text = strm.read()
+            self.install_script(dist, script_name, script_text, script_path)
+
+    def install_wrapper_scripts(self, dist):
+        dist = VersionlessRequirement(dist)
+        return easy_install.install_wrapper_scripts(self, dist)
+
+
+class VersionlessRequirement:
+    """
+    Adapt a pkg_resources.Distribution to simply return the project
+    name as the 'requirement' so that scripts will work across
+    multiple versions.
+
+    >>> from pkg_resources import Distribution
+    >>> dist = Distribution(project_name='foo', version='1.0')
+    >>> str(dist.as_requirement())
+    'foo==1.0'
+    >>> adapted_dist = VersionlessRequirement(dist)
+    >>> str(adapted_dist.as_requirement())
+    'foo'
+    """
+
+    def __init__(self, dist):
+        self.__dist = dist
+
+    def __getattr__(self, name):
+        return getattr(self.__dist, name)
+
+    def as_requirement(self):
+        return self.project_name
diff --git a/.venv/Lib/site-packages/setuptools/command/dist_info.py b/.venv/Lib/site-packages/setuptools/command/dist_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..9df625cee7d8e48f8be9131b59b76a3352ec866b
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/dist_info.py
@@ -0,0 +1,127 @@
+"""
+Create a dist_info directory, as defined in the wheel specification.
+"""
+
+import os
+import shutil
+import sys
+from contextlib import contextmanager
+from distutils import log
+from distutils.core import Command
+from pathlib import Path
+
+from .. import _normalization
+from ..warnings import SetuptoolsDeprecationWarning
+
+
+class dist_info(Command):
+    """
+    This command is private and reserved for internal use of setuptools,
+    users should rely on ``setuptools.build_meta`` APIs.
+    """
+
+    description = "DO NOT CALL DIRECTLY, INTERNAL ONLY: create .dist-info directory"
+
+    user_options = [
+        (
+            'egg-base=',
+            'e',
+            "directory containing .egg-info directories"
+            " (default: top of the source tree)"
+            " DEPRECATED: use --output-dir.",
+        ),
+        (
+            'output-dir=',
+            'o',
+            "directory inside of which the .dist-info will be"
+            "created (default: top of the source tree)",
+        ),
+        ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
+        ('tag-build=', 'b', "Specify explicit tag to add to version number"),
+        ('no-date', 'D', "Don't include date stamp [default]"),
+        ('keep-egg-info', None, "*TRANSITIONAL* will be removed in the future"),
+    ]
+
+    boolean_options = ['tag-date', 'keep-egg-info']
+    negative_opt = {'no-date': 'tag-date'}
+
+    def initialize_options(self):
+        self.egg_base = None
+        self.output_dir = None
+        self.name = None
+        self.dist_info_dir = None
+        self.tag_date = None
+        self.tag_build = None
+        self.keep_egg_info = False
+
+    def finalize_options(self):
+        if self.egg_base:
+            msg = "--egg-base is deprecated for dist_info command. Use --output-dir."
+            SetuptoolsDeprecationWarning.emit(msg, due_date=(2023, 9, 26))
+            # This command is internal to setuptools, therefore it should be safe
+            # to remove the deprecated support soon.
+            self.output_dir = self.egg_base or self.output_dir
+
+        dist = self.distribution
+        project_dir = dist.src_root or os.curdir
+        self.output_dir = Path(self.output_dir or project_dir)
+
+        egg_info = self.reinitialize_command("egg_info")
+        egg_info.egg_base = str(self.output_dir)
+
+        if self.tag_date:
+            egg_info.tag_date = self.tag_date
+        else:
+            self.tag_date = egg_info.tag_date
+
+        if self.tag_build:
+            egg_info.tag_build = self.tag_build
+        else:
+            self.tag_build = egg_info.tag_build
+
+        egg_info.finalize_options()
+        self.egg_info = egg_info
+
+        name = _normalization.safer_name(dist.get_name())
+        version = _normalization.safer_best_effort_version(dist.get_version())
+        self.name = f"{name}-{version}"
+        self.dist_info_dir = os.path.join(self.output_dir, f"{self.name}.dist-info")
+
+    @contextmanager
+    def _maybe_bkp_dir(self, dir_path: str, requires_bkp: bool):
+        if requires_bkp:
+            bkp_name = f"{dir_path}.__bkp__"
+            _rm(bkp_name, ignore_errors=True)
+            _copy(dir_path, bkp_name, dirs_exist_ok=True, symlinks=True)
+            try:
+                yield
+            finally:
+                _rm(dir_path, ignore_errors=True)
+                shutil.move(bkp_name, dir_path)
+        else:
+            yield
+
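+    # Sketch of the backup dance above (directory names hypothetical): with
+    # requires_bkp=True and dir_path='pkg.egg-info', the directory is copied
+    # to 'pkg.egg-info.__bkp__', the wrapped block runs, and the backup is
+    # finally moved back over whatever the block left behind.
+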
+    def run(self):
+        self.output_dir.mkdir(parents=True, exist_ok=True)
+        self.egg_info.run()
+        egg_info_dir = self.egg_info.egg_info
+        assert os.path.isdir(egg_info_dir), ".egg-info dir should have been created"
+
+        log.info("creating '{}'".format(os.path.abspath(self.dist_info_dir)))
+        bdist_wheel = self.get_finalized_command('bdist_wheel')
+
+        # TODO: if bdist_wheel is merged into setuptools, just add "keep_egg_info" there
+        with self._maybe_bkp_dir(egg_info_dir, self.keep_egg_info):
+            bdist_wheel.egg2dist(egg_info_dir, self.dist_info_dir)
+
+
+def _rm(dir_name, **opts):
+    if os.path.isdir(dir_name):
+        shutil.rmtree(dir_name, **opts)
+
+
+def _copy(src, dst, **opts):
+    if sys.version_info < (3, 8):
+        opts.pop("dirs_exist_ok", None)
+    shutil.copytree(src, dst, **opts)
diff --git a/.venv/Lib/site-packages/setuptools/command/easy_install.py b/.venv/Lib/site-packages/setuptools/command/easy_install.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ba4f094de0200f29efa131336241322a613e7d0
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/command/easy_install.py
@@ -0,0 +1,2366 @@
+"""
+Easy Install
+------------
+
+A tool for doing automatic download/extract/build of distutils-based Python
+packages.  For detailed documentation, see the accompanying EasyInstall.txt
+file, or visit the `EasyInstall home page`__.
+
+__ https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
+
+"""
+
+from glob import glob
+from distutils.util import get_platform
+from distutils.util import convert_path, subst_vars
+from distutils.errors import (
+    DistutilsArgError,
+    DistutilsOptionError,
+    DistutilsError,
+    DistutilsPlatformError,
+)
+from distutils import log, dir_util
+from distutils.command.build_scripts import first_line_re
+from distutils.spawn import find_executable
+from distutils.command import install
+import sys
+import os
+import zipimport
+import shutil
+import tempfile
+import zipfile
+import re
+import stat
+import random
+import textwrap
+import warnings
+import site
+import struct
+import contextlib
+import subprocess
+import shlex
+import io
+import configparser
+import sysconfig
+
+
+from sysconfig import get_path
+
+from setuptools import Command
+from setuptools.sandbox import run_setup
+from setuptools.command import setopt
+from setuptools.archive_util import unpack_archive
+from setuptools.package_index import (
+    PackageIndex,
+    parse_requirement_arg,
+    URL_SCHEME,
+)
+from setuptools.command import bdist_egg, egg_info
+from setuptools.warnings import SetuptoolsDeprecationWarning, SetuptoolsWarning
+from setuptools.wheel import Wheel
+from pkg_resources import (
+    normalize_path,
+    resource_string,
+    get_distribution,
+    find_distributions,
+    Environment,
+    Requirement,
+    Distribution,
+    PathMetadata,
+    EggMetadata,
+    WorkingSet,
+    DistributionNotFound,
+    VersionConflict,
+    DEVELOP_DIST,
+)
+import pkg_resources
+from .. import py312compat
+from .._path import ensure_directory
+from ..extern.jaraco.text import yield_lines
+
+
+# Turn on PEP440Warnings
+warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
+
+__all__ = [
+    'easy_install',
+    'PthDistributions',
+    'extract_wininst_cfg',
+    'get_exe_prefixes',
+]
+
+
+def is_64bit():
+    return struct.calcsize("P") == 8
+
+
+def _to_bytes(s):
+    return s.encode('utf8')
+
+
+def isascii(s):
+    try:
+        s.encode('ascii')
+        return True
+    except UnicodeError:
+        return False
+
+
+def _one_liner(text):
+    return textwrap.dedent(text).strip().replace('\n', '; ')
+
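+# Tiny example (illustrative only): _one_liner('''
+#     import os
+#     os.makedirs('x')
+# ''') evaluates to "import os; os.makedirs('x')".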
+
+class easy_install(Command):
+    """Manage a download/build/install process"""
+
+    description = "Find/get/install Python packages"
+    command_consumes_arguments = True
+
+    user_options = [
+        ('prefix=', None, "installation prefix"),
+        ("zip-ok", "z", "install package as a zipfile"),
+        ("multi-version", "m", "make apps have to require() a version"),
+        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
+        ("install-dir=", "d", "install package to DIR"),
+        ("script-dir=", "s", "install scripts to DIR"),
+        ("exclude-scripts", "x", "Don't install scripts"),
+        ("always-copy", "a", "Copy all needed packages to install dir"),
+        ("index-url=", "i", "base URL of Python Package Index"),
+        ("find-links=", "f", "additional URL(s) to search for packages"),
+        ("build-directory=", "b", "download/extract/build in DIR; keep the results"),
+        (
+            'optimize=',
+            'O',
+            "also compile with optimization: -O1 for \"python -O\", "
+            "-O2 for \"python -OO\", and -O0 to disable [default: -O0]",
+        ),
+        ('record=', None, "filename in which to record list of installed files"),
+        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
+        ('site-dirs=', 'S', "list of directories where .pth files work"),
+        ('editable', 'e', "Install specified packages in editable form"),
+        ('no-deps', 'N', "don't install dependencies"),
+        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
+        ('local-snapshots-ok', 'l', "allow building eggs from local checkouts"),
+        ('version', None, "print version information and exit"),
+        (
+            'no-find-links',
+            None,
+            "Don't load find-links defined in packages being installed",
+        ),
+        ('user', None, "install in user site-package '%s'" % site.USER_SITE),
+    ]
+    boolean_options = [
+        'zip-ok',
+        'multi-version',
+        'exclude-scripts',
+        'upgrade',
+        'always-copy',
+        'editable',
+        'no-deps',
+        'local-snapshots-ok',
+        'version',
+        'user',
+    ]
+
+    negative_opt = {'always-unzip': 'zip-ok'}
+    create_index = PackageIndex
+
+    def initialize_options(self):
+        EasyInstallDeprecationWarning.emit()
+
+        # the --user option seems to be an opt-in one,
+        # so the default should be False.
+        self.user = 0
+        self.zip_ok = self.local_snapshots_ok = None
+        self.install_dir = self.script_dir = self.exclude_scripts = None
+        self.index_url = None
+        self.find_links = None
+        self.build_directory = None
+        self.args = None
+        self.optimize = self.record = None
+        self.upgrade = self.always_copy = self.multi_version = None
+        self.editable = self.no_deps = self.allow_hosts = None
+        self.root = self.prefix = self.no_report = None
+        self.version = None
+        self.install_purelib = None  # for pure module distributions
+        self.install_platlib = None  # non-pure (dists w/ extensions)
+        self.install_headers = None  # for C/C++ headers
+        self.install_lib = None  # set to either purelib or platlib
+        self.install_scripts = None
+        self.install_data = None
+        self.install_base = None
+        self.install_platbase = None
+        self.install_userbase = site.USER_BASE
+        self.install_usersite = site.USER_SITE
+        self.no_find_links = None
+
+        # Options not specifiable via command line
+        self.package_index = None
+        self.pth_file = self.always_copy_from = None
+        self.site_dirs = None
+        self.installed_projects = {}
+        # Always read easy_install options, even if we are subclassed, or have
+        # an independent instance created.  This ensures that defaults will
+        # always come from the standard configuration file(s)' "easy_install"
+        # section, even if this is a "develop" or "install" command, or some
+        # other embedding.
+        self._dry_run = None
+        self.verbose = self.distribution.verbose
+        self.distribution._set_command_options(
+            self, self.distribution.get_option_dict('easy_install')
+        )
+
+    def delete_blockers(self, blockers):
+        extant_blockers = (
+            filename
+            for filename in blockers
+            if os.path.exists(filename) or os.path.islink(filename)
+        )
+        list(map(self._delete_path, extant_blockers))
+
+    def _delete_path(self, path):
+        log.info("Deleting %s", path)
+        if self.dry_run:
+            return
+
+        is_tree = os.path.isdir(path) and not os.path.islink(path)
+        remover = _rmtree if is_tree else os.unlink
+        remover(path)
+
+    @staticmethod
+    def _render_version():
+        """
+        Render the Setuptools version and installation details, then exit.
+        """
+        ver = '{}.{}'.format(*sys.version_info)
+        dist = get_distribution('setuptools')
+        tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
+        print(tmpl.format(**locals()))
+        raise SystemExit()
+
+    def finalize_options(self):  # noqa: C901  # is too complex (25)  # FIXME
+        self.version and self._render_version()
+
+        py_version = sys.version.split()[0]
+
+        self.config_vars = dict(sysconfig.get_config_vars())
+
+        self.config_vars.update(
+            {
+                'dist_name': self.distribution.get_name(),
+                'dist_version': self.distribution.get_version(),
+                'dist_fullname': self.distribution.get_fullname(),
+                'py_version': py_version,
+                'py_version_short': (
+                    f'{sys.version_info.major}.{sys.version_info.minor}'
+                ),
+                'py_version_nodot': f'{sys.version_info.major}{sys.version_info.minor}',
+                'sys_prefix': self.config_vars['prefix'],
+                'sys_exec_prefix': self.config_vars['exec_prefix'],
+                # Only python 3.2+ has abiflags
+                'abiflags': getattr(sys, 'abiflags', ''),
+                'platlibdir': getattr(sys, 'platlibdir', 'lib'),
+            }
+        )
+        with contextlib.suppress(AttributeError):
+            # only for distutils outside stdlib
+            self.config_vars.update(
+                {
+                    'implementation_lower': install._get_implementation().lower(),
+                    'implementation': install._get_implementation(),
+                }
+            )
+
+        # pypa/distutils#113 Python 3.9 compat
+        self.config_vars.setdefault(
+            'py_version_nodot_plat',
+            getattr(sys, 'windir', '').replace('.', ''),
+        )
+
+        self.config_vars['userbase'] = self.install_userbase
+        self.config_vars['usersite'] = self.install_usersite
+        if self.user and not site.ENABLE_USER_SITE:
+            log.warn("WARNING: The user site-packages directory is disabled.")
+
+        self._fix_install_dir_for_user_site()
+
+        self.expand_basedirs()
+        self.expand_dirs()
+
+        self._expand(
+            'install_dir',
+            'script_dir',
+            'build_directory',
+            'site_dirs',
+        )
+        # If a non-default installation directory was specified, default the
+        # script directory to match it.
+        if self.script_dir is None:
+            self.script_dir = self.install_dir
+
+        if self.no_find_links is None:
+            self.no_find_links = False
+
+        # Let install_dir get set by install_lib command, which in turn
+        # gets its info from the install command, and takes into account
+        # --prefix and --home and all that other crud.
+        self.set_undefined_options('install_lib', ('install_dir', 'install_dir'))
+        # Likewise, set default script_dir from 'install_scripts.install_dir'
+        self.set_undefined_options('install_scripts', ('install_dir', 'script_dir'))
+
+        if self.user and self.install_purelib:
+            self.install_dir = self.install_purelib
+            self.script_dir = self.install_scripts
+        # default --record from the install command
+        self.set_undefined_options('install', ('record', 'record'))
+        self.all_site_dirs = get_site_dirs()
+        self.all_site_dirs.extend(self._process_site_dirs(self.site_dirs))
+
+        if not self.editable:
+            self.check_site_dir()
+        default_index = os.getenv("__EASYINSTALL_INDEX", "https://pypi.org/simple/")
+        # ^ Private API for testing purposes only
+        self.index_url = self.index_url or default_index
+        self.shadow_path = self.all_site_dirs[:]
+        for path_item in self.install_dir, normalize_path(self.script_dir):
+            if path_item not in self.shadow_path:
+                self.shadow_path.insert(0, path_item)
+
+        if self.allow_hosts is not None:
+            hosts = [s.strip() for s in self.allow_hosts.split(',')]
+        else:
+            hosts = ['*']
+        if self.package_index is None:
+            self.package_index = self.create_index(
+                self.index_url,
+                search_path=self.shadow_path,
+                hosts=hosts,
+            )
+        self.local_index = Environment(self.shadow_path + sys.path)
+
+        if self.find_links is not None:
+            if isinstance(self.find_links, str):
+                self.find_links = self.find_links.split()
+        else:
+            self.find_links = []
+        if self.local_snapshots_ok:
+            self.package_index.scan_egg_links(self.shadow_path + sys.path)
+        if not self.no_find_links:
+            self.package_index.add_find_links(self.find_links)
+        self.set_undefined_options('install_lib', ('optimize', 'optimize'))
+        self.optimize = self._validate_optimize(self.optimize)
+
+        if self.editable and not self.build_directory:
+            raise DistutilsArgError(
+                "Must specify a build directory (-b) when using --editable"
+            )
+        if not self.args:
+            raise DistutilsArgError(
+                "No urls, filenames, or requirements specified (see --help)"
+            )
+
+        self.outputs = []
+
+    @staticmethod
+    def _process_site_dirs(site_dirs):
+        if site_dirs is None:
+            return
+
+        # Materialize the normalized sys.path entries up front; a bare map()
+        # iterator would be exhausted after the first membership test below.
+        normpath = [normalize_path(p) for p in sys.path]
+        site_dirs = [os.path.expanduser(s.strip()) for s in site_dirs.split(',')]
+        for d in site_dirs:
+            if not os.path.isdir(d):
+                log.warn("%s (in --site-dirs) does not exist", d)
+            elif normalize_path(d) not in normpath:
+                raise DistutilsOptionError(d + " (in --site-dirs) is not on sys.path")
+            else:
+                yield normalize_path(d)
+
+    @staticmethod
+    def _validate_optimize(value):
+        try:
+            value = int(value)
+            if value not in range(3):
+                raise ValueError
+        except ValueError as e:
+            raise DistutilsOptionError("--optimize must be 0, 1, or 2") from e
+
+        return value
+
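+    # For instance (illustrative): _validate_optimize('2') returns 2, while
+    # _validate_optimize('3') and _validate_optimize('yes') both raise
+    # DistutilsOptionError("--optimize must be 0, 1, or 2").
+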
+    def _fix_install_dir_for_user_site(self):
+        """
+        Fix the install_dir if "--user" was used.
+        """
+        if not self.user:
+            return
+
+        self.create_home_path()
+        if self.install_userbase is None:
+            msg = "User base directory is not specified"
+            raise DistutilsPlatformError(msg)
+        self.install_base = self.install_platbase = self.install_userbase
+        scheme_name = f'{os.name}_user'
+        self.select_scheme(scheme_name)
+
+    def _expand_attrs(self, attrs):
+        for attr in attrs:
+            val = getattr(self, attr)
+            if val is not None:
+                if os.name == 'posix' or os.name == 'nt':
+                    val = os.path.expanduser(val)
+                val = subst_vars(val, self.config_vars)
+                setattr(self, attr, val)
+
+    def expand_basedirs(self):
+        """Calls `os.path.expanduser` on install_base, install_platbase and
+        root."""
+        self._expand_attrs(['install_base', 'install_platbase', 'root'])
+
+    def expand_dirs(self):
+        """Calls `os.path.expanduser` on install dirs."""
+        dirs = [
+            'install_purelib',
+            'install_platlib',
+            'install_lib',
+            'install_headers',
+            'install_scripts',
+            'install_data',
+        ]
+        self._expand_attrs(dirs)
+
+    def run(self, show_deprecation=True):
+        if show_deprecation:
+            self.announce(
+                "WARNING: The easy_install command is deprecated "
+                "and will be removed in a future version.",
+                log.WARN,
+            )
+        if self.verbose != self.distribution.verbose:
+            log.set_verbosity(self.verbose)
+        try:
+            for spec in self.args:
+                self.easy_install(spec, not self.no_deps)
+            if self.record:
+                outputs = self.outputs
+                if self.root:  # strip any package prefix
+                    root_len = len(self.root)
+                    for counter in range(len(outputs)):
+                        outputs[counter] = outputs[counter][root_len:]
+                from distutils import file_util
+
+                self.execute(
+                    file_util.write_file,
+                    (self.record, outputs),
+                    "writing list of installed files to '%s'" % self.record,
+                )
+            self.warn_deprecated_options()
+        finally:
+            log.set_verbosity(self.distribution.verbose)
+
+    def pseudo_tempname(self):
+        """Return a pseudo-tempname base in the install directory.
+        This code is intentionally naive; if a malicious party can write to
+        the target directory you're already in deep doodoo.
+        """
+        try:
+            pid = os.getpid()
+        except Exception:
+            pid = random.randint(0, sys.maxsize)
+        return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
+
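+    # e.g. (hypothetical): with install_dir='/tmp/site' and pid 1234 this
+    # yields '/tmp/site/test-easy-install-1234'.
+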
+    def warn_deprecated_options(self):
+        pass
+
+    def check_site_dir(self):  # noqa: C901  # is too complex (12)  # FIXME
+        """Verify that self.install_dir is .pth-capable dir, if needed"""
+
+        instdir = normalize_path(self.install_dir)
+        pth_file = os.path.join(instdir, 'easy-install.pth')
+
+        if not os.path.exists(instdir):
+            try:
+                os.makedirs(instdir)
+            except (OSError, IOError):
+                self.cant_write_to_target()
+
+        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
+        is_site_dir = instdir in self.all_site_dirs
+
+        if not is_site_dir and not self.multi_version:
+            # No?  Then directly test whether it does .pth file processing
+            is_site_dir = self.check_pth_processing()
+        else:
+            # make sure we can write to target dir
+            testfile = self.pseudo_tempname() + '.write-test'
+            test_exists = os.path.exists(testfile)
+            try:
+                if test_exists:
+                    os.unlink(testfile)
+                open(testfile, 'w').close()
+                os.unlink(testfile)
+            except (OSError, IOError):
+                self.cant_write_to_target()
+
+        if not is_site_dir and not self.multi_version:
+            # Can't install non-multi to non-site dir with easy_install
+            pythonpath = os.environ.get('PYTHONPATH', '')
+            log.warn(self.__no_default_msg, self.install_dir, pythonpath)
+
+        if is_site_dir:
+            if self.pth_file is None:
+                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
+        else:
+            self.pth_file = None
+
+        if self.multi_version and not os.path.exists(pth_file):
+            self.pth_file = None  # don't create a .pth file
+        self.install_dir = instdir
+
+    __cant_write_msg = textwrap.dedent(
+        """
+        can't create or remove files in install directory
+
+        The following error occurred while trying to add or remove files in the
+        installation directory:
+
+            %s
+
+        The installation directory you specified (via --install-dir, --prefix, or
+        the distutils default setting) was:
+
+            %s
+        """
+    ).lstrip()  # noqa
+
+    __not_exists_id = textwrap.dedent(
+        """
+        This directory does not currently exist.  Please create it and try again, or
+        choose a different installation directory (using the -d or --install-dir
+        option).
+        """
+    ).lstrip()  # noqa
+
+    __access_msg = textwrap.dedent(
+        """
+        Perhaps your account does not have write access to this directory?  If the
+        installation directory is a system-owned directory, you may need to sign in
+        as the administrator or "root" account.  If you do not have administrative
+        access to this machine, you may wish to choose a different installation
+        directory, preferably one that is listed in your PYTHONPATH environment
+        variable.
+
+        For information on other options, you may wish to consult the
+        documentation at:
+
+          https://setuptools.pypa.io/en/latest/deprecated/easy_install.html
+
+        Please make the appropriate changes for your system and try again.
+        """
+    ).lstrip()  # noqa
+
+    def cant_write_to_target(self):
+        msg = self.__cant_write_msg % (
+            sys.exc_info()[1],
+            self.install_dir,
+        )
+
+        if not os.path.exists(self.install_dir):
+            msg += '\n' + self.__not_exists_id
+        else:
+            msg += '\n' + self.__access_msg
+        raise DistutilsError(msg)
+
+    def check_pth_processing(self):
+        """Empirically verify whether .pth files are supported in inst. dir"""
+        instdir = self.install_dir
+        log.info("Checking .pth file support in %s", instdir)
+        pth_file = self.pseudo_tempname() + ".pth"
+        ok_file = pth_file + '.ok'
+        ok_exists = os.path.exists(ok_file)
+        tmpl = (
+            _one_liner(
+                """
+            import os
+            f = open({ok_file!r}, 'w')
+            f.write('OK')
+            f.close()
+            """
+            )
+            + '\n'
+        )
+        try:
+            if ok_exists:
+                os.unlink(ok_file)
+            dirname = os.path.dirname(ok_file)
+            os.makedirs(dirname, exist_ok=True)
+            f = open(pth_file, 'w')
+        except (OSError, IOError):
+            self.cant_write_to_target()
+        else:
+            try:
+                f.write(tmpl.format(**locals()))
+                f.close()
+                f = None
+                executable = sys.executable
+                if os.name == 'nt':
+                    dirname, basename = os.path.split(executable)
+                    alt = os.path.join(dirname, 'pythonw.exe')
+                    use_alt = basename.lower() == 'python.exe' and os.path.exists(alt)
+                    if use_alt:
+                        # use pythonw.exe to avoid opening a console window
+                        executable = alt
+
+                from distutils.spawn import spawn
+
+                spawn([executable, '-E', '-c', 'pass'], 0)
+
+                if os.path.exists(ok_file):
+                    log.info("TEST PASSED: %s appears to support .pth files", instdir)
+                    return True
+            finally:
+                if f:
+                    f.close()
+                if os.path.exists(ok_file):
+                    os.unlink(ok_file)
+                if os.path.exists(pth_file):
+                    os.unlink(pth_file)
+        if not self.multi_version:
+            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
+        return False
+
+    def install_egg_scripts(self, dist):
+        """Write all the scripts for `dist`, unless scripts are excluded"""
+        if not self.exclude_scripts and dist.metadata_isdir('scripts'):
+            for script_name in dist.metadata_listdir('scripts'):
+                if dist.metadata_isdir('scripts/' + script_name):
+                    # The "script" is a directory, likely a Python 3
+                    # __pycache__ directory, so skip it.
+                    continue
+                self.install_script(
+                    dist, script_name, dist.get_metadata('scripts/' + script_name)
+                )
+        self.install_wrapper_scripts(dist)
+
+    def add_output(self, path):
+        if os.path.isdir(path):
+            for base, dirs, files in os.walk(path):
+                for filename in files:
+                    self.outputs.append(os.path.join(base, filename))
+        else:
+            self.outputs.append(path)
+
+    def not_editable(self, spec):
+        if self.editable:
+            raise DistutilsArgError(
+                "Invalid argument %r: you can't use filenames or URLs "
+                "with --editable (except via the --find-links option)." % (spec,)
+            )
+
+    def check_editable(self, spec):
+        if not self.editable:
+            return
+
+        if os.path.exists(os.path.join(self.build_directory, spec.key)):
+            raise DistutilsArgError(
+                "%r already exists in %s; can't do a checkout there"
+                % (spec.key, self.build_directory)
+            )
+
+    @contextlib.contextmanager
+    def _tmpdir(self):
+        tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
+        try:
+            # cast to str as workaround for #709 and #710 and #712
+            yield str(tmpdir)
+        finally:
+            os.path.exists(tmpdir) and _rmtree(tmpdir)
+
+    def easy_install(self, spec, deps=False):
+        with self._tmpdir() as tmpdir:
+            if not isinstance(spec, Requirement):
+                if URL_SCHEME(spec):
+                    # It's a url, download it to tmpdir and process
+                    self.not_editable(spec)
+                    dl = self.package_index.download(spec, tmpdir)
+                    return self.install_item(None, dl, tmpdir, deps, True)
+
+                elif os.path.exists(spec):
+                    # Existing file or directory, just process it directly
+                    self.not_editable(spec)
+                    return self.install_item(None, spec, tmpdir, deps, True)
+                else:
+                    spec = parse_requirement_arg(spec)
+
+            self.check_editable(spec)
+            dist = self.package_index.fetch_distribution(
+                spec,
+                tmpdir,
+                self.upgrade,
+                self.editable,
+                not self.always_copy,
+                self.local_index,
+            )
+            if dist is None:
+                msg = "Could not find suitable distribution for %r" % spec
+                if self.always_copy:
+                    msg += " (--always-copy skips system and development eggs)"
+                raise DistutilsError(msg)
+            elif dist.precedence == DEVELOP_DIST:
+                # .egg-info dists don't need installing, just process deps
+                self.process_distribution(spec, dist, deps, "Using")
+                return dist
+            else:
+                return self.install_item(spec, dist.location, tmpdir, deps)
+
+    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
+        # Installation is also needed if the file is in tmpdir or is not an egg
+        install_needed = install_needed or self.always_copy
+        install_needed = install_needed or os.path.dirname(download) == tmpdir
+        install_needed = install_needed or not download.endswith('.egg')
+        install_needed = install_needed or (
+            self.always_copy_from is not None
+            and os.path.dirname(normalize_path(download))
+            == normalize_path(self.always_copy_from)
+        )
+
+        if spec and not install_needed:
+            # at this point, we know it's a local .egg, we just don't know if
+            # it's already installed.
+            for dist in self.local_index[spec.project_name]:
+                if dist.location == download:
+                    break
+            else:
+                install_needed = True  # it's not in the local index
+
+        log.info("Processing %s", os.path.basename(download))
+
+        if install_needed:
+            dists = self.install_eggs(spec, download, tmpdir)
+            for dist in dists:
+                self.process_distribution(spec, dist, deps)
+        else:
+            dists = [self.egg_distribution(download)]
+            self.process_distribution(spec, dists[0], deps, "Using")
+
+        if spec is not None:
+            for dist in dists:
+                if dist in spec:
+                    return dist
+
+    def select_scheme(self, name):
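+        # prefer the helper in the setuptools-patched distutils; older
+        # stdlib distutils lacks it and names the posix schemes 'unix_*'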
+        try:
+            install._select_scheme(self, name)
+        except AttributeError:
+            # stdlib distutils
+            install.install.select_scheme(self, name.replace('posix', 'unix'))
+
+    # FIXME: 'easy_install.process_distribution' is too complex (12)
+    def process_distribution(  # noqa: C901
+        self,
+        requirement,
+        dist,
+        deps=True,
+        *info,
+    ):
+        self.update_pth(dist)
+        self.package_index.add(dist)
+        if dist in self.local_index[dist.key]:
+            self.local_index.remove(dist)
+        self.local_index.add(dist)
+        self.install_egg_scripts(dist)
+        self.installed_projects[dist.key] = dist
+        log.info(self.installation_report(requirement, dist, *info))
+        if dist.has_metadata('dependency_links.txt') and not self.no_find_links:
+            self.package_index.add_find_links(
+                dist.get_metadata_lines('dependency_links.txt')
+            )
+        if not deps and not self.always_copy:
+            return
+        elif requirement is not None and dist.key != requirement.key:
+            log.warn("Skipping dependencies for %s", dist)
+            return  # XXX this is not the distribution we were looking for
+        elif requirement is None or dist not in requirement:
+            # if we wound up with a different version, resolve what we've got
+            distreq = dist.as_requirement()
+            requirement = Requirement(str(distreq))
+        log.info("Processing dependencies for %s", requirement)
+        try:
+            distros = WorkingSet([]).resolve(
+                [requirement], self.local_index, self.easy_install
+            )
+        except DistributionNotFound as e:
+            raise DistutilsError(str(e)) from e
+        except VersionConflict as e:
+            raise DistutilsError(e.report()) from e
+        if self.always_copy or self.always_copy_from:
+            # Force all the relevant distros to be copied or activated
+            for dist in distros:
+                if dist.key not in self.installed_projects:
+                    self.easy_install(dist.as_requirement())
+        log.info("Finished processing dependencies for %s", requirement)
+
+    def should_unzip(self, dist):
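+        # zip_ok is tri-state: True keeps the egg zipped, False always
+        # unzips, and None defers to the egg's zip-safety metadata below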
+        if self.zip_ok is not None:
+            return not self.zip_ok
+        if dist.has_metadata('not-zip-safe'):
+            return True
+        if not dist.has_metadata('zip-safe'):
+            return True
+        return False
+
+    def maybe_move(self, spec, dist_filename, setup_base):
+        dst = os.path.join(self.build_directory, spec.key)
+        if os.path.exists(dst):
+            msg = "%r already exists in %s; build directory %s will not be kept"
+            log.warn(msg, spec.key, self.build_directory, setup_base)
+            return setup_base
+        if os.path.isdir(dist_filename):
+            setup_base = dist_filename
+        else:
+            if os.path.dirname(dist_filename) == setup_base:
+                os.unlink(dist_filename)  # get it out of the tmp dir
+            contents = os.listdir(setup_base)
+            if len(contents) == 1:
+                dist_filename = os.path.join(setup_base, contents[0])
+                if os.path.isdir(dist_filename):
+                    # if the only thing there is a directory, move it instead
+                    setup_base = dist_filename
+        ensure_directory(dst)
+        shutil.move(setup_base, dst)
+        return dst
+
+    def install_wrapper_scripts(self, dist):
+        if self.exclude_scripts:
+            return
+        for args in ScriptWriter.best().get_args(dist):
+            self.write_script(*args)
+
+    def install_script(self, dist, script_name, script_text, dev_path=None):
+        """Generate a legacy script wrapper and install it"""
+        spec = str(dist.as_requirement())
+        is_script = is_python_script(script_text, script_name)
+
+        if is_script:
+            body = self._load_template(dev_path) % locals()
+            script_text = ScriptWriter.get_header(script_text) + body
+        self.write_script(script_name, _to_bytes(script_text), 'b')
+
+    @staticmethod
+    def _load_template(dev_path):
+        """
+        There are a couple of template scripts in the package. This
+        function loads one of them and prepares it for use.
+        """
+        # See https://github.com/pypa/setuptools/issues/134 for info
+        # on script file naming and downstream issues with SVR4
+        name = 'script.tmpl'
+        if dev_path:
+            name = name.replace('.tmpl', ' (dev).tmpl')
+
+        raw_bytes = resource_string('setuptools', name)
+        return raw_bytes.decode('utf-8')
+
+    def write_script(self, script_name, contents, mode="t", blockers=()):
+        """Write an executable file to the scripts directory"""
+        self.delete_blockers(  # clean up old .py/.pyw w/o a script
+            [os.path.join(self.script_dir, x) for x in blockers]
+        )
+        log.info("Installing %s script to %s", script_name, self.script_dir)
+        target = os.path.join(self.script_dir, script_name)
+        self.add_output(target)
+
+        if self.dry_run:
+            return
+
+        mask = current_umask()
+        ensure_directory(target)
+        if os.path.exists(target):
+            os.unlink(target)
+        with open(target, "w" + mode) as f:
+            f.write(contents)
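+        # e.g. with a typical umask of 0o022, 0o777 - mask == 0o755
+        # (rwxr-xr-x) for the freshly written script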
+        chmod(target, 0o777 - mask)
+
+    def install_eggs(self, spec, dist_filename, tmpdir):
+        # .egg dirs or files are already built, so just return them
+        installer_map = {
+            '.egg': self.install_egg,
+            '.exe': self.install_exe,
+            '.whl': self.install_wheel,
+        }
+        try:
+            install_dist = installer_map[dist_filename.lower()[-4:]]
+        except KeyError:
+            pass
+        else:
+            return [install_dist(dist_filename, tmpdir)]
+
+        # Anything else, try to extract and build
+        setup_base = tmpdir
+        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
+            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
+        elif os.path.isdir(dist_filename):
+            setup_base = os.path.abspath(dist_filename)
+
+        if (
+            setup_base.startswith(tmpdir)  # something we downloaded
+            and self.build_directory
+            and spec is not None
+        ):
+            setup_base = self.maybe_move(spec, dist_filename, setup_base)
+
+        # Find the setup.py file
+        setup_script = os.path.join(setup_base, 'setup.py')
+
+        if not os.path.exists(setup_script):
+            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
+            if not setups:
+                raise DistutilsError(
+                    "Couldn't find a setup script in %s"
+                    % os.path.abspath(dist_filename)
+                )
+            if len(setups) > 1:
+                raise DistutilsError(
+                    "Multiple setup scripts in %s" % os.path.abspath(dist_filename)
+                )
+            setup_script = setups[0]
+
+        # Now run it, and return the result
+        if self.editable:
+            log.info(self.report_editable(spec, setup_script))
+            return []
+        else:
+            return self.build_and_install(setup_script, setup_base)
+
+    def egg_distribution(self, egg_path):
+        if os.path.isdir(egg_path):
+            metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
+        else:
+            metadata = EggMetadata(zipimport.zipimporter(egg_path))
+        return Distribution.from_filename(egg_path, metadata=metadata)
+
+    # FIXME: 'easy_install.install_egg' is too complex (11)
+    def install_egg(self, egg_path, tmpdir):  # noqa: C901
+        destination = os.path.join(
+            self.install_dir,
+            os.path.basename(egg_path),
+        )
+        destination = os.path.abspath(destination)
+        if not self.dry_run:
+            ensure_directory(destination)
+
+        dist = self.egg_distribution(egg_path)
+        if not (
+            os.path.exists(destination) and os.path.samefile(egg_path, destination)
+        ):
+            if os.path.isdir(destination) and not os.path.islink(destination):
+                dir_util.remove_tree(destination, dry_run=self.dry_run)
+            elif os.path.exists(destination):
+                self.execute(
+                    os.unlink,
+                    (destination,),
+                    "Removing " + destination,
+                )
+            try:
+                new_dist_is_zipped = False
+                if os.path.isdir(egg_path):
+                    if egg_path.startswith(tmpdir):
+                        f, m = shutil.move, "Moving"
+                    else:
+                        f, m = shutil.copytree, "Copying"
+                elif self.should_unzip(dist):
+                    self.mkpath(destination)
+                    f, m = self.unpack_and_compile, "Extracting"
+                else:
+                    new_dist_is_zipped = True
+                    if egg_path.startswith(tmpdir):
+                        f, m = shutil.move, "Moving"
+                    else:
+                        f, m = shutil.copy2, "Copying"
+                self.execute(
+                    f,
+                    (egg_path, destination),
+                    (m + " %s to %s")
+                    % (os.path.basename(egg_path), os.path.dirname(destination)),
+                )
+                update_dist_caches(
+                    destination,
+                    fix_zipimporter_caches=new_dist_is_zipped,
+                )
+            except Exception:
+                update_dist_caches(destination, fix_zipimporter_caches=False)
+                raise
+
+        self.add_output(destination)
+        return self.egg_distribution(destination)
+
+    def install_exe(self, dist_filename, tmpdir):
+        # See if it's valid, get data
+        cfg = extract_wininst_cfg(dist_filename)
+        if cfg is None:
+            raise DistutilsError(
+                "%s is not a valid distutils Windows .exe" % dist_filename
+            )
+        # Create a dummy distribution object until we build the real distro
+        dist = Distribution(
+            None,
+            project_name=cfg.get('metadata', 'name'),
+            version=cfg.get('metadata', 'version'),
+            platform=get_platform(),
+        )
+
+        # Convert the .exe to an unpacked egg
+        egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
+        dist.location = egg_path
+        egg_tmp = egg_path + '.tmp'
+        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
+        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
+        ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
+        dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
+        self.exe_to_egg(dist_filename, egg_tmp)
+
+        # Write EGG-INFO/PKG-INFO
+        if not os.path.exists(pkg_inf):
+            with open(pkg_inf, 'w') as f:
+                f.write('Metadata-Version: 1.0\n')
+                for k, v in cfg.items('metadata'):
+                    if k != 'target_version':
+                        f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
+        script_dir = os.path.join(_egg_info, 'scripts')
+        # delete entry-point scripts to avoid duping
+        self.delete_blockers(
+            [os.path.join(script_dir, args[0]) for args in ScriptWriter.get_args(dist)]
+        )
+        # Build .egg file from tmpdir
+        bdist_egg.make_zipfile(
+            egg_path,
+            egg_tmp,
+            verbose=self.verbose,
+            dry_run=self.dry_run,
+        )
+        # install the .egg
+        return self.install_egg(egg_path, tmpdir)
+
+    # FIXME: 'easy_install.exe_to_egg' is too complex (12)
+    def exe_to_egg(self, dist_filename, egg_tmp):  # noqa: C901
+        """Extract a bdist_wininst to the directories an egg would use"""
+        # Check for .pth file and set up prefix translations
+        prefixes = get_exe_prefixes(dist_filename)
+        to_compile = []
+        native_libs = []
+        top_level = {}
+
+        def process(src, dst):
+            s = src.lower()
+            for old, new in prefixes:
+                if s.startswith(old):
+                    src = new + src[len(old) :]
+                    parts = src.split('/')
+                    dst = os.path.join(egg_tmp, *parts)
+                    dl = dst.lower()
+                    if dl.endswith('.pyd') or dl.endswith('.dll'):
+                        parts[-1] = bdist_egg.strip_module(parts[-1])
+                        top_level[os.path.splitext(parts[0])[0]] = 1
+                        native_libs.append(src)
+                    elif dl.endswith('.py') and old != 'SCRIPTS/':
+                        top_level[os.path.splitext(parts[0])[0]] = 1
+                        to_compile.append(dst)
+                    return dst
+            if not src.endswith('.pth'):
+                log.warn("WARNING: can't process %s", src)
+            return None
+
+        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
+        unpack_archive(dist_filename, egg_tmp, process)
+        stubs = []
+        for res in native_libs:
+            if res.lower().endswith('.pyd'):  # create stubs for .pyd's
+                parts = res.split('/')
+                resource = parts[-1]
+                parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
+                pyfile = os.path.join(egg_tmp, *parts)
+                to_compile.append(pyfile)
+                stubs.append(pyfile)
+                bdist_egg.write_stub(resource, pyfile)
+        self.byte_compile(to_compile)  # compile .py's
+        bdist_egg.write_safety_flag(
+            os.path.join(egg_tmp, 'EGG-INFO'), bdist_egg.analyze_egg(egg_tmp, stubs)
+        )  # write zip-safety flag
+
+        for name in 'top_level', 'native_libs':
+            if locals()[name]:
+                txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
+                if not os.path.exists(txt):
+                    with open(txt, 'w') as f:
+                        f.write('\n'.join(locals()[name]) + '\n')
+
+    def install_wheel(self, wheel_path, tmpdir):
+        wheel = Wheel(wheel_path)
+        assert wheel.is_compatible()
+        destination = os.path.join(self.install_dir, wheel.egg_name())
+        destination = os.path.abspath(destination)
+        if not self.dry_run:
+            ensure_directory(destination)
+        if os.path.isdir(destination) and not os.path.islink(destination):
+            dir_util.remove_tree(destination, dry_run=self.dry_run)
+        elif os.path.exists(destination):
+            self.execute(
+                os.unlink,
+                (destination,),
+                "Removing " + destination,
+            )
+        try:
+            self.execute(
+                wheel.install_as_egg,
+                (destination,),
+                ("Installing %s to %s")
+                % (os.path.basename(wheel_path), os.path.dirname(destination)),
+            )
+        finally:
+            update_dist_caches(destination, fix_zipimporter_caches=False)
+        self.add_output(destination)
+        return self.egg_distribution(destination)
+
+    __mv_warning = textwrap.dedent(
+        """
+        Because this distribution was installed --multi-version, before you can
+        import modules from this package in an application, you will need to
+        'import pkg_resources' and then use a 'require()' call similar to one of
+        these examples, in order to select the desired version:
+
+            pkg_resources.require("%(name)s")  # latest installed version
+            pkg_resources.require("%(name)s==%(version)s")  # this exact version
+            pkg_resources.require("%(name)s>=%(version)s")  # this version or higher
+        """
+    ).lstrip()  # noqa
+
+    __id_warning = textwrap.dedent(
+        """
+        Note also that the installation directory must be on sys.path at runtime for
+        this to work.  (e.g. by being the application's script directory, by being on
+        PYTHONPATH, or by being added to sys.path by your code.)
+        """
+    )  # noqa
+
+    def installation_report(self, req, dist, what="Installed"):
+        """Helpful installation message for display to package users"""
+        msg = "\n%(what)s %(eggloc)s%(extras)s"
+        if self.multi_version and not self.no_report:
+            msg += '\n' + self.__mv_warning
+            if self.install_dir not in map(normalize_path, sys.path):
+                msg += '\n' + self.__id_warning
+
+        eggloc = dist.location
+        name = dist.project_name
+        version = dist.version
+        extras = ''  # TODO: self.report_extras(req, dist)
+        return msg % locals()
+
+    __editable_msg = textwrap.dedent(
+        """
+        Extracted editable version of %(spec)s to %(dirname)s
+
+        If it uses setuptools in its setup script, you can activate it in
+        "development" mode by going to that directory and running::
+
+            %(python)s setup.py develop
+
+        See the setuptools documentation for the "develop" command for more info.
+        """
+    ).lstrip()  # noqa
+
+    def report_editable(self, spec, setup_script):
+        dirname = os.path.dirname(setup_script)
+        python = sys.executable
+        return '\n' + self.__editable_msg % locals()
+
+    def run_setup(self, setup_script, setup_base, args):
+        sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
+        sys.modules.setdefault('distutils.command.egg_info', egg_info)
+
+        args = list(args)
+        if self.verbose > 2:
+            v = 'v' * (self.verbose - 1)
+            args.insert(0, '-' + v)
+        elif self.verbose < 2:
+            args.insert(0, '-q')
+        if self.dry_run:
+            args.insert(0, '-n')
+        log.info("Running %s %s", setup_script[len(setup_base) + 1 :], ' '.join(args))
+        try:
+            run_setup(setup_script, args)
+        except SystemExit as v:
+            raise DistutilsError("Setup script exited with %s" % (v.args[0],)) from v
+
+    def build_and_install(self, setup_script, setup_base):
+        args = ['bdist_egg', '--dist-dir']
+
+        dist_dir = tempfile.mkdtemp(
+            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
+        )
+        try:
+            self._set_fetcher_options(os.path.dirname(setup_script))
+            args.append(dist_dir)
+
+            self.run_setup(setup_script, setup_base, args)
+            all_eggs = Environment([dist_dir])
+            eggs = []
+            for key in all_eggs:
+                for dist in all_eggs[key]:
+                    eggs.append(self.install_egg(dist.location, setup_base))
+            if not eggs and not self.dry_run:
+                log.warn("No eggs found in %s (setup script problem?)", dist_dir)
+            return eggs
+        finally:
+            _rmtree(dist_dir)
+            log.set_verbosity(self.verbose)  # restore our log verbosity
+
+    def _set_fetcher_options(self, base):
+        """
+        When easy_install is about to run bdist_egg on a source dist, that
+        source dist might have 'setup_requires' directives, requiring
+        additional fetching. Ensure the fetcher options given to easy_install
+        are available to that command as well.
+        """
+        # find the fetch options from easy_install and write them out
+        # to the setup.cfg file.
+        ei_opts = self.distribution.get_option_dict('easy_install').copy()
+        fetch_directives = (
+            'find_links',
+            'site_dirs',
+            'index_url',
+            'optimize',
+            'allow_hosts',
+        )
+        fetch_options = {}
+        for key, val in ei_opts.items():
+            if key not in fetch_directives:
+                continue
+            fetch_options[key] = val[1]
+        # create a settings dictionary suitable for `edit_config`
+        settings = dict(easy_install=fetch_options)
+        cfg_filename = os.path.join(base, 'setup.cfg')
+        setopt.edit_config(cfg_filename, settings)
+
+    def update_pth(self, dist):  # noqa: C901  # is too complex (11)  # FIXME
+        if self.pth_file is None:
+            return
+
+        for d in self.pth_file[dist.key]:  # drop old entries
+            if not self.multi_version and d.location == dist.location:
+                continue
+
+            log.info("Removing %s from easy-install.pth file", d)
+            self.pth_file.remove(d)
+            if d.location in self.shadow_path:
+                self.shadow_path.remove(d.location)
+
+        if not self.multi_version:
+            if dist.location in self.pth_file.paths:
+                log.info(
+                    "%s is already the active version in easy-install.pth",
+                    dist,
+                )
+            else:
+                log.info("Adding %s to easy-install.pth file", dist)
+                self.pth_file.add(dist)  # add new entry
+                if dist.location not in self.shadow_path:
+                    self.shadow_path.append(dist.location)
+
+        if self.dry_run:
+            return
+
+        self.pth_file.save()
+
+        if dist.key != 'setuptools':
+            return
+
+        # Ensure that setuptools itself never becomes unavailable!
+        # XXX should this check for latest version?
+        filename = os.path.join(self.install_dir, 'setuptools.pth')
+        if os.path.islink(filename):
+            os.unlink(filename)
+        with open(filename, 'wt') as f:
+            f.write(self.pth_file.make_relative(dist.location) + '\n')
+
+    def unpack_progress(self, src, dst):
+        # Progress filter for unpacking
+        log.debug("Unpacking %s to %s", src, dst)
+        return dst  # only unpack-and-compile skips files for dry run
+
+    def unpack_and_compile(self, egg_path, destination):
+        to_compile = []
+        to_chmod = []
+
+        def pf(src, dst):
+            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
+                to_compile.append(dst)
+            elif dst.endswith('.dll') or dst.endswith('.so'):
+                to_chmod.append(dst)
+            self.unpack_progress(src, dst)
+            return dst if not self.dry_run else None
+
+        unpack_archive(egg_path, destination, pf)
+        self.byte_compile(to_compile)
+        if not self.dry_run:
+            for f in to_chmod:
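+                # grant read/execute to everyone and drop group/other write
+                # bits: ``| 0o555`` adds r-x, ``& 0o7755`` masks the writes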
+                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
+                chmod(f, mode)
+
+    def byte_compile(self, to_compile):
+        if sys.dont_write_bytecode:
+            return
+
+        from distutils.util import byte_compile
+
+        try:
+            # try to make the byte compile messages quieter
+            log.set_verbosity(self.verbose - 1)
+
+            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
+            if self.optimize:
+                byte_compile(
+                    to_compile,
+                    optimize=self.optimize,
+                    force=1,
+                    dry_run=self.dry_run,
+                )
+        finally:
+            log.set_verbosity(self.verbose)  # restore original verbosity
+
+    __no_default_msg = textwrap.dedent(
+        """
+        bad install directory or PYTHONPATH
+
+        You are attempting to install a package to a directory that is not
+        on PYTHONPATH and which Python does not read ".pth" files from.  The
+        installation directory you specified (via --install-dir, --prefix, or
+        the distutils default setting) was:
+
+            %s
+
+        and your PYTHONPATH environment variable currently contains:
+
+            %r
+
+        Here are some of your options for correcting the problem:
+
+        * You can choose a different installation directory, i.e., one that is
+          on PYTHONPATH or supports .pth files
+
+        * You can add the installation directory to the PYTHONPATH environment
+          variable.  (It must then also be on PYTHONPATH whenever you run
+          Python and want to use the package(s) you are installing.)
+
+        * You can set up the installation directory to support ".pth" files by
+          using one of the approaches described here:
+
+          https://setuptools.pypa.io/en/latest/deprecated/easy_install.html#custom-installation-locations
+
+
+        Please make the appropriate changes for your system and try again.
+        """
+    ).strip()
+
+    def create_home_path(self):
+        """Create directories under ~."""
+        if not self.user:
+            return
+        home = convert_path(os.path.expanduser("~"))
+        for path in only_strs(self.config_vars.values()):
+            if path.startswith(home) and not os.path.isdir(path):
+                self.debug_print("os.makedirs('%s', 0o700)" % path)
+                os.makedirs(path, 0o700)
+
+    INSTALL_SCHEMES = dict(
+        posix=dict(
+            install_dir='$base/lib/python$py_version_short/site-packages',
+            script_dir='$base/bin',
+        ),
+    )
+
+    DEFAULT_SCHEME = dict(
+        install_dir='$base/Lib/site-packages',
+        script_dir='$base/Scripts',
+    )
+
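+    # Illustrative example: with ``--prefix=/opt/app`` on a posix system
+    # running Python 3.11, install_dir expands via subst_vars to
+    # /opt/app/lib/python3.11/site-packages and script_dir to /opt/app/bin.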
+    def _expand(self, *attrs):
+        config_vars = self.get_finalized_command('install').config_vars
+
+        if self.prefix:
+            # Set default install_dir/scripts from --prefix
+            config_vars = dict(config_vars)
+            config_vars['base'] = self.prefix
+            scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
+            for attr, val in scheme.items():
+                if getattr(self, attr, None) is None:
+                    setattr(self, attr, val)
+
+        from distutils.util import subst_vars
+
+        for attr in attrs:
+            val = getattr(self, attr)
+            if val is not None:
+                val = subst_vars(val, config_vars)
+                if os.name == 'posix':
+                    val = os.path.expanduser(val)
+                setattr(self, attr, val)
+
+
+def _pythonpath():
+    items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
+    return filter(None, items)
+
+
+def get_site_dirs():
+    """
+    Return a list of 'site' dirs
+    """
+
+    sitedirs = []
+
+    # start with PYTHONPATH
+    sitedirs.extend(_pythonpath())
+
+    prefixes = [sys.prefix]
+    if sys.exec_prefix != sys.prefix:
+        prefixes.append(sys.exec_prefix)
+    for prefix in prefixes:
+        if not prefix:
+            continue
+
+        if sys.platform in ('os2emx', 'riscos'):
+            sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
+        elif os.sep == '/':
+            sitedirs.extend(
+                [
+                    os.path.join(
+                        prefix,
+                        "lib",
+                        "python{}.{}".format(*sys.version_info),
+                        "site-packages",
+                    ),
+                    os.path.join(prefix, "lib", "site-python"),
+                ]
+            )
+        else:
+            sitedirs.extend(
+                [
+                    prefix,
+                    os.path.join(prefix, "lib", "site-packages"),
+                ]
+            )
+        if sys.platform != 'darwin':
+            continue
+
+        # for framework builds *only* we add the standard Apple
+        # locations. Currently only per-user, but /Library and
+        # /Network/Library could be added too
+        if 'Python.framework' not in prefix:
+            continue
+
+        home = os.environ.get('HOME')
+        if not home:
+            continue
+
+        home_sp = os.path.join(
+            home,
+            'Library',
+            'Python',
+            '{}.{}'.format(*sys.version_info),
+            'site-packages',
+        )
+        sitedirs.append(home_sp)
+    lib_paths = get_path('purelib'), get_path('platlib')
+
+    sitedirs.extend(s for s in lib_paths if s not in sitedirs)
+
+    if site.ENABLE_USER_SITE:
+        sitedirs.append(site.USER_SITE)
+
+    with contextlib.suppress(AttributeError):
+        sitedirs.extend(site.getsitepackages())
+
+    sitedirs = list(map(normalize_path, sitedirs))
+
+    return sitedirs
+
+
+def expand_paths(inputs):  # noqa: C901  # is too complex (11)  # FIXME
+    """Yield sys.path directories that might contain "old-style" packages"""
+
+    seen = {}
+
+    for dirname in inputs:
+        dirname = normalize_path(dirname)
+        if dirname in seen:
+            continue
+
+        seen[dirname] = 1
+        if not os.path.isdir(dirname):
+            continue
+
+        files = os.listdir(dirname)
+        yield dirname, files
+
+        for name in files:
+            if not name.endswith('.pth'):
+                # We only care about the .pth files
+                continue
+            if name in ('easy-install.pth', 'setuptools.pth'):
+                # Ignore .pth files that we control
+                continue
+
+            # Read the .pth file
+            f = open(os.path.join(dirname, name))
+            lines = list(yield_lines(f))
+            f.close()
+
+            # Yield existing non-dupe, non-import directory lines from it
+            for line in lines:
+                if line.startswith("import"):
+                    continue
+
+                line = normalize_path(line.rstrip())
+                if line in seen:
+                    continue
+
+                seen[line] = 1
+                if not os.path.isdir(line):
+                    continue
+
+                yield line, os.listdir(line)
+
+
+def extract_wininst_cfg(dist_filename):
+    """Extract configuration data from a bdist_wininst .exe
+
+    Returns a configparser.RawConfigParser, or None
+    """
+    f = open(dist_filename, 'rb')
+    try:
+        endrec = zipfile._EndRecData(f)
+        if endrec is None:
+            return None
+
+        prepended = (endrec[9] - endrec[5]) - endrec[6]
+        if prepended < 12:  # no wininst data here
+            return None
+        f.seek(prepended - 12)
+
+        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
+        if tag not in (0x1234567A, 0x1234567B):
+            return None  # not a valid tag
+
+        f.seek(prepended - (12 + cfglen))
+        init = {'version': '', 'target_version': ''}
+        cfg = configparser.RawConfigParser(init)
+        try:
+            part = f.read(cfglen)
+            # Read up to the first null byte.
+            config = part.split(b'\0', 1)[0]
+            # Now the config is in unicode
+            config = config.decode(sys.getdefaultencoding())
+        except UnicodeDecodeError:
+            return None
+        try:
+            cfg.read_string(config)
+        except configparser.Error:
+            return None
+        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
+            return None
+        return cfg
+    finally:
+        f.close()
+
+
+def get_exe_prefixes(exe_filename):
+    """Get exe->egg path translations for a given .exe file"""
+
+    prefixes = [
+        ('PURELIB/', ''),
+        ('PLATLIB/pywin32_system32', ''),
+        ('PLATLIB/', ''),
+        ('SCRIPTS/', 'EGG-INFO/scripts/'),
+        ('DATA/lib/site-packages', ''),
+    ]
+    z = zipfile.ZipFile(exe_filename)
+    try:
+        for info in z.infolist():
+            name = info.filename
+            parts = name.split('/')
+            if len(parts) == 3 and parts[2] == 'PKG-INFO':
+                if parts[1].endswith('.egg-info'):
+                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
+                    break
+            if len(parts) != 2 or not name.endswith('.pth'):
+                continue
+            if name.endswith('-nspkg.pth'):
+                continue
+            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
+                contents = z.read(name).decode()
+                for pth in yield_lines(contents):
+                    pth = pth.strip().replace('\\', '/')
+                    if not pth.startswith('import'):
+                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
+    finally:
+        z.close()
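+    # lower-case for case-insensitive matching, then sort and reverse so
+    # longer (more specific) prefixes are tried first by exe_to_egg's process()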
+    prefixes = [(x.lower(), y) for x, y in prefixes]
+    prefixes.sort()
+    prefixes.reverse()
+    return prefixes
+
+
+class PthDistributions(Environment):
+    """A .pth file with Distribution paths in it"""
+
+    def __init__(self, filename, sitedirs=()):
+        self.filename = filename
+        self.sitedirs = list(map(normalize_path, sitedirs))
+        self.basedir = normalize_path(os.path.dirname(self.filename))
+        self.paths, self.dirty = self._load()
+        # keep a copy in case someone manually updates the paths attribute on the instance
+        self._init_paths = self.paths[:]
+        super().__init__([], None, None)
+        for path in yield_lines(self.paths):
+            list(map(self.add, find_distributions(path, True)))
+
+    def _load_raw(self):
+        paths = []
+        dirty = saw_import = False
+        seen = dict.fromkeys(self.sitedirs)
+        with open(self.filename, 'rt') as f:
+            for line in f:
+                path = line.rstrip()
+                # still keep imports and empty/commented lines for formatting
+                paths.append(path)
+                if line.startswith(('import ', 'from ')):
+                    saw_import = True
+                    continue
+                stripped_path = path.strip()
+                if not stripped_path or stripped_path.startswith('#'):
+                    continue
+                # skip non-existent paths, in case somebody deleted a package
+                # manually, and duplicate paths as well
+                normalized_path = normalize_path(os.path.join(self.basedir, path))
+                if normalized_path in seen or not os.path.exists(normalized_path):
+                    log.debug("cleaned up dirty or duplicated %r", path)
+                    dirty = True
+                    paths.pop()
+                    continue
+                seen[normalized_path] = 1
+        # remove any trailing empty/blank line
+        while paths and not paths[-1].strip():
+            paths.pop()
+            dirty = True
+        return paths, dirty or (paths and saw_import)
+
+    def _load(self):
+        if os.path.isfile(self.filename):
+            return self._load_raw()
+        return [], False
+
+    def save(self):
+        """Write changed .pth file back to disk"""
+        # first reload the file
+        last_paths, last_dirty = self._load()
+        # and check that there is no difference from what we have.
+        # there can be differences if someone else has written to the file
+        # since we first loaded it.
+        # we don't want to lose any new paths added since then.
+        for path in last_paths[:]:
+            if path not in self.paths:
+                self.paths.append(path)
+                log.info("detected new path %r", path)
+                last_dirty = True
+            else:
+                last_paths.remove(path)
+        # also, re-check that all paths are still valid before saving them
+        for path in self.paths[:]:
+            if path not in last_paths and not path.startswith(
+                ('import ', 'from ', '#')
+            ):
+                absolute_path = os.path.join(self.basedir, path)
+                if not os.path.exists(absolute_path):
+                    self.paths.remove(path)
+                    log.info("removing now non-existent path %r", path)
+                    last_dirty = True
+
+        self.dirty |= last_dirty or self.paths != self._init_paths
+        if not self.dirty:
+            return
+
+        rel_paths = list(map(self.make_relative, self.paths))
+        if rel_paths:
+            log.debug("Saving %s", self.filename)
+            lines = self._wrap_lines(rel_paths)
+            data = '\n'.join(lines) + '\n'
+            if os.path.islink(self.filename):
+                os.unlink(self.filename)
+            with open(self.filename, 'wt') as f:
+                f.write(data)
+        elif os.path.exists(self.filename):
+            log.debug("Deleting empty %s", self.filename)
+            os.unlink(self.filename)
+
+        self.dirty = False
+        self._init_paths[:] = self.paths[:]
+
+    @staticmethod
+    def _wrap_lines(lines):
+        return lines
+
+    def add(self, dist):
+        """Add `dist` to the distribution map"""
+        new_path = dist.location not in self.paths and (
+            dist.location not in self.sitedirs
+            or
+            # account for '.' being in PYTHONPATH
+            dist.location == os.getcwd()
+        )
+        if new_path:
+            self.paths.append(dist.location)
+            self.dirty = True
+        super().add(dist)
+
+    def remove(self, dist):
+        """Remove `dist` from the distribution map"""
+        while dist.location in self.paths:
+            self.paths.remove(dist.location)
+            self.dirty = True
+        super().remove(dist)
+
+    def make_relative(self, path):
+        npath, last = os.path.split(normalize_path(path))
+        baselen = len(self.basedir)
+        parts = [last]
+        sep = '/' if os.altsep == '/' else os.sep
+        while len(npath) >= baselen:
+            if npath == self.basedir:
+                parts.append(os.curdir)
+                parts.reverse()
+                return sep.join(parts)
+            npath, last = os.path.split(npath)
+            parts.append(last)
+        return path
+
+
+class RewritePthDistributions(PthDistributions):
+    @classmethod
+    def _wrap_lines(cls, lines):
+        yield cls.prelude
+        for line in lines:
+            yield line
+        yield cls.postlude
+
+    prelude = _one_liner(
+        """
+        import sys
+        sys.__plen = len(sys.path)
+        """
+    )
+    postlude = _one_liner(
+        """
+        import sys
+        new = sys.path[sys.__plen:]
+        del sys.path[sys.__plen:]
+        p = getattr(sys, '__egginsert', 0)
+        sys.path[p:p] = new
+        sys.__egginsert = p + len(new)
+        """
+    )
+
+
+if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
+    PthDistributions = RewritePthDistributions
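+# Illustrative sketch (entry names made up): with the 'rewrite' technique a
+# saved easy-install.pth looks roughly like
+#
+#   import sys; sys.__plen = len(sys.path)
+#   ./foo-1.0-py3.11.egg
+#   ./bar-2.1-py3.11.egg
+#   import sys; new = sys.path[sys.__plen:]; del sys.path[sys.__plen:]; ...
+#
+# so the entries it appends get spliced back in at sys.__egginsert rather
+# than staying at the end of sys.path.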
+
+
+def _first_line_re():
+    """
+    Return a regular expression based on first_line_re suitable for matching
+    strings.
+    """
+    if isinstance(first_line_re.pattern, str):
+        return first_line_re
+
+    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
+    return re.compile(first_line_re.pattern.decode())
+
+
+def auto_chmod(func, arg, exc):
+    """Error handler for _rmtree: on Windows, clear the read-only bit and
+    retry a failed unlink; otherwise re-raise the original exception."""
+    if func in [os.unlink, os.remove] and os.name == 'nt':
+        chmod(arg, stat.S_IWRITE)
+        return func(arg)
+    raise exc
+
+
+def update_dist_caches(dist_path, fix_zipimporter_caches):
+    """
+    Fix any globally cached `dist_path` related data
+
+    `dist_path` should be a path of a newly installed egg distribution (zipped
+    or unzipped).
+
+    sys.path_importer_cache contains finder objects that have been cached when
+    importing data from the original distribution. Any such finders need to be
+    cleared since the replacement distribution might be packaged differently,
+    e.g. a zipped egg distribution might get replaced with an unzipped egg
+    folder or vice versa. Having the old finders cached may then cause Python
+    to attempt loading modules from the replacement distribution using an
+    incorrect loader.
+
+    zipimport.zipimporter objects are Python loaders charged with importing
+    data packaged inside zip archives. If stale loaders referencing the
+    original distribution are left behind, they can fail to load modules from
+    the replacement distribution. E.g. if an old zipimport.zipimporter instance
+    is used to load data from a new zipped egg archive, it may cause the
+    operation to attempt to locate the requested data in the wrong location -
+    one indicated by the original distribution's zip archive directory
+    information. Such an operation may then fail outright, e.g. report having
+    read a 'bad local file header', or even worse, it may fail silently &
+    return invalid data.
+
+    zipimport._zip_directory_cache contains cached zip archive directory
+    information for all existing zipimport.zipimporter instances and all such
+    instances connected to the same archive share the same cached directory
+    information.
+
+    If asked, and the underlying Python implementation allows it, we can fix
+    all existing zipimport.zipimporter instances instead of having to track
+    them down and remove them one by one, by updating their shared cached zip
+    archive directory information. This, of course, assumes that the
+    replacement distribution is packaged as a zipped egg.
+
+    If not asked to fix existing zipimport.zipimporter instances, we still do
+    our best to clear any remaining zipimport.zipimporter related cached data
+    that might somehow later get used when attempting to load data from the new
+    distribution and thus cause such load operations to fail. Note that when
+    tracking down such remaining stale data, we can not catch every conceivable
+    usage from here, and we clear only those that we know of and have found to
+    cause problems if left alive. Any remaining caches should be updated by
+    whoever is in charge of maintaining them, i.e. they should be ready to
+    handle us replacing their zip archives with new distributions at runtime.
+
+    """
+    # There are several other known sources of stale zipimport.zipimporter
+    # instances that we do not clear here, but might if ever given a reason to
+    # do so:
+    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
+    #   set') may contain distributions which may in turn contain their
+    #   zipimport.zipimporter loaders.
+    # * Several zipimport.zipimporter loaders held by local variables further
+    #   up the function call stack when running the setuptools installation.
+    # * Already loaded modules may have their __loader__ attribute set to the
+    #   exact loader instance used when importing them. Python 3.4 docs state
+    #   that this information is intended mostly for introspection and so is
+    #   not expected to cause us problems.
+    normalized_path = normalize_path(dist_path)
+    _uncache(normalized_path, sys.path_importer_cache)
+    if fix_zipimporter_caches:
+        _replace_zip_directory_cache_data(normalized_path)
+    else:
+        # Here, even though we do not want to fix existing and now stale
+        # zipimporter cache information, we still want to remove it. Related to
+        # Python's zip archive directory information cache, we clear each of
+        # its stale entries in two phases:
+        #   1. Clear the entry so attempting to access zip archive information
+        #      via any existing stale zipimport.zipimporter instances fails.
+        #   2. Remove the entry from the cache so any newly constructed
+        #      zipimport.zipimporter instances do not end up using old stale
+        #      zip archive directory information.
+        # This whole stale data removal step does not seem strictly necessary,
+        # but has been left in because it was done before we started replacing
+        # the zip archive directory information cache content if possible, and
+        # there are no relevant unit tests that we can depend on to tell us if
+        # this is really needed.
+        _remove_and_clear_zip_directory_cache_data(normalized_path)
+
+
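+# Illustrative matching note for the helper below: with normalized_path
+# '/p/dist.egg', cache keys '/p/dist.egg' and '/p/dist.egg/inner.zip' match,
+# but '/p/dist.eggs' does not (the character right after the prefix must be
+# os.sep or end-of-string).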
+def _collect_zipimporter_cache_entries(normalized_path, cache):
+    """
+    Return zipimporter cache entry keys related to a given normalized path.
+
+    Alternative path spellings (e.g. those using different character case or
+    those using alternative path separators) related to the same path are
+    included. Any sub-path entries are included as well, i.e. those
+    corresponding to zip archives embedded in other zip archives.
+
+    """
+    result = []
+    prefix_len = len(normalized_path)
+    for p in cache:
+        np = normalize_path(p)
+        if np.startswith(normalized_path) and np[prefix_len : prefix_len + 1] in (
+            os.sep,
+            '',
+        ):
+            result.append(p)
+    return result
+
+
+def _update_zipimporter_cache(normalized_path, cache, updater=None):
+    """
+    Update zipimporter cache data for a given normalized path.
+
+    Any sub-path entries are processed as well, i.e. those corresponding to zip
+    archives embedded in other zip archives.
+
+    Given updater is a callable taking a cache entry key and the original entry
+    (after already removing the entry from the cache), and expected to update
+    the entry and possibly return a new one to be inserted in its place.
+    Returning None indicates that the entry should not be replaced with a new
+    one. If no updater is given, the cache entries are simply removed without
+    any additional processing, the same as if the updater simply returned None.
+
+    """
+    for p in _collect_zipimporter_cache_entries(normalized_path, cache):
+        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
+        # not support the complete dict interface:
+        # * Does not support item assignment, so on PyPy this function can
+        #   only remove existing cache entries, not replace them.
+        # * Does not support the dict.pop() method, forcing us to use the
+        #   get/del patterns instead. For more detailed information see the
+        #    following links:
+        #      https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
+        #      https://foss.heptapod.net/pypy/pypy/-/blob/144c4e65cb6accb8e592f3a7584ea38265d1873c/pypy/module/zipimport/interp_zipimport.py
+        old_entry = cache[p]
+        del cache[p]
+        new_entry = updater and updater(p, old_entry)
+        if new_entry is not None:
+            cache[p] = new_entry
+
+
+def _uncache(normalized_path, cache):
+    _update_zipimporter_cache(normalized_path, cache)
+
+
+def _remove_and_clear_zip_directory_cache_data(normalized_path):
+    def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
+        old_entry.clear()
+
+    _update_zipimporter_cache(
+        normalized_path,
+        zipimport._zip_directory_cache,
+        updater=clear_and_remove_cached_zip_archive_directory_data,
+    )
+
+
+# PyPy Python implementation does not allow directly writing to the
+# zipimport._zip_directory_cache and so prevents us from attempting to correct
+# its content. The best we can do there is clear the problematic cache content
+# and have PyPy repopulate it as needed. The downside is that if there are any
+# stale zipimport.zipimporter instances laying around, attempting to use them
+# will fail due to not having its zip archive directory information available
+# instead of being automatically corrected to use the new correct zip archive
+# directory information.
+if '__pypy__' in sys.builtin_module_names:
+    _replace_zip_directory_cache_data = _remove_and_clear_zip_directory_cache_data
+else:
+
+    def _replace_zip_directory_cache_data(normalized_path):
+        def replace_cached_zip_archive_directory_data(path, old_entry):
+            # N.B. In theory, we could load the zip directory information just
+            # once for all updated path spellings, and then copy it locally and
+            # update its contained path strings to contain the correct
+            # spelling, but that seems like a way too invasive move (this cache
+            # structure is not officially documented anywhere and could in
+            # theory change with new Python releases) for no significant
+            # benefit.
+            old_entry.clear()
+            zipimport.zipimporter(path)
+            old_entry.update(zipimport._zip_directory_cache[path])
+            return old_entry
+
+        _update_zipimporter_cache(
+            normalized_path,
+            zipimport._zip_directory_cache,
+            updater=replace_cached_zip_archive_directory_data,
+        )
+
+
+def is_python(text, filename=''):
+    "Is this string a valid Python script?"
+    try:
+        compile(text, filename, 'exec')
+    except (SyntaxError, TypeError):
+        return False
+    else:
+        return True
+
+
+def is_sh(executable):
+    """Determine if the specified executable is a .sh (contains a #! line)"""
+    try:
+        with io.open(executable, encoding='latin-1') as fp:
+            magic = fp.read(2)
+    except (OSError, IOError):
+        return executable
+    return magic == '#!'
+
+
+def nt_quote_arg(arg):
+    """Quote a command line argument according to Windows parsing rules"""
+    return subprocess.list2cmdline([arg])
+
+
+def is_python_script(script_text, filename):
+    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc."""
+    if filename.endswith('.py') or filename.endswith('.pyw'):
+        return True  # extension says it's Python
+    if is_python(script_text, filename):
+        return True  # it's syntactically valid Python
+    if script_text.startswith('#!'):
+        # It begins with a '#!' line, so check if 'python' is in it somewhere
+        return 'python' in script_text.splitlines()[0].lower()
+
+    return False  # Not any Python I can recognize
+
+
+try:
+    from os import chmod as _chmod
+except ImportError:
+    # Jython compatibility
+    def _chmod(*args):
+        pass
+
+
+def chmod(path, mode):
+    log.debug("changing mode of %s to %o", path, mode)
+    try:
+        _chmod(path, mode)
+    except os.error as e:
+        log.debug("chmod failed: %s", e)
+
+
+class CommandSpec(list):
+    """
+    A command spec for a #! header, specified as a list of arguments akin to
+    those passed to Popen.
+    """
+
+    options = []
+    split_args = dict()
+
+    @classmethod
+    def best(cls):
+        """
+        Choose the best CommandSpec class based on environmental conditions.
+        """
+        return cls
+
+    @classmethod
+    def _sys_executable(cls):
+        _default = os.path.normpath(sys.executable)
+        return os.environ.get('__PYVENV_LAUNCHER__', _default)
+
+    @classmethod
+    def from_param(cls, param):
+        """
+        Construct a CommandSpec from a parameter to build_scripts, which may
+        be None.
+        """
+        if isinstance(param, cls):
+            return param
+        if isinstance(param, list):
+            return cls(param)
+        if param is None:
+            return cls.from_environment()
+        # otherwise, assume it's a string.
+        return cls.from_string(param)
+
+    @classmethod
+    def from_environment(cls):
+        return cls([cls._sys_executable()])
+
+    @classmethod
+    def from_string(cls, string):
+        """
+        Construct a command spec from a simple string representing a command
+        line parseable by shlex.split.
+        """
+        items = shlex.split(string, **cls.split_args)
+        return cls(items)
+
+    def install_options(self, script_text):
+        self.options = shlex.split(self._extract_options(script_text))
+        cmdline = subprocess.list2cmdline(self)
+        if not isascii(cmdline):
+            self.options[:0] = ['-x']
+
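+    # Illustrative example: for a script whose first line is
+    # '#!/usr/bin/python -x', _extract_options returns '-x'.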
+    @staticmethod
+    def _extract_options(orig_script):
+        """
+        Extract any options from the first line of the script.
+        """
+        first = (orig_script + '\n').splitlines()[0]
+        match = _first_line_re().match(first)
+        options = match.group(1) or '' if match else ''
+        return options.strip()
+
+    def as_header(self):
+        return self._render(self + list(self.options))
+
+    @staticmethod
+    def _strip_quotes(item):
+        _QUOTES = '"\''
+        for q in _QUOTES:
+            if item.startswith(q) and item.endswith(q):
+                return item[1:-1]
+        return item
+
+    @staticmethod
+    def _render(items):
+        cmdline = subprocess.list2cmdline(
+            CommandSpec._strip_quotes(item.strip()) for item in items
+        )
+        return '#!' + cmdline + '\n'
+
+
+# For pbr compat; will be removed in a future version.
+sys_executable = CommandSpec._sys_executable()
+
+
+class WindowsCommandSpec(CommandSpec):
+    split_args = dict(posix=False)
+
+
+class ScriptWriter:
+    """
+    Encapsulates behavior around writing entry point scripts for console and
+    gui apps.
+    """
+
+    template = textwrap.dedent(
+        r"""
+        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
+        import re
+        import sys
+
+        # for compatibility with easy_install; see #2198
+        __requires__ = %(spec)r
+
+        try:
+            from importlib.metadata import distribution
+        except ImportError:
+            try:
+                from importlib_metadata import distribution
+            except ImportError:
+                from pkg_resources import load_entry_point
+
+
+        def importlib_load_entry_point(spec, group, name):
+            dist_name, _, _ = spec.partition('==')
+            matches = (
+                entry_point
+                for entry_point in distribution(dist_name).entry_points
+                if entry_point.group == group and entry_point.name == name
+            )
+            return next(matches).load()
+
+
+        globals().setdefault('load_entry_point', importlib_load_entry_point)
+
+
+        if __name__ == '__main__':
+            sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+            sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
+        """
+    ).lstrip()
+
+    command_spec_class = CommandSpec
+
+    @classmethod
+    def get_args(cls, dist, header=None):
+        """
+        Yield write_script() argument tuples for a distribution's
+        console_scripts and gui_scripts entry points.
+        """
+        if header is None:
+            header = cls.get_header()
+        spec = str(dist.as_requirement())
+        for type_ in 'console', 'gui':
+            group = type_ + '_scripts'
+            for name, ep in dist.get_entry_map(group).items():
+                cls._ensure_safe_name(name)
+                script_text = cls.template % locals()
+                args = cls._get_script_args(type_, name, header, script_text)
+                for res in args:
+                    yield res
+
+    @staticmethod
+    def _ensure_safe_name(name):
+        """
+        Prevent paths in *_scripts entry point names.
+        """
+        has_path_sep = re.search(r'[\\/]', name)
+        if has_path_sep:
+            raise ValueError("Path separators not allowed in script names")
+
+    @classmethod
+    def best(cls):
+        """
+        Select the best ScriptWriter for this environment.
+        """
+        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
+            return WindowsScriptWriter.best()
+        else:
+            return cls
+
+    @classmethod
+    def _get_script_args(cls, type_, name, header, script_text):
+        # Simply write the stub with no extension.
+        yield (name, header + script_text)
+
+    @classmethod
+    def get_header(cls, script_text="", executable=None):
+        """Create a #! line, getting options (if any) from script_text"""
+        cmd = cls.command_spec_class.best().from_param(executable)
+        cmd.install_options(script_text)
+        return cmd.as_header()
+
+
+class WindowsScriptWriter(ScriptWriter):
+    command_spec_class = WindowsCommandSpec
+
+    @classmethod
+    def best(cls):
+        """
+        Select the best ScriptWriter suitable for Windows
+        """
+        writer_lookup = dict(
+            executable=WindowsExecutableLauncherWriter,
+            natural=cls,
+        )
+        # for compatibility, use the executable launcher by default
+        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
+        return writer_lookup[launcher]
+
+    @classmethod
+    def _get_script_args(cls, type_, name, header, script_text):
+        "For Windows, add a .py extension"
+        ext = dict(console='.pya', gui='.pyw')[type_]
+        if ext not in os.environ['PATHEXT'].lower().split(';'):
+            msg = (
+                "{ext} not listed in PATHEXT; scripts will not be "
+                "recognized as executables."
+            ).format(**locals())
+            SetuptoolsWarning.emit(msg)
+        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
+        old.remove(ext)
+        header = cls._adjust_header(type_, header)
+        blockers = [name + x for x in old]
+        yield name + ext, header + script_text, 't', blockers
+
+    @classmethod
+    def _adjust_header(cls, type_, orig_header):
+        """
+        Make sure 'pythonw' is used for gui and 'python' is used for
+        console (regardless of what sys.executable is).
+        """
+        pattern = 'pythonw.exe'
+        repl = 'python.exe'
+        if type_ == 'gui':
+            pattern, repl = repl, pattern
+        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
+        new_header = pattern_ob.sub(string=orig_header, repl=repl)
+        return new_header if cls._use_header(new_header) else orig_header
+
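+    # For example (illustrative): with type_='gui', a header such as
+    #   '#!C:\Python39\python.exe\n'
+    # is rewritten by _adjust_header to
+    #   '#!C:\Python39\pythonw.exe\n'
+    # provided the replacement resolves to a real executable (see below).
+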
+    @staticmethod
+    def _use_header(new_header):
+        """
+        Should _adjust_header use the replaced header?
+
+        On non-Windows systems, always use it. On
+        Windows systems, only use the replaced header if it resolves
+        to an executable on the system.
+        """
+        clean_header = new_header[2:-1].strip('"')
+        return sys.platform != 'win32' or find_executable(clean_header)
+
+
+class WindowsExecutableLauncherWriter(WindowsScriptWriter):
+    @classmethod
+    def _get_script_args(cls, type_, name, header, script_text):
+        """
+        For Windows, add a .py extension and an .exe launcher
+        """
+        if type_ == 'gui':
+            launcher_type = 'gui'
+            ext = '-script.pyw'
+            old = ['.pyw']
+        else:
+            launcher_type = 'cli'
+            ext = '-script.py'
+            old = ['.py', '.pyc', '.pyo']
+        hdr = cls._adjust_header(type_, header)
+        blockers = [name + x for x in old]
+        yield (name + ext, hdr + script_text, 't', blockers)
+        yield (
+            name + '.exe',
+            get_win_launcher(launcher_type),
+            'b',  # write in binary mode
+        )
+        if not is_64bit():
+            # install a manifest for the launcher to prevent Windows
+            # from detecting it as an installer (which it will for
+            #  launchers like easy_install.exe). Consider only
+            #  adding a manifest for launchers detected as installers.
+            #  See Distribute #143 for details.
+            m_name = name + '.exe.manifest'
+            yield (m_name, load_launcher_manifest(name), 't')
+
+
+def get_win_launcher(type):
+    """
+    Load the Windows launcher (executable) suitable for launching a script.
+
+    `type` should be either 'cli' or 'gui'
+
+    Returns the executable as a byte string.
+    """
+    launcher_fn = '%s.exe' % type
+    if is_64bit():
+        if get_platform() == "win-arm64":
+            launcher_fn = launcher_fn.replace(".", "-arm64.")
+        else:
+            launcher_fn = launcher_fn.replace(".", "-64.")
+    else:
+        launcher_fn = launcher_fn.replace(".", "-32.")
+    return resource_string('setuptools', launcher_fn)
+
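+# Usage sketch (illustrative; 'myscript' is a hypothetical name):
+#
+#     stub = get_win_launcher('cli')        # launcher binary as bytes
+#     with open('myscript.exe', 'wb') as f:
+#         f.write(stub)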
+
+def load_launcher_manifest(name):
+    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
+    return manifest.decode('utf-8') % vars()
+
+
+def _rmtree(path, ignore_errors=False, onexc=auto_chmod):
+    return py312compat.shutil_rmtree(path, ignore_errors, onexc)
+
+
+def current_umask():
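+    # Read the process umask without permanently changing it: os.umask both
+    # sets a new mask and returns the previous one, so set a throwaway value
+    # and then immediately restore the original.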
+    tmp = os.umask(0o022)
+    os.umask(tmp)
+    return tmp
+
+
+def only_strs(values):
+    """
+    Exclude non-str values. Ref #3063.
+    """
+    return filter(lambda val: isinstance(val, str), values)
+
+
+class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
+    _SUMMARY = "easy_install command is deprecated."
+    _DETAILS = """
+    Please avoid running ``setup.py`` and ``easy_install``.
+    Instead, use pypa/build, pypa/installer or other
+    standards-based tools.
+    """
+    _SEE_URL = "https://github.com/pypa/setuptools/issues/917"
+    # _DUE_DATE not defined yet
diff --git a/.venv/Lib/site-packages/setuptools/dep_util.py b/.venv/Lib/site-packages/setuptools/dep_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc9ccf62c20afffdedce9e90ada0e01a9e0705bf
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/dep_util.py
@@ -0,0 +1,24 @@
+from distutils.dep_util import newer_group
+
+
+# yes, this was almost entirely copy-pasted from
+# 'newer_pairwise()'; it is just another convenience
+# function.
+def newer_pairwise_group(sources_groups, targets):
+    """Walk both arguments in parallel, testing if each source group is newer
+    than its corresponding target. Returns a pair of lists (sources_groups,
+    targets) where sources is newer than target, according to the semantics
+    of 'newer_group()'.
+    """
+    if len(sources_groups) != len(targets):
+        raise ValueError("'sources_group' and 'targets' must be the same length")
+
+    # build a pair of lists (sources_groups, targets) where source is newer
+    n_sources = []
+    n_targets = []
+    for i in range(len(sources_groups)):
+        if newer_group(sources_groups[i], targets[i]):
+            n_sources.append(sources_groups[i])
+            n_targets.append(targets[i])
+
+    return n_sources, n_targets
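+
+
+# Example (illustrative, hypothetical file names): with
+#     sources_groups = [['a.c', 'a.h'], ['b.c']]
+#     targets = ['a.o', 'b.o']
+# newer_pairwise_group(sources_groups, targets) keeps only the pairs whose
+# sources changed since the target was built, e.g. ([['b.c']], ['b.o'])
+# when only 'b.c' is newer than 'b.o'.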
diff --git a/.venv/Lib/site-packages/setuptools/depends.py b/.venv/Lib/site-packages/setuptools/depends.py
new file mode 100644
index 0000000000000000000000000000000000000000..180e82045926b7548529b3bb47c1e458b4a0bd82
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/depends.py
@@ -0,0 +1,178 @@
+import sys
+import marshal
+import contextlib
+import dis
+
+
+from . import _imp
+from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
+from .extern.packaging.version import Version
+
+
+__all__ = ['Require', 'find_module', 'get_module_constant', 'extract_constant']
+
+
+class Require:
+    """A prerequisite to building or installing a distribution"""
+
+    def __init__(
+        self, name, requested_version, module, homepage='', attribute=None, format=None
+    ):
+        if format is None and requested_version is not None:
+            format = Version
+
+        if format is not None:
+            requested_version = format(requested_version)
+            if attribute is None:
+                attribute = '__version__'
+
+        self.__dict__.update(locals())
+        del self.self
+
+    def full_name(self):
+        """Return full package/distribution name, w/version"""
+        if self.requested_version is not None:
+            return '%s-%s' % (self.name, self.requested_version)
+        return self.name
+
+    def version_ok(self, version):
+        """Is 'version' sufficiently up-to-date?"""
+        return (
+            self.attribute is None
+            or self.format is None
+            or (
+                str(version) != "unknown"
+                and self.format(version) >= self.requested_version
+            )
+        )
+
+    def get_version(self, paths=None, default="unknown"):
+        """Get version number of installed module, 'None', or 'default'
+
+        Search 'paths' for module.  If not found, return 'None'.  If found,
+        return the extracted version attribute, or 'default' if no version
+        attribute was specified, or the value cannot be determined without
+        importing the module.  The version is formatted according to the
+        requirement's version format (if any), unless it is 'None' or the
+        supplied 'default'.
+        """
+
+        if self.attribute is None:
+            try:
+                f, p, i = find_module(self.module, paths)
+                if f:
+                    f.close()
+                return default
+            except ImportError:
+                return None
+
+        v = get_module_constant(self.module, self.attribute, default, paths)
+
+        if v is not None and v is not default and self.format is not None:
+            return self.format(v)
+
+        return v
+
+    def is_present(self, paths=None):
+        """Return true if dependency is present on 'paths'"""
+        return self.get_version(paths) is not None
+
+    def is_current(self, paths=None):
+        """Return true if dependency is present and up-to-date on 'paths'"""
+        version = self.get_version(paths)
+        if version is None:
+            return False
+        return self.version_ok(str(version))
+
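+# A minimal usage sketch (illustrative; 'mypkg' is a hypothetical module):
+#
+#     req = Require('MyPkg', '1.0', 'mypkg')
+#     req.full_name()    # -> 'MyPkg-1.0'
+#     req.is_present()   # True if 'mypkg' can be found on sys.path
+#     req.is_current()   # True if its __version__ parses as >= 1.0
+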
+
+def maybe_close(f):
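+    # Return a context manager: a no-op when 'f' is falsy (no open file
+    # handle), otherwise contextlib.closing(f), which closes 'f' on exit.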
+    @contextlib.contextmanager
+    def empty():
+        yield
+        return
+
+    if not f:
+        return empty()
+
+    return contextlib.closing(f)
+
+
+def get_module_constant(module, symbol, default=-1, paths=None):
+    """Find 'module' by searching 'paths', and extract 'symbol'
+
+    Return 'None' if 'module' does not exist on 'paths', or it does not define
+    'symbol'.  If the module defines 'symbol' as a constant, return the
+    constant.  Otherwise, return 'default'."""
+
+    try:
+        f, path, (suffix, mode, kind) = info = find_module(module, paths)
+    except ImportError:
+        # Module doesn't exist
+        return None
+
+    with maybe_close(f):
+        if kind == PY_COMPILED:
+            f.read(8)  # skip magic & date
+            code = marshal.load(f)
+        elif kind == PY_FROZEN:
+            code = _imp.get_frozen_object(module, paths)
+        elif kind == PY_SOURCE:
+            code = compile(f.read(), path, 'exec')
+        else:
+            # Not something we can parse; we'll have to import it.  :(
+            imported = _imp.get_module(module, paths, info)
+            return getattr(imported, symbol, None)
+
+    return extract_constant(code, symbol, default)
+
+
+def extract_constant(code, symbol, default=-1):
+    """Extract the constant value of 'symbol' from 'code'
+
+    If the name 'symbol' is bound to a constant value by the Python code
+    object 'code', return that value.  If 'symbol' is bound to an expression,
+    return 'default'.  Otherwise, return 'None'.
+
+    Return value is based on the first assignment to 'symbol'.  'symbol' must
+    be a global, or at least a non-"fast" local in the code block.  That is,
+    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
+    must be present in 'code.co_names'.
+    """
+    if symbol not in code.co_names:
+        # name's not there, can't possibly be an assignment
+        return None
+
+    name_idx = list(code.co_names).index(symbol)
+
+    STORE_NAME = 90
+    STORE_GLOBAL = 97
+    LOAD_CONST = 100
+
+    const = default
+
+    for byte_code in dis.Bytecode(code):
+        op = byte_code.opcode
+        arg = byte_code.arg
+
+        if op == LOAD_CONST:
+            const = code.co_consts[arg]
+        elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
+            return const
+        else:
+            const = default
+
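+# Example (illustrative): extracting a constant without importing the module:
+#
+#     code = compile("__version__ = '1.2.3'", '<hypothetical>', 'exec')
+#     extract_constant(code, '__version__')   # -> '1.2.3'
+#     extract_constant(code, 'missing')       # -> None (name never bound)
+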
+
+def _update_globals():
+    """
+    Patch the globals to remove the objects not available on some platforms.
+
+    XXX it'd be better to test assertions about bytecode instead.
+    """
+
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        return
+    incompatible = 'extract_constant', 'get_module_constant'
+    for name in incompatible:
+        del globals()[name]
+        __all__.remove(name)
+
+
+_update_globals()
diff --git a/.venv/Lib/site-packages/setuptools/discovery.py b/.venv/Lib/site-packages/setuptools/discovery.py
new file mode 100644
index 0000000000000000000000000000000000000000..25962863b9375665c916b3d386d41424c579785d
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/discovery.py
@@ -0,0 +1,614 @@
+"""Automatic discovery of Python modules and packages (for inclusion in the
+distribution) and other config values.
+
+For the purposes of this module, the following nomenclature is used:
+
+- "src-layout": a directory representing a Python project that contains a "src"
+  folder. Everything under the "src" folder is meant to be included in the
+  distribution when packaging the project. Example::
+
+    .
+    ├── tox.ini
+    ├── pyproject.toml
+    └── src/
+        └── mypkg/
+            ├── __init__.py
+            ├── mymodule.py
+            └── my_data_file.txt
+
+- "flat-layout": a Python project that does not use "src-layout" but instead
+  have a directory under the project root for each package::
+
+    .
+    ├── tox.ini
+    ├── pyproject.toml
+    └── mypkg/
+        ├── __init__.py
+        ├── mymodule.py
+        └── my_data_file.txt
+
+- "single-module": a project that contains a single Python script direct under
+  the project root (no directory used)::
+
+    .
+    ├── tox.ini
+    ├── pyproject.toml
+    └── mymodule.py
+
+"""
+
+import itertools
+import os
+from fnmatch import fnmatchcase
+from glob import glob
+from pathlib import Path
+from typing import (
+    TYPE_CHECKING,
+    Dict,
+    Iterable,
+    Iterator,
+    List,
+    Mapping,
+    Optional,
+    Tuple,
+    Union,
+)
+
+import _distutils_hack.override  # noqa: F401
+
+from distutils import log
+from distutils.util import convert_path
+
+_Path = Union[str, os.PathLike]
+StrIter = Iterator[str]
+
+chain_iter = itertools.chain.from_iterable
+
+if TYPE_CHECKING:
+    from setuptools import Distribution  # noqa
+
+
+def _valid_name(path: _Path) -> bool:
+    # Ignore invalid names that cannot be imported directly
+    return os.path.basename(path).isidentifier()
+
+
+class _Filter:
+    """
+    Given a list of patterns, create a callable that will be true only if
+    the input matches at least one of the patterns.
+    """
+
+    def __init__(self, *patterns: str):
+        self._patterns = dict.fromkeys(patterns)
+
+    def __call__(self, item: str) -> bool:
+        return any(fnmatchcase(item, pat) for pat in self._patterns)
+
+    def __contains__(self, item: str) -> bool:
+        return item in self._patterns
+
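+# For example (illustrative): _Filter('foo', 'bar*') matches 'foo' and any
+# name starting with 'bar'; _Filter() with no patterns matches nothing.
+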
+
+class _Finder:
+    """Base class that exposes functionality for module/package finders"""
+
+    ALWAYS_EXCLUDE: Tuple[str, ...] = ()
+    DEFAULT_EXCLUDE: Tuple[str, ...] = ()
+
+    @classmethod
+    def find(
+        cls,
+        where: _Path = '.',
+        exclude: Iterable[str] = (),
+        include: Iterable[str] = ('*',),
+    ) -> List[str]:
+        """Return a list of all Python items (packages or modules, depending on
+        the finder implementation) found within directory 'where'.
+
+        'where' is the root directory which will be searched.
+        It should be supplied as a "cross-platform" (i.e. URL-style) path;
+        it will be converted to the appropriate local path syntax.
+
+        'exclude' is a sequence of names to exclude; '*' can be used
+        as a wildcard in the names.
+        When finding packages, 'foo.*' will exclude all subpackages of 'foo'
+        (but not 'foo' itself).
+
+        'include' is a sequence of names to include.
+        If it's specified, only the named items will be included.
+        If it's not specified, all found items will be included.
+        'include' can contain shell style wildcard patterns just like
+        'exclude'.
+        """
+
+        exclude = exclude or cls.DEFAULT_EXCLUDE
+        return list(
+            cls._find_iter(
+                convert_path(str(where)),
+                _Filter(*cls.ALWAYS_EXCLUDE, *exclude),
+                _Filter(*include),
+            )
+        )
+
+    @classmethod
+    def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
+        raise NotImplementedError
+
+
+class PackageFinder(_Finder):
+    """
+    Generate a list of all Python packages found within a directory
+    """
+
+    ALWAYS_EXCLUDE = ("ez_setup", "*__pycache__")
+
+    @classmethod
+    def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
+        """
+        All the packages found in 'where' that pass the 'include' filter, but
+        not the 'exclude' filter.
+        """
+        for root, dirs, files in os.walk(str(where), followlinks=True):
+            # Copy dirs to iterate over it, then empty dirs.
+            all_dirs = dirs[:]
+            dirs[:] = []
+
+            for dir in all_dirs:
+                full_path = os.path.join(root, dir)
+                rel_path = os.path.relpath(full_path, where)
+                package = rel_path.replace(os.path.sep, '.')
+
+                # Skip directory trees that are not valid packages
+                if '.' in dir or not cls._looks_like_package(full_path, package):
+                    continue
+
+                # Should this package be included?
+                if include(package) and not exclude(package):
+                    yield package
+
+                # Early pruning if there is nothing else to be scanned
+                if f"{package}*" in exclude or f"{package}.*" in exclude:
+                    continue
+
+                # Keep searching subdirectories, as there may be more packages
+                # down there, even if the parent was excluded.
+                dirs.append(dir)
+
+    @staticmethod
+    def _looks_like_package(path: _Path, _package_name: str) -> bool:
+        """Does a directory look like a package?"""
+        return os.path.isfile(os.path.join(path, '__init__.py'))
+
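+# Usage sketch (illustrative; assumes a 'src' tree of regular packages,
+# i.e. directories containing __init__.py):
+#
+#     PackageFinder.find(where='src', exclude=['tests', 'tests.*'])
+#     # -> e.g. ['mypkg', 'mypkg.subpkg']
+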
+
+class PEP420PackageFinder(PackageFinder):
+    @staticmethod
+    def _looks_like_package(_path: _Path, _package_name: str) -> bool:
+        return True
+
+
+class ModuleFinder(_Finder):
+    """Find isolated Python modules.
+    This finder will **not** recurse into subdirectories.
+    """
+
+    @classmethod
+    def _find_iter(cls, where: _Path, exclude: _Filter, include: _Filter) -> StrIter:
+        for file in glob(os.path.join(where, "*.py")):
+            module, _ext = os.path.splitext(os.path.basename(file))
+
+            if not cls._looks_like_module(module):
+                continue
+
+            if include(module) and not exclude(module):
+                yield module
+
+    _looks_like_module = staticmethod(_valid_name)
+
+
+# We have to be extra careful in the case of flat layout to not include files
+# and directories not meant for distribution (e.g. tool-related)
+
+
+class FlatLayoutPackageFinder(PEP420PackageFinder):
+    _EXCLUDE = (
+        "ci",
+        "bin",
+        "debian",
+        "doc",
+        "docs",
+        "documentation",
+        "manpages",
+        "news",
+        "newsfragments",
+        "changelog",
+        "test",
+        "tests",
+        "unit_test",
+        "unit_tests",
+        "example",
+        "examples",
+        "scripts",
+        "tools",
+        "util",
+        "utils",
+        "python",
+        "build",
+        "dist",
+        "venv",
+        "env",
+        "requirements",
+        # ---- Task runners / Build tools ----
+        "tasks",  # invoke
+        "fabfile",  # fabric
+        "site_scons",  # SCons
+        # ---- Other tools ----
+        "benchmark",
+        "benchmarks",
+        "exercise",
+        "exercises",
+        "htmlcov",  # Coverage.py
+        # ---- Hidden directories/Private packages ----
+        "[._]*",
+    )
+
+    DEFAULT_EXCLUDE = tuple(chain_iter((p, f"{p}.*") for p in _EXCLUDE))
+    """Reserved package names"""
+
+    @staticmethod
+    def _looks_like_package(_path: _Path, package_name: str) -> bool:
+        names = package_name.split('.')
+        # Consider PEP 561
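+        # (stub-only distributions ship a top-level "<pkg>-stubs" package,
+        #  whose name is not a valid Python identifier)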
+        root_pkg_is_valid = names[0].isidentifier() or names[0].endswith("-stubs")
+        return root_pkg_is_valid and all(name.isidentifier() for name in names[1:])
+
+
+class FlatLayoutModuleFinder(ModuleFinder):
+    DEFAULT_EXCLUDE = (
+        "setup",
+        "conftest",
+        "test",
+        "tests",
+        "example",
+        "examples",
+        "build",
+        # ---- Task runners ----
+        "toxfile",
+        "noxfile",
+        "pavement",
+        "dodo",
+        "tasks",
+        "fabfile",
+        # ---- Other tools ----
+        "[Ss][Cc]onstruct",  # SCons
+        "conanfile",  # Connan: C/C++ build tool
+        "manage",  # Django
+        "benchmark",
+        "benchmarks",
+        "exercise",
+        "exercises",
+        # ---- Hidden files/Private modules ----
+        "[._]*",
+    )
+    """Reserved top-level module names"""
+
+
+def _find_packages_within(root_pkg: str, pkg_dir: _Path) -> List[str]:
+    nested = PEP420PackageFinder.find(pkg_dir)
+    return [root_pkg] + [".".join((root_pkg, n)) for n in nested]
+
+
+class ConfigDiscovery:
+    """Fill-in metadata and options that can be automatically derived
+    (from other metadata/options, the file system or conventions)
+    """
+
+    def __init__(self, distribution: "Distribution"):
+        self.dist = distribution
+        self._called = False
+        self._disabled = False
+        self._skip_ext_modules = False
+
+    def _disable(self):
+        """Internal API to disable automatic discovery"""
+        self._disabled = True
+
+    def _ignore_ext_modules(self):
+        """Internal API to disregard ext_modules.
+
+        Normally auto-discovery would not be triggered if ``ext_modules`` are set
+        (this is done for backward compatibility with existing packages relying on
+        ``setup.py`` or ``setup.cfg``). However, ``setuptools`` can call this function
+        to ignore given ``ext_modules`` and proceed with the auto-discovery if
+        ``packages`` and ``py_modules`` are not given (e.g. when using pyproject.toml
+        metadata).
+        """
+        self._skip_ext_modules = True
+
+    @property
+    def _root_dir(self) -> _Path:
+        # It is best to wait until `src_root` is set in dist before using _root_dir.
+        return self.dist.src_root or os.curdir
+
+    @property
+    def _package_dir(self) -> Dict[str, str]:
+        if self.dist.package_dir is None:
+            return {}
+        return self.dist.package_dir
+
+    def __call__(self, force=False, name=True, ignore_ext_modules=False):
+        """Automatically discover missing configuration fields
+        and modifies the given ``distribution`` object in-place.
+
+        Note that by default this will only have an effect the first time the
+        ``ConfigDiscovery`` object is called.
+
+        To repeatedly invoke automatic discovery (e.g. when the project
+        directory changes), please use ``force=True`` (or create a new
+        ``ConfigDiscovery`` instance).
+        """
+        if force is False and (self._called or self._disabled):
+            # Avoid overhead of multiple calls
+            return
+
+        self._analyse_package_layout(ignore_ext_modules)
+        if name:
+            self.analyse_name()  # depends on ``packages`` and ``py_modules``
+
+        self._called = True
+
+    def _explicitly_specified(self, ignore_ext_modules: bool) -> bool:
+        """``True`` if the user has specified some form of package/module listing"""
+        ignore_ext_modules = ignore_ext_modules or self._skip_ext_modules
+        ext_modules = not (self.dist.ext_modules is None or ignore_ext_modules)
+        return (
+            self.dist.packages is not None
+            or self.dist.py_modules is not None
+            or ext_modules
+            or (
+                hasattr(self.dist, "configuration") and self.dist.configuration
+            )
+            # ^ Some projects use numpy.distutils.misc_util.Configuration
+        )
+
+    def _analyse_package_layout(self, ignore_ext_modules: bool) -> bool:
+        if self._explicitly_specified(ignore_ext_modules):
+            # A package/module listing was explicitly provided, so skip
+            # auto-discovery (it only runs when nothing is given).
+            return True
+
+        log.debug(
+            "No `packages` or `py_modules` configuration, performing "
+            "automatic discovery."
+        )
+
+        return (
+            self._analyse_explicit_layout()
+            or self._analyse_src_layout()
+            # flat-layout is the trickiest for discovery so it should be last
+            or self._analyse_flat_layout()
+        )
+
+    def _analyse_explicit_layout(self) -> bool:
+        """The user can explicitly give a package layout via ``package_dir``"""
+        package_dir = self._package_dir.copy()  # don't modify directly
+        package_dir.pop("", None)  # This falls under the "src-layout" umbrella
+        root_dir = self._root_dir
+
+        if not package_dir:
+            return False
+
+        log.debug(f"`explicit-layout` detected -- analysing {package_dir}")
+        pkgs = chain_iter(
+            _find_packages_within(pkg, os.path.join(root_dir, parent_dir))
+            for pkg, parent_dir in package_dir.items()
+        )
+        self.dist.packages = list(pkgs)
+        log.debug(f"discovered packages -- {self.dist.packages}")
+        return True
+
+    def _analyse_src_layout(self) -> bool:
+        """Try to find all packages or modules under the ``src`` directory
+        (or anything pointed to by ``package_dir[""]``).
+
+        The "src-layout" is relatively safe for automatic discovery.
+        We assume that everything within is meant to be included in the
+        distribution.
+
+        If ``package_dir[""]`` is not given, but the ``src`` directory exists,
+        this function will set ``package_dir[""] = "src"``.
+        """
+        package_dir = self._package_dir
+        src_dir = os.path.join(self._root_dir, package_dir.get("", "src"))
+        if not os.path.isdir(src_dir):
+            return False
+
+        log.debug(f"`src-layout` detected -- analysing {src_dir}")
+        package_dir.setdefault("", os.path.basename(src_dir))
+        self.dist.package_dir = package_dir  # persist eventual modifications
+        self.dist.packages = PEP420PackageFinder.find(src_dir)
+        self.dist.py_modules = ModuleFinder.find(src_dir)
+        log.debug(f"discovered packages -- {self.dist.packages}")
+        log.debug(f"discovered py_modules -- {self.dist.py_modules}")
+        return True
+
+    def _analyse_flat_layout(self) -> bool:
+        """Try to find all packages and modules under the project root.
+
+        Since the ``flat-layout`` is more dangerous in terms of accidentally including
+        extra files/directories, this function is more conservative and will raise an
+        error if multiple packages or modules are found.
+
+        This assumes that multi-package dists are uncommon and refuses to support
+        that use case in order to prevent unintended errors.
+        """
+        log.debug(f"`flat-layout` detected -- analysing {self._root_dir}")
+        return self._analyse_flat_packages() or self._analyse_flat_modules()
+
+    def _analyse_flat_packages(self) -> bool:
+        self.dist.packages = FlatLayoutPackageFinder.find(self._root_dir)
+        top_level = remove_nested_packages(remove_stubs(self.dist.packages))
+        log.debug(f"discovered packages -- {self.dist.packages}")
+        self._ensure_no_accidental_inclusion(top_level, "packages")
+        return bool(top_level)
+
+    def _analyse_flat_modules(self) -> bool:
+        self.dist.py_modules = FlatLayoutModuleFinder.find(self._root_dir)
+        log.debug(f"discovered py_modules -- {self.dist.py_modules}")
+        self._ensure_no_accidental_inclusion(self.dist.py_modules, "modules")
+        return bool(self.dist.py_modules)
+
+    def _ensure_no_accidental_inclusion(self, detected: List[str], kind: str):
+        if len(detected) > 1:
+            from inspect import cleandoc
+
+            from setuptools.errors import PackageDiscoveryError
+
+            msg = f"""Multiple top-level {kind} discovered in a flat-layout: {detected}.
+
+            To avoid accidental inclusion of unwanted files or directories,
+            setuptools will not proceed with this build.
+
+            If you are trying to create a single distribution with multiple {kind}
+            on purpose, you should not rely on automatic discovery.
+            Instead, consider the following options:
+
+            1. set up custom discovery (`find` directive with `include` or `exclude`)
+            2. use a `src-layout`
+            3. explicitly set `py_modules` or `packages` with a list of names
+
+            To find more information, look for "package discovery" on setuptools docs.
+            """
+            raise PackageDiscoveryError(cleandoc(msg))
+
+    def analyse_name(self):
+        """The packages/modules are the essential contribution of the author.
+        Therefore the name of the distribution can be derived from them.
+        """
+        if self.dist.metadata.name or self.dist.name:
+            # get_name() is not reliable (can return "UNKNOWN")
+            return None
+
+        log.debug("No `name` configuration, performing automatic discovery")
+
+        name = (
+            self._find_name_single_package_or_module()
+            or self._find_name_from_packages()
+        )
+        if name:
+            self.dist.metadata.name = name
+
+    def _find_name_single_package_or_module(self) -> Optional[str]:
+        """Exactly one module or package"""
+        for field in ('packages', 'py_modules'):
+            items = getattr(self.dist, field, None) or []
+            if items and len(items) == 1:
+                log.debug(f"Single module/package detected, name: {items[0]}")
+                return items[0]
+
+        return None
+
+    def _find_name_from_packages(self) -> Optional[str]:
+        """Try to find the root package that is not a PEP 420 namespace"""
+        if not self.dist.packages:
+            return None
+
+        packages = remove_stubs(sorted(self.dist.packages, key=len))
+        package_dir = self.dist.package_dir or {}
+
+        parent_pkg = find_parent_package(packages, package_dir, self._root_dir)
+        if parent_pkg:
+            log.debug(f"Common parent package detected, name: {parent_pkg}")
+            return parent_pkg
+
+        log.warn("No parent package detected, impossible to derive `name`")
+        return None
+
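+# Usage sketch (illustrative; 'dist' is a setuptools Distribution whose
+# packages/py_modules/name were left unset):
+#
+#     discovery = ConfigDiscovery(dist)
+#     discovery()            # fills in packages/py_modules and the name
+#     discovery(force=True)  # re-run, e.g. after the project dir changed
+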
+
+def remove_nested_packages(packages: List[str]) -> List[str]:
+    """Remove nested packages from a list of packages.
+
+    >>> remove_nested_packages(["a", "a.b1", "a.b2", "a.b1.c1"])
+    ['a']
+    >>> remove_nested_packages(["a", "b", "c.d", "c.d.e.f", "g.h", "a.a1"])
+    ['a', 'b', 'c.d', 'g.h']
+    """
+    pkgs = sorted(packages, key=len)
+    top_level = pkgs[:]
+    size = len(pkgs)
+    for i, name in enumerate(reversed(pkgs)):
+        if any(name.startswith(f"{other}.") for other in top_level):
+            top_level.pop(size - i - 1)
+
+    return top_level
+
+
+def remove_stubs(packages: List[str]) -> List[str]:
+    """Remove type stubs (:pep:`561`) from a list of packages.
+
+    >>> remove_stubs(["a", "a.b", "a-stubs", "a-stubs.b.c", "b", "c-stubs"])
+    ['a', 'a.b', 'b']
+    """
+    return [pkg for pkg in packages if not pkg.split(".")[0].endswith("-stubs")]
+
+
+def find_parent_package(
+    packages: List[str], package_dir: Mapping[str, str], root_dir: _Path
+) -> Optional[str]:
+    """Find the parent package that is not a namespace."""
+    packages = sorted(packages, key=len)
+    common_ancestors = []
+    for i, name in enumerate(packages):
+        if not all(n.startswith(f"{name}.") for n in packages[i + 1 :]):
+            # Since packages are sorted by length, this condition is able
+            # to find a list of all common ancestors.
+            # When there is divergence (e.g. multiple root packages)
+            # the list will be empty
+            break
+        common_ancestors.append(name)
+
+    for name in common_ancestors:
+        pkg_path = find_package_path(name, package_dir, root_dir)
+        init = os.path.join(pkg_path, "__init__.py")
+        if os.path.isfile(init):
+            return name
+
+    return None
+
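+# Example (illustrative): for packages ['a', 'a.b'] with an existing
+# 'a/__init__.py', find_parent_package returns 'a'; for divergent roots
+# such as ['a', 'b'] it returns None.
+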
+
+def find_package_path(
+    name: str, package_dir: Mapping[str, str], root_dir: _Path
+) -> str:
+    """Given a package name, return the path where it should be found on
+    disk, considering the ``package_dir`` option.
+
+    >>> path = find_package_path("my.pkg", {"": "root/is/nested"}, ".")
+    >>> path.replace(os.sep, "/")
+    './root/is/nested/my/pkg'
+
+    >>> path = find_package_path("my.pkg", {"my": "root/is/nested"}, ".")
+    >>> path.replace(os.sep, "/")
+    './root/is/nested/pkg'
+
+    >>> path = find_package_path("my.pkg", {"my.pkg": "root/is/nested"}, ".")
+    >>> path.replace(os.sep, "/")
+    './root/is/nested'
+
+    >>> path = find_package_path("other.pkg", {"my.pkg": "root/is/nested"}, ".")
+    >>> path.replace(os.sep, "/")
+    './other/pkg'
+    """
+    parts = name.split(".")
+    for i in range(len(parts), 0, -1):
+        # Look backwards, the most specific package_dir first
+        partial_name = ".".join(parts[:i])
+        if partial_name in package_dir:
+            parent = package_dir[partial_name]
+            return os.path.join(root_dir, parent, *parts[i:])
+
+    parent = package_dir.get("") or ""
+    return os.path.join(root_dir, *parent.split("/"), *parts)
+
+
+def construct_package_dir(packages: List[str], package_path: _Path) -> Dict[str, str]:
+    parent_pkgs = remove_nested_packages(packages)
+    prefix = Path(package_path).parts
+    return {pkg: "/".join([*prefix, *pkg.split(".")]) for pkg in parent_pkgs}
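+
+
+# Example (illustrative): construct_package_dir(['mypkg', 'mypkg.sub'], 'src')
+# -> {'mypkg': 'src/mypkg'}; nested packages are dropped and each remaining
+# top-level package is mapped to its location under the given path.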
diff --git a/.venv/Lib/site-packages/setuptools/dist.py b/.venv/Lib/site-packages/setuptools/dist.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1d361f1c30e6f88f524ac11196e32462d27562c
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/dist.py
@@ -0,0 +1,1006 @@
+__all__ = ['Distribution']
+
+
+import io
+import itertools
+import numbers
+import os
+import re
+import sys
+from contextlib import suppress
+from glob import iglob
+from pathlib import Path
+from typing import List, Optional, Set
+
+import distutils.cmd
+import distutils.command
+import distutils.core
+import distutils.dist
+import distutils.log
+from distutils.debug import DEBUG
+from distutils.errors import DistutilsOptionError, DistutilsSetupError
+from distutils.fancy_getopt import translate_longopt
+from distutils.util import strtobool
+
+from .extern.more_itertools import partition, unique_everseen
+from .extern.ordered_set import OrderedSet
+from .extern.packaging.markers import InvalidMarker, Marker
+from .extern.packaging.specifiers import InvalidSpecifier, SpecifierSet
+from .extern.packaging.version import InvalidVersion, Version
+
+from . import _entry_points
+from . import _normalization
+from . import _reqs
+from . import command as _  # noqa  -- imported for side-effects
+from ._importlib import metadata
+from .config import setupcfg, pyprojecttoml
+from .discovery import ConfigDiscovery
+from .monkey import get_unpatched
+from .warnings import InformationOnly, SetuptoolsDeprecationWarning
+
+
+sequence = tuple, list
+
+
+def check_importable(dist, attr, value):
+    try:
+        ep = metadata.EntryPoint(value=value, name=None, group=None)
+        assert not ep.extras
+    except (TypeError, ValueError, AttributeError, AssertionError) as e:
+        raise DistutilsSetupError(
+            "%r must be importable 'module:attrs' string (got %r)" % (attr, value)
+        ) from e
+
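+# For example (illustrative): a value such as 'pkg.mod:func' passes this
+# check, while one requesting extras, e.g. 'pkg.mod:func [extra]', is
+# rejected.
+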
+
+def assert_string_list(dist, attr, value):
+    """Verify that value is a string list"""
+    try:
+        # verify that value is a list or tuple to exclude unordered
+        # or single-use iterables
+        assert isinstance(value, (list, tuple))
+        # verify that elements of value are strings: ''.join raises TypeError
+        # for non-string elements (and a plain string would join to itself)
+        assert ''.join(value) != value
+    except (TypeError, ValueError, AttributeError, AssertionError) as e:
+        raise DistutilsSetupError(
+            "%r must be a list of strings (got %r)" % (attr, value)
+        ) from e
+
+
+def check_nsp(dist, attr, value):
+    """Verify that namespace packages are valid"""
+    ns_packages = value
+    assert_string_list(dist, attr, ns_packages)
+    for nsp in ns_packages:
+        if not dist.has_contents_for(nsp):
+            raise DistutilsSetupError(
+                "Distribution contains no modules or packages for "
+                + "namespace package %r" % nsp
+            )
+        parent, sep, child = nsp.rpartition('.')
+        if parent and parent not in ns_packages:
+            distutils.log.warn(
+                "WARNING: %r is declared as a package namespace, but %r"
+                " is not: please correct this in setup.py",
+                nsp,
+                parent,
+            )
+        SetuptoolsDeprecationWarning.emit(
+            "The namespace_packages parameter is deprecated.",
+            "Please replace its usage with implicit namespaces (PEP 420).",
+            see_docs="references/keywords.html#keyword-namespace-packages"
+            # TODO: define due_date, it may break old packages that are no longer
+            # maintained (e.g. sphinxcontrib extensions) when installed from source.
+            # Warning officially introduced in May 2022, however the deprecation
+            # was mentioned much earlier in the docs (May 2020, see #2149).
+        )
+
+
+def check_extras(dist, attr, value):
+    """Verify that extras_require mapping is valid"""
+    try:
+        list(itertools.starmap(_check_extra, value.items()))
+    except (TypeError, ValueError, AttributeError) as e:
+        raise DistutilsSetupError(
+            "'extras_require' must be a dictionary whose values are "
+            "strings or lists of strings containing valid project/version "
+            "requirement specifiers."
+        ) from e
+
+
+def _check_extra(extra, reqs):
+    name, sep, marker = extra.partition(':')
+    try:
+        _check_marker(marker)
+    except InvalidMarker:
+        msg = f"Invalid environment marker: {marker} ({extra!r})"
+        raise DistutilsSetupError(msg) from None
+    list(_reqs.parse(reqs))
+
+
+def _check_marker(marker):
+    if not marker:
+        return
+    m = Marker(marker)
+    m.evaluate()
+
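+# Example (illustrative) of the mapping these checks accept:
+#
+#     extras_require = {
+#         'tests': ['pytest'],
+#         'ssl:sys_platform == "win32"': 'wincertstore',  # marker after ':'
+#     }
+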
+
+def assert_bool(dist, attr, value):
+    """Verify that value is True, False, 0, or 1"""
+    if bool(value) != value:
+        tmpl = "{attr!r} must be a boolean value (got {value!r})"
+        raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
+
+
+def invalid_unless_false(dist, attr, value):
+    if not value:
+        DistDeprecationWarning.emit(f"{attr} is ignored.")
+        # TODO: should there be a `due_date` here?
+        return
+    raise DistutilsSetupError(f"{attr} is invalid.")
+
+
+def check_requirements(dist, attr, value):
+    """Verify that install_requires is a valid requirements list"""
+    try:
+        list(_reqs.parse(value))
+        if isinstance(value, (dict, set)):
+            raise TypeError("Unordered types are not allowed")
+    except (TypeError, ValueError) as error:
+        tmpl = (
+            "{attr!r} must be a string or list of strings "
+            "containing valid project/version requirement specifiers; {error}"
+        )
+        raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) from error
+
+
+def check_specifier(dist, attr, value):
+    """Verify that value is a valid version specifier"""
+    try:
+        SpecifierSet(value)
+    except (InvalidSpecifier, AttributeError) as error:
+        tmpl = (
+            "{attr!r} must be a string " "containing valid version specifiers; {error}"
+        )
+        raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) from error
+
+
+def check_entry_points(dist, attr, value):
+    """Verify that entry_points map is parseable"""
+    try:
+        _entry_points.load(value)
+    except Exception as e:
+        raise DistutilsSetupError(e) from e
+
+
+def check_test_suite(dist, attr, value):
+    if not isinstance(value, str):
+        raise DistutilsSetupError("test_suite must be a string")
+
+
+def check_package_data(dist, attr, value):
+    """Verify that value is a dictionary of package names to glob lists"""
+    if not isinstance(value, dict):
+        raise DistutilsSetupError(
+            "{!r} must be a dictionary mapping package names to lists of "
+            "string wildcard patterns".format(attr)
+        )
+    for k, v in value.items():
+        if not isinstance(k, str):
+            raise DistutilsSetupError(
+                "keys of {!r} dict must be strings (got {!r})".format(attr, k)
+            )
+        assert_string_list(dist, 'values of {!r} dict'.format(attr), v)
+
+
+def check_packages(dist, attr, value):
+    for pkgname in value:
+        if not re.match(r'\w+(\.\w+)*', pkgname):
+            distutils.log.warn(
+                "WARNING: %r not a valid package name; please use only "
+                ".-separated package names in setup.py",
+                pkgname,
+            )
+
+
+_Distribution = get_unpatched(distutils.core.Distribution)
+
+
+class Distribution(_Distribution):
+    """Distribution with support for tests and package data
+
+    This is an enhanced version of 'distutils.dist.Distribution' that
+    effectively adds the following new optional keyword arguments to 'setup()':
+
+     'install_requires' -- a string or sequence of strings specifying project
+        versions that the distribution requires when installed, in the format
+        used by 'pkg_resources.require()'.  They will be installed
+        automatically when the package is installed.  If you wish to use
+        packages that are not available in PyPI, or want to give your users an
+        alternate download location, you can add a 'find_links' option to the
+        '[easy_install]' section of your project's 'setup.cfg' file, and then
+        setuptools will scan the listed web pages for links that satisfy the
+        requirements.
+
+     'extras_require' -- a dictionary mapping names of optional "extras" to the
+        additional requirement(s) that using those extras incurs. For example,
+        this::
+
+            extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
+
+        indicates that the distribution can optionally provide an extra
+        capability called "reST", but it can only be used if docutils and
+        reSTedit are installed.  If the user installs your package using
+        EasyInstall and requests one of your extras, the corresponding
+        additional requirements will be installed if needed.
+
+     'test_suite' -- the name of a test suite to run for the 'test' command.
+        If the user runs 'python setup.py test', the package will be installed,
+        and the named test suite will be run.  The format is the same as
+        would be used on a 'unittest.py' command line.  That is, it is the
+        dotted name of an object to import and call to generate a test suite.
+
+     'package_data' -- a dictionary mapping package names to lists of filenames
+        or globs to use to find data files contained in the named packages.
+        If the dictionary has filenames or globs listed under '""' (the empty
+        string), those names will be searched for in every package, in addition
+        to any names for the specific package.  Data files found using these
+        names/globs will be installed along with the package, in the same
+        location as the package.  Note that globs are allowed to reference
+        the contents of non-package subdirectories, as long as you use '/' as
+        a path separator.  (Globs are automatically converted to
+        platform-specific paths at runtime.)
+
+    In addition to these new keywords, this class also has several new methods
+    for manipulating the distribution's contents.  For example, the 'include()'
+    and 'exclude()' methods can be thought of as in-place add and subtract
+    commands that add or remove packages, modules, extensions, and so on from
+    the distribution.
+    """
+
+    _DISTUTILS_UNSUPPORTED_METADATA = {
+        'long_description_content_type': lambda: None,
+        'project_urls': dict,
+        'provides_extras': OrderedSet,
+        'license_file': lambda: None,
+        'license_files': lambda: None,
+        # Both install_requires and extras_require are needed to write PKG-INFO,
+        # so we take this opportunity to cache parsed requirement objects.
+        # These attributes are not part of the public API and are intended
+        # for internal use.
+        '_normalized_install_requires': dict,  # Dict[str, Requirement]
+        '_normalized_extras_require': dict,  # Dict[str, Dict[str, Requirement]]
+    }
+
+    _patched_dist = None
+
+    def patch_missing_pkg_info(self, attrs):
+        # Fake up a replacement for the data that would normally come from
+        # PKG-INFO, but which might not yet be built if this is a fresh
+        # checkout.
+        #
+        if not attrs or 'name' not in attrs or 'version' not in attrs:
+            return
+        name = _normalization.safe_name(str(attrs['name'])).lower()
+        with suppress(metadata.PackageNotFoundError):
+            dist = metadata.distribution(name)
+            if dist is not None and not dist.read_text('PKG-INFO'):
+                dist._version = _normalization.safe_version(str(attrs['version']))
+                self._patched_dist = dist
+
+    def __init__(self, attrs=None):
+        have_package_data = hasattr(self, "package_data")
+        if not have_package_data:
+            self.package_data = {}
+        attrs = attrs or {}
+        self.dist_files = []
+        # Filter-out setuptools' specific options.
+        self.src_root = attrs.pop("src_root", None)
+        self.patch_missing_pkg_info(attrs)
+        self.dependency_links = attrs.pop('dependency_links', [])
+        self.setup_requires = attrs.pop('setup_requires', [])
+        for ep in metadata.entry_points(group='distutils.setup_keywords'):
+            vars(self).setdefault(ep.name, None)
+        _Distribution.__init__(
+            self,
+            {
+                k: v
+                for k, v in attrs.items()
+                if k not in self._DISTUTILS_UNSUPPORTED_METADATA
+            },
+        )
+
+        # Private API (setuptools-use only, not restricted to Distribution)
+        # Stores files that are referenced by the configuration and need to be in the
+        # sdist (e.g. `version = file: VERSION.txt`)
+        self._referenced_files: Set[str] = set()
+
+        self.set_defaults = ConfigDiscovery(self)
+
+        self._set_metadata_defaults(attrs)
+
+        self.metadata.version = self._normalize_version(
+            self._validate_version(self.metadata.version)
+        )
+        self._finalize_requires()
+
+    def _validate_metadata(self):
+        required = {"name"}
+        provided = {
+            key
+            for key in vars(self.metadata)
+            if getattr(self.metadata, key, None) is not None
+        }
+        missing = required - provided
+
+        if missing:
+            msg = f"Required package metadata is missing: {missing}"
+            raise DistutilsSetupError(msg)
+
+    def _set_metadata_defaults(self, attrs):
+        """
+        Fill in missing metadata fields not supported by distutils.
+        Some fields may have been set by other tools (e.g. pbr).
+        Those fields (vars(self.metadata)) take precedence over
+        the supplied attrs.
+        """
+        for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():
+            vars(self.metadata).setdefault(option, attrs.get(option, default()))
+
+    @staticmethod
+    def _normalize_version(version):
+        from . import sic
+
+        if isinstance(version, sic) or version is None:
+            return version
+
+        normalized = str(Version(version))
+        if version != normalized:
+            InformationOnly.emit(f"Normalizing '{version}' to '{normalized}'")
+            return normalized
+        return version
+
+    @staticmethod
+    def _validate_version(version):
+        if isinstance(version, numbers.Number):
+            # Some people apparently take "version number" too literally :)
+            version = str(version)
+
+        if version is not None:
+            try:
+                Version(version)
+            except (InvalidVersion, TypeError):
+                from . import sic
+
+                SetuptoolsDeprecationWarning.emit(
+                    f"Invalid version: {version!r}.",
+                    """
+                    The version specified is not a valid version according to PEP 440.
+                    This may not work as expected with newer versions of
+                    setuptools, pip, and PyPI.
+                    """,
+                    see_url="https://peps.python.org/pep-0440/",
+                    due_date=(2023, 9, 26),
+                    # Warning initially introduced in 26 Sept 2014
+                    # pypa/packaging already removed legacy versions.
+                )
+                return sic(version)
+        return version
+
+    def _finalize_requires(self):
+        """
+        Set `metadata.python_requires` and fix environment markers
+        in `install_requires` and `extras_require`.
+        """
+        if getattr(self, 'python_requires', None):
+            self.metadata.python_requires = self.python_requires
+
+        self._normalize_requires()
+
+        if self.extras_require:
+            for extra in self.extras_require.keys():
+                # Setuptools allows a weird ":" syntax for extras
+                extra = extra.split(':')[0]
+                if extra:
+                    self.metadata.provides_extras.add(extra)
+
+    def _normalize_requires(self):
+        """Make sure requirement-related attributes exist and are normalized"""
+        install_requires = getattr(self, "install_requires", None) or []
+        extras_require = getattr(self, "extras_require", None) or {}
+        meta = self.metadata
+        meta._normalized_install_requires = {
+            str(r): r for r in _reqs.parse(install_requires)
+        }
+        meta._normalized_extras_require = {
+            k: {str(r): r for r in _reqs.parse(v or [])}
+            for k, v in extras_require.items()
+        }
+        self.install_requires = list(meta._normalized_install_requires)
+        self.extras_require = {
+            k: list(v) for k, v in meta._normalized_extras_require.items()
+        }
+
+    def _finalize_license_files(self):
+        """Compute names of all license files which should be included."""
+        license_files: Optional[List[str]] = self.metadata.license_files
+        patterns: List[str] = license_files if license_files else []
+
+        license_file: Optional[str] = self.metadata.license_file
+        if license_file and license_file not in patterns:
+            patterns.append(license_file)
+
+        if license_files is None and license_file is None:
+            # Default patterns match the ones wheel uses
+            # See https://wheel.readthedocs.io/en/stable/user_guide.html
+            # -> 'Including license files in the generated wheel file'
+            patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
+
+        self.metadata.license_files = list(
+            unique_everseen(self._expand_patterns(patterns))
+        )
+
+    @staticmethod
+    def _expand_patterns(patterns):
+        """
+        >>> list(Distribution._expand_patterns(['LICENSE']))
+        ['LICENSE']
+        >>> list(Distribution._expand_patterns(['setup.cfg', 'LIC*']))
+        ['setup.cfg', 'LICENSE']
+        """
+        return (
+            path
+            for pattern in patterns
+            for path in sorted(iglob(pattern))
+            if not path.endswith('~') and os.path.isfile(path)
+        )
+
+    # FIXME: 'Distribution._parse_config_files' is too complex (14)
+    def _parse_config_files(self, filenames=None):  # noqa: C901
+        """
+        Adapted from distutils.dist.Distribution.parse_config_files,
+        this method provides the same functionality in subtly-improved
+        ways.
+        """
+        from configparser import ConfigParser
+
+        # Ignore install directory options if we have a venv
+        ignore_options = (
+            []
+            if sys.prefix == sys.base_prefix
+            else [
+                'install-base',
+                'install-platbase',
+                'install-lib',
+                'install-platlib',
+                'install-purelib',
+                'install-headers',
+                'install-scripts',
+                'install-data',
+                'prefix',
+                'exec-prefix',
+                'home',
+                'user',
+                'root',
+            ]
+        )
+
+        ignore_options = frozenset(ignore_options)
+
+        if filenames is None:
+            filenames = self.find_config_files()
+
+        if DEBUG:
+            self.announce("Distribution.parse_config_files():")
+
+        parser = ConfigParser()
+        parser.optionxform = str
+        for filename in filenames:
+            with io.open(filename, encoding='utf-8') as reader:
+                if DEBUG:
+                    self.announce("  reading {filename}".format(**locals()))
+                parser.read_file(reader)
+            for section in parser.sections():
+                options = parser.options(section)
+                opt_dict = self.get_option_dict(section)
+
+                for opt in options:
+                    if opt == '__name__' or opt in ignore_options:
+                        continue
+
+                    val = parser.get(section, opt)
+                    opt = self.warn_dash_deprecation(opt, section)
+                    opt = self.make_option_lowercase(opt, section)
+                    opt_dict[opt] = (filename, val)
+
+            # Make the ConfigParser forget everything (so we retain
+            # the original filenames that options come from)
+            parser.__init__()
+
+        if 'global' not in self.command_options:
+            return
+
+        # If there was a "global" section in the config file, use it
+        # to set Distribution options.
+
+        for opt, (src, val) in self.command_options['global'].items():
+            alias = self.negative_opt.get(opt)
+            if alias:
+                val = not strtobool(val)
+            elif opt in ('verbose', 'dry_run'):  # ugh!
+                val = strtobool(val)
+
+            try:
+                setattr(self, alias or opt, val)
+            except ValueError as e:
+                raise DistutilsOptionError(e) from e
+
+    def warn_dash_deprecation(self, opt, section):
+        if section in (
+            'options.extras_require',
+            'options.data_files',
+        ):
+            return opt
+
+        underscore_opt = opt.replace('-', '_')
+        commands = list(
+            itertools.chain(
+                distutils.command.__all__,
+                self._setuptools_commands(),
+            )
+        )
+        if (
+            not section.startswith('options')
+            and section != 'metadata'
+            and section not in commands
+        ):
+            return underscore_opt
+
+        if '-' in opt:
+            SetuptoolsDeprecationWarning.emit(
+                "Invalid dash-separated options",
+                f"""
+                Usage of dash-separated {opt!r} will not be supported in future
+                versions. Please use the underscore name {underscore_opt!r} instead.
+                """,
+                see_docs="userguide/declarative_config.html",
+                due_date=(2023, 9, 26),
+                # Warning initially introduced in 3 Mar 2021
+            )
+        return underscore_opt
+
+    def _setuptools_commands(self):
+        try:
+            return metadata.distribution('setuptools').entry_points.names
+        except metadata.PackageNotFoundError:
+            # during bootstrapping, distribution doesn't exist
+            return []
+
+    def make_option_lowercase(self, opt, section):
+        if section != 'metadata' or opt.islower():
+            return opt
+
+        lowercase_opt = opt.lower()
+        SetuptoolsDeprecationWarning.emit(
+            "Invalid uppercase configuration",
+            f"""
+            Usage of uppercase key {opt!r} in {section!r} will not be supported in
+            future versions. Please use lowercase {lowercase_opt!r} instead.
+            """,
+            see_docs="userguide/declarative_config.html",
+            due_date=(2023, 9, 26),
+            # Warning initially introduced in 6 Mar 2021
+        )
+        return lowercase_opt
+
+    # FIXME: 'Distribution._set_command_options' is too complex (14)
+    def _set_command_options(self, command_obj, option_dict=None):  # noqa: C901
+        """
+        Set the options for 'command_obj' from 'option_dict'.  Basically
+        this means copying elements of a dictionary ('option_dict') to
+        attributes of an instance ('command').
+
+        'command_obj' must be a Command instance.  If 'option_dict' is not
+        supplied, uses the standard option dictionary for this command
+        (from 'self.command_options').
+
+        (Adopted from distutils.dist.Distribution._set_command_options)
+        """
+        command_name = command_obj.get_command_name()
+        if option_dict is None:
+            option_dict = self.get_option_dict(command_name)
+
+        if DEBUG:
+            self.announce("  setting options for '%s' command:" % command_name)
+        for option, (source, value) in option_dict.items():
+            if DEBUG:
+                self.announce("    %s = %s (from %s)" % (option, value, source))
+            try:
+                bool_opts = [translate_longopt(o) for o in command_obj.boolean_options]
+            except AttributeError:
+                bool_opts = []
+            try:
+                neg_opt = command_obj.negative_opt
+            except AttributeError:
+                neg_opt = {}
+
+            try:
+                is_string = isinstance(value, str)
+                if option in neg_opt and is_string:
+                    setattr(command_obj, neg_opt[option], not strtobool(value))
+                elif option in bool_opts and is_string:
+                    setattr(command_obj, option, strtobool(value))
+                elif hasattr(command_obj, option):
+                    setattr(command_obj, option, value)
+                else:
+                    raise DistutilsOptionError(
+                        "error in %s: command '%s' has no such option '%s'"
+                        % (source, command_name, option)
+                    )
+            except ValueError as e:
+                raise DistutilsOptionError(e) from e
+
+    def _get_project_config_files(self, filenames):
+        """Add default file and split between INI and TOML"""
+        tomlfiles = []
+        standard_project_metadata = Path(self.src_root or os.curdir, "pyproject.toml")
+        if filenames is not None:
+            parts = partition(lambda f: Path(f).suffix == ".toml", filenames)
+            filenames = list(parts[0])  # 1st element => predicate is False
+            tomlfiles = list(parts[1])  # 2nd element => predicate is True
+        elif standard_project_metadata.exists():
+            tomlfiles = [standard_project_metadata]
+        return filenames, tomlfiles
+
+    def parse_config_files(self, filenames=None, ignore_option_errors=False):
+        """Parses configuration files from various levels
+        and loads configuration.
+        """
+        inifiles, tomlfiles = self._get_project_config_files(filenames)
+
+        self._parse_config_files(filenames=inifiles)
+
+        setupcfg.parse_configuration(
+            self, self.command_options, ignore_option_errors=ignore_option_errors
+        )
+        for filename in tomlfiles:
+            pyprojecttoml.apply_configuration(self, filename, ignore_option_errors)
+
+        self._finalize_requires()
+        self._finalize_license_files()
+
+    def fetch_build_eggs(self, requires):
+        """Resolve pre-setup requirements"""
+        from .installer import _fetch_build_eggs
+
+        return _fetch_build_eggs(self, requires)
+
+    def finalize_options(self):
+        """
+        Allow plugins to apply arbitrary operations to the
+        distribution. Each hook may optionally define an 'order'
+        to influence the order of execution. Smaller numbers
+        go first and the default is 0.
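+
+        A hypothetical hook registered under the
+        'setuptools.finalize_distribution_options' entry-point group might
+        look like::
+
+            def finalize(dist):
+                ...  # adjust the Distribution in place
+
+            finalize.order = 100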
+        """
+        group = 'setuptools.finalize_distribution_options'
+
+        def by_order(hook):
+            return getattr(hook, 'order', 0)
+
+        defined = metadata.entry_points(group=group)
+        filtered = itertools.filterfalse(self._removed, defined)
+        loaded = map(lambda e: e.load(), filtered)
+        for ep in sorted(loaded, key=by_order):
+            ep(self)
+
+    @staticmethod
+    def _removed(ep):
+        """
+        When removing an entry point, if metadata is loaded
+        from an older version of Setuptools, the removed
+        entry point will still be listed and will fail to load.
+        See #2765 for more details.
+        """
+        removed = {
+            # removed 2021-09-05
+            '2to3_doctests',
+        }
+        return ep.name in removed
+
+    def _finalize_setup_keywords(self):
+        for ep in metadata.entry_points(group='distutils.setup_keywords'):
+            value = getattr(self, ep.name, None)
+            if value is not None:
+                ep.load()(self, ep.name, value)
+
+    def get_egg_cache_dir(self):
+        from . import windows_support
+
+        egg_cache_dir = os.path.join(os.curdir, '.eggs')
+        if not os.path.exists(egg_cache_dir):
+            os.mkdir(egg_cache_dir)
+            windows_support.hide_file(egg_cache_dir)
+            readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
+            with open(readme_txt_filename, 'w') as f:
+                f.write(
+                    'This directory contains eggs that were downloaded '
+                    'by setuptools to build, test, and run plug-ins.\n\n'
+                )
+                f.write(
+                    'This directory caches those eggs to prevent '
+                    'repeated downloads.\n\n'
+                )
+                f.write('However, it is safe to delete this directory.\n\n')
+
+        return egg_cache_dir
+
+    def fetch_build_egg(self, req):
+        """Fetch an egg needed for building"""
+        from .installer import fetch_build_egg
+
+        return fetch_build_egg(self, req)
+
+    def get_command_class(self, command):
+        """Pluggable version of get_command_class()"""
+        if command in self.cmdclass:
+            return self.cmdclass[command]
+
+        eps = metadata.entry_points(group='distutils.commands', name=command)
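+        # for/else: the else branch runs only when no entry point matches,
+        # falling back to the distutils implementation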
+        for ep in eps:
+            self.cmdclass[command] = cmdclass = ep.load()
+            return cmdclass
+        else:
+            return _Distribution.get_command_class(self, command)
+
+    def print_commands(self):
+        for ep in metadata.entry_points(group='distutils.commands'):
+            if ep.name not in self.cmdclass:
+                cmdclass = ep.load()
+                self.cmdclass[ep.name] = cmdclass
+        return _Distribution.print_commands(self)
+
+    def get_command_list(self):
+        for ep in metadata.entry_points(group='distutils.commands'):
+            if ep.name not in self.cmdclass:
+                cmdclass = ep.load()
+                self.cmdclass[ep.name] = cmdclass
+        return _Distribution.get_command_list(self)
+
+    def include(self, **attrs):
+        """Add items to distribution that are named in keyword arguments
+
+        For example, 'dist.include(py_modules=["x"])' would add 'x' to
+        the distribution's 'py_modules' attribute, if it was not already
+        there.
+
+        Currently, this method only supports inclusion for attributes that are
+        lists or tuples.  If you need to add support for adding to other
+        attributes in this or a subclass, you can add an '_include_X' method,
+        where 'X' is the name of the attribute.  The method will be called with
+        the value passed to 'include()'.  So, 'dist.include(foo={"bar":"baz"})'
+        will try to call 'dist._include_foo({"bar":"baz"})', which can then
+        handle whatever special inclusion logic is needed.
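+
+        A minimal sketch, assuming 'dist' is a Distribution whose
+        'py_modules' is currently ['x']::
+
+            dist.include(py_modules=['y'])
+            assert dist.py_modules == ['x', 'y']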
+        """
+        for k, v in attrs.items():
+            include = getattr(self, '_include_' + k, None)
+            if include:
+                include(v)
+            else:
+                self._include_misc(k, v)
+
+    def exclude_package(self, package):
+        """Remove packages, modules, and extensions in named package"""
+
+        pfx = package + '.'
+        if self.packages:
+            self.packages = [
+                p for p in self.packages if p != package and not p.startswith(pfx)
+            ]
+
+        if self.py_modules:
+            self.py_modules = [
+                p for p in self.py_modules if p != package and not p.startswith(pfx)
+            ]
+
+        if self.ext_modules:
+            self.ext_modules = [
+                p
+                for p in self.ext_modules
+                if p.name != package and not p.name.startswith(pfx)
+            ]
+
+    def has_contents_for(self, package):
+        """Return true if 'exclude_package(package)' would do something"""
+
+        pfx = package + '.'
+
+        for p in self.iter_distribution_names():
+            if p == package or p.startswith(pfx):
+                return True
+
+    def _exclude_misc(self, name, value):
+        """Handle 'exclude()' for list/tuple attrs without a special handler"""
+        if not isinstance(value, sequence):
+            raise DistutilsSetupError(
+                "%s: setting must be a list or tuple (%r)" % (name, value)
+            )
+        try:
+            old = getattr(self, name)
+        except AttributeError as e:
+            raise DistutilsSetupError("%s: No such distribution setting" % name) from e
+        if old is not None and not isinstance(old, sequence):
+            raise DistutilsSetupError(
+                name + ": this setting cannot be changed via include/exclude"
+            )
+        elif old:
+            setattr(self, name, [item for item in old if item not in value])
+
+    def _include_misc(self, name, value):
+        """Handle 'include()' for list/tuple attrs without a special handler"""
+
+        if not isinstance(value, sequence):
+            raise DistutilsSetupError("%s: setting must be a list (%r)" % (name, value))
+        try:
+            old = getattr(self, name)
+        except AttributeError as e:
+            raise DistutilsSetupError("%s: No such distribution setting" % name) from e
+        if old is None:
+            setattr(self, name, value)
+        elif not isinstance(old, sequence):
+            raise DistutilsSetupError(
+                name + ": this setting cannot be changed via include/exclude"
+            )
+        else:
+            new = [item for item in value if item not in old]
+            setattr(self, name, old + new)
+
+    def exclude(self, **attrs):
+        """Remove items from distribution that are named in keyword arguments
+
+        For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
+        the distribution's 'py_modules' attribute.  Excluding packages uses
+        the 'exclude_package()' method, so all of the package's contained
+        packages, modules, and extensions are also excluded.
+
+        Currently, this method only supports exclusion from attributes that are
+        lists or tuples.  If you need to add support for excluding from other
+        attributes in this or a subclass, you can add an '_exclude_X' method,
+        where 'X' is the name of the attribute.  The method will be called with
+        the value passed to 'exclude()'.  So, 'dist.exclude(foo={"bar":"baz"})'
+        will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
+        handle whatever special exclusion logic is needed.
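+
+        A minimal sketch, assuming 'dist' is a Distribution whose
+        'py_modules' is currently ['x', 'y']::
+
+            dist.exclude(py_modules=['y'])
+            assert dist.py_modules == ['x']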
+        """
+        for k, v in attrs.items():
+            exclude = getattr(self, '_exclude_' + k, None)
+            if exclude:
+                exclude(v)
+            else:
+                self._exclude_misc(k, v)
+
+    def _exclude_packages(self, packages):
+        if not isinstance(packages, sequence):
+            raise DistutilsSetupError(
+                "packages: setting must be a list or tuple (%r)" % (packages,)
+            )
+        list(map(self.exclude_package, packages))
+
+    def _parse_command_opts(self, parser, args):
+        # Remove --with-X/--without-X options when processing command args
+        self.global_options = self.__class__.global_options
+        self.negative_opt = self.__class__.negative_opt
+
+        # First, expand any aliases
+        command = args[0]
+        aliases = self.get_option_dict('aliases')
+        while command in aliases:
+            src, alias = aliases[command]
+            del aliases[command]  # ensure each alias can expand only once!
+            import shlex
+
+            args[:1] = shlex.split(alias, True)
+            command = args[0]
+
+        nargs = _Distribution._parse_command_opts(self, parser, args)
+
+        # Handle commands that want to consume all remaining arguments
+        cmd_class = self.get_command_class(command)
+        if getattr(cmd_class, 'command_consumes_arguments', None):
+            self.get_option_dict(command)['args'] = ("command line", nargs)
+            if nargs is not None:
+                return []
+
+        return nargs
+
+    def get_cmdline_options(self):
+        """Return a '{cmd: {opt:val}}' map of all command-line options
+
+        Option names are all long, but do not include the leading '--', and
+        contain dashes rather than underscores.  If the option doesn't take
+        an argument (e.g. '--quiet'), the 'val' is 'None'.
+
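+        For example, 'setup.py build_ext --inplace --define=FOO' would
+        roughly produce {'build_ext': {'inplace': None, 'define': 'FOO'}}.
+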
+        Note that options provided by config files are intentionally excluded.
+        """
+
+        d = {}
+
+        for cmd, opts in self.command_options.items():
+            for opt, (src, val) in opts.items():
+                if src != "command line":
+                    continue
+
+                opt = opt.replace('_', '-')
+
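+                # val == 0 means a boolean flag was switched off on the
+                # command line; report it under the negative option name.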
+                if val == 0:
+                    cmdobj = self.get_command_obj(cmd)
+                    neg_opt = self.negative_opt.copy()
+                    neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
+                    for neg, pos in neg_opt.items():
+                        if pos == opt:
+                            opt = neg
+                            val = None
+                            break
+                    else:
+                        raise AssertionError("Shouldn't be able to get here")
+
+                elif val == 1:
+                    val = None
+
+                d.setdefault(cmd, {})[opt] = val
+
+        return d
+
+    def iter_distribution_names(self):
+        """Yield all packages, modules, and extension names in distribution"""
+
+        for pkg in self.packages or ():
+            yield pkg
+
+        for module in self.py_modules or ():
+            yield module
+
+        for ext in self.ext_modules or ():
+            if isinstance(ext, tuple):
+                name, buildinfo = ext
+            else:
+                name = ext.name
+            if name.endswith('module'):
+                name = name[:-6]
+            yield name
+
+    def handle_display_options(self, option_order):
+        """If there were any non-global "display-only" options
+        (--help-commands or the metadata display options) on the command
+        line, display the requested info and return true; else return
+        false.
+        """
+        import sys
+
+        if self.help_commands:
+            return _Distribution.handle_display_options(self, option_order)
+
+        # Stdout may be StringIO (e.g. in tests)
+        if not isinstance(sys.stdout, io.TextIOWrapper):
+            return _Distribution.handle_display_options(self, option_order)
+
+        # Don't wrap stdout if utf-8 is already the encoding. Provides
+        #  workaround for #334.
+        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
+            return _Distribution.handle_display_options(self, option_order)
+
+        # Print metadata in UTF-8 no matter the platform
+        encoding = sys.stdout.encoding
+        sys.stdout.reconfigure(encoding='utf-8')
+        try:
+            return _Distribution.handle_display_options(self, option_order)
+        finally:
+            sys.stdout.reconfigure(encoding=encoding)
+
+    def run_command(self, command):
+        self.set_defaults()
+        # Postpone defaults until all explicit configuration is considered
+        # (setup() args, config files, command line and plugins)
+
+        super().run_command(command)
+
+
+class DistDeprecationWarning(SetuptoolsDeprecationWarning):
+    """Class for warning about deprecations in dist in
+    setuptools. Not ignored by default, unlike DeprecationWarning."""
diff --git a/.venv/Lib/site-packages/setuptools/errors.py b/.venv/Lib/site-packages/setuptools/errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec7fb3b6c4856708dc6bc3b0c35fd8df73156029
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/errors.py
@@ -0,0 +1,58 @@
+"""setuptools.errors
+
+Provides exceptions used by setuptools modules.
+"""
+
+from distutils import errors as _distutils_errors
+
+
+# Re-export errors from distutils to facilitate the migration to PEP 632
+
+ByteCompileError = _distutils_errors.DistutilsByteCompileError
+CCompilerError = _distutils_errors.CCompilerError
+ClassError = _distutils_errors.DistutilsClassError
+CompileError = _distutils_errors.CompileError
+ExecError = _distutils_errors.DistutilsExecError
+FileError = _distutils_errors.DistutilsFileError
+InternalError = _distutils_errors.DistutilsInternalError
+LibError = _distutils_errors.LibError
+LinkError = _distutils_errors.LinkError
+ModuleError = _distutils_errors.DistutilsModuleError
+OptionError = _distutils_errors.DistutilsOptionError
+PlatformError = _distutils_errors.DistutilsPlatformError
+PreprocessError = _distutils_errors.PreprocessError
+SetupError = _distutils_errors.DistutilsSetupError
+TemplateError = _distutils_errors.DistutilsTemplateError
+UnknownFileError = _distutils_errors.UnknownFileError
+
+# The root error class in the hierarchy
+BaseError = _distutils_errors.DistutilsError
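+
+# For example, code that previously caught
+# ``distutils.errors.DistutilsSetupError`` can instead use the alias:
+#
+#     from setuptools.errors import SetupError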
+
+
+class RemovedCommandError(BaseError, RuntimeError):
+    """Error used for commands that have been removed in setuptools.
+
+    Since ``setuptools`` is built on ``distutils``, simply removing a command
+    from ``setuptools`` will make the behavior fall back to ``distutils``; this
+    error is raised if a command exists in ``distutils`` but has been actively
+    removed in ``setuptools``.
+    """
+
+
+class PackageDiscoveryError(BaseError, RuntimeError):
+    """Impossible to perform automatic discovery of packages and/or modules.
+
+    The current project layout or given discovery options can lead to problems when
+    scanning the project directory.
+
+    Setuptools might also refuse to complete auto-discovery if an error prone condition
+    is detected (e.g. when a project is organised as a flat-layout but contains
+    multiple directories that can be taken as top-level packages inside a single
+    distribution [*]_). In these situations the users are encouraged to be explicit
+    about which packages to include or to make the discovery parameters more specific.
+
+    .. [*] Since multi-package distributions are uncommon it is very likely that the
+       developers did not intend for all the directories to be packaged, and are just
+       leaving auxiliary code in the repository top-level, such as maintenance-related
+       scripts.
+    """
diff --git a/.venv/Lib/site-packages/setuptools/extension.py b/.venv/Lib/site-packages/setuptools/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..58c023f6b4479c631f382e5062932793d2bee26b
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/extension.py
@@ -0,0 +1,148 @@
+import re
+import functools
+import distutils.core
+import distutils.errors
+import distutils.extension
+
+from .monkey import get_unpatched
+
+
+def _have_cython():
+    """
+    Return True if Cython can be imported.
+    """
+    cython_impl = 'Cython.Distutils.build_ext'
+    try:
+        # from (cython_impl) import build_ext
+        __import__(cython_impl, fromlist=['build_ext']).build_ext
+        return True
+    except Exception:
+        pass
+    return False
+
+
+# for compatibility
+have_pyrex = _have_cython
+
+_Extension = get_unpatched(distutils.core.Extension)
+
+
+class Extension(_Extension):
+    """
+    Describes a single extension module.
+
+    This means that all source files will be compiled into a single binary file
+    ``<module path>.<suffix>`` (with ``<module path>`` derived from ``name`` and
+    ``<suffix>`` defined by one of the values in
+    ``importlib.machinery.EXTENSION_SUFFIXES``).
+
+    In the case ``.pyx`` files are passed as ``sources`` and ``Cython`` is **not**
+    installed in the build environment, ``setuptools`` may also try to look for the
+    equivalent ``.cpp`` or ``.c`` files.
+
+    :arg str name:
+      the full name of the extension, including any packages -- i.e.
+      *not* a filename or pathname, but a Python dotted name
+
+    :arg list[str] sources:
+      list of source filenames, relative to the distribution root
+      (where the setup script lives), in Unix form (slash-separated)
+      for portability.  Source files may be C, C++, SWIG (.i),
+      platform-specific resource files, or whatever else is recognized
+      by the "build_ext" command as source for a Python extension.
+
+    :keyword list[str] include_dirs:
+      list of directories to search for C/C++ header files (in Unix
+      form for portability)
+
+    :keyword list[tuple[str, str|None]] define_macros:
+      list of macros to define; each macro is defined using a 2-tuple:
+      the first item corresponding to the name of the macro and the second
+      item either a string with its value or None to
+      define it without a particular value (equivalent of "#define
+      FOO" in source or -DFOO on Unix C compiler command line)
+
+    :keyword list[str] undef_macros:
+      list of macros to undefine explicitly
+
+    :keyword list[str] library_dirs:
+      list of directories to search for C/C++ libraries at link time
+
+    :keyword list[str] libraries:
+      list of library names (not filenames or paths) to link against
+
+    :keyword list[str] runtime_library_dirs:
+      list of directories to search for C/C++ libraries at run time
+      (for shared extensions, this is when the extension is loaded).
+      Setting this will cause an exception during build on Windows
+      platforms.
+
+    :keyword list[str] extra_objects:
+      list of extra files to link with (eg. object files not implied
+      by 'sources', static library that must be explicitly specified,
+      binary resource files, etc.)
+
+    :keyword list[str] extra_compile_args:
+      any extra platform- and compiler-specific information to use
+      when compiling the source files in 'sources'.  For platforms and
+      compilers where "command line" makes sense, this is typically a
+      list of command-line arguments, but for other platforms it could
+      be anything.
+
+    :keyword list[str] extra_link_args:
+      any extra platform- and compiler-specific information to use
+      when linking object files together to create the extension (or
+      to create a new static Python interpreter).  Similar
+      interpretation as for 'extra_compile_args'.
+
+    :keyword list[str] export_symbols:
+      list of symbols to be exported from a shared extension.  Not
+      used on all platforms, and not generally necessary for Python
+      extensions, which typically export exactly one symbol: "init" +
+      extension_name.
+
+    :keyword list[str] swig_opts:
+      any extra options to pass to SWIG if a source file has the .i
+      extension.
+
+    :keyword list[str] depends:
+      list of files that the extension depends on
+
+    :keyword str language:
+      extension language (i.e. "c", "c++", "objc"). Will be detected
+      from the source extensions if not provided.
+
+    :keyword bool optional:
+      specifies that a build failure in the extension should not abort the
+      build process, but simply not install the failing extension.
+
+    :keyword bool py_limited_api:
+      opt-in flag for the usage of :doc:`Python's limited API <python:c-api/stable>`.
+
+    :raises setuptools.errors.PlatformError: if 'runtime_library_dirs' is
+      specified on Windows. (since v63)
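+
+    A minimal sketch::
+
+        Extension('pkg._ext', sources=['pkg/_ext.c'], include_dirs=['include'])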
+    """
+
+    def __init__(self, name, sources, *args, **kw):
+        # The *args is needed for compatibility as calls may use positional
+        # arguments. py_limited_api may be set only via keyword.
+        self.py_limited_api = kw.pop("py_limited_api", False)
+        super().__init__(name, sources, *args, **kw)
+
+    def _convert_pyx_sources_to_lang(self):
+        """
+        Replace sources with .pyx extensions to sources with the target
+        language extension. This mechanism allows language authors to supply
+        pre-converted sources but to prefer the .pyx sources.
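+
+        For example, with language='c++' and Cython unavailable,
+        ['pkg/mod.pyx'] would become ['pkg/mod.cpp'].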
+        """
+        if _have_cython():
+            # the build has Cython, so allow it to compile the .pyx files
+            return
+        lang = self.language or ''
+        target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
+        sub = functools.partial(re.sub, '.pyx$', target_ext)
+        self.sources = list(map(sub, self.sources))
+
+
+class Library(Extension):
+    """Just like a regular Extension, but built as a library instead"""
diff --git a/.venv/Lib/site-packages/setuptools/glob.py b/.venv/Lib/site-packages/setuptools/glob.py
new file mode 100644
index 0000000000000000000000000000000000000000..647b9bc6ed2946ef57f87227b8dcd638199ae0c7
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/glob.py
@@ -0,0 +1,166 @@
+"""
+Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
+
+Changes include:
+ * PEP3102 `*` removed.
+ * Hidden files are not ignored.
+"""
+
+import os
+import re
+import fnmatch
+
+__all__ = ["glob", "iglob", "escape"]
+
+
+def glob(pathname, recursive=False):
+    """Return a list of paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la
+    fnmatch. However, unlike fnmatch, filenames starting with a
+    dot are special cases that are not matched by '*' and '?'
+    patterns.
+
+    If recursive is true, the pattern '**' will match any files and
+    zero or more directories and subdirectories.
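+
+    For example, glob('src/**/*.py', recursive=True) matches every '.py'
+    file under 'src', including files in hidden directories.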
+    """
+    return list(iglob(pathname, recursive=recursive))
+
+
+def iglob(pathname, recursive=False):
+    """Return an iterator which yields the paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la
+    fnmatch. However, unlike fnmatch, filenames starting with a
+    dot are special cases that are not matched by '*' and '?'
+    patterns.
+
+    If recursive is true, the pattern '**' will match any files and
+    zero or more directories and subdirectories.
+    """
+    it = _iglob(pathname, recursive)
+    if recursive and _isrecursive(pathname):
+        s = next(it)  # skip empty string
+        assert not s
+    return it
+
+
+def _iglob(pathname, recursive):
+    dirname, basename = os.path.split(pathname)
+    glob_in_dir = glob2 if recursive and _isrecursive(basename) else glob1
+
+    if not has_magic(pathname):
+        if basename:
+            if os.path.lexists(pathname):
+                yield pathname
+        else:
+            # Patterns ending with a slash should match only directories
+            if os.path.isdir(dirname):
+                yield pathname
+        return
+
+    if not dirname:
+        yield from glob_in_dir(dirname, basename)
+        return
+    # `os.path.split()` returns the argument itself as a dirname if it is a
+    # drive or UNC path.  Prevent an infinite recursion if a drive or UNC path
+    # contains magic characters (i.e. r'\\?\C:').
+    if dirname != pathname and has_magic(dirname):
+        dirs = _iglob(dirname, recursive)
+    else:
+        dirs = [dirname]
+    if not has_magic(basename):
+        glob_in_dir = glob0
+    for dirname in dirs:
+        for name in glob_in_dir(dirname, basename):
+            yield os.path.join(dirname, name)
+
+
+# These 2 helper functions non-recursively glob inside a literal directory.
+# They return a list of basenames. `glob1` accepts a pattern while `glob0`
+# takes a literal basename (so it only has to check for its existence).
+
+
+def glob1(dirname, pattern):
+    if not dirname:
+        if isinstance(pattern, bytes):
+            dirname = os.curdir.encode('ASCII')
+        else:
+            dirname = os.curdir
+    try:
+        names = os.listdir(dirname)
+    except OSError:
+        return []
+    return fnmatch.filter(names, pattern)
+
+
+def glob0(dirname, basename):
+    if not basename:
+        # `os.path.split()` returns an empty basename for paths ending with a
+        # directory separator.  'q*x/' should match only directories.
+        if os.path.isdir(dirname):
+            return [basename]
+    else:
+        if os.path.lexists(os.path.join(dirname, basename)):
+            return [basename]
+    return []
+
+
+# This helper function recursively yields relative pathnames inside a literal
+# directory.
+
+
+def glob2(dirname, pattern):
+    assert _isrecursive(pattern)
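+    # first yield an empty prefix of the pattern's own type (str or bytes)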
+    yield pattern[:0]
+    for x in _rlistdir(dirname):
+        yield x
+
+
+# Recursively yields relative pathnames inside a literal directory.
+def _rlistdir(dirname):
+    if not dirname:
+        if isinstance(dirname, bytes):
+            dirname = os.curdir.encode('ASCII')
+        else:
+            dirname = os.curdir
+    try:
+        names = os.listdir(dirname)
+    except OSError:
+        return
+    for x in names:
+        yield x
+        path = os.path.join(dirname, x) if dirname else x
+        for y in _rlistdir(path):
+            yield os.path.join(x, y)
+
+
+magic_check = re.compile('([*?[])')
+magic_check_bytes = re.compile(b'([*?[])')
+
+
+def has_magic(s):
+    if isinstance(s, bytes):
+        match = magic_check_bytes.search(s)
+    else:
+        match = magic_check.search(s)
+    return match is not None
+
+
+def _isrecursive(pattern):
+    if isinstance(pattern, bytes):
+        return pattern == b'**'
+    else:
+        return pattern == '**'
+
+
+def escape(pathname):
+    """Escape all special characters."""
+    # Escaping is done by wrapping any of "*?[" between square brackets.
+    # Metacharacters do not work in the drive part and shouldn't be escaped.
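+    # e.g. escape('a[x]*.txt') -> 'a[[]x][*].txt'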
+    drive, pathname = os.path.splitdrive(pathname)
+    if isinstance(pathname, bytes):
+        pathname = magic_check_bytes.sub(br'[\1]', pathname)
+    else:
+        pathname = magic_check.sub(r'[\1]', pathname)
+    return drive + pathname
diff --git a/.venv/Lib/site-packages/setuptools/gui-32.exe b/.venv/Lib/site-packages/setuptools/gui-32.exe
new file mode 100644
index 0000000000000000000000000000000000000000..1eb430c6d614a5daea4139badc09c222a4b0e72a
Binary files /dev/null and b/.venv/Lib/site-packages/setuptools/gui-32.exe differ
diff --git a/.venv/Lib/site-packages/setuptools/gui-64.exe b/.venv/Lib/site-packages/setuptools/gui-64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..031cb77c17ba8d8a983448268851d612e05e80d1
Binary files /dev/null and b/.venv/Lib/site-packages/setuptools/gui-64.exe differ
diff --git a/.venv/Lib/site-packages/setuptools/gui-arm64.exe b/.venv/Lib/site-packages/setuptools/gui-arm64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..1e00ffacb182c2af206e5dd9d9fbc41d236da0d1
Binary files /dev/null and b/.venv/Lib/site-packages/setuptools/gui-arm64.exe differ
diff --git a/.venv/Lib/site-packages/setuptools/gui.exe b/.venv/Lib/site-packages/setuptools/gui.exe
new file mode 100644
index 0000000000000000000000000000000000000000..1eb430c6d614a5daea4139badc09c222a4b0e72a
Binary files /dev/null and b/.venv/Lib/site-packages/setuptools/gui.exe differ
diff --git a/.venv/Lib/site-packages/setuptools/installer.py b/.venv/Lib/site-packages/setuptools/installer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e83f959a1b7880a4bd2b4e500e884c41edfbece5
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/installer.py
@@ -0,0 +1,145 @@
+import glob
+import os
+import subprocess
+import sys
+import tempfile
+from distutils import log
+from distutils.errors import DistutilsError
+from functools import partial
+
+from . import _reqs
+from .wheel import Wheel
+from .warnings import SetuptoolsDeprecationWarning
+
+
+def _fixup_find_links(find_links):
+    """Ensure find-links option end-up being a list of strings."""
+    if isinstance(find_links, str):
+        return find_links.split()
+    assert isinstance(find_links, (tuple, list))
+    return find_links
+
+
+def fetch_build_egg(dist, req):
+    """Fetch an egg needed for building.
+
+    Use pip/wheel to fetch/build a wheel."""
+    _DeprecatedInstaller.emit()
+    _warn_wheel_not_available(dist)
+    return _fetch_build_egg_no_warn(dist, req)
+
+
+def _fetch_build_eggs(dist, requires):
+    import pkg_resources  # Delay import to avoid unnecessary side-effects
+
+    _DeprecatedInstaller.emit(stacklevel=3)
+    _warn_wheel_not_available(dist)
+
+    resolved_dists = pkg_resources.working_set.resolve(
+        _reqs.parse(requires, pkg_resources.Requirement),  # required for compatibility
+        installer=partial(_fetch_build_egg_no_warn, dist),  # avoid warning twice
+        replace_conflicting=True,
+    )
+    for dist in resolved_dists:
+        pkg_resources.working_set.add(dist, replace=True)
+    return resolved_dists
+
+
+def _fetch_build_egg_no_warn(dist, req):  # noqa: C901  # is too complex (16)  # FIXME
+    import pkg_resources  # Delay import to avoid unnecessary side-effects
+
+    # Ignore environment markers; if supplied, it is required.
+    req = strip_marker(req)
+    # Take easy_install options into account, but do not override relevant
+    # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
+    # take precedence.
+    opts = dist.get_option_dict('easy_install')
+    if 'allow_hosts' in opts:
+        raise DistutilsError(
+            'the `allow-hosts` option is not supported '
+            'when using pip to install requirements.'
+        )
+    quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ
+    if 'PIP_INDEX_URL' in os.environ:
+        index_url = None
+    elif 'index_url' in opts:
+        index_url = opts['index_url'][1]
+    else:
+        index_url = None
+    find_links = (
+        _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts else []
+    )
+    if dist.dependency_links:
+        find_links.extend(dist.dependency_links)
+    eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
+    environment = pkg_resources.Environment()
+    for egg_dist in pkg_resources.find_distributions(eggs_dir):
+        if egg_dist in req and environment.can_add(egg_dist):
+            return egg_dist
+    with tempfile.TemporaryDirectory() as tmpdir:
+        cmd = [
+            sys.executable,
+            '-m',
+            'pip',
+            '--disable-pip-version-check',
+            'wheel',
+            '--no-deps',
+            '-w',
+            tmpdir,
+        ]
+        if quiet:
+            cmd.append('--quiet')
+        if index_url is not None:
+            cmd.extend(('--index-url', index_url))
+        for link in find_links or []:
+            cmd.extend(('--find-links', link))
+        # If requirement is a PEP 508 direct URL, directly pass
+        # the URL to pip, as `req @ url` does not work on the
+        # command line.
+        cmd.append(req.url or str(req))
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError as e:
+            raise DistutilsError(str(e)) from e
+        wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
+        dist_location = os.path.join(eggs_dir, wheel.egg_name())
+        wheel.install_as_egg(dist_location)
+        dist_metadata = pkg_resources.PathMetadata(
+            dist_location, os.path.join(dist_location, 'EGG-INFO')
+        )
+        dist = pkg_resources.Distribution.from_filename(
+            dist_location, metadata=dist_metadata
+        )
+        return dist
+
+
+def strip_marker(req):
+    """
+    Return a new requirement without the environment marker to avoid
+    calling pip with something like `babel; extra == "i18n"`, which
+    would always be ignored.
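+
+    For example, `babel; extra == "i18n"` becomes plain `babel`.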
+    """
+    import pkg_resources  # Delay import to avoid unnecessary side-effects
+
+    # create a copy to avoid mutating the input
+    req = pkg_resources.Requirement.parse(str(req))
+    req.marker = None
+    return req
+
+
+def _warn_wheel_not_available(dist):
+    import pkg_resources  # Delay import to avoid unnecessary side-effects
+
+    try:
+        pkg_resources.get_distribution('wheel')
+    except pkg_resources.DistributionNotFound:
+        dist.announce('WARNING: The wheel package is not available.', log.WARN)
+
+
+class _DeprecatedInstaller(SetuptoolsDeprecationWarning):
+    _SUMMARY = "setuptools.installer and fetch_build_eggs are deprecated."
+    _DETAILS = """
+    Requirements should be satisfied by a PEP 517 installer.
+    If you are using pip, you can try `pip install --use-pep517`.
+    """
+    # _DUE_DATE not decided yet
diff --git a/.venv/Lib/site-packages/setuptools/launch.py b/.venv/Lib/site-packages/setuptools/launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..0208fdf33b640cd9791359d74673bb90cfb87f96
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/launch.py
@@ -0,0 +1,36 @@
+"""
+Launch the Python script on the command line after
+setuptools is bootstrapped via import.
+"""
+
+# Note that setuptools gets imported implicitly by the
+# invocation of this script using python -m setuptools.launch
+
+import tokenize
+import sys
+
+
+def run():
+    """
+    Run the script in sys.argv[1] as if it had
+    been invoked naturally.
+    """
+    __builtins__
+    script_name = sys.argv[1]
+    namespace = dict(
+        __file__=script_name,
+        __name__='__main__',
+        __doc__=None,
+    )
+    sys.argv[:] = sys.argv[1:]
+
+    open_ = getattr(tokenize, 'open', open)
+    with open_(script_name) as fid:
+        script = fid.read()
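+    # normalize Windows line endings so compile() sees only '\n'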
+    norm_script = script.replace('\r\n', '\n')
+    code = compile(norm_script, script_name, 'exec')
+    exec(code, namespace)
+
+
+if __name__ == '__main__':
+    run()
diff --git a/.venv/Lib/site-packages/setuptools/logging.py b/.venv/Lib/site-packages/setuptools/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceca99ca76206390e75303787ac118a4e8f17ff3
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/logging.py
@@ -0,0 +1,38 @@
+import sys
+import inspect
+import logging
+import distutils.log
+import distutils.dist
+from . import monkey
+
+
+def _not_warning(record):
+    return record.levelno < logging.WARNING
+
+
+def configure():
+    """
+    Configure logging to emit warning and above to stderr
+    and everything else to stdout. This behavior is provided
+    for compatibility with distutils.log but may change in
+    the future.
+    """
+    err_handler = logging.StreamHandler()
+    err_handler.setLevel(logging.WARNING)
+    out_handler = logging.StreamHandler(sys.stdout)
+    out_handler.addFilter(_not_warning)
+    handlers = err_handler, out_handler
+    logging.basicConfig(
+        format="{message}", style='{', handlers=handlers, level=logging.DEBUG
+    )
+    if inspect.ismodule(distutils.dist.log):
+        monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
+        # For some reason `distutils.log` module is getting cached in `distutils.dist`
+        # and then loaded again when patched,
+        # implying: id(distutils.log) != id(distutils.dist.log).
+        # Make sure the same module object is used everywhere:
+        distutils.dist.log = distutils.log
+
+
+def set_threshold(level):
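+    # distutils.log levels run 1 (DEBUG) through 5 (FATAL); multiplying by 10
+    # maps them onto the stdlib scale (logging.DEBUG == 10 ... CRITICAL == 50)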
+    logging.root.setLevel(level * 10)
+    return set_threshold.unpatched(level)
diff --git a/.venv/Lib/site-packages/setuptools/monkey.py b/.venv/Lib/site-packages/setuptools/monkey.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ab98c178a1a809ff0a643737cafe699d34a0459
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/monkey.py
@@ -0,0 +1,167 @@
+"""
+Monkey patching of distutils.
+"""
+
+import functools
+import inspect
+import platform
+import sys
+import types
+from importlib import import_module
+
+import distutils.filelist
+
+
+__all__ = []
+"""
+Everything is private. Contact the project team
+if you think you need this functionality.
+"""
+
+
+def _get_mro(cls):
+    """
+    Return the base classes for cls, sorted by the MRO.
+
+    Works around an issue on Jython where inspect.getmro will not return all
+    base classes if multiple classes share the same name. Instead, this
+    function will return a tuple containing the class itself, and the contents
+    of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
+    """
+    if platform.python_implementation() == "Jython":
+        return (cls,) + cls.__bases__
+    return inspect.getmro(cls)
+
+
+def get_unpatched(item):
+    lookup = (
+        get_unpatched_class
+        if isinstance(item, type)
+        else get_unpatched_function
+        if isinstance(item, types.FunctionType)
+        else lambda item: None
+    )
+    return lookup(item)
+
+
+def get_unpatched_class(cls):
+    """Protect against re-patching the distutils if reloaded
+
+    Also ensures that no other distutils extension monkeypatched the distutils
+    first.
+    """
+    external_bases = (
+        cls for cls in _get_mro(cls) if not cls.__module__.startswith('setuptools')
+    )
+    base = next(external_bases)
+    if not base.__module__.startswith('distutils'):
+        msg = "distutils has already been patched by %r" % cls
+        raise AssertionError(msg)
+    return base
+
+
+def patch_all():
+    import setuptools
+
+    # we can't patch distutils.cmd, alas
+    distutils.core.Command = setuptools.Command
+
+    has_issue_12885 = sys.version_info <= (3, 5, 3)
+
+    if has_issue_12885:
+        # fix findall bug in distutils (http://bugs.python.org/issue12885)
+        distutils.filelist.findall = setuptools.findall
+
+    needs_warehouse = (3, 4) < sys.version_info < (3, 4, 6) or (
+        3,
+        5,
+    ) < sys.version_info <= (3, 5, 3)
+
+    if needs_warehouse:
+        warehouse = 'https://upload.pypi.org/legacy/'
+        distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
+
+    _patch_distribution_metadata()
+
+    # Install Distribution throughout the distutils
+    for module in distutils.dist, distutils.core, distutils.cmd:
+        module.Distribution = setuptools.dist.Distribution
+
+    # Install the patched Extension
+    distutils.core.Extension = setuptools.extension.Extension
+    distutils.extension.Extension = setuptools.extension.Extension
+    if 'distutils.command.build_ext' in sys.modules:
+        sys.modules[
+            'distutils.command.build_ext'
+        ].Extension = setuptools.extension.Extension
+
+    patch_for_msvc_specialized_compiler()
+
+
+def _patch_distribution_metadata():
+    """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
+    from . import _core_metadata
+
+    for attr in (
+        'write_pkg_info',
+        'write_pkg_file',
+        'read_pkg_file',
+        'get_metadata_version',
+    ):
+        new_val = getattr(_core_metadata, attr)
+        setattr(distutils.dist.DistributionMetadata, attr, new_val)
+
+
+def patch_func(replacement, target_mod, func_name):
+    """
+    Patch func_name in target_mod with replacement
+
+    Important - original must be resolved by name to avoid
+    patching an already patched function.
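+
+    A minimal sketch (``some_module.func`` is hypothetical)::
+
+        def replacement(*args):
+            return replacement.unpatched(*args)
+
+        patch_func(replacement, some_module, 'func')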
+    """
+    original = getattr(target_mod, func_name)
+
+    # set the 'unpatched' attribute on the replacement to
+    # point to the original.
+    vars(replacement).setdefault('unpatched', original)
+
+    # replace the function in the original module
+    setattr(target_mod, func_name, replacement)
+
+
+def get_unpatched_function(candidate):
+    return getattr(candidate, 'unpatched')
+
+
+def patch_for_msvc_specialized_compiler():
+    """
+    Patch functions in distutils to use standalone Microsoft Visual C++
+    compilers.
+    """
+    # import late to avoid circular imports on Python < 3.5
+    msvc = import_module('setuptools.msvc')
+
+    if platform.system() != 'Windows':
+        # Compilers only available on Microsoft Windows
+        return
+
+    def patch_params(mod_name, func_name):
+        """
+        Prepare the parameters for patch_func to patch indicated function.
+        """
+        repl_prefix = 'msvc14_'
+        repl_name = repl_prefix + func_name.lstrip('_')
+        repl = getattr(msvc, repl_name)
+        mod = import_module(mod_name)
+        if not hasattr(mod, func_name):
+            raise ImportError(func_name)
+        return repl, mod, func_name
+
+    # Python 3.5+
+    msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
+
+    try:
+        # Patch distutils._msvccompiler._get_vc_env
+        patch_func(*msvc14('_get_vc_env'))
+    except ImportError:
+        pass
diff --git a/.venv/Lib/site-packages/setuptools/msvc.py b/.venv/Lib/site-packages/setuptools/msvc.py
new file mode 100644
index 0000000000000000000000000000000000000000..5785c1694598ecd8aed5bc04ed5317946c8598f2
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/msvc.py
@@ -0,0 +1,1739 @@
+"""
+Improved support for Microsoft Visual C++ compilers.
+
+Known supported compilers:
+--------------------------
+Microsoft Visual C++ 14.X:
+    Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
+    Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
+    Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64)
+
+This may also support compilers shipped with compatible Visual Studio versions.
+"""
+
+import json
+from io import open
+from os import listdir, pathsep
+from os.path import join, isfile, isdir, dirname
+from subprocess import CalledProcessError
+import contextlib
+import platform
+import itertools
+import subprocess
+import distutils.errors
+from setuptools.extern.more_itertools import unique_everseen
+
+if platform.system() == 'Windows':
+    import winreg
+    from os import environ
+else:
+    # Mock winreg and environ so the module can be imported on this platform.
+
+    class winreg:
+        HKEY_USERS = None
+        HKEY_CURRENT_USER = None
+        HKEY_LOCAL_MACHINE = None
+        HKEY_CLASSES_ROOT = None
+
+    environ = dict()
+
+
+def _msvc14_find_vc2015():
+    """Python 3.8 "distutils/_msvccompiler.py" backport"""
+    try:
+        key = winreg.OpenKey(
+            winreg.HKEY_LOCAL_MACHINE,
+            r"Software\Microsoft\VisualStudio\SxS\VC7",
+            0,
+            winreg.KEY_READ | winreg.KEY_WOW64_32KEY,
+        )
+    except OSError:
+        return None, None
+
+    best_version = 0
+    best_dir = None
+    with key:
+        for i in itertools.count():
+            try:
+                v, vc_dir, vt = winreg.EnumValue(key, i)
+            except OSError:
+                break
+            if v and vt == winreg.REG_SZ and isdir(vc_dir):
+                try:
+                    version = int(float(v))
+                except (ValueError, TypeError):
+                    continue
+                if version >= 14 and version > best_version:
+                    best_version, best_dir = version, vc_dir
+    return best_version, best_dir
+
+
+def _msvc14_find_vc2017():
+    """Python 3.8 "distutils/_msvccompiler.py" backport
+
+    Returns "15, path" based on the result of invoking vswhere.exe
+    If no install is found, returns "None, None"
+
+    The version is returned to avoid unnecessarily changing the function
+    result. It may be ignored when the path is not None.
+
+    If vswhere.exe is not available, by definition, VS 2017 is not
+    installed.
+    """
+    root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles")
+    if not root:
+        return None, None
+
+    suitable_components = (
+        "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
+        "Microsoft.VisualStudio.Workload.WDExpress",
+    )
+
+    for component in suitable_components:
+        # Workaround for `-requiresAny` (only available on VS 2017 > 15.6)
+        with contextlib.suppress(CalledProcessError, OSError, UnicodeDecodeError):
+            path = (
+                subprocess.check_output(
+                    [
+                        join(
+                            root, "Microsoft Visual Studio", "Installer", "vswhere.exe"
+                        ),
+                        "-latest",
+                        "-prerelease",
+                        "-requires",
+                        component,
+                        "-property",
+                        "installationPath",
+                        "-products",
+                        "*",
+                    ]
+                )
+                .decode(encoding="mbcs", errors="strict")
+                .strip()
+            )
+
+            path = join(path, "VC", "Auxiliary", "Build")
+            if isdir(path):
+                return 15, path
+
+    return None, None  # no suitable component found
+
+
+PLAT_SPEC_TO_RUNTIME = {
+    'x86': 'x86',
+    'x86_amd64': 'x64',
+    'x86_arm': 'arm',
+    'x86_arm64': 'arm64',
+}
+
+
+def _msvc14_find_vcvarsall(plat_spec):
+    """Python 3.8 "distutils/_msvccompiler.py" backport"""
+    _, best_dir = _msvc14_find_vc2017()
+    vcruntime = None
+
+    if plat_spec in PLAT_SPEC_TO_RUNTIME:
+        vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
+    else:
+        vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
+
+    if best_dir:
+        vcredist = join(
+            best_dir,
+            "..",
+            "..",
+            "redist",
+            "MSVC",
+            "**",
+            vcruntime_plat,
+            "Microsoft.VC14*.CRT",
+            "vcruntime140.dll",
+        )
+        try:
+            import glob
+
+            vcruntime = glob.glob(vcredist, recursive=True)[-1]
+        except (ImportError, OSError, LookupError):
+            vcruntime = None
+
+    if not best_dir:
+        best_version, best_dir = _msvc14_find_vc2015()
+        if best_version:
+            vcruntime = join(
+                best_dir,
+                'redist',
+                vcruntime_plat,
+                "Microsoft.VC140.CRT",
+                "vcruntime140.dll",
+            )
+
+    if not best_dir:
+        return None, None
+
+    vcvarsall = join(best_dir, "vcvarsall.bat")
+    if not isfile(vcvarsall):
+        return None, None
+
+    if not vcruntime or not isfile(vcruntime):
+        vcruntime = None
+
+    return vcvarsall, vcruntime
+
+
+def _msvc14_get_vc_env(plat_spec):
+    """Python 3.8 "distutils/_msvccompiler.py" backport"""
+    if "DISTUTILS_USE_SDK" in environ:
+        return {key.lower(): value for key, value in environ.items()}
+
+    vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec)
+    if not vcvarsall:
+        raise distutils.errors.DistutilsPlatformError("Unable to find vcvarsall.bat")
+
+    try:
+        out = subprocess.check_output(
+            'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
+            stderr=subprocess.STDOUT,
+        ).decode('utf-16le', errors='replace')
+    except subprocess.CalledProcessError as exc:
+        raise distutils.errors.DistutilsPlatformError(
+            "Error executing {}".format(exc.cmd)
+        ) from exc
+
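+    # `set` prints one KEY=VALUE pair per line; split each line on the first
+    # '=' and keep only complete pairs, lower-casing the keys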
+    env = {
+        key.lower(): value
+        for key, _, value in (line.partition('=') for line in out.splitlines())
+        if key and value
+    }
+
+    if vcruntime:
+        env['py_vcruntime_redist'] = vcruntime
+    return env
+
+
+def msvc14_get_vc_env(plat_spec):
+    """
+    Patched "distutils._msvccompiler._get_vc_env" for support extra
+    Microsoft Visual C++ 14.X compilers.
+
+    Set environment without use of "vcvarsall.bat".
+
+    Parameters
+    ----------
+    plat_spec: str
+        Target architecture.
+
+    Return
+    ------
+    dict
+        environment
+    """
+
+    # Always use backport from CPython 3.8
+    try:
+        return _msvc14_get_vc_env(plat_spec)
+    except distutils.errors.DistutilsPlatformError as exc:
+        _augment_exception(exc, 14.0)
+        raise
+
+
+def _augment_exception(exc, version, arch=''):
+    """
+    Add details to the exception message to help guide the user
+    as to what action will resolve it.
+    """
+    # Error if MSVC++ directory not found or environment not set
+    message = exc.args[0]
+
+    if "vcvarsall" in message.lower() or "visual c" in message.lower():
+        # Special error message if MSVC++ not installed
+        tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.'
+        message = tmpl.format(**locals())
+        msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
+        if version == 9.0:
+            if arch.lower().find('ia64') > -1:
+                # For VC++ 9.0, if IA64 support is needed, redirect user
+                # to Windows SDK 7.0.
+                # Note: No download link available from Microsoft.
+                message += ' Get it with "Microsoft Windows SDK 7.0"'
+            else:
+                # For VC++ 9.0, redirect user to VC++ for Python 2.7:
+                # This redirection link is maintained by Microsoft.
+                # Contact vspython@microsoft.com if it needs updating.
+                message += ' Get it from http://aka.ms/vcpython27'
+        elif version == 10.0:
+            # For VC++ 10.0 Redirect user to Windows SDK 7.1
+            message += ' Get it with "Microsoft Windows SDK 7.1": '
+            message += msdownload % 8279
+        elif version >= 14.0:
+            # For VC++ 14.X Redirect user to latest Visual C++ Build Tools
+            message += (
+                ' Get it with "Microsoft C++ Build Tools": '
+                r'https://visualstudio.microsoft.com'
+                r'/visual-cpp-build-tools/'
+            )
+
+    exc.args = (message,)
+
+
+class PlatformInfo:
+    """
+    Current and Target Architectures information.
+
+    Parameters
+    ----------
+    arch: str
+        Target architecture.
+    """
+
+    current_cpu = environ.get('processor_architecture', '').lower()
+
+    def __init__(self, arch):
+        self.arch = arch.lower().replace('x64', 'amd64')
+
+    @property
+    def target_cpu(self):
+        """
+        Return Target CPU architecture.
+
+        Return
+        ------
+        str
+            Target CPU
+        """
+        return self.arch[self.arch.find('_') + 1 :]
+
+    def target_is_x86(self):
+        """
+        Return True if the target CPU is x86 (32-bit).
+
+        Return
+        ------
+        bool
+            CPU is x86 32 bits
+        """
+        return self.target_cpu == 'x86'
+
+    def current_is_x86(self):
+        """
+        Return True if the current CPU is x86 (32-bit).
+
+        Return
+        ------
+        bool
+            CPU is x86 32 bits
+        """
+        return self.current_cpu == 'x86'
+
+    def current_dir(self, hidex86=False, x64=False):
+        """
+        Current platform specific subfolder.
+
+        Parameters
+        ----------
+        hidex86: bool
+            return '' and not '\x86' if architecture is x86.
+        x64: bool
+            return '\x64' and not '\amd64' if architecture is amd64.
+
+        Return
+        ------
+        str
+            subfolder: '\current', or '' (see hidex86 parameter)
+        """
+        return (
+            ''
+            if (self.current_cpu == 'x86' and hidex86)
+            else r'\x64'
+            if (self.current_cpu == 'amd64' and x64)
+            else r'\%s' % self.current_cpu
+        )
+
+    def target_dir(self, hidex86=False, x64=False):
+        r"""
+        Target platform specific subfolder.
+
+        Parameters
+        ----------
+        hidex86: bool
+            return '' and not '\x86' if architecture is x86.
+        x64: bool
+            return '\x64' and not '\amd64' if architecture is amd64.
+
+        Return
+        ------
+        str
+            subfolder: '\target', or '' (see hidex86 parameter)
+        """
+        return (
+            ''
+            if (self.target_cpu == 'x86' and hidex86)
+            else r'\x64'
+            if (self.target_cpu == 'amd64' and x64)
+            else r'\%s' % self.target_cpu
+        )
+
+    def cross_dir(self, forcex86=False):
+        r"""
+        Cross platform specific subfolder.
+
+        Parameters
+        ----------
+        forcex86: bool
+            Use 'x86' as current architecture even if current architecture is
+            not x86.
+
+        Return
+        ------
+        str
+            subfolder: '' if target architecture is current architecture,
+            '\current_target' if not.
+        """
+        current = 'x86' if forcex86 else self.current_cpu
+        return (
+            ''
+            if self.target_cpu == current
+            else self.target_dir().replace('\\', '\\%s_' % current)
+        )
+
+
+class RegistryInfo:
+    """
+    Microsoft Visual Studio related registry information.
+
+    Parameters
+    ----------
+    platform_info: PlatformInfo
+        "PlatformInfo" instance.
+    """
+
+    HKEYS = (
+        winreg.HKEY_USERS,
+        winreg.HKEY_CURRENT_USER,
+        winreg.HKEY_LOCAL_MACHINE,
+        winreg.HKEY_CLASSES_ROOT,
+    )
+
+    def __init__(self, platform_info):
+        self.pi = platform_info
+
+    @property
+    def visualstudio(self):
+        """
+        Microsoft Visual Studio root registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return 'VisualStudio'
+
+    @property
+    def sxs(self):
+        """
+        Microsoft Visual Studio SxS registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return join(self.visualstudio, 'SxS')
+
+    @property
+    def vc(self):
+        """
+        Microsoft Visual C++ VC7 registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return join(self.sxs, 'VC7')
+
+    @property
+    def vs(self):
+        """
+        Microsoft Visual Studio VS7 registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return join(self.sxs, 'VS7')
+
+    @property
+    def vc_for_python(self):
+        """
+        Microsoft Visual C++ for Python registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return r'DevDiv\VCForPython'
+
+    @property
+    def microsoft_sdk(self):
+        """
+        Microsoft SDK registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return 'Microsoft SDKs'
+
+    @property
+    def windows_sdk(self):
+        """
+        Microsoft Windows/Platform SDK registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return join(self.microsoft_sdk, 'Windows')
+
+    @property
+    def netfx_sdk(self):
+        """
+        Microsoft .NET Framework SDK registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return join(self.microsoft_sdk, 'NETFXSDK')
+
+    @property
+    def windows_kits_roots(self):
+        """
+        Microsoft Windows Kits Roots registry key.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        return r'Windows Kits\Installed Roots'
+
+    def microsoft(self, key, x86=False):
+        """
+        Return key in Microsoft software registry.
+
+        Parameters
+        ----------
+        key: str
+            Registry key path where to look.
+        x86: bool
+            Force x86 software registry.
+
+        Return
+        ------
+        str
+            Registry key
+        """
+        node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
+        return join('Software', node64, 'Microsoft', key)
+
+    def lookup(self, key, name):
+        """
+        Look for values in the Microsoft software registry.
+
+        Parameters
+        ----------
+        key: str
+            Registry key path where to look.
+        name: str
+            Value name to find.
+
+        Return
+        ------
+        str
+            value
+        """
+        key_read = winreg.KEY_READ
+        openkey = winreg.OpenKey
+        closekey = winreg.CloseKey
+        ms = self.microsoft
+        for hkey in self.HKEYS:
+            bkey = None
+            try:
+                bkey = openkey(hkey, ms(key), 0, key_read)
+            except (OSError, IOError):
+                if not self.pi.current_is_x86():
+                    try:
+                        bkey = openkey(hkey, ms(key, True), 0, key_read)
+                    except (OSError, IOError):
+                        continue
+                else:
+                    continue
+            try:
+                return winreg.QueryValueEx(bkey, name)[0]
+            except (OSError, IOError):
+                pass
+            finally:
+                if bkey:
+                    closekey(bkey)
+
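+# Illustrative sketch (not part of the vendored code): resolving a VC++
+# install directory through RegistryInfo on a hypothetical 64-bit host.
+#
+#     ri = RegistryInfo(PlatformInfo('amd64'))
+#     ri.vc                     # 'VisualStudio\\SxS\\VC7'
+#     ri.microsoft(ri.vc)
+#     # -> 'Software\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VC7'
+#     ri.lookup(ri.vc, '14.0')  # install dir string, or None when not found
+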
+
+class SystemInfo:
+    """
+    Microsoft Windows and Visual Studio related system information.
+
+    Parameters
+    ----------
+    registry_info: RegistryInfo
+        "RegistryInfo" instance.
+    vc_ver: float
+        Required Microsoft Visual C++ version.
+    """
+
+    # Variables and properties in this class use the original CamelCase names
+    # from Microsoft source files for easier comparison.
+    WinDir = environ.get('WinDir', '')
+    ProgramFiles = environ.get('ProgramFiles', '')
+    ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)
+
+    def __init__(self, registry_info, vc_ver=None):
+        self.ri = registry_info
+        self.pi = self.ri.pi
+
+        self.known_vs_paths = self.find_programdata_vs_vers()
+
+        # Except for VS15+, VC version is aligned with VS version
+        self.vs_ver = self.vc_ver = vc_ver or self._find_latest_available_vs_ver()
+
+    def _find_latest_available_vs_ver(self):
+        """
+        Find the latest available VC version.
+
+        Return
+        ------
+        float
+            version
+        """
+        reg_vc_vers = self.find_reg_vs_vers()
+
+        if not (reg_vc_vers or self.known_vs_paths):
+            raise distutils.errors.DistutilsPlatformError(
+                'No Microsoft Visual C++ version found'
+            )
+
+        vc_vers = set(reg_vc_vers)
+        vc_vers.update(self.known_vs_paths)
+        return sorted(vc_vers)[-1]
+
+    def find_reg_vs_vers(self):
+        """
+        Find Microsoft Visual Studio versions available in the registry.
+
+        Return
+        ------
+        list of float
+            Versions
+        """
+        ms = self.ri.microsoft
+        vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
+        vs_vers = []
+        for hkey, key in itertools.product(self.ri.HKEYS, vckeys):
+            try:
+                bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
+            except (OSError, IOError):
+                continue
+            with bkey:
+                subkeys, values, _ = winreg.QueryInfoKey(bkey)
+                for i in range(values):
+                    with contextlib.suppress(ValueError):
+                        ver = float(winreg.EnumValue(bkey, i)[0])
+                        if ver not in vs_vers:
+                            vs_vers.append(ver)
+                for i in range(subkeys):
+                    with contextlib.suppress(ValueError):
+                        ver = float(winreg.EnumKey(bkey, i))
+                        if ver not in vs_vers:
+                            vs_vers.append(ver)
+        return sorted(vs_vers)
+
+    def find_programdata_vs_vers(self):
+        r"""
+        Find Visual Studio 2017+ versions from information in
+        "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances".
+
+        Return
+        ------
+        dict
+            float version as key, path as value.
+        """
+        vs_versions = {}
+        instances_dir = r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances'
+
+        try:
+            hashed_names = listdir(instances_dir)
+
+        except (OSError, IOError):
+            # The directory may not exist with every Visual Studio version
+            return vs_versions
+
+        for name in hashed_names:
+            try:
+                # Get VS installation path from "state.json" file
+                state_path = join(instances_dir, name, 'state.json')
+                with open(state_path, 'rt', encoding='utf-8') as state_file:
+                    state = json.load(state_file)
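+                # Illustrative: only these two keys are consumed; a real
+                # "state.json" holds more, e.g.
+                #   {"installationPath": "C:\\...\\2017\\Community",
+                #    "installationVersion": "15.9.28307.960", ...}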
+                vs_path = state['installationPath']
+
+                # Raises OSError if this VS installation does not contain VC
+                listdir(join(vs_path, r'VC\Tools\MSVC'))
+
+                # Store version and path
+                vs_versions[
+                    self._as_float_version(state['installationVersion'])
+                ] = vs_path
+
+            except (OSError, IOError, KeyError):
+                # Skip if "state.json" file is missing or bad format
+                continue
+
+        return vs_versions
+
+    @staticmethod
+    def _as_float_version(version):
+        """
+        Return a string version as a simplified float version (major.minor).
+
+        Parameters
+        ----------
+        version: str
+            Version.
+
+        Return
+        ------
+        float
+            version
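+
+        Example
+        -------
+        >>> SystemInfo._as_float_version('15.9.28307.960')
+        15.9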
+        """
+        return float('.'.join(version.split('.')[:2]))
+
+    @property
+    def VSInstallDir(self):
+        """
+        Microsoft Visual Studio directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        # Default path
+        default = join(
+            self.ProgramFilesx86, 'Microsoft Visual Studio %0.1f' % self.vs_ver
+        )
+
+        # Try to get the path from the registry; fall back to the default
+        return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default
+
+    @property
+    def VCInstallDir(self):
+        """
+        Microsoft Visual C++ directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        path = self._guess_vc() or self._guess_vc_legacy()
+
+        if not isdir(path):
+            msg = 'Microsoft Visual C++ directory not found'
+            raise distutils.errors.DistutilsPlatformError(msg)
+
+        return path
+
+    def _guess_vc(self):
+        """
+        Locate Visual C++ for VS2017+.
+
+        Return
+        ------
+        str
+            path
+        """
+        if self.vs_ver <= 14.0:
+            return ''
+
+        try:
+            # First search in known VS paths
+            vs_dir = self.known_vs_paths[self.vs_ver]
+        except KeyError:
+            # Else, search with path from registry
+            vs_dir = self.VSInstallDir
+
+        guess_vc = join(vs_dir, r'VC\Tools\MSVC')
+
+        # Subdir with VC exact version as name
+        try:
+            # Update the VC version with the real one instead of the VS version
+            vc_ver = listdir(guess_vc)[-1]
+            self.vc_ver = self._as_float_version(vc_ver)
+            return join(guess_vc, vc_ver)
+        except (OSError, IOError, IndexError):
+            return ''
+
+    def _guess_vc_legacy(self):
+        """
+        Locate Visual C++ for versions prior to 2017.
+
+        Return
+        ------
+        str
+            path
+        """
+        default = join(
+            self.ProgramFilesx86, r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver
+        )
+
+        # Try to get "VC++ for Python" path from registry as default path
+        reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver)
+        python_vc = self.ri.lookup(reg_path, 'installdir')
+        default_vc = join(python_vc, 'VC') if python_vc else default
+
+        # Try to get the path from the registry; fall back to the default
+        return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc
+
+    @property
+    def WindowsSdkVersion(self):
+        """
+        Microsoft Windows SDK versions for specified MSVC++ version.
+
+        Return
+        ------
+        tuple of str
+            versions
+        """
+        if self.vs_ver <= 9.0:
+            return '7.0', '6.1', '6.0a'
+        elif self.vs_ver == 10.0:
+            return '7.1', '7.0a'
+        elif self.vs_ver == 11.0:
+            return '8.0', '8.0a'
+        elif self.vs_ver == 12.0:
+            return '8.1', '8.1a'
+        elif self.vs_ver >= 14.0:
+            return '10.0', '8.1'
+
+    @property
+    def WindowsSdkLastVersion(self):
+        """
+        Latest Microsoft Windows SDK version.
+
+        Return
+        ------
+        str
+            version
+        """
+        return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib'))
+
+    @property  # noqa: C901
+    def WindowsSdkDir(self):  # noqa: C901  # is too complex (12)  # FIXME
+        """
+        Microsoft Windows SDK directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        sdkdir = ''
+        for ver in self.WindowsSdkVersion:
+            # Try to get it from registry
+            loc = join(self.ri.windows_sdk, 'v%s' % ver)
+            sdkdir = self.ri.lookup(loc, 'installationfolder')
+            if sdkdir:
+                break
+        if not sdkdir or not isdir(sdkdir):
+            # Try to get "VC++ for Python" version from registry
+            path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
+            install_base = self.ri.lookup(path, 'installdir')
+            if install_base:
+                sdkdir = join(install_base, 'WinSDK')
+        if not sdkdir or not isdir(sdkdir):
+            # On failure, fall back to the default new-style path
+            for ver in self.WindowsSdkVersion:
+                intver = ver[: ver.rfind('.')]
+                path = r'Microsoft SDKs\Windows Kits\%s' % intver
+                d = join(self.ProgramFiles, path)
+                if isdir(d):
+                    sdkdir = d
+        if not sdkdir or not isdir(sdkdir):
+            # On failure, fall back to the default old-style path
+            for ver in self.WindowsSdkVersion:
+                path = r'Microsoft SDKs\Windows\v%s' % ver
+                d = join(self.ProgramFiles, path)
+                if isdir(d):
+                    sdkdir = d
+        if not sdkdir:
+            # On failure, fall back to the Platform SDK
+            sdkdir = join(self.VCInstallDir, 'PlatformSDK')
+        return sdkdir
+
+    @property
+    def WindowsSDKExecutablePath(self):
+        """
+        Microsoft Windows SDK executable directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        # Find WinSDK NetFx Tools registry dir name
+        if self.vs_ver <= 11.0:
+            netfxver = 35
+            arch = ''
+        else:
+            netfxver = 40
+            hidex86 = True if self.vs_ver <= 12.0 else False
+            arch = self.pi.current_dir(x64=True, hidex86=hidex86)
+        fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
+
+        # List all possible registry paths
+        regpaths = []
+        if self.vs_ver >= 14.0:
+            for ver in self.NetFxSdkVersion:
+                regpaths += [join(self.ri.netfx_sdk, ver, fx)]
+
+        for ver in self.WindowsSdkVersion:
+            regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
+
+        # Return the installation folder from the most recent path
+        for path in regpaths:
+            execpath = self.ri.lookup(path, 'installationfolder')
+            if execpath:
+                return execpath
+
+    @property
+    def FSharpInstallDir(self):
+        """
+        Microsoft Visual F# directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver)
+        return self.ri.lookup(path, 'productdir') or ''
+
+    @property
+    def UniversalCRTSdkDir(self):
+        """
+        Microsoft Universal CRT SDK directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        # Set Kit Roots versions for specified MSVC++ version
+        vers = ('10', '81') if self.vs_ver >= 14.0 else ()
+
+        # Find the path of the most recent Kit
+        for ver in vers:
+            sdkdir = self.ri.lookup(self.ri.windows_kits_roots, 'kitsroot%s' % ver)
+            if sdkdir:
+                return sdkdir
+
+    @property
+    def UniversalCRTSdkLastVersion(self):
+        """
+        Latest Microsoft Universal C Runtime SDK version.
+
+        Return
+        ------
+        str
+            version
+        """
+        return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib'))
+
+    @property
+    def NetFxSdkVersion(self):
+        """
+        Microsoft .NET Framework SDK versions.
+
+        Return
+        ------
+        tuple of str
+            versions
+        """
+        # Set FxSdk versions for specified VS version
+        return (
+            ('4.7.2', '4.7.1', '4.7', '4.6.2', '4.6.1', '4.6', '4.5.2', '4.5.1', '4.5')
+            if self.vs_ver >= 14.0
+            else ()
+        )
+
+    @property
+    def NetFxSdkDir(self):
+        """
+        Microsoft .NET Framework SDK directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        sdkdir = ''
+        for ver in self.NetFxSdkVersion:
+            loc = join(self.ri.netfx_sdk, ver)
+            sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
+            if sdkdir:
+                break
+        return sdkdir
+
+    @property
+    def FrameworkDir32(self):
+        """
+        Microsoft .NET Framework 32bit directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        # Default path
+        guess_fw = join(self.WinDir, r'Microsoft.NET\Framework')
+
+        # Try to get path from registry, if fail use default path
+        return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
+
+    @property
+    def FrameworkDir64(self):
+        """
+        Microsoft .NET Framework 64bit directory.
+
+        Return
+        ------
+        str
+            path
+        """
+        # Default path
+        guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64')
+
+        # Try to get path from registry, if fail use default path
+        return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
+
+    @property
+    def FrameworkVersion32(self):
+        """
+        Microsoft .NET Framework 32bit versions.
+
+        Return
+        ------
+        tuple of str
+            versions
+        """
+        return self._find_dot_net_versions(32)
+
+    @property
+    def FrameworkVersion64(self):
+        """
+        Microsoft .NET Framework 64bit versions.
+
+        Return
+        ------
+        tuple of str
+            versions
+        """
+        return self._find_dot_net_versions(64)
+
+    def _find_dot_net_versions(self, bits):
+        """
+        Find Microsoft .NET Framework versions.
+
+        Parameters
+        ----------
+        bits: int
+            Platform number of bits: 32 or 64.
+
+        Return
+        ------
+        tuple of str
+            versions
+        """
+        # Find actual .NET version in registry
+        reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)
+        dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)
+        ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
+
+        # Set .NET versions for specified MSVC++ version
+        if self.vs_ver >= 12.0:
+            return ver, 'v4.0'
+        elif self.vs_ver >= 10.0:
+            return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5'
+        elif self.vs_ver == 9.0:
+            return 'v3.5', 'v2.0.50727'
+        elif self.vs_ver == 8.0:
+            return 'v3.0', 'v2.0.50727'
+
+    @staticmethod
+    def _use_last_dir_name(path, prefix=''):
+        """
+        Return name of the last dir in path or '' if no dir found.
+
+        Parameters
+        ----------
+        path: str
+            Use dirs in this path
+        prefix: str
+            Use only dirs starting by this prefix
+
+        Return
+        ------
+        str
+            name
+        """
+        matching_dirs = (
+            dir_name
+            for dir_name in reversed(listdir(path))
+            if isdir(join(path, dir_name)) and dir_name.startswith(prefix)
+        )
+        return next(matching_dirs, None) or ''
+
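+# Illustrative sketch (not part of the vendored code): on a hypothetical
+# machine whose Framework directory holds 'v2.0.50727' and 'v4.0.30319',
+# the lexically last matching subdirectory wins.
+#
+#     SystemInfo._use_last_dir_name(
+#         r'C:\Windows\Microsoft.NET\Framework', prefix='v'
+#     )  # -> 'v4.0.30319'
+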
+
+class EnvironmentInfo:
+    """
+    Return environment variables for the specified Microsoft Visual C++
+    version and platform: Lib, Include, Path and libpath.
+
+    This class is compatible with Microsoft Visual C++ 9.0 to 14.X.
+
+    Script created by analysing Microsoft environment configuration files like
+    "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
+
+    Parameters
+    ----------
+    arch: str
+        Target architecture.
+    vc_ver: float
+        Required Microsoft Visual C++ version. If not set, autodetect the last
+        version.
+    vc_min_ver: float
+        Minimum Microsoft Visual C++ version.
+    """
+
+    # Variables and properties in this class use the original CamelCase names
+    # from Microsoft source files for easier comparison.
+
+    def __init__(self, arch, vc_ver=None, vc_min_ver=0):
+        self.pi = PlatformInfo(arch)
+        self.ri = RegistryInfo(self.pi)
+        self.si = SystemInfo(self.ri, vc_ver)
+
+        if self.vc_ver < vc_min_ver:
+            err = 'No suitable Microsoft Visual C++ version found'
+            raise distutils.errors.DistutilsPlatformError(err)
+
+    @property
+    def vs_ver(self):
+        """
+        Microsoft Visual Studio version.
+
+        Return
+        ------
+        float
+            version
+        """
+        return self.si.vs_ver
+
+    @property
+    def vc_ver(self):
+        """
+        Microsoft Visual C++ version.
+
+        Return
+        ------
+        float
+            version
+        """
+        return self.si.vc_ver
+
+    @property
+    def VSTools(self):
+        """
+        Microsoft Visual Studio Tools.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        paths = [r'Common7\IDE', r'Common7\Tools']
+
+        if self.vs_ver >= 14.0:
+            arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
+            paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
+            paths += [r'Team Tools\Performance Tools']
+            paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
+
+        return [join(self.si.VSInstallDir, path) for path in paths]
+
+    @property
+    def VCIncludes(self):
+        """
+        Microsoft Visual C++ & Microsoft Foundation Class Includes.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        return [
+            join(self.si.VCInstallDir, 'Include'),
+            join(self.si.VCInstallDir, r'ATLMFC\Include'),
+        ]
+
+    @property
+    def VCLibraries(self):
+        """
+        Microsoft Visual C++ & Microsoft Foundation Class Libraries.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver >= 15.0:
+            arch_subdir = self.pi.target_dir(x64=True)
+        else:
+            arch_subdir = self.pi.target_dir(hidex86=True)
+        paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
+
+        if self.vs_ver >= 14.0:
+            paths += [r'Lib\store%s' % arch_subdir]
+
+        return [join(self.si.VCInstallDir, path) for path in paths]
+
+    @property
+    def VCStoreRefs(self):
+        """
+        Microsoft Visual C++ store references Libraries.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver < 14.0:
+            return []
+        return [join(self.si.VCInstallDir, r'Lib\store\references')]
+
+    @property
+    def VCTools(self):
+        """
+        Microsoft Visual C++ Tools.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        si = self.si
+        tools = [join(si.VCInstallDir, 'VCPackages')]
+
+        forcex86 = True if self.vs_ver <= 10.0 else False
+        arch_subdir = self.pi.cross_dir(forcex86)
+        if arch_subdir:
+            tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
+
+        if self.vs_ver == 14.0:
+            path = 'Bin%s' % self.pi.current_dir(hidex86=True)
+            tools += [join(si.VCInstallDir, path)]
+
+        elif self.vs_ver >= 15.0:
+            host_dir = (
+                r'bin\HostX86%s' if self.pi.current_is_x86() else r'bin\HostX64%s'
+            )
+            tools += [join(si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]
+
+            if self.pi.current_cpu != self.pi.target_cpu:
+                tools += [
+                    join(si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))
+                ]
+
+        else:
+            tools += [join(si.VCInstallDir, 'Bin')]
+
+        return tools
+
+    @property
+    def OSLibraries(self):
+        """
+        Microsoft Windows SDK Libraries.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver <= 10.0:
+            arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
+            return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
+
+        else:
+            arch_subdir = self.pi.target_dir(x64=True)
+            lib = join(self.si.WindowsSdkDir, 'lib')
+            libver = self._sdk_subdir
+            return [join(lib, '%sum%s' % (libver, arch_subdir))]
+
+    @property
+    def OSIncludes(self):
+        """
+        Microsoft Windows SDK Include.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        include = join(self.si.WindowsSdkDir, 'include')
+
+        if self.vs_ver <= 10.0:
+            return [include, join(include, 'gl')]
+
+        else:
+            if self.vs_ver >= 14.0:
+                sdkver = self._sdk_subdir
+            else:
+                sdkver = ''
+            return [
+                join(include, '%sshared' % sdkver),
+                join(include, '%sum' % sdkver),
+                join(include, '%swinrt' % sdkver),
+            ]
+
+    @property
+    def OSLibpath(self):
+        """
+        Microsoft Windows SDK Libraries Paths.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        ref = join(self.si.WindowsSdkDir, 'References')
+        libpath = []
+
+        if self.vs_ver <= 9.0:
+            libpath += self.OSLibraries
+
+        if self.vs_ver >= 11.0:
+            libpath += [join(ref, r'CommonConfiguration\Neutral')]
+
+        if self.vs_ver >= 14.0:
+            libpath += [
+                ref,
+                join(self.si.WindowsSdkDir, 'UnionMetadata'),
+                join(ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'),
+                join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'),
+                join(ref, 'Windows.Networking.Connectivity.WwanContract', '1.0.0.0'),
+                join(
+                    self.si.WindowsSdkDir,
+                    'ExtensionSDKs',
+                    'Microsoft.VCLibs',
+                    '%0.1f' % self.vs_ver,
+                    'References',
+                    'CommonConfiguration',
+                    'neutral',
+                ),
+            ]
+        return libpath
+
+    @property
+    def SdkTools(self):
+        """
+        Microsoft Windows SDK Tools.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        return list(self._sdk_tools())
+
+    def _sdk_tools(self):
+        """
+        Microsoft Windows SDK Tools paths generator.
+
+        Return
+        ------
+        generator of str
+            paths
+        """
+        if self.vs_ver < 15.0:
+            bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86'
+            yield join(self.si.WindowsSdkDir, bin_dir)
+
+        if not self.pi.current_is_x86():
+            arch_subdir = self.pi.current_dir(x64=True)
+            path = 'Bin%s' % arch_subdir
+            yield join(self.si.WindowsSdkDir, path)
+
+        if self.vs_ver in (10.0, 11.0):
+            if self.pi.target_is_x86():
+                arch_subdir = ''
+            else:
+                arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
+            path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
+            yield join(self.si.WindowsSdkDir, path)
+
+        elif self.vs_ver >= 15.0:
+            path = join(self.si.WindowsSdkDir, 'Bin')
+            arch_subdir = self.pi.current_dir(x64=True)
+            sdkver = self.si.WindowsSdkLastVersion
+            yield join(path, '%s%s' % (sdkver, arch_subdir))
+
+        if self.si.WindowsSDKExecutablePath:
+            yield self.si.WindowsSDKExecutablePath
+
+    @property
+    def _sdk_subdir(self):
+        """
+        Microsoft Windows SDK version subdir.
+
+        Return
+        ------
+        str
+            subdir
+        """
+        ucrtver = self.si.WindowsSdkLastVersion
+        return ('%s\\' % ucrtver) if ucrtver else ''
+
+    @property
+    def SdkSetup(self):
+        """
+        Microsoft Windows SDK Setup.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver > 9.0:
+            return []
+
+        return [join(self.si.WindowsSdkDir, 'Setup')]
+
+    @property
+    def FxTools(self):
+        """
+        Microsoft .NET Framework Tools.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        pi = self.pi
+        si = self.si
+
+        if self.vs_ver <= 10.0:
+            include32 = True
+            include64 = not pi.target_is_x86() and not pi.current_is_x86()
+        else:
+            include32 = pi.target_is_x86() or pi.current_is_x86()
+            include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
+
+        tools = []
+        if include32:
+            tools += [join(si.FrameworkDir32, ver) for ver in si.FrameworkVersion32]
+        if include64:
+            tools += [join(si.FrameworkDir64, ver) for ver in si.FrameworkVersion64]
+        return tools
+
+    @property
+    def NetFxSDKLibraries(self):
+        """
+        Microsoft .NET Framework SDK Libraries.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
+            return []
+
+        arch_subdir = self.pi.target_dir(x64=True)
+        return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
+
+    @property
+    def NetFxSDKIncludes(self):
+        """
+        Microsoft .NET Framework SDK Includes.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
+            return []
+
+        return [join(self.si.NetFxSdkDir, r'include\um')]
+
+    @property
+    def VsTDb(self):
+        """
+        Microsoft Visual Studio Team System Database.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
+
+    @property
+    def MSBuild(self):
+        """
+        Microsoft Build Engine.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver < 12.0:
+            return []
+        elif self.vs_ver < 15.0:
+            base_path = self.si.ProgramFilesx86
+            arch_subdir = self.pi.current_dir(hidex86=True)
+        else:
+            base_path = self.si.VSInstallDir
+            arch_subdir = ''
+
+        path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir)
+        build = [join(base_path, path)]
+
+        if self.vs_ver >= 15.0:
+            # Add Roslyn C# & Visual Basic Compiler
+            build += [join(base_path, path, 'Roslyn')]
+
+        return build
+
+    @property
+    def HTMLHelpWorkshop(self):
+        """
+        Microsoft HTML Help Workshop.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver < 11.0:
+            return []
+
+        return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
+
+    @property
+    def UCRTLibraries(self):
+        """
+        Microsoft Universal C Runtime SDK Libraries.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver < 14.0:
+            return []
+
+        arch_subdir = self.pi.target_dir(x64=True)
+        lib = join(self.si.UniversalCRTSdkDir, 'lib')
+        ucrtver = self._ucrt_subdir
+        return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
+
+    @property
+    def UCRTIncludes(self):
+        """
+        Microsoft Universal C Runtime SDK Include.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if self.vs_ver < 14.0:
+            return []
+
+        include = join(self.si.UniversalCRTSdkDir, 'include')
+        return [join(include, '%sucrt' % self._ucrt_subdir)]
+
+    @property
+    def _ucrt_subdir(self):
+        """
+        Microsoft Universal C Runtime SDK version subdir.
+
+        Return
+        ------
+        str
+            subdir
+        """
+        ucrtver = self.si.UniversalCRTSdkLastVersion
+        return ('%s\\' % ucrtver) if ucrtver else ''
+
+    @property
+    def FSharp(self):
+        """
+        Microsoft Visual F#.
+
+        Return
+        ------
+        list of str
+            paths
+        """
+        if 11.0 > self.vs_ver > 12.0:
+            return []
+
+        return [self.si.FSharpInstallDir]
+
+    @property
+    def VCRuntimeRedist(self):
+        """
+        Microsoft Visual C++ runtime redistributable dll.
+
+        Return
+        ------
+        str
+            path
+        """
+        vcruntime = 'vcruntime%d0.dll' % self.vc_ver
+        arch_subdir = self.pi.target_dir(x64=True).strip('\\')
+
+        # Installation prefixes candidates
+        prefixes = []
+        tools_path = self.si.VCInstallDir
+        redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist'))
+        if isdir(redist_path):
+            # Redist version may not be exactly the same as the tools version
+            redist_path = join(redist_path, listdir(redist_path)[-1])
+            prefixes += [redist_path, join(redist_path, 'onecore')]
+
+        prefixes += [join(tools_path, 'redist')]  # VS14 legacy path
+
+        # CRT directory
+        crt_dirs = (
+            'Microsoft.VC%d.CRT' % (self.vc_ver * 10),
+            # Sometimes stored in a directory named after the VS version instead of VC
+            'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10),
+        )
+
+        # vcruntime path
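+        # (illustrative: with vc_ver 14.28 this resolves to something like
+        # r'...\Redist\MSVC\14.28.29325\x64\Microsoft.VC142.CRT\vcruntime140.dll')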
+        for prefix, crt_dir in itertools.product(prefixes, crt_dirs):
+            path = join(prefix, arch_subdir, crt_dir, vcruntime)
+            if isfile(path):
+                return path
+
+    def return_env(self, exists=True):
+        """
+        Return environment dict.
+
+        Parameters
+        ----------
+        exists: bool
+            If True, only return existing paths.
+
+        Return
+        ------
+        dict
+            environment
+        """
+        env = dict(
+            include=self._build_paths(
+                'include',
+                [
+                    self.VCIncludes,
+                    self.OSIncludes,
+                    self.UCRTIncludes,
+                    self.NetFxSDKIncludes,
+                ],
+                exists,
+            ),
+            lib=self._build_paths(
+                'lib',
+                [
+                    self.VCLibraries,
+                    self.OSLibraries,
+                    self.FxTools,
+                    self.UCRTLibraries,
+                    self.NetFxSDKLibraries,
+                ],
+                exists,
+            ),
+            libpath=self._build_paths(
+                'libpath',
+                [self.VCLibraries, self.FxTools, self.VCStoreRefs, self.OSLibpath],
+                exists,
+            ),
+            path=self._build_paths(
+                'path',
+                [
+                    self.VCTools,
+                    self.VSTools,
+                    self.VsTDb,
+                    self.SdkTools,
+                    self.SdkSetup,
+                    self.FxTools,
+                    self.MSBuild,
+                    self.HTMLHelpWorkshop,
+                    self.FSharp,
+                ],
+                exists,
+            ),
+        )
+        if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist):
+            env['py_vcruntime_redist'] = self.VCRuntimeRedist
+        return env
+
+    def _build_paths(self, name, spec_path_lists, exists):
+        """
+        Given an environment variable name and specified paths,
+        return a pathsep-separated string of paths containing
+        unique, extant directories from those paths and from
+        the environment variable. Raise an error if no paths
+        are resolved.
+
+        Parameters
+        ----------
+        name: str
+            Environment variable name
+        spec_path_lists: list of list of str
+            Paths
+        exists: bool
+            If True, only return existing paths.
+
+        Return
+        ------
+        str
+            Pathsep-separated paths
+        """
+        # flatten spec_path_lists
+        spec_paths = itertools.chain.from_iterable(spec_path_lists)
+        env_paths = environ.get(name, '').split(pathsep)
+        paths = itertools.chain(spec_paths, env_paths)
+        extant_paths = list(filter(isdir, paths)) if exists else paths
+        if not extant_paths:
+            msg = "%s environment variable is empty" % name.upper()
+            raise distutils.errors.DistutilsPlatformError(msg)
+        unique_paths = unique_everseen(extant_paths)
+        return pathsep.join(unique_paths)
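+
+
+# Illustrative sketch (not part of the vendored code): typical use of
+# EnvironmentInfo to recover MSVC paths for a cross-compilation target.
+#
+#     env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env()
+#     env['include']  # pathsep-joined, existing include directories
+#     env['lib']      # pathsep-joined, existing library directories
+#     env['path']     # tool directories merged with the current PATH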
diff --git a/.venv/Lib/site-packages/setuptools/windows_support.py b/.venv/Lib/site-packages/setuptools/windows_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdadeb597ca6450548e53c8de89d75a13029e74b
--- /dev/null
+++ b/.venv/Lib/site-packages/setuptools/windows_support.py
@@ -0,0 +1,30 @@
+import platform
+
+
+def windows_only(func):
+    if platform.system() != 'Windows':
+        return lambda *args, **kwargs: None
+    return func
+
+
+@windows_only
+def hide_file(path):
+    """
+    Set the hidden attribute on a file or directory.
+
+    From http://stackoverflow.com/questions/19622133/
+
+    `path` must be text.
+    """
+    import ctypes
+
+    __import__('ctypes.wintypes')
+    SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
+    SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
+    SetFileAttributes.restype = ctypes.wintypes.BOOL
+
+    FILE_ATTRIBUTE_HIDDEN = 0x02
+
+    ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
+    if not ret:
+        raise ctypes.WinError()
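+
+
+# Illustrative sketch (not part of the vendored code): on non-Windows
+# platforms the windows_only decorator turns hide_file into a no-op.
+#
+#     hide_file('C:\\some\\build\\dir')  # sets FILE_ATTRIBUTE_HIDDEN (0x02)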