ZAIDX11 committed on
Commit
85a5bf3
·
verified ·
1 Parent(s): 09d99cd

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. hfenv/Lib/site-packages/pkg_resources/_vendor/__init__.py +0 -0
  2. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py +36 -0
  3. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py +170 -0
  4. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py +104 -0
  5. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py +98 -0
  6. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py +35 -0
  7. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py +121 -0
  8. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/abc.py +137 -0
  9. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/readers.py +122 -0
  10. hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py +116 -0
  11. hfenv/Lib/site-packages/pkg_resources/_vendor/jaraco/__init__.py +0 -0
  12. hfenv/Lib/site-packages/pkg_resources/_vendor/jaraco/context.py +213 -0
  13. hfenv/Lib/site-packages/pkg_resources/_vendor/jaraco/functools.py +525 -0
  14. hfenv/Lib/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py +599 -0
  15. hfenv/Lib/site-packages/pkg_resources/_vendor/more_itertools/__init__.py +4 -0
  16. hfenv/Lib/site-packages/pkg_resources/_vendor/more_itertools/more.py +0 -0
  17. hfenv/Lib/site-packages/pkg_resources/_vendor/more_itertools/recipes.py +698 -0
  18. hfenv/Lib/site-packages/pkg_resources/_vendor/packaging/markers.py +304 -0
  19. hfenv/Lib/site-packages/pkg_resources/_vendor/packaging/requirements.py +146 -0
  20. hfenv/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py +802 -0
hfenv/Lib/site-packages/pkg_resources/_vendor/__init__.py ADDED
File without changes
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Read resources contained within a package."""

# Modern, Traversable-based API.
from ._common import (
    as_file,
    files,
    Package,
)

# Deprecated function-per-operation API, retained for compatibility.
from ._legacy import (
    contents,
    open_binary,
    read_binary,
    open_text,
    read_text,
    is_resource,
    path,
    Resource,
)

from .abc import ResourceReader


__all__ = [
    'Package',
    'Resource',
    'ResourceReader',
    'as_file',
    'contents',
    'files',
    'is_resource',
    'open_binary',
    'open_text',
    'path',
    'read_binary',
    'read_text',
]
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import suppress
2
+ from io import TextIOWrapper
3
+
4
+ from . import abc
5
+
6
+
7
class SpecLoaderAdapter:
    """
    Adapt a package spec to adapt the underlying loader.

    Wraps a module spec so that ``loader`` becomes whatever
    ``adapter(spec)`` produces, while every other attribute is proxied
    unchanged to the wrapped spec.
    """

    def __init__(self, spec, adapter=lambda spec: spec.loader):
        self.spec = spec
        self.loader = adapter(spec)

    def __getattr__(self, name):
        # Anything not defined on the adapter falls through to the spec.
        return getattr(self.spec, name)
18
+
19
+
20
class TraversableResourcesLoader:
    """
    Adapt a loader to provide TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    def get_resource_reader(self, name):
        # Serve the native reader when it already supports .files();
        # otherwise serve the CompatibilityFiles shim itself.
        return CompatibilityFiles(self.spec)._native()
30
+
31
+
32
+ def _io_wrapper(file, mode='r', *args, **kwargs):
33
+ if mode == 'r':
34
+ return TextIOWrapper(file, *args, **kwargs)
35
+ elif mode == 'rb':
36
+ return file
37
+ raise ValueError(
38
+ "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
39
+ )
40
+
41
+
42
class CompatibilityFiles:
    """
    Adapter for an existing or non-existent resource reader
    to provide a compatibility .files().
    """

    class SpecPath(abc.Traversable):
        """
        Path tied to a module spec.
        Can be read and exposes the resource reader children.
        """

        def __init__(self, spec, reader):
            self._spec = spec
            self._reader = reader

        def iterdir(self):
            if not self._reader:
                return iter(())
            return (
                CompatibilityFiles.ChildPath(self._reader, entry)
                for entry in self._reader.contents()
            )

        def is_file(self):
            # A spec-level path is never a file (and never a dir either:
            # is_dir aliases this same method below).
            return False

        is_dir = is_file

        def joinpath(self, other):
            if not self._reader:
                return CompatibilityFiles.OrphanPath(other)
            return CompatibilityFiles.ChildPath(self._reader, other)

        @property
        def name(self):
            return self._spec.name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)

    class ChildPath(abc.Traversable):
        """
        Path tied to a resource reader child.
        Can be read but doesn't expose any meaningful children.
        """

        def __init__(self, reader, name):
            self._reader = reader
            self._name = name

        def iterdir(self):
            return iter(())

        def is_file(self):
            return self._reader.is_resource(self.name)

        def is_dir(self):
            return not self.is_file()

        def joinpath(self, other):
            # Children are leaves; joining yields an unreadable orphan.
            return CompatibilityFiles.OrphanPath(self.name, other)

        @property
        def name(self):
            return self._name

        def open(self, mode='r', *args, **kwargs):
            stream = self._reader.open_resource(self.name)
            return _io_wrapper(stream, mode, *args, **kwargs)

    class OrphanPath(abc.Traversable):
        """
        Orphan path, not tied to a module spec or resource reader.
        Can't be read and doesn't expose any meaningful children.
        """

        def __init__(self, *path_parts):
            if len(path_parts) < 1:
                raise ValueError('Need at least one path part to construct a path')
            self._path = path_parts

        def iterdir(self):
            return iter(())

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            return CompatibilityFiles.OrphanPath(*self._path, other)

        @property
        def name(self):
            return self._path[-1]

        def open(self, mode='r', *args, **kwargs):
            raise FileNotFoundError("Can't open orphan path")

    def __init__(self, spec):
        self.spec = spec

    @property
    def _reader(self):
        # Yields None when the loader exposes no get_resource_reader.
        with suppress(AttributeError):
            return self.spec.loader.get_resource_reader(self.spec.name)

    def _native(self):
        """
        Return the native reader if it supports files().
        """
        reader = self._reader
        return reader if hasattr(reader, 'files') else self

    def __getattr__(self, attr):
        # Delegate everything else to the underlying reader.
        return getattr(self._reader, attr)

    def files(self):
        return CompatibilityFiles.SpecPath(self.spec, self._reader)
163
+
164
+
165
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.
    """
    # The adapter replaces the spec's loader with the traversable shim.
    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_common.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pathlib
3
+ import tempfile
4
+ import functools
5
+ import contextlib
6
+ import types
7
+ import importlib
8
+
9
+ from typing import Union, Optional
10
+ from .abc import ResourceReader, Traversable
11
+
12
+ from ._compat import wrap_spec
13
+
14
+ Package = Union[types.ModuleType, str]
15
+
16
+
17
def files(package):
    # type: (Package) -> Traversable
    """
    Get a Traversable resource from a package
    """
    module = get_package(package)
    return from_package(module)
23
+
24
+
25
def get_resource_reader(package):
    # type: (types.ModuleType) -> Optional[ResourceReader]
    """
    Return the package's loader if it's a ResourceReader.
    """
    # Avoid issubclass(): abc's __subclasscheck__ hook creates a weak
    # reference to the candidate, and zipimport.zipimporter does not
    # support weak references, which would raise TypeError.
    spec = package.__spec__
    reader = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
    return reader(spec.name) if reader is not None else None  # type: ignore
40
+
41
+
42
def resolve(cand):
    # type: (Package) -> types.ModuleType
    """Return *cand* as a module, importing it when given a dotted name."""
    if isinstance(cand, types.ModuleType):
        return cand
    return importlib.import_module(cand)
45
+
46
+
47
def get_package(package):
    # type: (Package) -> types.ModuleType
    """Take a package name or module object and return the module.

    Raise an exception if the resolved module is not a package.
    """
    resolved = resolve(package)
    # A module is a package iff its spec has submodule_search_locations.
    if wrap_spec(resolved).submodule_search_locations is None:
        raise TypeError(f'{package!r} is not a package')
    return resolved
57
+
58
+
59
def from_package(package):
    """
    Return a Traversable object for the given package.

    """
    adapted = wrap_spec(package)
    reader = adapted.loader.get_resource_reader(adapted.name)
    return reader.files()
67
+
68
+
69
+ @contextlib.contextmanager
70
+ def _tempfile(reader, suffix=''):
71
+ # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
72
+ # blocks due to the need to close the temporary file to work on Windows
73
+ # properly.
74
+ fd, raw_path = tempfile.mkstemp(suffix=suffix)
75
+ try:
76
+ try:
77
+ os.write(fd, reader())
78
+ finally:
79
+ os.close(fd)
80
+ del reader
81
+ yield pathlib.Path(raw_path)
82
+ finally:
83
+ try:
84
+ os.remove(raw_path)
85
+ except FileNotFoundError:
86
+ pass
87
+
88
+
89
@functools.singledispatch
def as_file(path):
    """
    Given a Traversable object, return that object as a
    path on the local file system in a context manager.
    """
    # Materialize the traversable's bytes into a temp file with the
    # same suffix so consumers relying on the extension still work.
    return _tempfile(path.read_bytes, suffix=path.name)


@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
    """
    Degenerate behavior for pathlib.Path objects.
    """
    yield path
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+
3
+ import abc
4
+ import sys
5
+ import pathlib
6
+ from contextlib import suppress
7
+
8
+ if sys.version_info >= (3, 10):
9
+ from zipfile import Path as ZipPath # type: ignore
10
+ else:
11
+ from ..zipp import Path as ZipPath # type: ignore
12
+
13
+
14
+ try:
15
+ from typing import runtime_checkable # type: ignore
16
+ except ImportError:
17
+
18
+ def runtime_checkable(cls): # type: ignore
19
+ return cls
20
+
21
+
22
+ try:
23
+ from typing import Protocol # type: ignore
24
+ except ImportError:
25
+ Protocol = abc.ABC # type: ignore
26
+
27
+
28
class TraversableResourcesLoader:
    """
    Adapt loaders to provide TraversableResources and other
    compatibility.

    Used primarily for Python 3.9 and earlier where the native
    loaders do not yet implement TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    @property
    def path(self):
        # Mirror loader.path: the origin backing the module, if any.
        return self.spec.origin

    def get_resource_reader(self, name):
        from . import readers, _adapters

        def _zip_reader(spec):
            with suppress(AttributeError):
                return readers.ZipReader(spec.loader, spec.name)

        def _namespace_reader(spec):
            with suppress(AttributeError, ValueError):
                return readers.NamespaceReader(spec.submodule_search_locations)

        def _available_reader(spec):
            with suppress(AttributeError):
                return spec.loader.get_resource_reader(spec.name)

        def _native_reader(spec):
            reader = _available_reader(spec)
            return reader if hasattr(reader, 'files') else None

        def _file_reader(spec):
            try:
                path = pathlib.Path(self.path)
            except TypeError:
                # origin may be None (e.g. namespace packages).
                return None
            if path.exists():
                return readers.FileReader(self)

        # Candidate readers in priority order; the first truthy wins:
        # native reader supplying 'files', then ZipReader for zip modules,
        # NamespaceReader for namespace modules, FileReader for on-disk
        # modules, and finally the CompatibilityFiles adapter as fallback.
        return (
            _native_reader(self.spec)
            or _zip_reader(self.spec)
            or _namespace_reader(self.spec)
            or _file_reader(self.spec)
            or _adapters.CompatibilityFiles(self.spec)
        )
86
+
87
+
88
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.

    Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
    from above for older Python compatibility (<3.10).
    """
    # Imported lazily to avoid a circular import at module load time.
    from . import _adapters

    return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import filterfalse
2
+
3
+ from typing import (
4
+ Callable,
5
+ Iterable,
6
+ Iterator,
7
+ Optional,
8
+ Set,
9
+ TypeVar,
10
+ Union,
11
+ )
12
+
13
# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')


def unique_everseen(
    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen: Set[Union[_T, _U]] = set()
    if key is None:
        # Fast path: filterfalse drops anything already seen.
        for element in filterfalse(seen.__contains__, iterable):
            seen.add(element)
            yield element
    else:
        for element in iterable:
            marker = key(element)
            if marker not in seen:
                seen.add(marker)
                yield element
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import os
3
+ import pathlib
4
+ import types
5
+ import warnings
6
+
7
+ from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
8
+
9
+ from . import _common
10
+
11
# A package may be addressed by module object or by dotted name; a
# resource is always a plain file name (no path separators).
Package = Union[types.ModuleType, str]
Resource = str
13
+
14
+
15
def deprecated(func):
    """Decorate *func* to emit a DeprecationWarning on every call."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn(
            f"{func.__name__} is deprecated. Use files() instead. "
            "Refer to https://importlib-resources.readthedocs.io"
            "/en/latest/using.html#migrating-from-legacy for migration advice.",
            DeprecationWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)

    return wrapper
28
+
29
+
30
def normalize_path(path):
    # type: (Any) -> str
    """Normalize a path by ensuring it is a string.

    If the resulting string contains path separators, an exception is raised.
    """
    parent, file_name = os.path.split(str(path))
    if parent:
        raise ValueError(f'{path!r} must be only a file name')
    return file_name
41
+
42
+
43
@deprecated
def open_binary(package: Package, resource: Resource) -> BinaryIO:
    """Return a file-like object opened for binary reading of the resource."""
    target = _common.files(package) / normalize_path(resource)
    return target.open('rb')
47
+
48
+
49
@deprecated
def read_binary(package: Package, resource: Resource) -> bytes:
    """Return the binary contents of the resource."""
    target = _common.files(package) / normalize_path(resource)
    return target.read_bytes()
53
+
54
+
55
@deprecated
def open_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> TextIO:
    """Return a file-like object opened for text reading of the resource."""
    target = _common.files(package) / normalize_path(resource)
    return target.open('r', encoding=encoding, errors=errors)
66
+
67
+
68
@deprecated
def read_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> str:
    """Return the decoded string of the resource.

    The decoding-related arguments have the same semantics as those of
    bytes.decode().
    """
    with open_text(package, resource, encoding, errors) as fp:
        return fp.read()
82
+
83
+
84
@deprecated
def contents(package: Package) -> Iterable[str]:
    """Return an iterable of entries in `package`.

    Note that not all entries are resources. Specifically, directories are
    not considered resources. Use `is_resource()` on each entry returned here
    to check if it is a resource or not.
    """
    return [entry.name for entry in _common.files(package).iterdir()]
93
+
94
+
95
@deprecated
def is_resource(package: Package, name: str) -> bool:
    """True if `name` is a resource inside `package`.

    Directories are *not* resources.
    """
    resource = normalize_path(name)
    return any(
        entry.name == resource and entry.is_file()
        for entry in _common.files(package).iterdir()
    )
106
+
107
+
108
@deprecated
def path(
    package: Package,
    resource: Resource,
) -> ContextManager[pathlib.Path]:
    """A context manager providing a file path object to the resource.

    If the resource does not already exist on its own on the file system,
    a temporary file will be created. If the file was created, the file
    will be deleted upon exiting the context manager (no exception is
    raised if the file was deleted prior to the context manager
    exiting).
    """
    target = _common.files(package) / normalize_path(resource)
    return _common.as_file(target)
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/abc.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ from typing import BinaryIO, Iterable, Text
3
+
4
+ from ._compat import runtime_checkable, Protocol
5
+
6
+
7
class ResourceReader(metaclass=abc.ABCMeta):
    """Abstract base class for loaders to provide resource reading support."""

    @abc.abstractmethod
    def open_resource(self, resource: Text) -> BinaryIO:
        """Return an opened, file-like object for binary reading.

        The 'resource' argument is expected to represent only a file name.
        If the resource cannot be found, FileNotFoundError is raised.
        """
        # This deliberately raises FileNotFoundError instead of
        # NotImplementedError so that if this method is accidentally called,
        # it'll still do the right thing.
        raise FileNotFoundError

    @abc.abstractmethod
    def resource_path(self, resource: Text) -> Text:
        """Return the file system path to the specified resource.

        The 'resource' argument is expected to represent only a file name.
        If the resource does not exist on the file system, raise
        FileNotFoundError.
        """
        # See open_resource for why FileNotFoundError rather than
        # NotImplementedError.
        raise FileNotFoundError

    @abc.abstractmethod
    def is_resource(self, path: Text) -> bool:
        """Return True if the named 'path' is a resource.

        Files are resources, directories are not.
        """
        raise FileNotFoundError

    @abc.abstractmethod
    def contents(self) -> Iterable[str]:
        """Return an iterable of entries in `package`."""
        raise FileNotFoundError
47
+
48
+
49
@runtime_checkable
class Traversable(Protocol):
    """
    An object with a subset of pathlib.Path methods suitable for
    traversing directories and opening files.
    """

    @abc.abstractmethod
    def iterdir(self):
        """
        Yield Traversable objects in self
        """

    def read_bytes(self):
        """
        Read contents of self as bytes
        """
        with self.open('rb') as strm:
            return strm.read()

    def read_text(self, encoding=None):
        """
        Read contents of self as text
        """
        with self.open(encoding=encoding) as strm:
            return strm.read()

    @abc.abstractmethod
    def is_dir(self) -> bool:
        """
        Return True if self is a directory
        """

    @abc.abstractmethod
    def is_file(self) -> bool:
        """
        Return True if self is a file
        """

    @abc.abstractmethod
    def joinpath(self, child):
        """
        Return Traversable child in self
        """

    def __truediv__(self, child):
        """
        Return Traversable child in self
        """
        return self.joinpath(child)

    @abc.abstractmethod
    def open(self, mode='r', *args, **kwargs):
        """
        mode may be 'r' or 'rb' to open as text or binary. Return a handle
        suitable for reading (same as pathlib.Path.open).

        When opening as text, accepts encoding parameters such as those
        accepted by io.TextIOWrapper.
        """

    # abc.abstractproperty is deprecated since Python 3.3; the documented
    # equivalent is stacking @property over @abc.abstractmethod.
    @property
    @abc.abstractmethod
    def name(self) -> str:
        """
        The base name of this object without any parent references.
        """
115
+
116
+
117
class TraversableResources(ResourceReader):
    """
    The required interface for providing traversable
    resources.
    """

    @abc.abstractmethod
    def files(self):
        """Return a Traversable object for the loaded package."""

    def open_resource(self, resource):
        return self.files().joinpath(resource).open('rb')

    def resource_path(self, resource):
        # Traversable resources need not exist on the file system.
        raise FileNotFoundError(resource)

    def is_resource(self, path):
        return self.files().joinpath(path).is_file()

    def contents(self):
        return (item.name for item in self.files().iterdir())
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/readers.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import pathlib
3
+ import operator
4
+
5
+ from . import abc
6
+
7
+ from ._itertools import unique_everseen
8
+ from ._compat import ZipPath
9
+
10
+
11
def remove_duplicates(items):
    """Iterate *items* in order, dropping later duplicates."""
    # dict preserves insertion order (Python 3.7+), so fromkeys
    # deduplicates while keeping first-seen order.
    return iter(dict.fromkeys(items))
13
+
14
+
15
class FileReader(abc.TraversableResources):
    """TraversableResources for a package backed by an on-disk directory."""

    def __init__(self, loader):
        # The package directory is the parent of the loader's module file.
        self.path = pathlib.Path(loader.path).parent

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path
29
+
30
+
31
class ZipReader(abc.TraversableResources):
    """TraversableResources for a package imported from a zip archive."""

    def __init__(self, loader, module):
        _, _, name = module.rpartition('.')
        # Normalize the archive-internal prefix to forward slashes.
        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
        self.archive = loader.archive

    def open_resource(self, resource):
        try:
            return super().open_resource(resource)
        except KeyError as exc:
            # zipfile raises KeyError for missing members; translate to
            # the FileNotFoundError the ResourceReader contract promises.
            raise FileNotFoundError(exc.args[0])

    def is_resource(self, path):
        # workaround for `zipfile.Path.is_file` returning true
        # for non-existent paths.
        target = self.files().joinpath(path)
        return target.is_file() and target.exists()

    def files(self):
        return ZipPath(self.archive, self.prefix)
51
+
52
+
53
class MultiplexedPath(abc.Traversable):
    """
    Given a series of Traversable objects, implement a merged
    version of the interface across all objects. Useful for
    namespace packages which may be multihomed at a single
    name.
    """

    def __init__(self, *paths):
        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
        if not self._paths:
            raise FileNotFoundError(
                'MultiplexedPath must contain at least one path'
            )
        if not all(entry.is_dir() for entry in self._paths):
            raise NotADirectoryError('MultiplexedPath only supports directories')

    def iterdir(self):
        # Merge children from every underlying directory, keeping only
        # the first occurrence of each name.
        children = (
            child for directory in self._paths for child in directory.iterdir()
        )
        return unique_everseen(children, key=operator.attrgetter('name'))

    def read_bytes(self):
        raise FileNotFoundError(f'{self} is not a file')

    def read_text(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def joinpath(self, child):
        # first try to find child in current paths
        for candidate in self.iterdir():
            if candidate.name == child:
                return candidate
        # if it does not exist, construct it with the first path
        return self._paths[0] / child

    __truediv__ = joinpath

    def open(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    @property
    def name(self):
        return self._paths[0].name

    def __repr__(self):
        described = ', '.join(f"'{entry}'" for entry in self._paths)
        return f'MultiplexedPath({described})'
+ return f'MultiplexedPath({paths})'
105
+
106
+
107
class NamespaceReader(abc.TraversableResources):
    """TraversableResources for namespace packages (potentially multihomed)."""

    def __init__(self, namespace_path):
        # Guard against being handed something other than a namespace
        # package's _NamespacePath.
        if 'NamespacePath' not in str(namespace_path):
            raise ValueError('Invalid path')
        self.path = MultiplexedPath(*list(namespace_path))

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path
hfenv/Lib/site-packages/pkg_resources/_vendor/importlib_resources/simple.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Interface adapters for low-level readers.
3
+ """
4
+
5
+ import abc
6
+ import io
7
+ import itertools
8
+ from typing import BinaryIO, List
9
+
10
+ from .abc import Traversable, TraversableResources
11
+
12
+
13
class SimpleReader(abc.ABC):
    """
    The minimum, low-level interface required from a resource
    provider.
    """

    # abc.abstractproperty is deprecated since Python 3.3; the documented
    # replacement is @property stacked over @abc.abstractmethod.
    @property
    @abc.abstractmethod
    def package(self):
        # type: () -> str
        """
        The name of the package for which this reader loads resources.
        """

    @abc.abstractmethod
    def children(self):
        # type: () -> List['SimpleReader']
        """
        Obtain an iterable of SimpleReader for available
        child containers (e.g. directories).
        """

    @abc.abstractmethod
    def resources(self):
        # type: () -> List[str]
        """
        Obtain available named resources for this virtual package.
        """

    @abc.abstractmethod
    def open_binary(self, resource):
        # type: (str) -> BinaryIO
        """
        Obtain a File-like for a named resource.
        """

    @property
    def name(self):
        # The last dotted segment of the package name.
        return self.package.split('.')[-1]
51
+
52
+
53
class ResourceHandle(Traversable):
    """
    Handle to a named resource in a ResourceReader.
    """

    def __init__(self, parent, name):
        # type: (ResourceContainer, str) -> None
        self.parent = parent
        self.name = name  # type: ignore

    def is_file(self):
        # A handle always refers to a file resource.
        return True

    def is_dir(self):
        return False

    def open(self, mode='r', *args, **kwargs):
        """Open the resource: raw binary for 'b' modes, else wrapped as text.

        Extra arguments are forwarded to io.TextIOWrapper (e.g. encoding).
        """
        stream = self.parent.reader.open_binary(self.name)
        if 'b' not in mode:
            # BUG FIX: the binary stream must be passed to TextIOWrapper;
            # previously the wrapper was constructed without any stream,
            # raising TypeError (matches the upstream importlib_resources fix).
            stream = io.TextIOWrapper(stream, *args, **kwargs)
        return stream

    def joinpath(self, name):
        raise RuntimeError("Cannot traverse into a resource")
77
+
78
+
79
class ResourceContainer(Traversable):
    """
    Traversable container for a package's resources via its reader.
    """

    def __init__(self, reader):
        # type: (SimpleReader) -> None
        self.reader = reader

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def iterdir(self):
        # BUG FIX: SimpleReader declares resources() as a method, so it
        # must be called; iterating the bound method object would raise
        # TypeError at runtime.
        files = (ResourceHandle(self, name) for name in self.reader.resources())
        dirs = map(ResourceContainer, self.reader.children())
        return itertools.chain(files, dirs)

    def open(self, *args, **kwargs):
        raise IsADirectoryError()

    def joinpath(self, name):
        # Linear scan; resource sets are expected to be small. Raises
        # StopIteration if no child matches.
        return next(
            traversable for traversable in self.iterdir() if traversable.name == name
        )
106
+
107
+
108
class TraversableReader(TraversableResources, SimpleReader):
    """
    A TraversableResources based on SimpleReader. Resource providers
    may derive from this class to provide the TraversableResources
    interface by supplying the SimpleReader interface.
    """

    def files(self):
        # The container adapts this reader to the Traversable protocol.
        return ResourceContainer(self)
hfenv/Lib/site-packages/pkg_resources/_vendor/jaraco/__init__.py ADDED
File without changes
hfenv/Lib/site-packages/pkg_resources/_vendor/jaraco/context.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+ import contextlib
4
+ import functools
5
+ import tempfile
6
+ import shutil
7
+ import operator
8
+
9
+
10
@contextlib.contextmanager
def pushd(dir):
    """
    Temporarily change the working directory to *dir*, yielding it,
    and restore the previous working directory on exit.
    """
    previous = os.getcwd()
    os.chdir(dir)
    try:
        yield dir
    finally:
        os.chdir(previous)
18
+
19
+
20
@contextlib.contextmanager
def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
    """
    Get a tarball, extract it, change to that directory, yield, then
    clean up.
    `runner` is the function to invoke commands.
    `pushd` is a context manager for changing the directory.
    """
    if target_dir is None:
        # Derive the directory name from the tarball's filename.
        target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
    if runner is None:
        # NOTE(review): shell=True interpolates the URL into a shell
        # command line; only safe with trusted URLs.
        runner = functools.partial(subprocess.check_call, shell=True)
    # In the tar command, use --strip-components=1 to strip the first path and
    # then
    # use -C to cause the files to be extracted to {target_dir}. This ensures
    # that we always know where the files were extracted.
    runner('mkdir {target_dir}'.format(**vars()))
    try:
        # Stream the download straight into tar.
        getter = 'wget {url} -O -'
        extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
        cmd = ' | '.join((getter, extract))
        runner(cmd.format(compression=infer_compression(url), **vars()))
        with pushd(target_dir):
            yield target_dir
    finally:
        # Always remove the extraction directory, even on failure.
        runner('rm -Rf {target_dir}'.format(**vars()))
46
+
47
+
48
def infer_compression(url):
    """
    Given a URL or filename, infer the compression code for tar.

    Returns 'z' (gzip), 'j' (bzip2), or 'J' (xz); anything
    unrecognized falls back to gzip.

    >>> infer_compression('file.tar.gz')
    'z'
    >>> infer_compression('file.tar.bz2')
    'j'
    >>> infer_compression('file.tar.xz')
    'J'
    >>> infer_compression('file.unknown')
    'z'
    """
    # Match known suffixes, longest first. The previous heuristic
    # examined only the last two characters, which misclassified
    # '.bz2' archives (ending 'z2') as gzip.
    suffix_codes = {
        'tgz': 'z',
        'bz2': 'j',
        'tbz': 'j',
        'gz': 'z',
        'bz': 'j',
        'xz': 'J',
    }
    for suffix in sorted(suffix_codes, key=len, reverse=True):
        if url.endswith(suffix):
            return suffix_codes[suffix]
    # Assume 'z' (gzip) if no match
    return 'z'
57
+
58
+
59
@contextlib.contextmanager
def temp_dir(remover=shutil.rmtree):
    """
    Yield a freshly-created temporary directory, removing it on exit.
    Pass a custom *remover* to override the removal behavior.
    """
    created = tempfile.mkdtemp()
    try:
        yield created
    finally:
        remover(created)
70
+
71
+
72
@contextlib.contextmanager
def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
    """
    Check out the repo indicated by url.

    If dest_ctx is supplied, it should be a context manager
    to yield the target directory for the check out.
    """
    # Crude VCS detection: any URL mentioning 'git' is treated as git,
    # everything else as Mercurial.
    exe = 'git' if 'git' in url else 'hg'
    with dest_ctx() as repo_dir:
        cmd = [exe, 'clone', url, repo_dir]
        if branch:
            cmd.extend(['--branch', branch])
        # NOTE(review): devnull is opened but never explicitly closed;
        # presumably acceptable for this utility's lifetime — confirm.
        devnull = open(os.path.devnull, 'w')
        stdout = devnull if quiet else None
        subprocess.check_call(cmd, stdout=stdout)
        yield repo_dir
89
+
90
+
91
@contextlib.contextmanager
def null():
    """
    A no-op context manager; yields None and suppresses nothing.
    """
    yield
94
+
95
+
96
class ExceptionTrap:
    """
    A context manager that records whether one of the indicated
    exception types was raised inside its block.

    >>> with ExceptionTrap() as trap:
    ...     raise Exception()
    >>> bool(trap)
    True

    >>> with ExceptionTrap() as trap:
    ...     pass
    >>> bool(trap)
    False

    Exceptions not matching the requested types propagate:

    >>> with ExceptionTrap(ValueError) as trap:
    ...     raise Exception()
    Traceback (most recent call last):
    ...
    Exception
    >>> bool(trap)
    False
    """

    # (type, value, traceback) of the trapped exception, if any.
    exc_info = None, None, None

    def __init__(self, exceptions=(Exception,)):
        self.exceptions = exceptions

    def __enter__(self):
        return self

    @property
    def type(self):
        return self.exc_info[0]

    @property
    def value(self):
        return self.exc_info[1]

    @property
    def tb(self):
        return self.exc_info[2]

    def __exit__(self, *exc_info):
        exc_type = exc_info[0]
        caught = exc_type is not None and issubclass(exc_type, self.exceptions)
        if caught:
            self.exc_info = exc_info
        # Returning a truthy value suppresses the trapped exception.
        return caught

    def __bool__(self):
        return self.type is not None

    def raises(self, func, *, _test=bool):
        """
        Wrap *func* so it returns the truth value of the trap
        (True if a trapped exception occurred).

        >>> raises = ExceptionTrap(ValueError).raises
        >>> @raises
        ... def fail():
        ...     raise ValueError('failed')
        >>> fail()
        True
        """

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with ExceptionTrap(self.exceptions) as trap:
                func(*args, **kwargs)
            return _test(trap)

        return wrapper

    def passes(self, func):
        """
        Wrap *func* so it returns True when no trapped exception occurred.

        >>> passes = ExceptionTrap(ValueError).passes
        >>> @passes
        ... def fail():
        ...     raise ValueError('failed')
        >>> fail()
        False
        """
        return self.raises(func, _test=operator.not_)
203
+
204
+
205
class suppress(contextlib.suppress, contextlib.ContextDecorator):
    """
    A version of contextlib.suppress with decorator support.

    >>> @suppress(KeyError)
    ... def key_error():
    ...     {}['']
    >>> key_error()
    """

    # Behavior comes entirely from the bases: contextlib.suppress
    # swallows the listed exception types, and ContextDecorator adds
    # the @-decorator usage shown above.
hfenv/Lib/site-packages/pkg_resources/_vendor/jaraco/functools.py ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import time
3
+ import inspect
4
+ import collections
5
+ import types
6
+ import itertools
7
+
8
+ import pkg_resources.extern.more_itertools
9
+
10
+ from typing import Callable, TypeVar
11
+
12
+
13
+ CallableT = TypeVar("CallableT", bound=Callable[..., object])
14
+
15
+
16
def compose(*funcs):
    """
    Compose any number of unary functions into a single unary function.

    The rightmost function is applied first and may take arbitrary
    arguments; each remaining function receives the previous result.

    >>> import textwrap
    >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
    >>> strip_and_dedent(compose.__doc__) == str.strip(textwrap.dedent(compose.__doc__))
    True

    >>> round_three = lambda x: round(x, ndigits=3)
    >>> f = compose(round_three, int.__truediv__)
    >>> [f(3*x, x+1) for x in range(1,10)]
    [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
    """

    def _pair(outer, inner):
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))

        return composed

    return functools.reduce(_pair, funcs)
38
+
39
+
40
def method_caller(method_name, *args, **kwargs):
    """
    Return a function that will call a named method on the
    target object with optional positional and keyword
    arguments.

    >>> lower = method_caller('lower')
    >>> lower('MyString')
    'mystring'
    """

    def invoke(target):
        bound = getattr(target, method_name)
        return bound(*args, **kwargs)

    return invoke
56
+
57
+
58
def once(func):
    """
    Decorate func so it's only ever called the first time.

    This decorator can ensure that an expensive or non-idempotent function
    will not be expensive on subsequent calls and is idempotent.

    >>> add_three = once(lambda a: a+3)
    >>> add_three(3)
    6
    >>> add_three(9)
    6

    Clear the stored value by deleting ``saved_result`` or calling
    ``reset()``:

    >>> del add_three.saved_result
    >>> add_three(9)
    12
    >>> add_three.reset()
    >>> add_three(-3)
    0
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return wrapper.saved_result
        except AttributeError:
            wrapper.saved_result = func(*args, **kwargs)
        return wrapper.saved_result

    # reset removes the stored value via the function's namespace,
    # raising KeyError if nothing has been saved yet.
    wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
    return wrapper
98
+
99
+
100
def method_cache(
    method: CallableT,
    cache_wrapper: Callable[
        [CallableT], CallableT
    ] = functools.lru_cache(),  # type: ignore[assignment]
) -> CallableT:
    """
    Wrap lru_cache to support storing the cache data in the object instances.

    Abstracts the common paradigm where the method explicitly saves an
    underscore-prefixed protected property on first call and returns that
    subsequently.

    >>> class MyClass:
    ...     calls = 0
    ...
    ...     @method_cache
    ...     def method(self, value):
    ...         self.calls += 1
    ...         return value

    >>> a = MyClass()
    >>> a.method(3)
    3
    >>> for x in range(75):
    ...     res = a.method(x)
    >>> a.calls
    75

    Note that the apparent behavior will be exactly like that of lru_cache
    except that the cache is stored on each instance, so values in one
    instance will not flush values from another, and when an instance is
    deleted, so are the cached values for that instance.

    >>> b = MyClass()
    >>> for x in range(35):
    ...     res = b.method(x)
    >>> b.calls
    35
    >>> a.method(0)
    0
    >>> a.calls
    75

    Note that if method had been decorated with ``functools.lru_cache()``,
    a.calls would have been 76 (due to the cached value of 0 having been
    flushed by the 'b' instance).

    Clear the cache with ``.cache_clear()``

    >>> a.method.cache_clear()

    Same for a method that hasn't yet been called.

    >>> c = MyClass()
    >>> c.method.cache_clear()

    Another cache wrapper may be supplied:

    >>> cache = functools.lru_cache(maxsize=2)
    >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
    >>> a = MyClass()
    >>> a.method2()
    3

    Caution - do not subsequently wrap the method with another decorator, such
    as ``@property``, which changes the semantics of the function.

    See also
    http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
    for another implementation and additional justification.
    """

    def wrapper(self: object, *args: object, **kwargs: object) -> object:
        # it's the first call, replace the method with a cached, bound method
        bound_method: CallableT = types.MethodType(  # type: ignore[assignment]
            method, self
        )
        cached_method = cache_wrapper(bound_method)
        # Shadow the class attribute with the per-instance cached method,
        # so subsequent lookups bypass this wrapper entirely.
        setattr(self, method.__name__, cached_method)
        return cached_method(*args, **kwargs)

    # Support cache clear even before cache has been created.
    wrapper.cache_clear = lambda: None  # type: ignore[attr-defined]

    # Special methods bypass the instance dict, so fall back to a proxy
    # implementation for those names.
    return (  # type: ignore[return-value]
        _special_method_cache(method, cache_wrapper) or wrapper
    )
188
+
189
+
190
def _special_method_cache(method, cache_wrapper):
    """
    Because Python treats special methods differently, it's not
    possible to use instance attributes to implement the cached
    methods.

    Instead, install the wrapper method under a different name
    and return a simple proxy to that wrapper.

    https://github.com/jaraco/jaraco.functools/issues/5
    """
    name = method.__name__
    special_names = '__getattr__', '__getitem__'
    if name not in special_names:
        # Not a handled special method; signal method_cache to use its
        # normal instance-attribute strategy by returning None.
        return

    wrapper_name = '__cached' + name

    def proxy(self, *args, **kwargs):
        if wrapper_name not in vars(self):
            # First call for this instance: build and store the cache.
            bound = types.MethodType(method, self)
            cache = cache_wrapper(bound)
            setattr(self, wrapper_name, cache)
        else:
            cache = getattr(self, wrapper_name)
        return cache(*args, **kwargs)

    return proxy
218
+
219
+
220
def apply(transform):
    """
    Decorate a function with a transform function that is
    invoked on results returned from the decorated function.

    >>> @apply(reversed)
    ... def get_numbers(start):
    ...     "doc for get_numbers"
    ...     return range(start, start+3)
    >>> list(get_numbers(4))
    [6, 5, 4]
    >>> get_numbers.__doc__
    'doc for get_numbers'
    """

    def wrap(func):
        transformed = compose(transform, func)
        return functools.wraps(func)(transformed)

    return wrap
239
+
240
+
241
def result_invoke(action):
    r"""
    Decorate a function with an action function that is
    invoked on the results returned from the decorated
    function (for its side-effect), then return the original
    result.

    >>> @result_invoke(print)
    ... def add_two(a, b):
    ...     return a + b
    >>> x = add_two(2, 3)
    5
    >>> x
    5
    """

    def wrap(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            outcome = func(*args, **kwargs)
            action(outcome)
            return outcome

        return wrapper

    return wrap
267
+
268
+
269
+ def call_aside(f, *args, **kwargs):
270
+ """
271
+ Call a function for its side effect after initialization.
272
+
273
+ >>> @call_aside
274
+ ... def func(): print("called")
275
+ called
276
+ >>> func()
277
+ called
278
+
279
+ Use functools.partial to pass parameters to the initial call
280
+
281
+ >>> @functools.partial(call_aside, name='bingo')
282
+ ... def func(name): print("called with", name)
283
+ called with bingo
284
+ """
285
+ f(*args, **kwargs)
286
+ return f
287
+
288
+
289
class Throttler:
    """
    Rate-limit a function (or other callable)
    """

    def __init__(self, func, max_rate=float('Inf')):
        # Unwrap an already-throttled callable so rates don't stack.
        if isinstance(func, Throttler):
            func = func.func
        self.func = func
        # max_rate: maximum permitted calls per second.
        self.max_rate = max_rate
        self.reset()

    def reset(self):
        # Timestamp of the last call; 0 allows an immediate first call.
        self.last_called = 0

    def __call__(self, *args, **kwargs):
        self._wait()
        return self.func(*args, **kwargs)

    def _wait(self):
        "ensure at least 1/max_rate seconds from last call"
        elapsed = time.time() - self.last_called
        must_wait = 1 / self.max_rate - elapsed
        time.sleep(max(0, must_wait))
        self.last_called = time.time()

    def __get__(self, obj, type=None):
        # Descriptor support: when used on a method, throttle the bound
        # call by running _wait before invoking the underlying function.
        return first_invoke(self._wait, functools.partial(self.func, obj))
317
+
318
+
319
def first_invoke(func1, func2):
    """
    Return a function that when invoked will invoke func1 without
    any parameters (for its side-effect) and then invoke func2
    with whatever parameters were passed, returning its result.
    """

    def combined(*args, **kwargs):
        func1()
        return func2(*args, **kwargs)

    return combined
331
+
332
+
333
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
    """
    Given a callable func, trap the indicated exceptions
    for up to 'retries' times, invoking cleanup on the
    exception. On the final attempt, allow any exceptions
    to propagate.
    """
    infinite = retries == float('inf')
    attempts = itertools.count() if infinite else range(retries)
    for _ in attempts:
        try:
            return func()
        except trap:
            cleanup()

    # Final attempt: any exception propagates to the caller.
    return func()
348
+
349
+
350
def retry(*r_args, **r_kwargs):
    """
    Decorator wrapper for retry_call. Accepts arguments to retry_call
    except func and then returns a decorator for the decorated function.

    Ex:

    >>> @retry(retries=3)
    ... def my_func(a, b):
    ...     "this is my funk"
    ...     print(a, b)
    >>> my_func.__doc__
    'this is my funk'
    """

    def decorate(func):
        @functools.wraps(func)
        def wrapper(*f_args, **f_kwargs):
            bound = functools.partial(func, *f_args, **f_kwargs)
            return retry_call(bound, *r_args, **r_kwargs)

        return wrapper

    return decorate
374
+
375
+
376
def print_yielded(func):
    """
    Convert a generator into a function that prints all yielded elements

    >>> @print_yielded
    ... def x():
    ...     yield 3; yield None
    >>> x()
    3
    None
    """

    @functools.wraps(func)
    def printer(*args, **kwargs):
        # Fix: the previous implementation referenced the bare name
        # 'more_itertools', but this module only binds 'pkg_resources'
        # (via 'import pkg_resources.extern.more_itertools'), so calling
        # the wrapper raised NameError. Iterate and print directly.
        for item in func(*args, **kwargs):
            print(item)

    return printer
390
+
391
+
392
def pass_none(func):
    """
    Wrap func so it's not called if its first param is None

    >>> print_text = pass_none(print)
    >>> print_text('text')
    text
    >>> print_text(None)
    """

    @functools.wraps(func)
    def guarded(param, *args, **kwargs):
        if param is None:
            return None
        return func(param, *args, **kwargs)

    return guarded
408
+
409
+
410
def assign_params(func, namespace):
    """
    Assign parameters from namespace where func solicits.

    >>> def func(x, y=3):
    ...     print(x, y)
    >>> assigned = assign_params(func, dict(x=2, z=4))
    >>> assigned()
    2 3

    The usual errors are raised if a function doesn't receive
    its required parameters:

    >>> assigned = assign_params(func, dict(y=3, z=4))
    >>> assigned()
    Traceback (most recent call last):
    TypeError: func() ...argument...

    It even works on methods:

    >>> class Handler:
    ...     def meth(self, arg):
    ...         print(arg)
    >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
    crystal
    """
    accepted = inspect.signature(func).parameters.keys()
    supplied = {name: namespace[name] for name in accepted if name in namespace}
    return functools.partial(func, **supplied)
440
+
441
+
442
def save_method_args(method):
    """
    Wrap a method such that when it is called, the args and kwargs are
    saved on the method.

    The arguments are stored on the instance (as ``_saved_<name>``),
    allowing different instances to save different args.

    >>> class MyClass:
    ...     @save_method_args
    ...     def method(self, a, b):
    ...         print(a, b)
    >>> my_ob = MyClass()
    >>> my_ob.method(1, 2)
    1 2
    >>> my_ob._saved_method.args
    (1, 2)
    >>> my_ob._saved_method.kwargs
    {}
    """
    args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')

    @functools.wraps(method)
    def recorder(self, *args, **kwargs):
        # Record the call on the instance before delegating.
        setattr(self, '_saved_' + method.__name__, args_and_kwargs(args, kwargs))
        return method(self, *args, **kwargs)

    return recorder
486
+
487
+
488
def except_(*exceptions, replace=None, use=None):
    """
    Replace the indicated exceptions, if raised, with the indicated
    literal replacement or evaluated expression (if present).

    >>> safe_int = except_(ValueError)(int)
    >>> safe_int('five')
    >>> safe_int('5')
    5

    Specify a literal replacement with ``replace``.

    >>> safe_int_r = except_(ValueError, replace=0)(int)
    >>> safe_int_r('five')
    0

    Provide an expression to ``use`` to pass through particular parameters.

    >>> safe_int_pt = except_(ValueError, use='args[0]')(int)
    >>> safe_int_pt('five')
    'five'

    """

    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exceptions:
                if use is None:
                    return replace
                # 'use' is evaluated here so the expression can refer to
                # the local names 'args' and 'kwargs'; an expression that
                # itself raises TypeError falls back to 'replace'.
                try:
                    return eval(use)
                except TypeError:
                    return replace

        return wrapper

    return decorate
hfenv/Lib/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py ADDED
@@ -0,0 +1,599 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import itertools
3
+ import textwrap
4
+ import functools
5
+
6
+ try:
7
+ from importlib.resources import files # type: ignore
8
+ except ImportError: # pragma: nocover
9
+ from pkg_resources.extern.importlib_resources import files # type: ignore
10
+
11
+ from pkg_resources.extern.jaraco.functools import compose, method_cache
12
+ from pkg_resources.extern.jaraco.context import ExceptionTrap
13
+
14
+
15
def substitution(old, new):
    """
    Return a function that will perform a substitution on a string
    """

    def substitute(s):
        return s.replace(old, new)

    return substitute
20
+
21
+
22
def multi_substitution(*substitutions):
    """
    Take a sequence of pairs specifying substitutions, and create
    a function that performs those substitutions.

    >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
    'baz'
    """
    # compose applies its last function first, so feed it the
    # substitutions reversed to apply them in the order given.
    steps = tuple(substitution(old, new) for old, new in substitutions)
    return compose(*reversed(steps))
35
+
36
+
37
class FoldedCase(str):
    """
    A case-insensitive string: ordering, equality, hashing,
    containment, ``index`` and ``split`` all ignore case, while the
    original text is retained.

    >>> s = FoldedCase('hello world')
    >>> s == 'Hello World'
    True
    >>> s != 'Hello World'
    False
    >>> s.index('O')
    4
    >>> s.split('O')
    ['hell', ' w', 'rld']
    >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
    ['alpha', 'Beta', 'GAMMA']
    >>> "hello" in FoldedCase("Hello World")
    True

    String inclusion with the FoldedCase on the left is still
    case-sensitive; use ``in_`` for a folded test:

    >>> FoldedCase('hello').in_('Hello World')
    True
    """

    def __lt__(self, other):
        return self.lower() < other.lower()

    def __gt__(self, other):
        return self.lower() > other.lower()

    def __eq__(self, other):
        return self.lower() == other.lower()

    def __ne__(self, other):
        return self.lower() != other.lower()

    def __hash__(self):
        return hash(self.lower())

    def __contains__(self, other):
        return other.lower() in super().lower()

    def in_(self, other):
        "Does self appear in other?"
        return self in FoldedCase(other)

    # lower() backs nearly every operation above, so cache it per instance.
    @method_cache
    def lower(self):
        return super().lower()

    def index(self, sub):
        return self.lower().index(sub.lower())

    def split(self, splitter=' ', maxsplit=0):
        pattern = re.compile(re.escape(splitter), re.I)
        return pattern.split(self, maxsplit)
130
+
131
+
132
+ # Python 3.8 compatibility
133
+ _unicode_trap = ExceptionTrap(UnicodeDecodeError)
134
+
135
+
136
@_unicode_trap.passes
def is_decodable(value):
    r"""
    Return True if the supplied value is decodable (using the default
    encoding).

    >>> is_decodable(b'\xff')
    False
    >>> is_decodable(b'\x32')
    True
    """
    # The ExceptionTrap.passes decorator turns the absence of a
    # UnicodeDecodeError into True; the body's return value is ignored.
    value.decode()
148
+
149
+
150
def is_binary(value):
    r"""
    Return True if the value appears to be binary (that is, it's a byte
    string and isn't decodable).

    >>> is_binary(b'\xff')
    True
    >>> is_binary('\xff')
    False
    """
    if not isinstance(value, bytes):
        return False
    return not is_decodable(value)
161
+
162
+
163
def trim(s):
    r"""
    Trim something like a docstring to remove the whitespace that
    is common due to indentation and formatting.

    >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
    'foo = bar\n\tbar = baz'
    """
    dedented = textwrap.dedent(s)
    return dedented.strip()
172
+
173
+
174
def wrap(s):
    """
    Wrap lines of text, retaining existing newlines as
    paragraph markers: each input line is filled independently and the
    results are joined with blank lines.
    """
    filled = ('\n'.join(textwrap.wrap(paragraph)) for paragraph in s.splitlines())
    return '\n\n'.join(filled)
205
+
206
+
207
def unwrap(s):
    r"""
    Given a multi-line string, return an unwrapped version: blank
    lines separate paragraphs; within a paragraph, newlines become
    spaces.
    """
    paragraphs = re.split(r'\n\n+', s)
    return '\n'.join(paragraph.replace('\n', ' ') for paragraph in paragraphs)
225
+
226
+
227
+
228
+
229
class Splitter(object):
    """
    A callable that splits strings using pre-set ``str.split``
    arguments for each call.

    >>> s = Splitter(',')
    >>> s('hello, world, this is your, master calling')
    ['hello', ' world', ' this is your', ' master calling']
    """

    def __init__(self, *args):
        self.args = args

    def __call__(self, s):
        return s.split(*self.args)
242
+
243
+
244
def indent(string, prefix=' ' * 4):
    """
    Prepend *prefix* (default: four spaces) to *string*.

    >>> indent('foo')
    '    foo'
    """
    return prefix + string
250
+
251
+
252
class WordSet(tuple):
    """
    The words of an identifier — camelCase, under_score, dashed, or
    space separated — with helpers to re-join them in any convention.

    >>> WordSet.parse("camelCase")
    ('camel', 'Case')
    >>> WordSet.parse("under_sep")
    ('under', 'sep')

    Acronyms are retained:

    >>> WordSet.parse("myABCClass")
    ('my', 'ABC', 'Class')
    >>> WordSet.parse("myABCClass").underscore_separated()
    'my_ABC_Class'
    >>> WordSet.parse('a-command').camel_case()
    'ACommand'
    >>> WordSet.parse('someIdentifier').lowered().space_separated()
    'some identifier'

    Slices return another WordSet:

    >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
    'out_of_context'
    >>> WordSet.parse('figured it out').headless_camel_case()
    'figuredItOut'
    """

    # A word is a capitalized/lowercase run, or an acronym: consecutive
    # capitals not immediately followed by a lowercase letter.
    _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')

    def capitalized(self):
        return WordSet(map(str.capitalize, self))

    def lowered(self):
        return WordSet(map(str.lower, self))

    def camel_case(self):
        return ''.join(self.capitalized())

    def headless_camel_case(self):
        words = iter(self)
        first = next(words).lower()
        return first + WordSet(words).camel_case()

    def underscore_separated(self):
        return '_'.join(self)

    def dash_separated(self):
        return '-'.join(self)

    def space_separated(self):
        return ' '.join(self)

    def trim_right(self, item):
        """
        Remove *item* from the end of the set, if present.

        >>> WordSet.parse('foo bar').trim_right('bar')
        ('foo',)
        >>> WordSet.parse('').trim_right('bar')
        ()
        """
        if self and self[-1] == item:
            return self[:-1]
        return self

    def trim_left(self, item):
        """
        Remove *item* from the beginning of the set, if present.

        >>> WordSet.parse('foo bar').trim_left('foo')
        ('bar',)
        """
        if self and self[0] == item:
            return self[1:]
        return self

    def trim(self, item):
        """
        Remove *item* from both ends of the set.

        >>> WordSet.parse('foo bar').trim('foo')
        ('bar',)
        """
        return self.trim_left(item).trim_right(item)

    def __getitem__(self, item):
        # Slicing returns another WordSet so chained helpers keep working.
        result = super(WordSet, self).__getitem__(item)
        return WordSet(result) if isinstance(item, slice) else result

    @classmethod
    def parse(cls, identifier):
        return WordSet(
            match.group(0) for match in cls._pattern.finditer(identifier)
        )

    @classmethod
    def from_class_name(cls, subject):
        return cls.parse(subject.__class__.__name__)
379
+
380
+
381
+ # for backward compatibility
382
+ words = WordSet.parse
383
+
384
+
385
+ def simple_html_strip(s):
386
+ r"""
387
+ Remove HTML from the string `s`.
388
+
389
+ >>> str(simple_html_strip(''))
390
+ ''
391
+
392
+ >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
393
+ A stormy day in paradise
394
+
395
+ >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
396
+ Somebody tell the truth.
397
+
398
+ >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
399
+ What about
400
+ multiple lines?
401
+ """
402
+ html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
403
+ texts = (match.group(3) or '' for match in html_stripper.finditer(s))
404
+ return ''.join(texts)
405
+
406
+
407
+ class SeparatedValues(str):
408
+ """
409
+ A string separated by a separator. Overrides __iter__ for getting
410
+ the values.
411
+
412
+ >>> list(SeparatedValues('a,b,c'))
413
+ ['a', 'b', 'c']
414
+
415
+ Whitespace is stripped and empty values are discarded.
416
+
417
+ >>> list(SeparatedValues(' a, b , c, '))
418
+ ['a', 'b', 'c']
419
+ """
420
+
421
+ separator = ','
422
+
423
+ def __iter__(self):
424
+ parts = self.split(self.separator)
425
+ return filter(None, (part.strip() for part in parts))
426
+
427
+
428
+ class Stripper:
429
+ r"""
430
+ Given a series of lines, find the common prefix and strip it from them.
431
+
432
+ >>> lines = [
433
+ ... 'abcdefg\n',
434
+ ... 'abc\n',
435
+ ... 'abcde\n',
436
+ ... ]
437
+ >>> res = Stripper.strip_prefix(lines)
438
+ >>> res.prefix
439
+ 'abc'
440
+ >>> list(res.lines)
441
+ ['defg\n', '\n', 'de\n']
442
+
443
+ If no prefix is common, nothing should be stripped.
444
+
445
+ >>> lines = [
446
+ ... 'abcd\n',
447
+ ... '1234\n',
448
+ ... ]
449
+ >>> res = Stripper.strip_prefix(lines)
450
+ >>> res.prefix = ''
451
+ >>> list(res.lines)
452
+ ['abcd\n', '1234\n']
453
+ """
454
+
455
+ def __init__(self, prefix, lines):
456
+ self.prefix = prefix
457
+ self.lines = map(self, lines)
458
+
459
+ @classmethod
460
+ def strip_prefix(cls, lines):
461
+ prefix_lines, lines = itertools.tee(lines)
462
+ prefix = functools.reduce(cls.common_prefix, prefix_lines)
463
+ return cls(prefix, lines)
464
+
465
+ def __call__(self, line):
466
+ if not self.prefix:
467
+ return line
468
+ null, prefix, rest = line.partition(self.prefix)
469
+ return rest
470
+
471
+ @staticmethod
472
+ def common_prefix(s1, s2):
473
+ """
474
+ Return the common prefix of two lines.
475
+ """
476
+ index = min(len(s1), len(s2))
477
+ while s1[:index] != s2[:index]:
478
+ index -= 1
479
+ return s1[:index]
480
+
481
+
482
+ def remove_prefix(text, prefix):
483
+ """
484
+ Remove the prefix from the text if it exists.
485
+
486
+ >>> remove_prefix('underwhelming performance', 'underwhelming ')
487
+ 'performance'
488
+
489
+ >>> remove_prefix('something special', 'sample')
490
+ 'something special'
491
+ """
492
+ null, prefix, rest = text.rpartition(prefix)
493
+ return rest
494
+
495
+
496
+ def remove_suffix(text, suffix):
497
+ """
498
+ Remove the suffix from the text if it exists.
499
+
500
+ >>> remove_suffix('name.git', '.git')
501
+ 'name'
502
+
503
+ >>> remove_suffix('something special', 'sample')
504
+ 'something special'
505
+ """
506
+ rest, suffix, null = text.partition(suffix)
507
+ return rest
508
+
509
+
510
+ def normalize_newlines(text):
511
+ r"""
512
+ Replace alternate newlines with the canonical newline.
513
+
514
+ >>> normalize_newlines('Lorem Ipsum\u2029')
515
+ 'Lorem Ipsum\n'
516
+ >>> normalize_newlines('Lorem Ipsum\r\n')
517
+ 'Lorem Ipsum\n'
518
+ >>> normalize_newlines('Lorem Ipsum\x85')
519
+ 'Lorem Ipsum\n'
520
+ """
521
+ newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
522
+ pattern = '|'.join(newlines)
523
+ return re.sub(pattern, '\n', text)
524
+
525
+
526
+ def _nonblank(str):
527
+ return str and not str.startswith('#')
528
+
529
+
530
+ @functools.singledispatch
531
+ def yield_lines(iterable):
532
+ r"""
533
+ Yield valid lines of a string or iterable.
534
+
535
+ >>> list(yield_lines(''))
536
+ []
537
+ >>> list(yield_lines(['foo', 'bar']))
538
+ ['foo', 'bar']
539
+ >>> list(yield_lines('foo\nbar'))
540
+ ['foo', 'bar']
541
+ >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
542
+ ['foo', 'baz #comment']
543
+ >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
544
+ ['foo', 'bar', 'baz', 'bing']
545
+ """
546
+ return itertools.chain.from_iterable(map(yield_lines, iterable))
547
+
548
+
549
+ @yield_lines.register(str)
550
+ def _(text):
551
+ return filter(_nonblank, map(str.strip, text.splitlines()))
552
+
553
+
554
+ def drop_comment(line):
555
+ """
556
+ Drop comments.
557
+
558
+ >>> drop_comment('foo # bar')
559
+ 'foo'
560
+
561
+ A hash without a space may be in a URL.
562
+
563
+ >>> drop_comment('http://example.com/foo#bar')
564
+ 'http://example.com/foo#bar'
565
+ """
566
+ return line.partition(' #')[0]
567
+
568
+
569
+ def join_continuation(lines):
570
+ r"""
571
+ Join lines continued by a trailing backslash.
572
+
573
+ >>> list(join_continuation(['foo \\', 'bar', 'baz']))
574
+ ['foobar', 'baz']
575
+ >>> list(join_continuation(['foo \\', 'bar', 'baz']))
576
+ ['foobar', 'baz']
577
+ >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
578
+ ['foobarbaz']
579
+
580
+ Not sure why, but...
581
+ The character preceeding the backslash is also elided.
582
+
583
+ >>> list(join_continuation(['goo\\', 'dly']))
584
+ ['godly']
585
+
586
+ A terrible idea, but...
587
+ If no line is available to continue, suppress the lines.
588
+
589
+ >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
590
+ ['foo']
591
+ """
592
+ lines = iter(lines)
593
+ for item in lines:
594
+ while item.endswith('\\'):
595
+ try:
596
+ item = item[:-2].strip() + next(lines)
597
+ except StopIteration:
598
+ return
599
+ yield item
hfenv/Lib/site-packages/pkg_resources/_vendor/more_itertools/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .more import * # noqa
2
+ from .recipes import * # noqa
3
+
4
+ __version__ = '8.12.0'
hfenv/Lib/site-packages/pkg_resources/_vendor/more_itertools/more.py ADDED
The diff for this file is too large to render. See raw diff
 
hfenv/Lib/site-packages/pkg_resources/_vendor/more_itertools/recipes.py ADDED
@@ -0,0 +1,698 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Imported from the recipes section of the itertools documentation.
2
+
3
+ All functions taken from the recipes section of the itertools library docs
4
+ [1]_.
5
+ Some backward-compatible usability improvements have been made.
6
+
7
+ .. [1] http://docs.python.org/library/itertools.html#recipes
8
+
9
+ """
10
+ import warnings
11
+ from collections import deque
12
+ from itertools import (
13
+ chain,
14
+ combinations,
15
+ count,
16
+ cycle,
17
+ groupby,
18
+ islice,
19
+ repeat,
20
+ starmap,
21
+ tee,
22
+ zip_longest,
23
+ )
24
+ import operator
25
+ from random import randrange, sample, choice
26
+
27
+ __all__ = [
28
+ 'all_equal',
29
+ 'before_and_after',
30
+ 'consume',
31
+ 'convolve',
32
+ 'dotproduct',
33
+ 'first_true',
34
+ 'flatten',
35
+ 'grouper',
36
+ 'iter_except',
37
+ 'ncycles',
38
+ 'nth',
39
+ 'nth_combination',
40
+ 'padnone',
41
+ 'pad_none',
42
+ 'pairwise',
43
+ 'partition',
44
+ 'powerset',
45
+ 'prepend',
46
+ 'quantify',
47
+ 'random_combination_with_replacement',
48
+ 'random_combination',
49
+ 'random_permutation',
50
+ 'random_product',
51
+ 'repeatfunc',
52
+ 'roundrobin',
53
+ 'sliding_window',
54
+ 'tabulate',
55
+ 'tail',
56
+ 'take',
57
+ 'triplewise',
58
+ 'unique_everseen',
59
+ 'unique_justseen',
60
+ ]
61
+
62
+
63
+ def take(n, iterable):
64
+ """Return first *n* items of the iterable as a list.
65
+
66
+ >>> take(3, range(10))
67
+ [0, 1, 2]
68
+
69
+ If there are fewer than *n* items in the iterable, all of them are
70
+ returned.
71
+
72
+ >>> take(10, range(3))
73
+ [0, 1, 2]
74
+
75
+ """
76
+ return list(islice(iterable, n))
77
+
78
+
79
+ def tabulate(function, start=0):
80
+ """Return an iterator over the results of ``func(start)``,
81
+ ``func(start + 1)``, ``func(start + 2)``...
82
+
83
+ *func* should be a function that accepts one integer argument.
84
+
85
+ If *start* is not specified it defaults to 0. It will be incremented each
86
+ time the iterator is advanced.
87
+
88
+ >>> square = lambda x: x ** 2
89
+ >>> iterator = tabulate(square, -3)
90
+ >>> take(4, iterator)
91
+ [9, 4, 1, 0]
92
+
93
+ """
94
+ return map(function, count(start))
95
+
96
+
97
+ def tail(n, iterable):
98
+ """Return an iterator over the last *n* items of *iterable*.
99
+
100
+ >>> t = tail(3, 'ABCDEFG')
101
+ >>> list(t)
102
+ ['E', 'F', 'G']
103
+
104
+ """
105
+ return iter(deque(iterable, maxlen=n))
106
+
107
+
108
+ def consume(iterator, n=None):
109
+ """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
110
+ entirely.
111
+
112
+ Efficiently exhausts an iterator without returning values. Defaults to
113
+ consuming the whole iterator, but an optional second argument may be
114
+ provided to limit consumption.
115
+
116
+ >>> i = (x for x in range(10))
117
+ >>> next(i)
118
+ 0
119
+ >>> consume(i, 3)
120
+ >>> next(i)
121
+ 4
122
+ >>> consume(i)
123
+ >>> next(i)
124
+ Traceback (most recent call last):
125
+ File "<stdin>", line 1, in <module>
126
+ StopIteration
127
+
128
+ If the iterator has fewer items remaining than the provided limit, the
129
+ whole iterator will be consumed.
130
+
131
+ >>> i = (x for x in range(3))
132
+ >>> consume(i, 5)
133
+ >>> next(i)
134
+ Traceback (most recent call last):
135
+ File "<stdin>", line 1, in <module>
136
+ StopIteration
137
+
138
+ """
139
+ # Use functions that consume iterators at C speed.
140
+ if n is None:
141
+ # feed the entire iterator into a zero-length deque
142
+ deque(iterator, maxlen=0)
143
+ else:
144
+ # advance to the empty slice starting at position n
145
+ next(islice(iterator, n, n), None)
146
+
147
+
148
+ def nth(iterable, n, default=None):
149
+ """Returns the nth item or a default value.
150
+
151
+ >>> l = range(10)
152
+ >>> nth(l, 3)
153
+ 3
154
+ >>> nth(l, 20, "zebra")
155
+ 'zebra'
156
+
157
+ """
158
+ return next(islice(iterable, n, None), default)
159
+
160
+
161
+ def all_equal(iterable):
162
+ """
163
+ Returns ``True`` if all the elements are equal to each other.
164
+
165
+ >>> all_equal('aaaa')
166
+ True
167
+ >>> all_equal('aaab')
168
+ False
169
+
170
+ """
171
+ g = groupby(iterable)
172
+ return next(g, True) and not next(g, False)
173
+
174
+
175
+ def quantify(iterable, pred=bool):
176
+ """Return the how many times the predicate is true.
177
+
178
+ >>> quantify([True, False, True])
179
+ 2
180
+
181
+ """
182
+ return sum(map(pred, iterable))
183
+
184
+
185
+ def pad_none(iterable):
186
+ """Returns the sequence of elements and then returns ``None`` indefinitely.
187
+
188
+ >>> take(5, pad_none(range(3)))
189
+ [0, 1, 2, None, None]
190
+
191
+ Useful for emulating the behavior of the built-in :func:`map` function.
192
+
193
+ See also :func:`padded`.
194
+
195
+ """
196
+ return chain(iterable, repeat(None))
197
+
198
+
199
+ padnone = pad_none
200
+
201
+
202
+ def ncycles(iterable, n):
203
+ """Returns the sequence elements *n* times
204
+
205
+ >>> list(ncycles(["a", "b"], 3))
206
+ ['a', 'b', 'a', 'b', 'a', 'b']
207
+
208
+ """
209
+ return chain.from_iterable(repeat(tuple(iterable), n))
210
+
211
+
212
+ def dotproduct(vec1, vec2):
213
+ """Returns the dot product of the two iterables.
214
+
215
+ >>> dotproduct([10, 10], [20, 20])
216
+ 400
217
+
218
+ """
219
+ return sum(map(operator.mul, vec1, vec2))
220
+
221
+
222
+ def flatten(listOfLists):
223
+ """Return an iterator flattening one level of nesting in a list of lists.
224
+
225
+ >>> list(flatten([[0, 1], [2, 3]]))
226
+ [0, 1, 2, 3]
227
+
228
+ See also :func:`collapse`, which can flatten multiple levels of nesting.
229
+
230
+ """
231
+ return chain.from_iterable(listOfLists)
232
+
233
+
234
+ def repeatfunc(func, times=None, *args):
235
+ """Call *func* with *args* repeatedly, returning an iterable over the
236
+ results.
237
+
238
+ If *times* is specified, the iterable will terminate after that many
239
+ repetitions:
240
+
241
+ >>> from operator import add
242
+ >>> times = 4
243
+ >>> args = 3, 5
244
+ >>> list(repeatfunc(add, times, *args))
245
+ [8, 8, 8, 8]
246
+
247
+ If *times* is ``None`` the iterable will not terminate:
248
+
249
+ >>> from random import randrange
250
+ >>> times = None
251
+ >>> args = 1, 11
252
+ >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
253
+ [2, 4, 8, 1, 8, 4]
254
+
255
+ """
256
+ if times is None:
257
+ return starmap(func, repeat(args))
258
+ return starmap(func, repeat(args, times))
259
+
260
+
261
+ def _pairwise(iterable):
262
+ """Returns an iterator of paired items, overlapping, from the original
263
+
264
+ >>> take(4, pairwise(count()))
265
+ [(0, 1), (1, 2), (2, 3), (3, 4)]
266
+
267
+ On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
268
+
269
+ """
270
+ a, b = tee(iterable)
271
+ next(b, None)
272
+ yield from zip(a, b)
273
+
274
+
275
+ try:
276
+ from itertools import pairwise as itertools_pairwise
277
+ except ImportError:
278
+ pairwise = _pairwise
279
+ else:
280
+
281
+ def pairwise(iterable):
282
+ yield from itertools_pairwise(iterable)
283
+
284
+ pairwise.__doc__ = _pairwise.__doc__
285
+
286
+
287
+ def grouper(iterable, n, fillvalue=None):
288
+ """Collect data into fixed-length chunks or blocks.
289
+
290
+ >>> list(grouper('ABCDEFG', 3, 'x'))
291
+ [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
292
+
293
+ """
294
+ if isinstance(iterable, int):
295
+ warnings.warn(
296
+ "grouper expects iterable as first parameter", DeprecationWarning
297
+ )
298
+ n, iterable = iterable, n
299
+ args = [iter(iterable)] * n
300
+ return zip_longest(fillvalue=fillvalue, *args)
301
+
302
+
303
+ def roundrobin(*iterables):
304
+ """Yields an item from each iterable, alternating between them.
305
+
306
+ >>> list(roundrobin('ABC', 'D', 'EF'))
307
+ ['A', 'D', 'E', 'B', 'F', 'C']
308
+
309
+ This function produces the same output as :func:`interleave_longest`, but
310
+ may perform better for some inputs (in particular when the number of
311
+ iterables is small).
312
+
313
+ """
314
+ # Recipe credited to George Sakkis
315
+ pending = len(iterables)
316
+ nexts = cycle(iter(it).__next__ for it in iterables)
317
+ while pending:
318
+ try:
319
+ for next in nexts:
320
+ yield next()
321
+ except StopIteration:
322
+ pending -= 1
323
+ nexts = cycle(islice(nexts, pending))
324
+
325
+
326
+ def partition(pred, iterable):
327
+ """
328
+ Returns a 2-tuple of iterables derived from the input iterable.
329
+ The first yields the items that have ``pred(item) == False``.
330
+ The second yields the items that have ``pred(item) == True``.
331
+
332
+ >>> is_odd = lambda x: x % 2 != 0
333
+ >>> iterable = range(10)
334
+ >>> even_items, odd_items = partition(is_odd, iterable)
335
+ >>> list(even_items), list(odd_items)
336
+ ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
337
+
338
+ If *pred* is None, :func:`bool` is used.
339
+
340
+ >>> iterable = [0, 1, False, True, '', ' ']
341
+ >>> false_items, true_items = partition(None, iterable)
342
+ >>> list(false_items), list(true_items)
343
+ ([0, False, ''], [1, True, ' '])
344
+
345
+ """
346
+ if pred is None:
347
+ pred = bool
348
+
349
+ evaluations = ((pred(x), x) for x in iterable)
350
+ t1, t2 = tee(evaluations)
351
+ return (
352
+ (x for (cond, x) in t1 if not cond),
353
+ (x for (cond, x) in t2 if cond),
354
+ )
355
+
356
+
357
+ def powerset(iterable):
358
+ """Yields all possible subsets of the iterable.
359
+
360
+ >>> list(powerset([1, 2, 3]))
361
+ [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
362
+
363
+ :func:`powerset` will operate on iterables that aren't :class:`set`
364
+ instances, so repeated elements in the input will produce repeated elements
365
+ in the output. Use :func:`unique_everseen` on the input to avoid generating
366
+ duplicates:
367
+
368
+ >>> seq = [1, 1, 0]
369
+ >>> list(powerset(seq))
370
+ [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
371
+ >>> from more_itertools import unique_everseen
372
+ >>> list(powerset(unique_everseen(seq)))
373
+ [(), (1,), (0,), (1, 0)]
374
+
375
+ """
376
+ s = list(iterable)
377
+ return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
378
+
379
+
380
+ def unique_everseen(iterable, key=None):
381
+ """
382
+ Yield unique elements, preserving order.
383
+
384
+ >>> list(unique_everseen('AAAABBBCCDAABBB'))
385
+ ['A', 'B', 'C', 'D']
386
+ >>> list(unique_everseen('ABBCcAD', str.lower))
387
+ ['A', 'B', 'C', 'D']
388
+
389
+ Sequences with a mix of hashable and unhashable items can be used.
390
+ The function will be slower (i.e., `O(n^2)`) for unhashable items.
391
+
392
+ Remember that ``list`` objects are unhashable - you can use the *key*
393
+ parameter to transform the list to a tuple (which is hashable) to
394
+ avoid a slowdown.
395
+
396
+ >>> iterable = ([1, 2], [2, 3], [1, 2])
397
+ >>> list(unique_everseen(iterable)) # Slow
398
+ [[1, 2], [2, 3]]
399
+ >>> list(unique_everseen(iterable, key=tuple)) # Faster
400
+ [[1, 2], [2, 3]]
401
+
402
+ Similary, you may want to convert unhashable ``set`` objects with
403
+ ``key=frozenset``. For ``dict`` objects,
404
+ ``key=lambda x: frozenset(x.items())`` can be used.
405
+
406
+ """
407
+ seenset = set()
408
+ seenset_add = seenset.add
409
+ seenlist = []
410
+ seenlist_add = seenlist.append
411
+ use_key = key is not None
412
+
413
+ for element in iterable:
414
+ k = key(element) if use_key else element
415
+ try:
416
+ if k not in seenset:
417
+ seenset_add(k)
418
+ yield element
419
+ except TypeError:
420
+ if k not in seenlist:
421
+ seenlist_add(k)
422
+ yield element
423
+
424
+
425
+ def unique_justseen(iterable, key=None):
426
+ """Yields elements in order, ignoring serial duplicates
427
+
428
+ >>> list(unique_justseen('AAAABBBCCDAABBB'))
429
+ ['A', 'B', 'C', 'D', 'A', 'B']
430
+ >>> list(unique_justseen('ABBCcAD', str.lower))
431
+ ['A', 'B', 'C', 'A', 'D']
432
+
433
+ """
434
+ return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
435
+
436
+
437
+ def iter_except(func, exception, first=None):
438
+ """Yields results from a function repeatedly until an exception is raised.
439
+
440
+ Converts a call-until-exception interface to an iterator interface.
441
+ Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
442
+ to end the loop.
443
+
444
+ >>> l = [0, 1, 2]
445
+ >>> list(iter_except(l.pop, IndexError))
446
+ [2, 1, 0]
447
+
448
+ Multiple exceptions can be specified as a stopping condition:
449
+
450
+ >>> l = [1, 2, 3, '...', 4, 5, 6]
451
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
452
+ [7, 6, 5]
453
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
454
+ [4, 3, 2]
455
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
456
+ []
457
+
458
+ """
459
+ try:
460
+ if first is not None:
461
+ yield first()
462
+ while 1:
463
+ yield func()
464
+ except exception:
465
+ pass
466
+
467
+
468
+ def first_true(iterable, default=None, pred=None):
469
+ """
470
+ Returns the first true value in the iterable.
471
+
472
+ If no true value is found, returns *default*
473
+
474
+ If *pred* is not None, returns the first item for which
475
+ ``pred(item) == True`` .
476
+
477
+ >>> first_true(range(10))
478
+ 1
479
+ >>> first_true(range(10), pred=lambda x: x > 5)
480
+ 6
481
+ >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
482
+ 'missing'
483
+
484
+ """
485
+ return next(filter(pred, iterable), default)
486
+
487
+
488
+ def random_product(*args, repeat=1):
489
+ """Draw an item at random from each of the input iterables.
490
+
491
+ >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
492
+ ('c', 3, 'Z')
493
+
494
+ If *repeat* is provided as a keyword argument, that many items will be
495
+ drawn from each iterable.
496
+
497
+ >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
498
+ ('a', 2, 'd', 3)
499
+
500
+ This equivalent to taking a random selection from
501
+ ``itertools.product(*args, **kwarg)``.
502
+
503
+ """
504
+ pools = [tuple(pool) for pool in args] * repeat
505
+ return tuple(choice(pool) for pool in pools)
506
+
507
+
508
+ def random_permutation(iterable, r=None):
509
+ """Return a random *r* length permutation of the elements in *iterable*.
510
+
511
+ If *r* is not specified or is ``None``, then *r* defaults to the length of
512
+ *iterable*.
513
+
514
+ >>> random_permutation(range(5)) # doctest:+SKIP
515
+ (3, 4, 0, 1, 2)
516
+
517
+ This equivalent to taking a random selection from
518
+ ``itertools.permutations(iterable, r)``.
519
+
520
+ """
521
+ pool = tuple(iterable)
522
+ r = len(pool) if r is None else r
523
+ return tuple(sample(pool, r))
524
+
525
+
526
+ def random_combination(iterable, r):
527
+ """Return a random *r* length subsequence of the elements in *iterable*.
528
+
529
+ >>> random_combination(range(5), 3) # doctest:+SKIP
530
+ (2, 3, 4)
531
+
532
+ This equivalent to taking a random selection from
533
+ ``itertools.combinations(iterable, r)``.
534
+
535
+ """
536
+ pool = tuple(iterable)
537
+ n = len(pool)
538
+ indices = sorted(sample(range(n), r))
539
+ return tuple(pool[i] for i in indices)
540
+
541
+
542
+ def random_combination_with_replacement(iterable, r):
543
+ """Return a random *r* length subsequence of elements in *iterable*,
544
+ allowing individual elements to be repeated.
545
+
546
+ >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
547
+ (0, 0, 1, 2, 2)
548
+
549
+ This equivalent to taking a random selection from
550
+ ``itertools.combinations_with_replacement(iterable, r)``.
551
+
552
+ """
553
+ pool = tuple(iterable)
554
+ n = len(pool)
555
+ indices = sorted(randrange(n) for i in range(r))
556
+ return tuple(pool[i] for i in indices)
557
+
558
+
559
+ def nth_combination(iterable, r, index):
560
+ """Equivalent to ``list(combinations(iterable, r))[index]``.
561
+
562
+ The subsequences of *iterable* that are of length *r* can be ordered
563
+ lexicographically. :func:`nth_combination` computes the subsequence at
564
+ sort position *index* directly, without computing the previous
565
+ subsequences.
566
+
567
+ >>> nth_combination(range(5), 3, 5)
568
+ (0, 3, 4)
569
+
570
+ ``ValueError`` will be raised If *r* is negative or greater than the length
571
+ of *iterable*.
572
+ ``IndexError`` will be raised if the given *index* is invalid.
573
+ """
574
+ pool = tuple(iterable)
575
+ n = len(pool)
576
+ if (r < 0) or (r > n):
577
+ raise ValueError
578
+
579
+ c = 1
580
+ k = min(r, n - r)
581
+ for i in range(1, k + 1):
582
+ c = c * (n - k + i) // i
583
+
584
+ if index < 0:
585
+ index += c
586
+
587
+ if (index < 0) or (index >= c):
588
+ raise IndexError
589
+
590
+ result = []
591
+ while r:
592
+ c, n, r = c * r // n, n - 1, r - 1
593
+ while index >= c:
594
+ index -= c
595
+ c, n = c * (n - r) // n, n - 1
596
+ result.append(pool[-1 - n])
597
+
598
+ return tuple(result)
599
+
600
+
601
+ def prepend(value, iterator):
602
+ """Yield *value*, followed by the elements in *iterator*.
603
+
604
+ >>> value = '0'
605
+ >>> iterator = ['1', '2', '3']
606
+ >>> list(prepend(value, iterator))
607
+ ['0', '1', '2', '3']
608
+
609
+ To prepend multiple values, see :func:`itertools.chain`
610
+ or :func:`value_chain`.
611
+
612
+ """
613
+ return chain([value], iterator)
614
+
615
+
616
+ def convolve(signal, kernel):
617
+ """Convolve the iterable *signal* with the iterable *kernel*.
618
+
619
+ >>> signal = (1, 2, 3, 4, 5)
620
+ >>> kernel = [3, 2, 1]
621
+ >>> list(convolve(signal, kernel))
622
+ [3, 8, 14, 20, 26, 14, 5]
623
+
624
+ Note: the input arguments are not interchangeable, as the *kernel*
625
+ is immediately consumed and stored.
626
+
627
+ """
628
+ kernel = tuple(kernel)[::-1]
629
+ n = len(kernel)
630
+ window = deque([0], maxlen=n) * n
631
+ for x in chain(signal, repeat(0, n - 1)):
632
+ window.append(x)
633
+ yield sum(map(operator.mul, kernel, window))
634
+
635
+
636
+ def before_and_after(predicate, it):
637
+ """A variant of :func:`takewhile` that allows complete access to the
638
+ remainder of the iterator.
639
+
640
+ >>> it = iter('ABCdEfGhI')
641
+ >>> all_upper, remainder = before_and_after(str.isupper, it)
642
+ >>> ''.join(all_upper)
643
+ 'ABC'
644
+ >>> ''.join(remainder) # takewhile() would lose the 'd'
645
+ 'dEfGhI'
646
+
647
+ Note that the first iterator must be fully consumed before the second
648
+ iterator can generate valid results.
649
+ """
650
+ it = iter(it)
651
+ transition = []
652
+
653
+ def true_iterator():
654
+ for elem in it:
655
+ if predicate(elem):
656
+ yield elem
657
+ else:
658
+ transition.append(elem)
659
+ return
660
+
661
+ def remainder_iterator():
662
+ yield from transition
663
+ yield from it
664
+
665
+ return true_iterator(), remainder_iterator()
666
+
667
+
668
+ def triplewise(iterable):
669
+ """Return overlapping triplets from *iterable*.
670
+
671
+ >>> list(triplewise('ABCDE'))
672
+ [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
673
+
674
+ """
675
+ for (a, _), (b, c) in pairwise(pairwise(iterable)):
676
+ yield a, b, c
677
+
678
+
679
+ def sliding_window(iterable, n):
680
+ """Return a sliding window of width *n* over *iterable*.
681
+
682
+ >>> list(sliding_window(range(6), 4))
683
+ [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
684
+
685
+ If *iterable* has fewer than *n* items, then nothing is yielded:
686
+
687
+ >>> list(sliding_window(range(3), 4))
688
+ []
689
+
690
+ For a variant with more features, see :func:`windowed`.
691
+ """
692
+ it = iter(iterable)
693
+ window = deque(islice(it, n), maxlen=n)
694
+ if len(window) == n:
695
+ yield tuple(window)
696
+ for x in it:
697
+ window.append(x)
698
+ yield tuple(window)
hfenv/Lib/site-packages/pkg_resources/_vendor/packaging/markers.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ import operator
6
+ import os
7
+ import platform
8
+ import sys
9
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
10
+
11
+ from pkg_resources.extern.pyparsing import ( # noqa: N817
12
+ Forward,
13
+ Group,
14
+ Literal as L,
15
+ ParseException,
16
+ ParseResults,
17
+ QuotedString,
18
+ ZeroOrMore,
19
+ stringEnd,
20
+ stringStart,
21
+ )
22
+
23
+ from .specifiers import InvalidSpecifier, Specifier
24
+
25
+ __all__ = [
26
+ "InvalidMarker",
27
+ "UndefinedComparison",
28
+ "UndefinedEnvironmentName",
29
+ "Marker",
30
+ "default_environment",
31
+ ]
32
+
33
+ Operator = Callable[[str, str], bool]
34
+
35
+
36
+ class InvalidMarker(ValueError):
37
+ """
38
+ An invalid marker was found, users should refer to PEP 508.
39
+ """
40
+
41
+
42
+ class UndefinedComparison(ValueError):
43
+ """
44
+ An invalid operation was attempted on a value that doesn't support it.
45
+ """
46
+
47
+
48
+ class UndefinedEnvironmentName(ValueError):
49
+ """
50
+ A name was attempted to be used that does not exist inside of the
51
+ environment.
52
+ """
53
+
54
+
55
+ class Node:
56
+ def __init__(self, value: Any) -> None:
57
+ self.value = value
58
+
59
+ def __str__(self) -> str:
60
+ return str(self.value)
61
+
62
+ def __repr__(self) -> str:
63
+ return f"<{self.__class__.__name__}('{self}')>"
64
+
65
+ def serialize(self) -> str:
66
+ raise NotImplementedError
67
+
68
+
69
+ class Variable(Node):
70
+ def serialize(self) -> str:
71
+ return str(self)
72
+
73
+
74
+ class Value(Node):
75
+ def serialize(self) -> str:
76
+ return f'"{self}"'
77
+
78
+
79
+ class Op(Node):
80
+ def serialize(self) -> str:
81
+ return str(self)
82
+
83
+
84
+ VARIABLE = (
85
+ L("implementation_version")
86
+ | L("platform_python_implementation")
87
+ | L("implementation_name")
88
+ | L("python_full_version")
89
+ | L("platform_release")
90
+ | L("platform_version")
91
+ | L("platform_machine")
92
+ | L("platform_system")
93
+ | L("python_version")
94
+ | L("sys_platform")
95
+ | L("os_name")
96
+ | L("os.name") # PEP-345
97
+ | L("sys.platform") # PEP-345
98
+ | L("platform.version") # PEP-345
99
+ | L("platform.machine") # PEP-345
100
+ | L("platform.python_implementation") # PEP-345
101
+ | L("python_implementation") # undocumented setuptools legacy
102
+ | L("extra") # PEP-508
103
+ )
104
+ ALIASES = {
105
+ "os.name": "os_name",
106
+ "sys.platform": "sys_platform",
107
+ "platform.version": "platform_version",
108
+ "platform.machine": "platform_machine",
109
+ "platform.python_implementation": "platform_python_implementation",
110
+ "python_implementation": "platform_python_implementation",
111
+ }
112
+ VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
113
+
114
+ VERSION_CMP = (
115
+ L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
116
+ )
117
+
118
+ MARKER_OP = VERSION_CMP | L("not in") | L("in")
119
+ MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
120
+
121
+ MARKER_VALUE = QuotedString("'") | QuotedString('"')
122
+ MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
123
+
124
+ BOOLOP = L("and") | L("or")
125
+
126
+ MARKER_VAR = VARIABLE | MARKER_VALUE
127
+
128
+ MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
129
+ MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
130
+
131
+ LPAREN = L("(").suppress()
132
+ RPAREN = L(")").suppress()
133
+
134
+ MARKER_EXPR = Forward()
135
+ MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
136
+ MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
137
+
138
+ MARKER = stringStart + MARKER_EXPR + stringEnd
139
+
140
+
141
+ def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
142
+ if isinstance(results, ParseResults):
143
+ return [_coerce_parse_result(i) for i in results]
144
+ else:
145
+ return results
146
+
147
+
148
+ def _format_marker(
149
+ marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
150
+ ) -> str:
151
+
152
+ assert isinstance(marker, (list, tuple, str))
153
+
154
+ # Sometimes we have a structure like [[...]] which is a single item list
155
+ # where the single item is itself it's own list. In that case we want skip
156
+ # the rest of this function so that we don't get extraneous () on the
157
+ # outside.
158
+ if (
159
+ isinstance(marker, list)
160
+ and len(marker) == 1
161
+ and isinstance(marker[0], (list, tuple))
162
+ ):
163
+ return _format_marker(marker[0])
164
+
165
+ if isinstance(marker, list):
166
+ inner = (_format_marker(m, first=False) for m in marker)
167
+ if first:
168
+ return " ".join(inner)
169
+ else:
170
+ return "(" + " ".join(inner) + ")"
171
+ elif isinstance(marker, tuple):
172
+ return " ".join([m.serialize() for m in marker])
173
+ else:
174
+ return marker
175
+
176
+
177
+ _operators: Dict[str, Operator] = {
178
+ "in": lambda lhs, rhs: lhs in rhs,
179
+ "not in": lambda lhs, rhs: lhs not in rhs,
180
+ "<": operator.lt,
181
+ "<=": operator.le,
182
+ "==": operator.eq,
183
+ "!=": operator.ne,
184
+ ">=": operator.ge,
185
+ ">": operator.gt,
186
+ }
187
+
188
+
189
+ def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
190
+ try:
191
+ spec = Specifier("".join([op.serialize(), rhs]))
192
+ except InvalidSpecifier:
193
+ pass
194
+ else:
195
+ return spec.contains(lhs)
196
+
197
+ oper: Optional[Operator] = _operators.get(op.serialize())
198
+ if oper is None:
199
+ raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
200
+
201
+ return oper(lhs, rhs)
202
+
203
+
204
+ class Undefined:
205
+ pass
206
+
207
+
208
+ _undefined = Undefined()
209
+
210
+
211
+ def _get_env(environment: Dict[str, str], name: str) -> str:
212
+ value: Union[str, Undefined] = environment.get(name, _undefined)
213
+
214
+ if isinstance(value, Undefined):
215
+ raise UndefinedEnvironmentName(
216
+ f"{name!r} does not exist in evaluation environment."
217
+ )
218
+
219
+ return value
220
+
221
+
222
+ def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
223
+ groups: List[List[bool]] = [[]]
224
+
225
+ for marker in markers:
226
+ assert isinstance(marker, (list, tuple, str))
227
+
228
+ if isinstance(marker, list):
229
+ groups[-1].append(_evaluate_markers(marker, environment))
230
+ elif isinstance(marker, tuple):
231
+ lhs, op, rhs = marker
232
+
233
+ if isinstance(lhs, Variable):
234
+ lhs_value = _get_env(environment, lhs.value)
235
+ rhs_value = rhs.value
236
+ else:
237
+ lhs_value = lhs.value
238
+ rhs_value = _get_env(environment, rhs.value)
239
+
240
+ groups[-1].append(_eval_op(lhs_value, op, rhs_value))
241
+ else:
242
+ assert marker in ["and", "or"]
243
+ if marker == "or":
244
+ groups.append([])
245
+
246
+ return any(all(item) for item in groups)
247
+
248
+
249
+ def format_full_version(info: "sys._version_info") -> str:
250
+ version = "{0.major}.{0.minor}.{0.micro}".format(info)
251
+ kind = info.releaselevel
252
+ if kind != "final":
253
+ version += kind[0] + str(info.serial)
254
+ return version
255
+
256
+
257
+ def default_environment() -> Dict[str, str]:
258
+ iver = format_full_version(sys.implementation.version)
259
+ implementation_name = sys.implementation.name
260
+ return {
261
+ "implementation_name": implementation_name,
262
+ "implementation_version": iver,
263
+ "os_name": os.name,
264
+ "platform_machine": platform.machine(),
265
+ "platform_release": platform.release(),
266
+ "platform_system": platform.system(),
267
+ "platform_version": platform.version(),
268
+ "python_full_version": platform.python_version(),
269
+ "platform_python_implementation": platform.python_implementation(),
270
+ "python_version": ".".join(platform.python_version_tuple()[:2]),
271
+ "sys_platform": sys.platform,
272
+ }
273
+
274
+
275
+ class Marker:
276
+ def __init__(self, marker: str) -> None:
277
+ try:
278
+ self._markers = _coerce_parse_result(MARKER.parseString(marker))
279
+ except ParseException as e:
280
+ raise InvalidMarker(
281
+ f"Invalid marker: {marker!r}, parse error at "
282
+ f"{marker[e.loc : e.loc + 8]!r}"
283
+ )
284
+
285
+ def __str__(self) -> str:
286
+ return _format_marker(self._markers)
287
+
288
+ def __repr__(self) -> str:
289
+ return f"<Marker('{self}')>"
290
+
291
+ def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
292
+ """Evaluate a marker.
293
+
294
+ Return the boolean from evaluating the given marker against the
295
+ environment. environment is an optional argument to override all or
296
+ part of the determined environment.
297
+
298
+ The environment is determined from the current Python process.
299
+ """
300
+ current_environment = default_environment()
301
+ if environment is not None:
302
+ current_environment.update(environment)
303
+
304
+ return _evaluate_markers(self._markers, current_environment)
hfenv/Lib/site-packages/pkg_resources/_vendor/packaging/requirements.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ import re
6
+ import string
7
+ import urllib.parse
8
+ from typing import List, Optional as TOptional, Set
9
+
10
+ from pkg_resources.extern.pyparsing import ( # noqa
11
+ Combine,
12
+ Literal as L,
13
+ Optional,
14
+ ParseException,
15
+ Regex,
16
+ Word,
17
+ ZeroOrMore,
18
+ originalTextFor,
19
+ stringEnd,
20
+ stringStart,
21
+ )
22
+
23
+ from .markers import MARKER_EXPR, Marker
24
+ from .specifiers import LegacySpecifier, Specifier, SpecifierSet
25
+
26
+
27
+ class InvalidRequirement(ValueError):
28
+ """
29
+ An invalid requirement was found, users should refer to PEP 508.
30
+ """
31
+
32
+
33
+ ALPHANUM = Word(string.ascii_letters + string.digits)
34
+
35
+ LBRACKET = L("[").suppress()
36
+ RBRACKET = L("]").suppress()
37
+ LPAREN = L("(").suppress()
38
+ RPAREN = L(")").suppress()
39
+ COMMA = L(",").suppress()
40
+ SEMICOLON = L(";").suppress()
41
+ AT = L("@").suppress()
42
+
43
+ PUNCTUATION = Word("-_.")
44
+ IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
45
+ IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
46
+
47
+ NAME = IDENTIFIER("name")
48
+ EXTRA = IDENTIFIER
49
+
50
+ URI = Regex(r"[^ ]+")("url")
51
+ URL = AT + URI
52
+
53
+ EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
54
+ EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
55
+
56
+ VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
57
+ VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
58
+
59
+ VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
60
+ VERSION_MANY = Combine(
61
+ VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
62
+ )("_raw_spec")
63
+ _VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
64
+ _VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
65
+
66
+ VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
67
+ VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
68
+
69
+ MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
70
+ MARKER_EXPR.setParseAction(
71
+ lambda s, l, t: Marker(s[t._original_start : t._original_end])
72
+ )
73
+ MARKER_SEPARATOR = SEMICOLON
74
+ MARKER = MARKER_SEPARATOR + MARKER_EXPR
75
+
76
+ VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
77
+ URL_AND_MARKER = URL + Optional(MARKER)
78
+
79
+ NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
80
+
81
+ REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
82
+ # pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
83
+ # issue #104
84
+ REQUIREMENT.parseString("x[]")
85
+
86
+
87
+ class Requirement:
88
+ """Parse a requirement.
89
+
90
+ Parse a given requirement string into its parts, such as name, specifier,
91
+ URL, and extras. Raises InvalidRequirement on a badly-formed requirement
92
+ string.
93
+ """
94
+
95
+ # TODO: Can we test whether something is contained within a requirement?
96
+ # If so how do we do that? Do we need to test against the _name_ of
97
+ # the thing as well as the version? What about the markers?
98
+ # TODO: Can we normalize the name and extra name?
99
+
100
+ def __init__(self, requirement_string: str) -> None:
101
+ try:
102
+ req = REQUIREMENT.parseString(requirement_string)
103
+ except ParseException as e:
104
+ raise InvalidRequirement(
105
+ f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
106
+ )
107
+
108
+ self.name: str = req.name
109
+ if req.url:
110
+ parsed_url = urllib.parse.urlparse(req.url)
111
+ if parsed_url.scheme == "file":
112
+ if urllib.parse.urlunparse(parsed_url) != req.url:
113
+ raise InvalidRequirement("Invalid URL given")
114
+ elif not (parsed_url.scheme and parsed_url.netloc) or (
115
+ not parsed_url.scheme and not parsed_url.netloc
116
+ ):
117
+ raise InvalidRequirement(f"Invalid URL: {req.url}")
118
+ self.url: TOptional[str] = req.url
119
+ else:
120
+ self.url = None
121
+ self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
122
+ self.specifier: SpecifierSet = SpecifierSet(req.specifier)
123
+ self.marker: TOptional[Marker] = req.marker if req.marker else None
124
+
125
+ def __str__(self) -> str:
126
+ parts: List[str] = [self.name]
127
+
128
+ if self.extras:
129
+ formatted_extras = ",".join(sorted(self.extras))
130
+ parts.append(f"[{formatted_extras}]")
131
+
132
+ if self.specifier:
133
+ parts.append(str(self.specifier))
134
+
135
+ if self.url:
136
+ parts.append(f"@ {self.url}")
137
+ if self.marker:
138
+ parts.append(" ")
139
+
140
+ if self.marker:
141
+ parts.append(f"; {self.marker}")
142
+
143
+ return "".join(parts)
144
+
145
+ def __repr__(self) -> str:
146
+ return f"<Requirement('{self}')>"
hfenv/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py ADDED
@@ -0,0 +1,802 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ import abc
6
+ import functools
7
+ import itertools
8
+ import re
9
+ import warnings
10
+ from typing import (
11
+ Callable,
12
+ Dict,
13
+ Iterable,
14
+ Iterator,
15
+ List,
16
+ Optional,
17
+ Pattern,
18
+ Set,
19
+ Tuple,
20
+ TypeVar,
21
+ Union,
22
+ )
23
+
24
+ from .utils import canonicalize_version
25
+ from .version import LegacyVersion, Version, parse
26
+
27
+ ParsedVersion = Union[Version, LegacyVersion]
28
+ UnparsedVersion = Union[Version, LegacyVersion, str]
29
+ VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
30
+ CallableOperator = Callable[[ParsedVersion, str], bool]
31
+
32
+
33
+ class InvalidSpecifier(ValueError):
34
+ """
35
+ An invalid specifier was found, users should refer to PEP 440.
36
+ """
37
+
38
+
39
+ class BaseSpecifier(metaclass=abc.ABCMeta):
40
+ @abc.abstractmethod
41
+ def __str__(self) -> str:
42
+ """
43
+ Returns the str representation of this Specifier like object. This
44
+ should be representative of the Specifier itself.
45
+ """
46
+
47
+ @abc.abstractmethod
48
+ def __hash__(self) -> int:
49
+ """
50
+ Returns a hash value for this Specifier like object.
51
+ """
52
+
53
+ @abc.abstractmethod
54
+ def __eq__(self, other: object) -> bool:
55
+ """
56
+ Returns a boolean representing whether or not the two Specifier like
57
+ objects are equal.
58
+ """
59
+
60
+ @abc.abstractproperty
61
+ def prereleases(self) -> Optional[bool]:
62
+ """
63
+ Returns whether or not pre-releases as a whole are allowed by this
64
+ specifier.
65
+ """
66
+
67
+ @prereleases.setter
68
+ def prereleases(self, value: bool) -> None:
69
+ """
70
+ Sets whether or not pre-releases as a whole are allowed by this
71
+ specifier.
72
+ """
73
+
74
+ @abc.abstractmethod
75
+ def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
76
+ """
77
+ Determines if the given item is contained within this specifier.
78
+ """
79
+
80
+ @abc.abstractmethod
81
+ def filter(
82
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
83
+ ) -> Iterable[VersionTypeVar]:
84
+ """
85
+ Takes an iterable of items and filters them so that only items which
86
+ are contained within this specifier are allowed in it.
87
+ """
88
+
89
+
90
+ class _IndividualSpecifier(BaseSpecifier):
91
+
92
+ _operators: Dict[str, str] = {}
93
+ _regex: Pattern[str]
94
+
95
+ def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
96
+ match = self._regex.search(spec)
97
+ if not match:
98
+ raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
99
+
100
+ self._spec: Tuple[str, str] = (
101
+ match.group("operator").strip(),
102
+ match.group("version").strip(),
103
+ )
104
+
105
+ # Store whether or not this Specifier should accept prereleases
106
+ self._prereleases = prereleases
107
+
108
+ def __repr__(self) -> str:
109
+ pre = (
110
+ f", prereleases={self.prereleases!r}"
111
+ if self._prereleases is not None
112
+ else ""
113
+ )
114
+
115
+ return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
116
+
117
+ def __str__(self) -> str:
118
+ return "{}{}".format(*self._spec)
119
+
120
+ @property
121
+ def _canonical_spec(self) -> Tuple[str, str]:
122
+ return self._spec[0], canonicalize_version(self._spec[1])
123
+
124
+ def __hash__(self) -> int:
125
+ return hash(self._canonical_spec)
126
+
127
+ def __eq__(self, other: object) -> bool:
128
+ if isinstance(other, str):
129
+ try:
130
+ other = self.__class__(str(other))
131
+ except InvalidSpecifier:
132
+ return NotImplemented
133
+ elif not isinstance(other, self.__class__):
134
+ return NotImplemented
135
+
136
+ return self._canonical_spec == other._canonical_spec
137
+
138
+ def _get_operator(self, op: str) -> CallableOperator:
139
+ operator_callable: CallableOperator = getattr(
140
+ self, f"_compare_{self._operators[op]}"
141
+ )
142
+ return operator_callable
143
+
144
+ def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
145
+ if not isinstance(version, (LegacyVersion, Version)):
146
+ version = parse(version)
147
+ return version
148
+
149
+ @property
150
+ def operator(self) -> str:
151
+ return self._spec[0]
152
+
153
+ @property
154
+ def version(self) -> str:
155
+ return self._spec[1]
156
+
157
+ @property
158
+ def prereleases(self) -> Optional[bool]:
159
+ return self._prereleases
160
+
161
+ @prereleases.setter
162
+ def prereleases(self, value: bool) -> None:
163
+ self._prereleases = value
164
+
165
+ def __contains__(self, item: str) -> bool:
166
+ return self.contains(item)
167
+
168
+ def contains(
169
+ self, item: UnparsedVersion, prereleases: Optional[bool] = None
170
+ ) -> bool:
171
+
172
+ # Determine if prereleases are to be allowed or not.
173
+ if prereleases is None:
174
+ prereleases = self.prereleases
175
+
176
+ # Normalize item to a Version or LegacyVersion, this allows us to have
177
+ # a shortcut for ``"2.0" in Specifier(">=2")
178
+ normalized_item = self._coerce_version(item)
179
+
180
+ # Determine if we should be supporting prereleases in this specifier
181
+ # or not, if we do not support prereleases than we can short circuit
182
+ # logic if this version is a prereleases.
183
+ if normalized_item.is_prerelease and not prereleases:
184
+ return False
185
+
186
+ # Actually do the comparison to determine if this item is contained
187
+ # within this Specifier or not.
188
+ operator_callable: CallableOperator = self._get_operator(self.operator)
189
+ return operator_callable(normalized_item, self.version)
190
+
191
+ def filter(
192
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
193
+ ) -> Iterable[VersionTypeVar]:
194
+
195
+ yielded = False
196
+ found_prereleases = []
197
+
198
+ kw = {"prereleases": prereleases if prereleases is not None else True}
199
+
200
+ # Attempt to iterate over all the values in the iterable and if any of
201
+ # them match, yield them.
202
+ for version in iterable:
203
+ parsed_version = self._coerce_version(version)
204
+
205
+ if self.contains(parsed_version, **kw):
206
+ # If our version is a prerelease, and we were not set to allow
207
+ # prereleases, then we'll store it for later in case nothing
208
+ # else matches this specifier.
209
+ if parsed_version.is_prerelease and not (
210
+ prereleases or self.prereleases
211
+ ):
212
+ found_prereleases.append(version)
213
+ # Either this is not a prerelease, or we should have been
214
+ # accepting prereleases from the beginning.
215
+ else:
216
+ yielded = True
217
+ yield version
218
+
219
+ # Now that we've iterated over everything, determine if we've yielded
220
+ # any values, and if we have not and we have any prereleases stored up
221
+ # then we will go ahead and yield the prereleases.
222
+ if not yielded and found_prereleases:
223
+ for version in found_prereleases:
224
+ yield version
225
+
226
+
227
+ class LegacySpecifier(_IndividualSpecifier):
228
+
229
+ _regex_str = r"""
230
+ (?P<operator>(==|!=|<=|>=|<|>))
231
+ \s*
232
+ (?P<version>
233
+ [^,;\s)]* # Since this is a "legacy" specifier, and the version
234
+ # string can be just about anything, we match everything
235
+ # except for whitespace, a semi-colon for marker support,
236
+ # a closing paren since versions can be enclosed in
237
+ # them, and a comma since it's a version separator.
238
+ )
239
+ """
240
+
241
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
242
+
243
+ _operators = {
244
+ "==": "equal",
245
+ "!=": "not_equal",
246
+ "<=": "less_than_equal",
247
+ ">=": "greater_than_equal",
248
+ "<": "less_than",
249
+ ">": "greater_than",
250
+ }
251
+
252
+ def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
253
+ super().__init__(spec, prereleases)
254
+
255
+ warnings.warn(
256
+ "Creating a LegacyVersion has been deprecated and will be "
257
+ "removed in the next major release",
258
+ DeprecationWarning,
259
+ )
260
+
261
+ def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
262
+ if not isinstance(version, LegacyVersion):
263
+ version = LegacyVersion(str(version))
264
+ return version
265
+
266
+ def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
267
+ return prospective == self._coerce_version(spec)
268
+
269
+ def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
270
+ return prospective != self._coerce_version(spec)
271
+
272
+ def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
273
+ return prospective <= self._coerce_version(spec)
274
+
275
+ def _compare_greater_than_equal(
276
+ self, prospective: LegacyVersion, spec: str
277
+ ) -> bool:
278
+ return prospective >= self._coerce_version(spec)
279
+
280
+ def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
281
+ return prospective < self._coerce_version(spec)
282
+
283
+ def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
284
+ return prospective > self._coerce_version(spec)
285
+
286
+
287
+ def _require_version_compare(
288
+ fn: Callable[["Specifier", ParsedVersion, str], bool]
289
+ ) -> Callable[["Specifier", ParsedVersion, str], bool]:
290
+ @functools.wraps(fn)
291
+ def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
292
+ if not isinstance(prospective, Version):
293
+ return False
294
+ return fn(self, prospective, spec)
295
+
296
+ return wrapped
297
+
298
+
299
+ class Specifier(_IndividualSpecifier):
300
+
301
+ _regex_str = r"""
302
+ (?P<operator>(~=|==|!=|<=|>=|<|>|===))
303
+ (?P<version>
304
+ (?:
305
+ # The identity operators allow for an escape hatch that will
306
+ # do an exact string match of the version you wish to install.
307
+ # This will not be parsed by PEP 440 and we cannot determine
308
+ # any semantic meaning from it. This operator is discouraged
309
+ # but included entirely as an escape hatch.
310
+ (?<====) # Only match for the identity operator
311
+ \s*
312
+ [^\s]* # We just match everything, except for whitespace
313
+ # since we are only testing for strict identity.
314
+ )
315
+ |
316
+ (?:
317
+ # The (non)equality operators allow for wild card and local
318
+ # versions to be specified so we have to define these two
319
+ # operators separately to enable that.
320
+ (?<===|!=) # Only match for equals and not equals
321
+
322
+ \s*
323
+ v?
324
+ (?:[0-9]+!)? # epoch
325
+ [0-9]+(?:\.[0-9]+)* # release
326
+ (?: # pre release
327
+ [-_\.]?
328
+ (a|b|c|rc|alpha|beta|pre|preview)
329
+ [-_\.]?
330
+ [0-9]*
331
+ )?
332
+ (?: # post release
333
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
334
+ )?
335
+
336
+ # You cannot use a wild card and a dev or local version
337
+ # together so group them with a | and make them optional.
338
+ (?:
339
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
340
+ (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
341
+ |
342
+ \.\* # Wild card syntax of .*
343
+ )?
344
+ )
345
+ |
346
+ (?:
347
+ # The compatible operator requires at least two digits in the
348
+ # release segment.
349
+ (?<=~=) # Only match for the compatible operator
350
+
351
+ \s*
352
+ v?
353
+ (?:[0-9]+!)? # epoch
354
+ [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
355
+ (?: # pre release
356
+ [-_\.]?
357
+ (a|b|c|rc|alpha|beta|pre|preview)
358
+ [-_\.]?
359
+ [0-9]*
360
+ )?
361
+ (?: # post release
362
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
363
+ )?
364
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
365
+ )
366
+ |
367
+ (?:
368
+ # All other operators only allow a sub set of what the
369
+ # (non)equality operators do. Specifically they do not allow
370
+ # local versions to be specified nor do they allow the prefix
371
+ # matching wild cards.
372
+ (?<!==|!=|~=) # We have special cases for these
373
+ # operators so we want to make sure they
374
+ # don't match here.
375
+
376
+ \s*
377
+ v?
378
+ (?:[0-9]+!)? # epoch
379
+ [0-9]+(?:\.[0-9]+)* # release
380
+ (?: # pre release
381
+ [-_\.]?
382
+ (a|b|c|rc|alpha|beta|pre|preview)
383
+ [-_\.]?
384
+ [0-9]*
385
+ )?
386
+ (?: # post release
387
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
388
+ )?
389
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
390
+ )
391
+ )
392
+ """
393
+
394
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
395
+
396
+ _operators = {
397
+ "~=": "compatible",
398
+ "==": "equal",
399
+ "!=": "not_equal",
400
+ "<=": "less_than_equal",
401
+ ">=": "greater_than_equal",
402
+ "<": "less_than",
403
+ ">": "greater_than",
404
+ "===": "arbitrary",
405
+ }
406
+
407
+ @_require_version_compare
408
+ def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
409
+
410
+ # Compatible releases have an equivalent combination of >= and ==. That
411
+ # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
412
+ # implement this in terms of the other specifiers instead of
413
+ # implementing it ourselves. The only thing we need to do is construct
414
+ # the other specifiers.
415
+
416
+ # We want everything but the last item in the version, but we want to
417
+ # ignore suffix segments.
418
+ prefix = ".".join(
419
+ list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
420
+ )
421
+
422
+ # Add the prefix notation to the end of our string
423
+ prefix += ".*"
424
+
425
+ return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
426
+ prospective, prefix
427
+ )
428
+
429
+ @_require_version_compare
430
+ def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
431
+
432
+ # We need special logic to handle prefix matching
433
+ if spec.endswith(".*"):
434
+ # In the case of prefix matching we want to ignore local segment.
435
+ prospective = Version(prospective.public)
436
+ # Split the spec out by dots, and pretend that there is an implicit
437
+ # dot in between a release segment and a pre-release segment.
438
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .*
439
+
440
+ # Split the prospective version out by dots, and pretend that there
441
+ # is an implicit dot in between a release segment and a pre-release
442
+ # segment.
443
+ split_prospective = _version_split(str(prospective))
444
+
445
+ # Shorten the prospective version to be the same length as the spec
446
+ # so that we can determine if the specifier is a prefix of the
447
+ # prospective version or not.
448
+ shortened_prospective = split_prospective[: len(split_spec)]
449
+
450
+ # Pad out our two sides with zeros so that they both equal the same
451
+ # length.
452
+ padded_spec, padded_prospective = _pad_version(
453
+ split_spec, shortened_prospective
454
+ )
455
+
456
+ return padded_prospective == padded_spec
457
+ else:
458
+ # Convert our spec string into a Version
459
+ spec_version = Version(spec)
460
+
461
+ # If the specifier does not have a local segment, then we want to
462
+ # act as if the prospective version also does not have a local
463
+ # segment.
464
+ if not spec_version.local:
465
+ prospective = Version(prospective.public)
466
+
467
+ return prospective == spec_version
468
+
469
+ @_require_version_compare
470
+ def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
471
+ return not self._compare_equal(prospective, spec)
472
+
473
+ @_require_version_compare
474
+ def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
475
+
476
+ # NB: Local version identifiers are NOT permitted in the version
477
+ # specifier, so local version labels can be universally removed from
478
+ # the prospective version.
479
+ return Version(prospective.public) <= Version(spec)
480
+
481
+ @_require_version_compare
482
+ def _compare_greater_than_equal(
483
+ self, prospective: ParsedVersion, spec: str
484
+ ) -> bool:
485
+
486
+ # NB: Local version identifiers are NOT permitted in the version
487
+ # specifier, so local version labels can be universally removed from
488
+ # the prospective version.
489
+ return Version(prospective.public) >= Version(spec)
490
+
491
+ @_require_version_compare
492
+ def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
493
+
494
+ # Convert our spec to a Version instance, since we'll want to work with
495
+ # it as a version.
496
+ spec = Version(spec_str)
497
+
498
+ # Check to see if the prospective version is less than the spec
499
+ # version. If it's not we can short circuit and just return False now
500
+ # instead of doing extra unneeded work.
501
+ if not prospective < spec:
502
+ return False
503
+
504
+ # This special case is here so that, unless the specifier itself
505
+ # includes is a pre-release version, that we do not accept pre-release
506
+ # versions for the version mentioned in the specifier (e.g. <3.1 should
507
+ # not match 3.1.dev0, but should match 3.0.dev0).
508
+ if not spec.is_prerelease and prospective.is_prerelease:
509
+ if Version(prospective.base_version) == Version(spec.base_version):
510
+ return False
511
+
512
+ # If we've gotten to here, it means that prospective version is both
513
+ # less than the spec version *and* it's not a pre-release of the same
514
+ # version in the spec.
515
+ return True
516
+
517
+ @_require_version_compare
518
+ def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
519
+
520
+ # Convert our spec to a Version instance, since we'll want to work with
521
+ # it as a version.
522
+ spec = Version(spec_str)
523
+
524
+ # Check to see if the prospective version is greater than the spec
525
+ # version. If it's not we can short circuit and just return False now
526
+ # instead of doing extra unneeded work.
527
+ if not prospective > spec:
528
+ return False
529
+
530
+ # This special case is here so that, unless the specifier itself
531
+ # includes is a post-release version, that we do not accept
532
+ # post-release versions for the version mentioned in the specifier
533
+ # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
534
+ if not spec.is_postrelease and prospective.is_postrelease:
535
+ if Version(prospective.base_version) == Version(spec.base_version):
536
+ return False
537
+
538
+ # Ensure that we do not allow a local version of the version mentioned
539
+ # in the specifier, which is technically greater than, to match.
540
+ if prospective.local is not None:
541
+ if Version(prospective.base_version) == Version(spec.base_version):
542
+ return False
543
+
544
+ # If we've gotten to here, it means that prospective version is both
545
+ # greater than the spec version *and* it's not a pre-release of the
546
+ # same version in the spec.
547
+ return True
548
+
549
+ def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
550
+ return str(prospective).lower() == str(spec).lower()
551
+
552
+ @property
553
+ def prereleases(self) -> bool:
554
+
555
+ # If there is an explicit prereleases set for this, then we'll just
556
+ # blindly use that.
557
+ if self._prereleases is not None:
558
+ return self._prereleases
559
+
560
+ # Look at all of our specifiers and determine if they are inclusive
561
+ # operators, and if they are if they are including an explicit
562
+ # prerelease.
563
+ operator, version = self._spec
564
+ if operator in ["==", ">=", "<=", "~=", "==="]:
565
+ # The == specifier can include a trailing .*, if it does we
566
+ # want to remove before parsing.
567
+ if operator == "==" and version.endswith(".*"):
568
+ version = version[:-2]
569
+
570
+ # Parse the version, and if it is a pre-release than this
571
+ # specifier allows pre-releases.
572
+ if parse(version).is_prerelease:
573
+ return True
574
+
575
+ return False
576
+
577
    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        # Explicitly force (or clear) pre-release acceptance for this specifier.
        self._prereleases = value
580
+
581
+
582
+ _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
583
+
584
+
585
+ def _version_split(version: str) -> List[str]:
586
+ result: List[str] = []
587
+ for item in version.split("."):
588
+ match = _prefix_regex.search(item)
589
+ if match:
590
+ result.extend(match.groups())
591
+ else:
592
+ result.append(item)
593
+ return result
594
+
595
+
596
+ def _is_not_suffix(segment: str) -> bool:
597
+ return not any(
598
+ segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
599
+ )
600
+
601
+
602
+ def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
603
+ left_split, right_split = [], []
604
+
605
+ # Get the release segment of our versions
606
+ left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
607
+ right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
608
+
609
+ # Get the rest of our versions
610
+ left_split.append(left[len(left_split[0]) :])
611
+ right_split.append(right[len(right_split[0]) :])
612
+
613
+ # Insert our padding
614
+ left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
615
+ right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
616
+
617
+ return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
618
+
619
+
620
class SpecifierSet(BaseSpecifier):
    """A set of version specifiers (e.g. ``">=1.0,<2.0,!=1.5"``).

    A version is contained in the set only when it satisfies *every*
    individual specifier; an empty set matches every version.
    """

    def __init__(
        self, specifiers: str = "", prereleases: Optional[bool] = None
    ) -> None:
        """Parse a comma-separated *specifiers* string.

        :param specifiers: e.g. ``">=1.0, !=1.5"``; may be empty.
        :param prereleases: explicit override for pre-release matching;
            ``None`` defers the decision to the individual specifiers.
        """
        # Split on , to break each individual specifier into it's own item, and
        # strip each item to remove leading/trailing whitespace.
        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Parse each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
        parsed: Set[_IndividualSpecifier] = set()
        for specifier in split_specifiers:
            try:
                parsed.add(Specifier(specifier))
            except InvalidSpecifier:
                parsed.add(LegacySpecifier(specifier))

        # Turn our parsed specifiers into a frozen set and save them for later.
        self._specs = frozenset(parsed)

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    def __repr__(self) -> str:
        """Unambiguous representation, including any prereleases override."""
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return f"<SpecifierSet({str(self)!r}{pre})>"

    def __str__(self) -> str:
        """Canonical comma-joined form; specifiers are sorted for stability."""
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self) -> int:
        # Hash mirrors __eq__: both are based solely on the frozen spec set.
        return hash(self._specs)

    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
        """Return the intersection (logical AND) of two specifier sets.

        :raises ValueError: when the two sets carry conflicting explicit
            prereleases overrides (one True, the other False).
        """
        if isinstance(other, str):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        # The prereleases override survives only when at most one side has
        # an opinion, or both sides agree.
        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other: object) -> bool:
        """Equality is based only on the set of specifiers, not overrides."""
        if isinstance(other, (str, _IndividualSpecifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __len__(self) -> int:
        """Number of individual specifiers in the set."""
        return len(self._specs)

    def __iter__(self) -> Iterator[_IndividualSpecifier]:
        """Iterate over the individual specifiers (order is unspecified)."""
        return iter(self._specs)

    @property
    def prereleases(self) -> Optional[bool]:
        """Whether the set allows pre-releases (``None`` means undecided)."""

        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        # Explicitly force (or clear) pre-release acceptance for the set.
        self._prereleases = value

    def __contains__(self, item: UnparsedVersion) -> bool:
        # `version in specifier_set` delegates to contains() with defaults.
        return self.contains(item)

    def contains(
        self, item: UnparsedVersion, prereleases: Optional[bool] = None
    ) -> bool:
        """Return True if *item* satisfies every specifier in the set.

        :param item: a version string or already-parsed version object.
        :param prereleases: per-call override of pre-release acceptance;
            ``None`` uses the set's own policy.
        """

        # Ensure that our item is a Version or LegacyVersion instance.
        if not isinstance(item, (LegacyVersion, Version)):
            item = parse(item)

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        # We simply dispatch to the underlying specs here to make sure that the
        # given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        # will always return True, this is an explicit design decision.
        return all(s.contains(item, prereleases=prereleases) for s in self._specs)

    def filter(
        self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
    ) -> Iterable[VersionTypeVar]:
        """Filter *iterable*, keeping items that satisfy every specifier.

        With an empty set, pre-releases and legacy versions are dropped
        unless only pre-releases exist (or *prereleases* forces them on).
        """

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iterable
        # If we do not have any specifiers, then we need to have a rough filter
        # which will filter out any pre-releases, unless there are no final
        # releases, and which will filter out LegacyVersion in general.
        else:
            filtered: List[VersionTypeVar] = []
            found_prereleases: List[VersionTypeVar] = []

            item: UnparsedVersion
            parsed_version: Union[Version, LegacyVersion]

            for item in iterable:
                # Ensure that we have some kind of Version class for this item.
                if not isinstance(item, (LegacyVersion, Version)):
                    parsed_version = parse(item)
                else:
                    parsed_version = item

                # Filter out any item which is parsed as a LegacyVersion
                if isinstance(parsed_version, LegacyVersion):
                    continue

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return found_prereleases

            return filtered