prasb committed on
Commit
4a97ea7
·
verified ·
1 Parent(s): f0d9c1e

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-38.pyc +0 -0
  2. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-38.pyc +0 -0
  3. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-38.pyc +0 -0
  4. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-38.pyc +0 -0
  5. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/main.cpython-38.pyc +0 -0
  6. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-38.pyc +0 -0
  7. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/parser.cpython-38.pyc +0 -0
  8. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-38.pyc +0 -0
  9. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-38.pyc +0 -0
  10. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-38.pyc +0 -0
  11. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/__pycache__/base.cpython-38.pyc +0 -0
  12. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/base.py +141 -0
  13. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py +597 -0
  14. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/requirements.py +166 -0
  15. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/resolver.py +317 -0
  16. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-38.pyc +0 -0
  17. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/__pycache__/typing_extensions.cpython-38.pyc +0 -0
  18. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/__pycache__/zipp.cpython-38.pyc +0 -0
  19. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py +36 -0
  20. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py +170 -0
  21. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_common.py +207 -0
  22. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py +108 -0
  23. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py +35 -0
  24. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py +120 -0
  25. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/abc.py +170 -0
  26. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/py.typed +0 -0
  27. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/readers.py +120 -0
  28. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/simple.py +106 -0
  29. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__init__.py +6 -0
  30. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/__init__.cpython-38.pyc +0 -0
  31. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/recipes.cpython-38.pyc +0 -0
  32. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/recipes.py +930 -0
  33. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__init__.py +15 -0
  34. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-38.pyc +0 -0
  35. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/_elffile.cpython-38.pyc +0 -0
  36. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-38.pyc +0 -0
  37. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-38.pyc +0 -0
  38. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/_tokenizer.cpython-38.pyc +0 -0
  39. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/metadata.cpython-38.pyc +0 -0
  40. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-38.pyc +0 -0
  41. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-38.pyc +0 -0
  42. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-38.pyc +0 -0
  43. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-38.pyc +0 -0
  44. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_elffile.py +108 -0
  45. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_manylinux.py +240 -0
  46. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_musllinux.py +80 -0
  47. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_parser.py +353 -0
  48. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_structures.py +61 -0
  49. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_tokenizer.py +192 -0
  50. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/markers.py +252 -0
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (231 Bytes). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-38.pyc ADDED
Binary file (5.35 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-38.pyc ADDED
Binary file (23.6 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/command_context.cpython-38.pyc ADDED
Binary file (1.25 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/main.cpython-38.pyc ADDED
Binary file (1.46 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-38.pyc ADDED
Binary file (2.97 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/parser.cpython-38.pyc ADDED
Binary file (9.87 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/progress_bars.cpython-38.pyc ADDED
Binary file (1.84 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/req_command.cpython-38.pyc ADDED
Binary file (13.1 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/cli/__pycache__/spinners.cpython-38.pyc ADDED
Binary file (4.91 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/__pycache__/base.cpython-38.pyc ADDED
Binary file (1.01 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/base.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import FrozenSet, Iterable, Optional, Tuple, Union
2
+
3
+ from pip._vendor.packaging.specifiers import SpecifierSet
4
+ from pip._vendor.packaging.utils import NormalizedName
5
+ from pip._vendor.packaging.version import LegacyVersion, Version
6
+
7
+ from pip._internal.models.link import Link, links_equivalent
8
+ from pip._internal.req.req_install import InstallRequirement
9
+ from pip._internal.utils.hashes import Hashes
10
+
11
# A (candidate, install-requirement) pair returned by Requirement.get_candidate_lookup().
# NOTE(review): which side is populated appears to depend on whether the
# requirement maps directly to a candidate — confirm against the factory code.
CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]]
# Any version object the resolver may encounter (PEP 440 or legacy-style).
CandidateVersion = Union[LegacyVersion, Version]
13
+
14
+
15
def format_name(project: NormalizedName, extras: FrozenSet[NormalizedName]) -> str:
    """Render a project name plus extras as ``project[a,b]``.

    When *extras* is empty the bare project name is returned; otherwise the
    extras are sorted for a deterministic result.
    """
    if not extras:
        return project
    return "{}[{}]".format(project, ",".join(sorted(extras)))
20
+
21
+
22
class Constraint:
    """Aggregated constraints (version specifier, hashes, pinned links)
    applying to a single project name.
    """

    def __init__(
        self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link]
    ) -> None:
        self.specifier = specifier
        self.hashes = hashes
        self.links = links

    @classmethod
    def empty(cls) -> "Constraint":
        """Return a constraint that allows everything."""
        return cls(SpecifierSet(), Hashes(), frozenset())

    @classmethod
    def from_ireq(cls, ireq: InstallRequirement) -> "Constraint":
        """Build a constraint from a single InstallRequirement."""
        if ireq.link:
            pinned_links = frozenset([ireq.link])
        else:
            pinned_links = frozenset()
        return cls(ireq.specifier, ireq.hashes(trust_internet=False), pinned_links)

    def __bool__(self) -> bool:
        # Truthy when any component actually constrains something.
        return any((self.specifier, self.hashes, self.links))

    def __and__(self, other: InstallRequirement) -> "Constraint":
        # Merge in another user requirement; only InstallRequirement is supported.
        if not isinstance(other, InstallRequirement):
            return NotImplemented
        merged_links = self.links.union([other.link]) if other.link else self.links
        return Constraint(
            self.specifier & other.specifier,
            self.hashes & other.hashes(trust_internet=False),
            merged_links,
        )

    def is_satisfied_by(self, candidate: "Candidate") -> bool:
        """Check whether *candidate* is allowed by this constraint."""
        # Reject if there are any mismatched URL constraints on this package.
        for link in self.links:
            if not _match_link(link, candidate):
                return False
        # We can safely always allow prereleases here since PackageFinder
        # already implements the prerelease logic, and would have filtered out
        # prerelease candidates if the user does not expect them.
        return self.specifier.contains(candidate.version, prereleases=True)
60
+
61
+
62
class Requirement:
    """Abstract requirement node in the resolver's dependency graph."""

    @property
    def project_name(self) -> NormalizedName:
        """The canonical project name, without any extras.

        Unlike ``name``, this never carries a ``[...]`` suffix, even when
        the requirement itself specifies extras.
        """
        raise NotImplementedError("Subclass should override")

    @property
    def name(self) -> str:
        """The identifier used for this requirement inside the resolver.

        Unlike ``project_name``, this carries the ``[...]`` suffix when the
        requirement specifies extras.
        """
        raise NotImplementedError("Subclass should override")

    def is_satisfied_by(self, candidate: "Candidate") -> bool:
        # Default: nothing satisfies the abstract requirement; subclasses
        # implement the real matching logic.
        return False

    def get_candidate_lookup(self) -> CandidateLookup:
        """Return the (candidate, ireq) pair used to look up candidates."""
        raise NotImplementedError("Subclass should override")

    def format_for_error(self) -> str:
        """Render this requirement for inclusion in an error message."""
        raise NotImplementedError("Subclass should override")
90
+
91
+
92
def _match_link(link: Link, candidate: "Candidate") -> bool:
    """Return True when *candidate* originates from a link equivalent to *link*."""
    source = candidate.source_link
    if not source:
        # Candidates without a source link (e.g. already installed) never match.
        return False
    return links_equivalent(link, source)
96
+
97
+
98
class Candidate:
    """Abstract base for anything the resolver can pick as an install target."""

    @property
    def project_name(self) -> NormalizedName:
        """The canonical project name of the candidate.

        Unlike ``name``, this never carries a ``[...]`` extras suffix; it is
        always just the name of the underlying project.
        """
        raise NotImplementedError("Override in subclass")

    @property
    def name(self) -> str:
        """The name identifying this candidate in the resolver.

        Unlike ``project_name``, this carries the ``[...]`` extras suffix
        when this candidate was produced for a requirement with extras.
        """
        raise NotImplementedError("Override in subclass")

    @property
    def version(self) -> CandidateVersion:
        """The concrete version this candidate would provide."""
        raise NotImplementedError("Override in subclass")

    @property
    def is_installed(self) -> bool:
        """Whether the candidate is already present in the environment."""
        raise NotImplementedError("Override in subclass")

    @property
    def is_editable(self) -> bool:
        """Whether the candidate represents an editable install."""
        raise NotImplementedError("Override in subclass")

    @property
    def source_link(self) -> Optional[Link]:
        """The link this candidate originates from, if any."""
        raise NotImplementedError("Override in subclass")

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        """Yield the requirements this candidate introduces."""
        raise NotImplementedError("Override in subclass")

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        """Return the backing InstallRequirement when installation is needed."""
        raise NotImplementedError("Override in subclass")

    def format_for_error(self) -> str:
        """Render this candidate for inclusion in an error message."""
        raise NotImplementedError("Subclass should override")
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py ADDED
@@ -0,0 +1,597 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import sys
3
+ from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast
4
+
5
+ from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
6
+ from pip._vendor.packaging.version import Version
7
+
8
+ from pip._internal.exceptions import (
9
+ HashError,
10
+ InstallationSubprocessError,
11
+ MetadataInconsistent,
12
+ )
13
+ from pip._internal.metadata import BaseDistribution
14
+ from pip._internal.models.link import Link, links_equivalent
15
+ from pip._internal.models.wheel import Wheel
16
+ from pip._internal.req.constructors import (
17
+ install_req_from_editable,
18
+ install_req_from_line,
19
+ )
20
+ from pip._internal.req.req_install import InstallRequirement
21
+ from pip._internal.utils.direct_url_helpers import direct_url_from_link
22
+ from pip._internal.utils.misc import normalize_version_info
23
+
24
+ from .base import Candidate, CandidateVersion, Requirement, format_name
25
+
26
+ if TYPE_CHECKING:
27
+ from .factory import Factory
28
+
29
logger = logging.getLogger(__name__)

# Typing-only union of the concrete candidate classes that are directly
# backed by a distribution; see as_base_candidate() for the runtime check.
BaseCandidate = Union[
    "AlreadyInstalledCandidate",
    "EditableCandidate",
    "LinkCandidate",
]

# Avoid conflicting with the PyPI package "Python".
REQUIRES_PYTHON_IDENTIFIER = cast(NormalizedName, "<Python from Requires-Python>")
39
+
40
+
41
def as_base_candidate(candidate: Candidate) -> Optional[BaseCandidate]:
    """The runtime version of BaseCandidate.

    Returns *candidate* unchanged when it is one of the distribution-backed
    candidate classes, and None otherwise (e.g. for ExtrasCandidate).
    """
    runtime_base_classes = (
        AlreadyInstalledCandidate,
        EditableCandidate,
        LinkCandidate,
    )
    return candidate if isinstance(candidate, runtime_base_classes) else None
51
+
52
+
53
def make_install_req_from_link(
    link: Link, template: InstallRequirement
) -> InstallRequirement:
    """Create a non-editable InstallRequirement pointed at *link*.

    Per-requirement options (PEP 517 flags, hashes, extras, etc.) are copied
    over from *template*.
    """
    assert not template.editable, "template is editable"
    line = str(template.req) if template.req else link.url
    req = install_req_from_line(
        line,
        comes_from=template.comes_from,
        config_settings=template.config_settings,
        constraint=template.constraint,
        global_options=template.global_options,
        hash_options=template.hash_options,
        isolated=template.isolated,
        use_pep517=template.use_pep517,
        user_supplied=template.user_supplied,
    )
    req.original_link = template.original_link
    req.link = link
    req.extras = template.extras
    return req
76
+
77
+
78
def make_install_req_from_editable(
    link: Link, template: InstallRequirement
) -> InstallRequirement:
    """Create an editable InstallRequirement for *link*, copying
    per-requirement options from *template*.
    """
    assert template.editable, "template not editable"
    req = install_req_from_editable(
        link.url,
        comes_from=template.comes_from,
        config_settings=template.config_settings,
        constraint=template.constraint,
        global_options=template.global_options,
        hash_options=template.hash_options,
        isolated=template.isolated,
        permit_editable_wheels=template.permit_editable_wheels,
        use_pep517=template.use_pep517,
        user_supplied=template.user_supplied,
    )
    req.extras = template.extras
    return req
96
+
97
+
98
def _make_install_req_from_dist(
    dist: BaseDistribution, template: InstallRequirement
) -> InstallRequirement:
    """Create an InstallRequirement describing the already-available *dist*.

    The requirement is marked as satisfied so no preparation work is done.
    """
    if template.req:
        line = str(template.req)
    elif template.link:
        line = f"{dist.canonical_name} @ {template.link.url}"
    else:
        line = f"{dist.canonical_name}=={dist.version}"
    req = install_req_from_line(
        line,
        comes_from=template.comes_from,
        config_settings=template.config_settings,
        constraint=template.constraint,
        global_options=template.global_options,
        hash_options=template.hash_options,
        isolated=template.isolated,
        use_pep517=template.use_pep517,
        user_supplied=template.user_supplied,
    )
    # Pointing satisfied_by at the existing dist makes preparation a no-op.
    req.satisfied_by = dist
    return req
120
+
121
+
122
class _InstallRequirementBackedCandidate(Candidate):
    """A candidate backed by an ``InstallRequirement``.

    This represents a package request with the target not being already
    in the environment, and needs to be fetched and installed. The backing
    ``InstallRequirement`` is responsible for most of the leg work; this
    class exposes appropriate information to the resolver.

    :param link: The link passed to the ``InstallRequirement``. The backing
        ``InstallRequirement`` will use this link to fetch the distribution.
    :param source_link: The link this candidate "originates" from. This is
        different from ``link`` when the link is found in the wheel cache.
        ``link`` would point to the wheel cache, while this points to the
        found remote link (e.g. from pypi.org).
    """

    # Prepared distribution backing this candidate; assigned in __init__.
    dist: BaseDistribution
    is_installed = False

    def __init__(
        self,
        link: Link,
        source_link: Link,
        ireq: InstallRequirement,
        factory: "Factory",
        name: Optional[NormalizedName] = None,
        version: Optional[CandidateVersion] = None,
    ) -> None:
        self._link = link
        self._source_link = source_link
        self._factory = factory
        self._ireq = ireq
        # name/version may be None here; they are lazily filled in from the
        # prepared distribution's metadata (see project_name / version).
        self._name = name
        self._version = version
        # Prepare eagerly: raises if fetching/building fails, or if the
        # resulting metadata disagrees with the expected name/version.
        self.dist = self._prepare()

    def __str__(self) -> str:
        return f"{self.name} {self.version}"

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({str(self._link)!r})"

    def __hash__(self) -> int:
        # Identity is keyed on the link, mirroring __eq__ below.
        return hash((self.__class__, self._link))

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, self.__class__):
            return links_equivalent(self._link, other._link)
        return False

    @property
    def source_link(self) -> Optional[Link]:
        return self._source_link

    @property
    def project_name(self) -> NormalizedName:
        """The normalised name of the project the candidate refers to"""
        if self._name is None:
            self._name = self.dist.canonical_name
        return self._name

    @property
    def name(self) -> str:
        # For link-backed candidates the resolver name has no extras suffix.
        return self.project_name

    @property
    def version(self) -> CandidateVersion:
        if self._version is None:
            self._version = self.dist.version
        return self._version

    def format_for_error(self) -> str:
        return "{} {} (from {})".format(
            self.name,
            self.version,
            self._link.file_path if self._link.is_file else self._link,
        )

    def _prepare_distribution(self) -> BaseDistribution:
        # Subclasses decide how to fetch/build the distribution.
        raise NotImplementedError("Override in subclass")

    def _check_metadata_consistency(self, dist: BaseDistribution) -> None:
        """Check for consistency of project name and version of dist."""
        if self._name is not None and self._name != dist.canonical_name:
            raise MetadataInconsistent(
                self._ireq,
                "name",
                self._name,
                dist.canonical_name,
            )
        if self._version is not None and self._version != dist.version:
            raise MetadataInconsistent(
                self._ireq,
                "version",
                str(self._version),
                str(dist.version),
            )

    def _prepare(self) -> BaseDistribution:
        try:
            dist = self._prepare_distribution()
        except HashError as e:
            # Provide HashError the underlying ireq that caused it. This
            # provides context for the resulting error message to show the
            # offending line to the user.
            e.req = self._ireq
            raise
        except InstallationSubprocessError as exc:
            # The output has been presented already, so don't duplicate it.
            exc.context = "See above for output."
            raise

        self._check_metadata_consistency(dist)
        return dist

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        # One requirement per declared dependency, plus a synthetic
        # Requires-Python requirement yielded at the end.
        requires = self.dist.iter_dependencies() if with_requires else ()
        for r in requires:
            yield from self._factory.make_requirements_from_spec(str(r), self._ireq)
        yield self._factory.make_requires_python_requirement(self.dist.requires_python)

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        return self._ireq
245
+
246
+
247
class LinkCandidate(_InstallRequirementBackedCandidate):
    """A candidate prepared from a non-editable link (remote or local)."""

    is_editable = False

    def __init__(
        self,
        link: Link,
        template: InstallRequirement,
        factory: "Factory",
        name: Optional[NormalizedName] = None,
        version: Optional[CandidateVersion] = None,
    ) -> None:
        source_link = link
        # Prefer a cached built wheel for this link, when one exists.
        cache_entry = factory.get_wheel_cache_entry(source_link, name)
        if cache_entry is not None:
            logger.debug("Using cached wheel link: %s", cache_entry.link)
            link = cache_entry.link
        ireq = make_install_req_from_link(link, template)
        assert ireq.link == link
        if ireq.link.is_wheel and not ireq.link.is_file:
            # Remote wheel: its filename must agree with the expected
            # name/version before we commit to it.
            wheel = Wheel(ireq.link.filename)
            wheel_name = canonicalize_name(wheel.name)
            assert name == wheel_name, f"{name!r} != {wheel_name!r} for wheel"
            # Version may not be present for PEP 508 direct URLs
            if version is not None:
                wheel_version = Version(wheel.version)
                assert version == wheel_version, "{!r} != {!r} for wheel {}".format(
                    version, wheel_version, name
                )

        if cache_entry is not None:
            # A cache hit always points at a local wheel file.
            assert ireq.link.is_wheel
            assert ireq.link.is_file
            if cache_entry.persistent and template.link is template.original_link:
                ireq.cached_wheel_source_link = source_link
            if cache_entry.origin is not None:
                ireq.download_info = cache_entry.origin
            else:
                # Legacy cache entry that does not have origin.json.
                # download_info may miss the archive_info.hashes field.
                ireq.download_info = direct_url_from_link(
                    source_link, link_is_in_wheel_cache=cache_entry.persistent
                )

        super().__init__(
            link=link,
            source_link=source_link,
            ireq=ireq,
            factory=factory,
            name=name,
            version=version,
        )

    def _prepare_distribution(self) -> BaseDistribution:
        preparer = self._factory.preparer
        return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True)
302
+
303
+
304
class EditableCandidate(_InstallRequirementBackedCandidate):
    """A candidate for an editable (``-e``) requirement.

    For editable installs the link and the source link are always the same.
    """

    is_editable = True

    def __init__(
        self,
        link: Link,
        template: InstallRequirement,
        factory: "Factory",
        name: Optional[NormalizedName] = None,
        version: Optional[CandidateVersion] = None,
    ) -> None:
        editable_ireq = make_install_req_from_editable(link, template)
        super().__init__(
            link=link,
            source_link=link,
            ireq=editable_ireq,
            factory=factory,
            name=name,
            version=version,
        )

    def _prepare_distribution(self) -> BaseDistribution:
        preparer = self._factory.preparer
        return preparer.prepare_editable_requirement(self._ireq)
326
+
327
+
328
class AlreadyInstalledCandidate(Candidate):
    """A candidate for a distribution already present in the environment."""

    is_installed = True
    # Installed distributions do not originate from a link.
    source_link = None

    def __init__(
        self,
        dist: BaseDistribution,
        template: InstallRequirement,
        factory: "Factory",
    ) -> None:
        self.dist = dist
        self._ireq = _make_install_req_from_dist(dist, template)
        self._factory = factory
        self._version = None

        # This is just logging some messages, so we can do it eagerly.
        # The returned dist would be exactly the same as self.dist because we
        # set satisfied_by in _make_install_req_from_dist.
        # TODO: Supply reason based on force_reinstall and upgrade_strategy.
        skip_reason = "already satisfied"
        factory.preparer.prepare_installed_requirement(self._ireq, skip_reason)

    def __str__(self) -> str:
        return str(self.dist)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.dist!r})"

    def __hash__(self) -> int:
        # Keyed on (name, version), mirroring __eq__ below.
        return hash((self.__class__, self.name, self.version))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, self.__class__):
            return False
        return self.name == other.name and self.version == other.version

    @property
    def project_name(self) -> NormalizedName:
        return self.dist.canonical_name

    @property
    def name(self) -> str:
        return self.project_name

    @property
    def version(self) -> CandidateVersion:
        # Cache the installed distribution's version on first access.
        cached = self._version
        if cached is None:
            cached = self._version = self.dist.version
        return cached

    @property
    def is_editable(self) -> bool:
        return self.dist.editable

    def format_for_error(self) -> str:
        return f"{self.name} {self.version} (Installed)"

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        if not with_requires:
            return
        for dep in self.dist.iter_dependencies():
            yield from self._factory.make_requirements_from_spec(str(dep), self._ireq)

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        # Nothing to install; the distribution is already in place.
        return None
393
+
394
+
395
+ class ExtrasCandidate(Candidate):
396
+ """A candidate that has 'extras', indicating additional dependencies.
397
+
398
+ Requirements can be for a project with dependencies, something like
399
+ foo[extra]. The extras don't affect the project/version being installed
400
+ directly, but indicate that we need additional dependencies. We model that
401
+ by having an artificial ExtrasCandidate that wraps the "base" candidate.
402
+
403
+ The ExtrasCandidate differs from the base in the following ways:
404
+
405
+ 1. It has a unique name, of the form foo[extra]. This causes the resolver
406
+ to treat it as a separate node in the dependency graph.
407
+ 2. When we're getting the candidate's dependencies,
408
+ a) We specify that we want the extra dependencies as well.
409
+ b) We add a dependency on the base candidate.
410
+ See below for why this is needed.
411
+ 3. We return None for the underlying InstallRequirement, as the base
412
+ candidate will provide it, and we don't want to end up with duplicates.
413
+
414
+ The dependency on the base candidate is needed so that the resolver can't
415
+ decide that it should recommend foo[extra1] version 1.0 and foo[extra2]
416
+ version 2.0. Having those candidates depend on foo=1.0 and foo=2.0
417
+ respectively forces the resolver to recognise that this is a conflict.
418
+ """
419
+
420
+ def __init__(
421
+ self,
422
+ base: BaseCandidate,
423
+ extras: FrozenSet[str],
424
+ *,
425
+ comes_from: Optional[InstallRequirement] = None,
426
+ ) -> None:
427
+ """
428
+ :param comes_from: the InstallRequirement that led to this candidate if it
429
+ differs from the base's InstallRequirement. This will often be the
430
+ case in the sense that this candidate's requirement has the extras
431
+ while the base's does not. Unlike the InstallRequirement backed
432
+ candidates, this requirement is used solely for reporting purposes,
433
+ it does not do any leg work.
434
+ """
435
+ self.base = base
436
+ self.extras = frozenset(canonicalize_name(e) for e in extras)
437
+ # If any extras are requested in their non-normalized forms, keep track
438
+ # of their raw values. This is needed when we look up dependencies
439
+ # since PEP 685 has not been implemented for marker-matching, and using
440
+ # the non-normalized extra for lookup ensures the user can select a
441
+ # non-normalized extra in a package with its non-normalized form.
442
+ # TODO: Remove this attribute when packaging is upgraded to support the
443
+ # marker comparison logic specified in PEP 685.
444
+ self._unnormalized_extras = extras.difference(self.extras)
445
+ self._comes_from = comes_from if comes_from is not None else self.base._ireq
446
+
447
+ def __str__(self) -> str:
448
+ name, rest = str(self.base).split(" ", 1)
449
+ return "{}[{}] {}".format(name, ",".join(self.extras), rest)
450
+
451
+ def __repr__(self) -> str:
452
+ return f"{self.__class__.__name__}(base={self.base!r}, extras={self.extras!r})"
453
+
454
    def __hash__(self) -> int:
        # Consistent with __eq__: identity is (base candidate, extras set).
        return hash((self.base, self.extras))
456
+
457
    def __eq__(self, other: Any) -> bool:
        # Equal only to another ExtrasCandidate with the same base and extras.
        if isinstance(other, self.__class__):
            return self.base == other.base and self.extras == other.extras
        return False
461
+
462
    @property
    def project_name(self) -> NormalizedName:
        # Extras do not change the project identity; delegate to the base.
        return self.base.project_name
465
+
466
    @property
    def name(self) -> str:
        """The normalised name of the project the candidate refers to"""
        # Includes the extras, e.g. "pkg[extra1,extra2]".
        return format_name(self.base.project_name, self.extras)
470
+
471
    @property
    def version(self) -> CandidateVersion:
        # Same version as the base; extras never affect versioning.
        return self.base.version
474
+
475
    def format_for_error(self) -> str:
        # Base description plus a sorted, human-readable extras list.
        return "{} [{}]".format(
            self.base.format_for_error(), ", ".join(sorted(self.extras))
        )
479
+
480
    @property
    def is_installed(self) -> bool:
        # Delegates to the wrapped base candidate.
        return self.base.is_installed
483
+
484
    @property
    def is_editable(self) -> bool:
        # Delegates to the wrapped base candidate.
        return self.base.is_editable
487
+
488
    @property
    def source_link(self) -> Optional[Link]:
        # Delegates to the wrapped base candidate.
        return self.base.source_link
491
+
492
+ def _warn_invalid_extras(
493
+ self,
494
+ requested: FrozenSet[str],
495
+ valid: FrozenSet[str],
496
+ ) -> None:
497
+ """Emit warnings for invalid extras being requested.
498
+
499
+ This emits a warning for each requested extra that is not in the
500
+ candidate's ``Provides-Extra`` list.
501
+ """
502
+ invalid_extras_to_warn = frozenset(
503
+ extra
504
+ for extra in requested
505
+ if extra not in valid
506
+ # If an extra is requested in an unnormalized form, skip warning
507
+ # about the normalized form being missing.
508
+ and extra in self.extras
509
+ )
510
+ if not invalid_extras_to_warn:
511
+ return
512
+ for extra in sorted(invalid_extras_to_warn):
513
+ logger.warning(
514
+ "%s %s does not provide the extra '%s'",
515
+ self.base.name,
516
+ self.version,
517
+ extra,
518
+ )
519
+
520
+ def _calculate_valid_requested_extras(self) -> FrozenSet[str]:
521
+ """Get a list of valid extras requested by this candidate.
522
+
523
+ The user (or upstream dependant) may have specified extras that the
524
+ candidate doesn't support. Any unsupported extras are dropped, and each
525
+ cause a warning to be logged here.
526
+ """
527
+ requested_extras = self.extras.union(self._unnormalized_extras)
528
+ valid_extras = frozenset(
529
+ extra
530
+ for extra in requested_extras
531
+ if self.base.dist.is_extra_provided(extra)
532
+ )
533
+ self._warn_invalid_extras(requested_extras, valid_extras)
534
+ return valid_extras
535
+
536
    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        """Yield this candidate's requirements.

        Always yields a requirement pinning the exact base candidate first;
        if *with_requires* is true, also yields the dist's dependencies
        filtered by the valid requested extras.
        """
        factory = self.base._factory

        # Add a dependency on the exact base
        # (See note 2b in the class docstring)
        yield factory.make_requirement_from_candidate(self.base)
        if not with_requires:
            return

        valid_extras = self._calculate_valid_requested_extras()
        for r in self.base.dist.iter_dependencies(valid_extras):
            yield from factory.make_requirements_from_spec(
                str(r),
                self._comes_from,
                valid_extras,
            )
552
+
553
    def get_install_requirement(self) -> Optional[InstallRequirement]:
        # We don't return anything here, because we always
        # depend on the base candidate, and we'll get the
        # install requirement from that.
        return None
558
+
559
+
560
class RequiresPythonCandidate(Candidate):
    """Virtual candidate representing the Python interpreter itself."""

    is_installed = False
    source_link = None

    def __init__(self, py_version_info: Optional[Tuple[int, ...]]) -> None:
        # Fall back to the running interpreter when no version was pinned.
        if py_version_info is None:
            parts: Tuple[int, ...] = sys.version_info[:3]
        else:
            parts = normalize_version_info(py_version_info)
        self._version = Version(".".join(map(str, parts)))

    # We don't need to implement __eq__() and __ne__() since there is always
    # only one RequiresPythonCandidate in a resolution, i.e. the host Python.
    # The built-in object.__eq__() and object.__ne__() do exactly what we want.

    def __str__(self) -> str:
        return f"Python {self._version}"

    @property
    def project_name(self) -> NormalizedName:
        return REQUIRES_PYTHON_IDENTIFIER

    @property
    def name(self) -> str:
        return REQUIRES_PYTHON_IDENTIFIER

    @property
    def version(self) -> CandidateVersion:
        return self._version

    def format_for_error(self) -> str:
        return f"Python {self.version}"

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        # The interpreter itself depends on nothing.
        return ()

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        # Nothing can be installed for the host interpreter.
        return None
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/requirements.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pip._vendor.packaging.specifiers import SpecifierSet
2
+ from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
3
+
4
+ from pip._internal.req.constructors import install_req_drop_extras
5
+ from pip._internal.req.req_install import InstallRequirement
6
+
7
+ from .base import Candidate, CandidateLookup, Requirement, format_name
8
+
9
+
10
class ExplicitRequirement(Requirement):
    """A requirement pinned directly to one already-known candidate."""

    def __init__(self, candidate: Candidate) -> None:
        self.candidate = candidate

    def __str__(self) -> str:
        return str(self.candidate)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.candidate!r})"

    @property
    def project_name(self) -> NormalizedName:
        # No need to canonicalize - the candidate did this
        return self.candidate.project_name

    @property
    def name(self) -> str:
        # No need to canonicalize - the candidate did this
        return self.candidate.name

    def format_for_error(self) -> str:
        return self.candidate.format_for_error()

    def get_candidate_lookup(self) -> CandidateLookup:
        # The candidate is already known; no ireq lookup is needed.
        return self.candidate, None

    def is_satisfied_by(self, candidate: Candidate) -> bool:
        # Only the exact pinned candidate satisfies this requirement.
        return candidate == self.candidate
38
+
39
+
40
class SpecifierRequirement(Requirement):
    """Requirement backed by a PEP 508, specifier-style InstallRequirement."""

    def __init__(self, ireq: InstallRequirement) -> None:
        assert ireq.link is None, "This is a link, not a specifier"
        self._ireq = ireq
        self._extras = frozenset(map(canonicalize_name, self._ireq.extras))

    def __str__(self) -> str:
        return str(self._ireq.req)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({str(self._ireq.req)!r})"

    @property
    def project_name(self) -> NormalizedName:
        assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
        return canonicalize_name(self._ireq.req.name)

    @property
    def name(self) -> str:
        return format_name(self.project_name, self._extras)

    def format_for_error(self) -> str:
        # Convert comma-separated specifiers into "A, B, ..., F and G" so the
        # specifier reads naturally in error messages, without (hopefully!)
        # changing its meaning. Not every edge case has been checked.
        parts = [chunk.strip() for chunk in str(self).split(",")]
        if not parts:
            return ""
        if len(parts) == 1:
            return parts[0]
        return "{} and {}".format(", ".join(parts[:-1]), parts[-1])

    def get_candidate_lookup(self) -> CandidateLookup:
        return None, self._ireq

    def is_satisfied_by(self, candidate: Candidate) -> bool:
        assert candidate.name == self.name, (
            f"Internal issue: Candidate is not for this requirement "
            f"{candidate.name} vs {self.name}"
        )
        # We can safely always allow prereleases here since PackageFinder
        # already implements the prerelease logic, and would have filtered out
        # prerelease candidates if the user does not expect them.
        assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
        specifier = self._ireq.req.specifier
        return specifier.contains(candidate.version, prereleases=True)
88
+
89
+
90
class SpecifierWithoutExtrasRequirement(SpecifierRequirement):
    """
    Requirement backed by an install requirement on a base package.
    Trims extras from its install requirement if there are any.
    """

    def __init__(self, ireq: InstallRequirement) -> None:
        assert ireq.link is None, "This is a link, not a specifier"
        # Keep a copy of the ireq with extras stripped: the base package is
        # resolved independently of its extras-variant.
        self._ireq = install_req_drop_extras(ireq)
        # After dropping, this is normally empty; computed for consistency
        # with the parent class.
        self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras)
100
+
101
+
102
class RequiresPythonRequirement(Requirement):
    """A requirement representing Requires-Python metadata."""

    def __init__(self, specifier: SpecifierSet, match: Candidate) -> None:
        self.specifier = specifier
        self._candidate = match

    def __str__(self) -> str:
        return f"Python {self.specifier}"

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({str(self.specifier)!r})"

    @property
    def project_name(self) -> NormalizedName:
        return self._candidate.project_name

    @property
    def name(self) -> str:
        return self._candidate.name

    def format_for_error(self) -> str:
        return str(self)

    def get_candidate_lookup(self) -> CandidateLookup:
        # Offer the interpreter candidate only if it actually satisfies the
        # specifier; otherwise signal that nothing matches.
        if not self.specifier.contains(self._candidate.version, prereleases=True):
            return None, None
        return self._candidate, None

    def is_satisfied_by(self, candidate: Candidate) -> bool:
        assert candidate.name == self._candidate.name, "Not Python candidate"
        # We can safely always allow prereleases here since PackageFinder
        # already implements the prerelease logic, and would have filtered out
        # prerelease candidates if the user does not expect them.
        return self.specifier.contains(candidate.version, prereleases=True)
137
+
138
+
139
class UnsatisfiableRequirement(Requirement):
    """A requirement that cannot be satisfied."""

    def __init__(self, name: NormalizedName) -> None:
        self._name = name

    def __str__(self) -> str:
        return f"{self._name} (unavailable)"

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({str(self._name)!r})"

    @property
    def project_name(self) -> NormalizedName:
        return self._name

    @property
    def name(self) -> str:
        return self._name

    def format_for_error(self) -> str:
        return str(self)

    def get_candidate_lookup(self) -> CandidateLookup:
        # By definition no candidate can ever be produced.
        return None, None

    def is_satisfied_by(self, candidate: Candidate) -> bool:
        # Nothing ever satisfies this requirement.
        return False
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/resolver.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import functools
3
+ import logging
4
+ import os
5
+ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
6
+
7
+ from pip._vendor.packaging.utils import canonicalize_name
8
+ from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
9
+ from pip._vendor.resolvelib import Resolver as RLResolver
10
+ from pip._vendor.resolvelib.structs import DirectedGraph
11
+
12
+ from pip._internal.cache import WheelCache
13
+ from pip._internal.index.package_finder import PackageFinder
14
+ from pip._internal.operations.prepare import RequirementPreparer
15
+ from pip._internal.req.constructors import install_req_extend_extras
16
+ from pip._internal.req.req_install import InstallRequirement
17
+ from pip._internal.req.req_set import RequirementSet
18
+ from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider
19
+ from pip._internal.resolution.resolvelib.provider import PipProvider
20
+ from pip._internal.resolution.resolvelib.reporter import (
21
+ PipDebuggingReporter,
22
+ PipReporter,
23
+ )
24
+ from pip._internal.utils.packaging import get_requirement
25
+
26
+ from .base import Candidate, Requirement
27
+ from .factory import Factory
28
+
29
+ if TYPE_CHECKING:
30
+ from pip._vendor.resolvelib.resolvers import Result as RLResult
31
+
32
+ Result = RLResult[Requirement, Candidate, str]
33
+
34
+
35
+ logger = logging.getLogger(__name__)
36
+
37
+
38
class Resolver(BaseResolver):
    """The resolvelib-backed pip resolver.

    Wraps resolvelib's ``Resolver`` with pip's ``Factory``/``PipProvider``
    and converts the resolution result into a ``RequirementSet``.
    """

    _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}

    def __init__(
        self,
        preparer: RequirementPreparer,
        finder: PackageFinder,
        wheel_cache: Optional[WheelCache],
        make_install_req: InstallRequirementProvider,
        use_user_site: bool,
        ignore_dependencies: bool,
        ignore_installed: bool,
        ignore_requires_python: bool,
        force_reinstall: bool,
        upgrade_strategy: str,
        py_version_info: Optional[Tuple[int, ...]] = None,
    ):
        super().__init__()
        assert upgrade_strategy in self._allowed_strategies

        self.factory = Factory(
            finder=finder,
            preparer=preparer,
            make_install_req=make_install_req,
            wheel_cache=wheel_cache,
            use_user_site=use_user_site,
            force_reinstall=force_reinstall,
            ignore_installed=ignore_installed,
            ignore_requires_python=ignore_requires_python,
            py_version_info=py_version_info,
        )
        self.ignore_dependencies = ignore_dependencies
        self.upgrade_strategy = upgrade_strategy
        # Set by resolve(); consumed later by get_installation_order().
        self._result: Optional[Result] = None

    def resolve(
        self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
    ) -> RequirementSet:
        """Resolve *root_reqs* into a fully-pinned ``RequirementSet``.

        Raises the error produced by ``Factory.get_installation_error`` when
        resolution is impossible.
        """
        collected = self.factory.collect_root_requirements(root_reqs)
        provider = PipProvider(
            factory=self.factory,
            constraints=collected.constraints,
            ignore_dependencies=self.ignore_dependencies,
            upgrade_strategy=self.upgrade_strategy,
            user_requested=collected.user_requested,
        )
        # Opt-in verbose reporter for debugging resolver decisions.
        if "PIP_RESOLVER_DEBUG" in os.environ:
            reporter: BaseReporter = PipDebuggingReporter()
        else:
            reporter = PipReporter()
        resolver: RLResolver[Requirement, Candidate, str] = RLResolver(
            provider,
            reporter,
        )

        try:
            limit_how_complex_resolution_can_be = 200000
            result = self._result = resolver.resolve(
                collected.requirements, max_rounds=limit_how_complex_resolution_can_be
            )

        except ResolutionImpossible as e:
            error = self.factory.get_installation_error(
                cast("ResolutionImpossible[Requirement, Candidate]", e),
                collected.constraints,
            )
            raise error from e

        req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
        # process candidates with extras last to ensure their base equivalent is
        # already in the req_set if appropriate.
        # Python's sort is stable so using a binary key function keeps relative order
        # within both subsets.
        for candidate in sorted(
            result.mapping.values(), key=lambda c: c.name != c.project_name
        ):
            ireq = candidate.get_install_requirement()
            if ireq is None:
                if candidate.name != candidate.project_name:
                    # extend existing req's extras
                    with contextlib.suppress(KeyError):
                        req = req_set.get_requirement(candidate.project_name)
                        req_set.add_named_requirement(
                            install_req_extend_extras(
                                req, get_requirement(candidate.name).extras
                            )
                        )
                continue

            # Check if there is already an installation under the same name,
            # and set a flag for later stages to uninstall it, if needed.
            installed_dist = self.factory.get_dist_to_uninstall(candidate)
            if installed_dist is None:
                # There is no existing installation -- nothing to uninstall.
                ireq.should_reinstall = False
            elif self.factory.force_reinstall:
                # The --force-reinstall flag is set -- reinstall.
                ireq.should_reinstall = True
            elif installed_dist.version != candidate.version:
                # The installation is different in version -- reinstall.
                ireq.should_reinstall = True
            elif candidate.is_editable or installed_dist.editable:
                # The incoming distribution is editable, or different in
                # editable-ness to installation -- reinstall.
                ireq.should_reinstall = True
            elif candidate.source_link and candidate.source_link.is_file:
                # The incoming distribution is under file://
                if candidate.source_link.is_wheel:
                    # is a local wheel -- do nothing.
                    logger.info(
                        "%s is already installed with the same version as the "
                        "provided wheel. Use --force-reinstall to force an "
                        "installation of the wheel.",
                        ireq.name,
                    )
                    continue

                # is a local sdist or path -- reinstall
                ireq.should_reinstall = True
            else:
                continue

            link = candidate.source_link
            if link and link.is_yanked:
                # The reason can contain non-ASCII characters, Unicode
                # is required for Python 2.
                msg = (
                    "The candidate selected for download or install is a "
                    "yanked version: {name!r} candidate (version {version} "
                    "at {link})\nReason for being yanked: {reason}"
                ).format(
                    name=candidate.name,
                    version=candidate.version,
                    link=link,
                    reason=link.yanked_reason or "<none given>",
                )
                logger.warning(msg)

            req_set.add_named_requirement(ireq)

        reqs = req_set.all_requirements
        self.factory.preparer.prepare_linked_requirements_more(reqs)
        for req in reqs:
            req.prepared = True
            req.needs_more_preparation = False
        return req_set

    def get_installation_order(
        self, req_set: RequirementSet
    ) -> List[InstallRequirement]:
        """Get order for installation of requirements in RequirementSet.

        The returned list contains a requirement before another that depends on
        it. This helps ensure that the environment is kept consistent as they
        get installed one-by-one.

        The current implementation creates a topological ordering of the
        dependency graph, giving more weight to packages with less
        or no dependencies, while breaking any cycles in the graph at
        arbitrary points. We make no guarantees about where the cycle
        would be broken, other than it *would* be broken.
        """
        assert self._result is not None, "must call resolve() first"

        if not req_set.requirements:
            # Nothing is left to install, so we do not need an order.
            return []

        graph = self._result.graph
        weights = get_topological_weights(graph, set(req_set.requirements.keys()))

        sorted_items = sorted(
            req_set.requirements.items(),
            key=functools.partial(_req_set_item_sorter, weights=weights),
            reverse=True,
        )
        return [ireq for _, ireq in sorted_items]
215
+
216
+
217
def get_topological_weights(
    graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str]
) -> Dict[Optional[str], int]:
    """Assign weights to each node based on how "deep" they are.

    This implementation may change at any point in the future without prior
    notice.

    We first simplify the dependency graph by pruning any leaves and giving them
    the highest weight: a package without any dependencies should be installed
    first. This is done again and again in the same way, giving ever less weight
    to the newly found leaves. The loop stops when no leaves are left: all
    remaining packages have at least one dependency left in the graph.

    Then we continue with the remaining graph, by taking the length for the
    longest path to any node from root, ignoring any paths that contain a single
    node twice (i.e. cycles). This is done through a depth-first search through
    the graph, while keeping track of the path to the node.

    Cycles in the graph result would result in node being revisited while also
    being on its own path. In this case, take no action. This helps ensure we
    don't get stuck in a cycle.

    When assigning weight, the longer path (i.e. larger length) is preferred.

    We are only interested in the weights of packages that are in the
    requirement_keys.
    """
    current_path: Set[Optional[str]] = set()
    weights: Dict[Optional[str], int] = {}

    def visit(node: Optional[str]) -> None:
        # Revisiting a node already on the current path means a cycle;
        # break it by simply not descending again.
        if node in current_path:
            return

        current_path.add(node)
        for child in graph.iter_children(node):
            visit(child)
        current_path.remove(node)

        if node not in requirement_keys:
            return

        # Keep the deepest path length seen so far for this node.
        weights[node] = max(weights.get(node, 0), len(current_path))

    # Simplify the graph, pruning leaves that have no dependencies.
    # This is needed for large graphs (say over 200 packages) because the
    # `visit` function is exponentially slower then, taking minutes.
    # See https://github.com/pypa/pip/issues/10557
    while True:
        leaves = {
            key
            for key in graph
            if key is not None
            and not any(True for _ in graph.iter_children(key))
        }
        if not leaves:
            # No leaves remain; simplification is complete.
            break
        # All leaves in this round share the maximal weight.
        round_weight = len(graph) - 1
        for leaf in leaves:
            if leaf in requirement_keys:
                weights[leaf] = round_weight
        # Drop the processed leaves, exposing the next layer.
        for leaf in leaves:
            graph.remove(leaf)

    # Visit the remaining graph.
    # `None` is guaranteed to be the root node by resolvelib.
    visit(None)

    # Sanity check: all requirement keys should be in the weights,
    # and no other keys should be in the weights.
    difference = set(weights.keys()).difference(requirement_keys)
    assert not difference, difference

    return weights
304
+
305
+
306
def _req_set_item_sorter(
    item: Tuple[str, InstallRequirement],
    weights: Dict[Optional[str], int],
) -> Tuple[int, str]:
    """Key function used to sort install requirements for installation.

    Based on the "weight" mapping calculated in ``get_installation_order()``.
    The canonical package name is returned as the second member as a tie-
    breaker to ensure the result is predictable, which is useful in tests.
    """
    raw_name, _ireq = item
    name = canonicalize_name(raw_name)
    return weights[name], name
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (152 Bytes). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/__pycache__/typing_extensions.cpython-38.pyc ADDED
Binary file (66.1 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/__pycache__/zipp.cpython-38.pyc ADDED
Binary file (10.3 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Read resources contained within a package."""
2
+
3
+ from ._common import (
4
+ as_file,
5
+ files,
6
+ Package,
7
+ )
8
+
9
+ from ._legacy import (
10
+ contents,
11
+ open_binary,
12
+ read_binary,
13
+ open_text,
14
+ read_text,
15
+ is_resource,
16
+ path,
17
+ Resource,
18
+ )
19
+
20
+ from .abc import ResourceReader
21
+
22
+
23
+ __all__ = [
24
+ 'Package',
25
+ 'Resource',
26
+ 'ResourceReader',
27
+ 'as_file',
28
+ 'contents',
29
+ 'files',
30
+ 'is_resource',
31
+ 'open_binary',
32
+ 'open_text',
33
+ 'path',
34
+ 'read_binary',
35
+ 'read_text',
36
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_adapters.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import suppress
2
+ from io import TextIOWrapper
3
+
4
+ from . import abc
5
+
6
+
7
class SpecLoaderAdapter:
    """
    Adapt a package spec to adapt the underlying loader.
    """

    def __init__(self, spec, adapter=lambda spec: spec.loader):
        self.spec = spec
        # ``adapter`` derives the loader to expose from the wrapped spec.
        self.loader = adapter(spec)

    def __getattr__(self, name):
        # Everything else is delegated to the wrapped spec unchanged.
        return getattr(self.spec, name)
18
+
19
+
20
class TraversableResourcesLoader:
    """
    Adapt a loader to provide TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    def get_resource_reader(self, name):
        # Prefer the spec's native files()-capable reader when available;
        # CompatibilityFiles._native() falls back to the shim otherwise.
        return CompatibilityFiles(self.spec)._native()
30
+
31
+
32
+ def _io_wrapper(file, mode='r', *args, **kwargs):
33
+ if mode == 'r':
34
+ return TextIOWrapper(file, *args, **kwargs)
35
+ elif mode == 'rb':
36
+ return file
37
+ raise ValueError(
38
+ "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
39
+ )
40
+
41
+
42
class CompatibilityFiles:
    """
    Adapter for an existing or non-existent resource reader
    to provide a compatibility .files().
    """

    class SpecPath(abc.Traversable):
        """
        Path tied to a module spec.
        Can be read and exposes the resource reader children.
        """

        def __init__(self, spec, reader):
            self._spec = spec
            self._reader = reader

        def iterdir(self):
            # With no reader there are no children to expose.
            if not self._reader:
                return iter(())
            return iter(
                CompatibilityFiles.ChildPath(self._reader, path)
                for path in self._reader.contents()
            )

        def is_file(self):
            return False

        # A spec path is never a file, and is treated as never a dir either.
        is_dir = is_file

        def joinpath(self, other):
            if not self._reader:
                return CompatibilityFiles.OrphanPath(other)
            return CompatibilityFiles.ChildPath(self._reader, other)

        @property
        def name(self):
            return self._spec.name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)

    class ChildPath(abc.Traversable):
        """
        Path tied to a resource reader child.
        Can be read but doesn't expose any meaningful children.
        """

        def __init__(self, reader, name):
            self._reader = reader
            self._name = name

        def iterdir(self):
            return iter(())

        def is_file(self):
            return self._reader.is_resource(self.name)

        def is_dir(self):
            return not self.is_file()

        def joinpath(self, other):
            # Children of a child are not reachable via the reader API.
            return CompatibilityFiles.OrphanPath(self.name, other)

        @property
        def name(self):
            return self._name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(
                self._reader.open_resource(self.name), mode, *args, **kwargs
            )

    class OrphanPath(abc.Traversable):
        """
        Orphan path, not tied to a module spec or resource reader.
        Can't be read and doesn't expose any meaningful children.
        """

        def __init__(self, *path_parts):
            if len(path_parts) < 1:
                raise ValueError('Need at least one path part to construct a path')
            self._path = path_parts

        def iterdir(self):
            return iter(())

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            return CompatibilityFiles.OrphanPath(*self._path, other)

        @property
        def name(self):
            return self._path[-1]

        def open(self, mode='r', *args, **kwargs):
            raise FileNotFoundError("Can't open orphan path")

    def __init__(self, spec):
        self.spec = spec

    @property
    def _reader(self):
        # Returns None (implicitly) when the loader has no
        # get_resource_reader attribute.
        with suppress(AttributeError):
            return self.spec.loader.get_resource_reader(self.spec.name)

    def _native(self):
        """
        Return the native reader if it supports files().
        """
        reader = self._reader
        return reader if hasattr(reader, 'files') else self

    def __getattr__(self, attr):
        # Delegate unknown attributes to the underlying reader.
        return getattr(self._reader, attr)

    def files(self):
        return CompatibilityFiles.SpecPath(self.spec, self._reader)
163
+
164
+
165
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.
    """
    # Wrap the package's spec so its loader is replaced by a
    # TraversableResourcesLoader adapter.
    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_common.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pathlib
3
+ import tempfile
4
+ import functools
5
+ import contextlib
6
+ import types
7
+ import importlib
8
+ import inspect
9
+ import warnings
10
+ import itertools
11
+
12
+ from typing import Union, Optional, cast
13
+ from .abc import ResourceReader, Traversable
14
+
15
+ from ._compat import wrap_spec
16
+
17
+ Package = Union[types.ModuleType, str]
18
+ Anchor = Package
19
+
20
+
21
def package_to_anchor(func):
    """
    Replace 'package' parameter as 'anchor' and warn about the change.

    Other errors should fall through.

    >>> files('a', 'b')
    Traceback (most recent call last):
    TypeError: files() takes from 0 to 1 positional arguments but 2 were given
    """
    undefined = object()

    @functools.wraps(func)
    def wrapper(anchor=undefined, package=undefined):
        if package is undefined:
            # Modern call shape: zero or one positional argument.
            return func() if anchor is undefined else func(anchor)
        if anchor is not undefined:
            # Both supplied: forward and let func raise its own TypeError.
            return func(anchor, package)
        warnings.warn(
            "First parameter to files is renamed to 'anchor'",
            DeprecationWarning,
            stacklevel=2,
        )
        return func(package)

    return wrapper
49
+
50
+
51
@package_to_anchor
def files(anchor: Optional[Anchor] = None) -> Traversable:
    """
    Get a Traversable resource for an anchor.
    """
    # Resolve the anchor (module, dotted name, or caller's module when
    # None) and adapt it to a Traversable.
    return from_package(resolve(anchor))
57
+
58
+
59
def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
    """
    Return the package's loader if it's a ResourceReader.
    """
    # An issubclass() check is deliberately avoided here: abc's
    # __subclasscheck__() hook wants to create a weak reference to the
    # object, but zipimport.zipimporter does not support weak references,
    # which would raise TypeError.
    spec = package.__spec__
    factory = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
    if factory is None:
        return None
    return factory(spec.name)  # type: ignore
73
+
74
+
75
@functools.singledispatch
def resolve(cand: Optional[Anchor]) -> types.ModuleType:
    """Resolve an anchor (module, dotted name, or None) to a module object."""
    # Default: assume the anchor is already a module.
    return cast(types.ModuleType, cand)


@resolve.register
def _(cand: str) -> types.ModuleType:
    # A dotted name: import (or fetch the already-imported) module.
    return importlib.import_module(cand)


@resolve.register
def _(cand: None) -> types.ModuleType:
    # No anchor given: fall back to the calling module.
    return resolve(_infer_caller().f_globals['__name__'])
88
+
89
+
90
+ def _infer_caller():
91
+ """
92
+ Walk the stack and find the frame of the first caller not in this module.
93
+ """
94
+
95
+ def is_this_file(frame_info):
96
+ return frame_info.filename == __file__
97
+
98
+ def is_wrapper(frame_info):
99
+ return frame_info.function == 'wrapper'
100
+
101
+ not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
102
+ # also exclude 'wrapper' due to singledispatch in the call stack
103
+ callers = itertools.filterfalse(is_wrapper, not_this_file)
104
+ return next(callers).frame
105
+
106
+
107
def from_package(package: types.ModuleType):
    """
    Return a Traversable object for the given package.

    """
    adapted = wrap_spec(package)
    reader = adapted.loader.get_resource_reader(adapted.name)
    return reader.files()
115
+
116
+
117
+ @contextlib.contextmanager
118
+ def _tempfile(
119
+ reader,
120
+ suffix='',
121
+ # gh-93353: Keep a reference to call os.remove() in late Python
122
+ # finalization.
123
+ *,
124
+ _os_remove=os.remove,
125
+ ):
126
+ # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
127
+ # blocks due to the need to close the temporary file to work on Windows
128
+ # properly.
129
+ fd, raw_path = tempfile.mkstemp(suffix=suffix)
130
+ try:
131
+ try:
132
+ os.write(fd, reader())
133
+ finally:
134
+ os.close(fd)
135
+ del reader
136
+ yield pathlib.Path(raw_path)
137
+ finally:
138
+ try:
139
+ _os_remove(raw_path)
140
+ except FileNotFoundError:
141
+ pass
142
+
143
+
144
+ def _temp_file(path):
145
+ return _tempfile(path.read_bytes, suffix=path.name)
146
+
147
+
148
def _is_present_dir(path: Traversable) -> bool:
    """
    Some Traversables implement ``is_dir()`` to raise an
    exception (i.e. ``FileNotFoundError``) when the
    directory doesn't exist. This function wraps that call
    to always return a boolean and only return True
    if there's a dir and it exists.
    """
    try:
        return path.is_dir()
    except FileNotFoundError:
        return False
159
+
160
+
161
@functools.singledispatch
def as_file(path):
    """
    Given a Traversable object, return that object as a
    path on the local file system in a context manager.
    """
    if _is_present_dir(path):
        return _temp_dir(path)
    return _temp_file(path)


@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
    """
    Degenerate behavior for pathlib.Path objects: already on disk.
    """
    yield path
177
+
178
+
179
+ @contextlib.contextmanager
180
+ def _temp_path(dir: tempfile.TemporaryDirectory):
181
+ """
182
+ Wrap tempfile.TemporyDirectory to return a pathlib object.
183
+ """
184
+ with dir as result:
185
+ yield pathlib.Path(result)
186
+
187
+
188
@contextlib.contextmanager
def _temp_dir(path):
    """
    Given a traversable dir, recursively replicate the whole tree
    to the file system in a context manager.
    """
    assert path.is_dir()
    with _temp_path(tempfile.TemporaryDirectory()) as staging:
        yield _write_contents(staging, path)
197
+
198
+
199
+ def _write_contents(target, source):
200
+ child = target.joinpath(source.name)
201
+ if source.is_dir():
202
+ child.mkdir()
203
+ for item in source.iterdir():
204
+ _write_contents(child, item)
205
+ else:
206
+ child.write_bytes(source.read_bytes())
207
+ return child
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_compat.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+
3
+ import abc
4
+ import os
5
+ import sys
6
+ import pathlib
7
+ from contextlib import suppress
8
+ from typing import Union
9
+
10
+
11
+ if sys.version_info >= (3, 10):
12
+ from zipfile import Path as ZipPath # type: ignore
13
+ else:
14
+ from ..zipp import Path as ZipPath # type: ignore
15
+
16
+
17
+ try:
18
+ from typing import runtime_checkable # type: ignore
19
+ except ImportError:
20
+
21
+ def runtime_checkable(cls): # type: ignore
22
+ return cls
23
+
24
+
25
+ try:
26
+ from typing import Protocol # type: ignore
27
+ except ImportError:
28
+ Protocol = abc.ABC # type: ignore
29
+
30
+
31
class TraversableResourcesLoader:
    """
    Adapt loaders to provide TraversableResources and other
    compatibility.

    Used primarily for Python 3.9 and earlier where the native
    loaders do not yet implement TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    @property
    def path(self):
        # The module's origin (its file location), if any.
        return self.spec.origin

    def get_resource_reader(self, name):
        from . import readers, _adapters

        def _zip_reader(spec):
            # Only zipimport loaders have .archive/.prefix.
            with suppress(AttributeError):
                return readers.ZipReader(spec.loader, spec.name)

        def _namespace_reader(spec):
            with suppress(AttributeError, ValueError):
                return readers.NamespaceReader(spec.submodule_search_locations)

        def _available_reader(spec):
            with suppress(AttributeError):
                return spec.loader.get_resource_reader(spec.name)

        def _native_reader(spec):
            # Accept the loader's own reader only if it already speaks
            # the TraversableResources 'files' protocol.
            candidate = _available_reader(spec)
            return candidate if hasattr(candidate, 'files') else None

        def _file_reader(spec):
            try:
                location = pathlib.Path(self.path)
            except TypeError:
                return None
            if location.exists():
                return readers.FileReader(self)

        # Strategies are tried lazily in priority order via short-circuit
        # 'or'; the adapter over the spec's ResourceReader is the fallback.
        return (
            _native_reader(self.spec)
            or _zip_reader(self.spec)
            or _namespace_reader(self.spec)
            or _file_reader(self.spec)
            or _adapters.CompatibilityFiles(self.spec)
        )
89
+
90
+
91
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.

    Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
    from above for older Python compatibility (<3.10).
    """
    from . import _adapters

    spec = package.__spec__
    return _adapters.SpecLoaderAdapter(spec, TraversableResourcesLoader)
102
+
103
+
104
+ if sys.version_info >= (3, 9):
105
+ StrPath = Union[str, os.PathLike[str]]
106
+ else:
107
+ # PathLike is only subscriptable at runtime in 3.9+
108
+ StrPath = Union[str, "os.PathLike[str]"]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_itertools.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import filterfalse
2
+
3
+ from typing import (
4
+ Callable,
5
+ Iterable,
6
+ Iterator,
7
+ Optional,
8
+ Set,
9
+ TypeVar,
10
+ Union,
11
+ )
12
+
13
# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')


def unique_everseen(
    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen: Set[Union[_T, _U]] = set()
    if key is None:
        for element in iterable:
            if element not in seen:
                seen.add(element)
                yield element
    else:
        for element in iterable:
            marker = key(element)
            if marker not in seen:
                seen.add(marker)
                yield element
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/_legacy.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import os
3
+ import pathlib
4
+ import types
5
+ import warnings
6
+
7
+ from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
8
+
9
+ from . import _common
10
+
11
+ Package = Union[types.ModuleType, str]
12
+ Resource = str
13
+
14
+
15
def deprecated(func):
    """Decorate *func* to emit a DeprecationWarning pointing users at files()."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        message = (
            f"{func.__name__} is deprecated. Use files() instead. "
            "Refer to https://importlib-resources.readthedocs.io"
            "/en/latest/using.html#migrating-from-legacy for migration advice."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)

    return wrapper
28
+
29
+
30
def normalize_path(path: Any) -> str:
    """Normalize a path by ensuring it is a string.

    If the resulting string contains path separators, an exception is raised.
    """
    as_text = str(path)
    parent, file_name = os.path.split(as_text)
    if parent:
        raise ValueError(f'{path!r} must be only a file name')
    return file_name
40
+
41
+
42
@deprecated
def open_binary(package: Package, resource: Resource) -> BinaryIO:
    """Return a file-like object opened for binary reading of the resource."""
    target = _common.files(package) / normalize_path(resource)
    return target.open('rb')


@deprecated
def read_binary(package: Package, resource: Resource) -> bytes:
    """Return the binary contents of the resource."""
    target = _common.files(package) / normalize_path(resource)
    return target.read_bytes()


@deprecated
def open_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> TextIO:
    """Return a file-like object opened for text reading of the resource."""
    target = _common.files(package) / normalize_path(resource)
    return target.open('r', encoding=encoding, errors=errors)


@deprecated
def read_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> str:
    """Return the decoded string of the resource.

    The decoding-related arguments have the same semantics as those of
    bytes.decode().
    """
    with open_text(package, resource, encoding, errors) as fp:
        return fp.read()


@deprecated
def contents(package: Package) -> Iterable[str]:
    """Return an iterable of entries in `package`.

    Note that not all entries are resources. Specifically, directories are
    not considered resources. Use `is_resource()` on each entry returned here
    to check if it is a resource or not.
    """
    return [entry.name for entry in _common.files(package).iterdir()]


@deprecated
def is_resource(package: Package, name: str) -> bool:
    """True if `name` is a resource inside `package`.

    Directories are *not* resources.
    """
    resource = normalize_path(name)
    return any(
        entry.name == resource and entry.is_file()
        for entry in _common.files(package).iterdir()
    )


@deprecated
def path(
    package: Package,
    resource: Resource,
) -> ContextManager[pathlib.Path]:
    """A context manager providing a file path object to the resource.

    If the resource does not already exist on its own on the file system,
    a temporary file will be created. If the file was created, the file
    will be deleted upon exiting the context manager (no exception is
    raised if the file was deleted prior to the context manager
    exiting).
    """
    return _common.as_file(_common.files(package) / normalize_path(resource))
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/abc.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ import io
3
+ import itertools
4
+ import pathlib
5
+ from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional
6
+
7
+ from ._compat import runtime_checkable, Protocol, StrPath
8
+
9
+
10
+ __all__ = ["ResourceReader", "Traversable", "TraversableResources"]
11
+
12
+
13
class ResourceReader(metaclass=abc.ABCMeta):
    """Abstract base class for loaders to provide resource reading support."""

    @abc.abstractmethod
    def open_resource(self, resource: Text) -> BinaryIO:
        """Return an opened, file-like object for binary reading.

        The 'resource' argument is expected to represent only a file name.
        If the resource cannot be found, FileNotFoundError is raised.
        """
        # Deliberately FileNotFoundError (not NotImplementedError) so that
        # an accidental call through super() still does the right thing.
        raise FileNotFoundError

    @abc.abstractmethod
    def resource_path(self, resource: Text) -> Text:
        """Return the file system path to the specified resource.

        The 'resource' argument is expected to represent only a file name.
        If the resource does not exist on the file system, raise
        FileNotFoundError.
        """
        # Deliberately FileNotFoundError (not NotImplementedError); see above.
        raise FileNotFoundError

    @abc.abstractmethod
    def is_resource(self, path: Text) -> bool:
        """Return True if the named 'path' is a resource.

        Files are resources, directories are not.
        """
        raise FileNotFoundError

    @abc.abstractmethod
    def contents(self) -> Iterable[str]:
        """Return an iterable of entries in `package`."""
        raise FileNotFoundError
53
+
54
+
55
class TraversalError(Exception):
    """Raised when ``Traversable.joinpath`` cannot find a target segment."""
57
+
58
+
59
@runtime_checkable
class Traversable(Protocol):
    """
    An object with a subset of pathlib.Path methods suitable for
    traversing directories and opening files.

    Any exceptions that occur when accessing the backing resource
    may propagate unaltered.
    """

    @abc.abstractmethod
    def iterdir(self) -> Iterator["Traversable"]:
        """
        Yield Traversable objects in self
        """

    def read_bytes(self) -> bytes:
        """
        Read contents of self as bytes
        """
        with self.open('rb') as handle:
            return handle.read()

    def read_text(self, encoding: Optional[str] = None) -> str:
        """
        Read contents of self as text
        """
        with self.open(encoding=encoding) as handle:
            return handle.read()

    @abc.abstractmethod
    def is_dir(self) -> bool:
        """
        Return True if self is a directory
        """

    @abc.abstractmethod
    def is_file(self) -> bool:
        """
        Return True if self is a file
        """

    def joinpath(self, *descendants: StrPath) -> "Traversable":
        """
        Return Traversable resolved with any descendants applied.

        Each descendant should be a path segment relative to self
        and each may contain multiple levels separated by
        ``posixpath.sep`` (``/``).
        """
        if not descendants:
            return self
        # Flatten every descendant into its individual path segments.
        segments = itertools.chain.from_iterable(
            pathlib.PurePosixPath(descendant).parts for descendant in descendants
        )
        first = next(segments)
        for entry in self.iterdir():
            if entry.name == first:
                # Recurse into the matched child with the remaining segments.
                return entry.joinpath(*segments)
        raise TraversalError(
            "Target not found during traversal.", first, list(segments)
        )

    def __truediv__(self, child: StrPath) -> "Traversable":
        """
        Return Traversable child in self
        """
        return self.joinpath(child)

    @abc.abstractmethod
    def open(self, mode='r', *args, **kwargs):
        """
        mode may be 'r' or 'rb' to open as text or binary. Return a handle
        suitable for reading (same as pathlib.Path.open).

        When opening as text, accepts encoding parameters such as those
        accepted by io.TextIOWrapper.
        """

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """
        The base name of this object without any parent references.
        """
148
+
149
+
150
class TraversableResources(ResourceReader):
    """
    The required interface for providing traversable
    resources.
    """

    @abc.abstractmethod
    def files(self) -> "Traversable":
        """Return a Traversable object for the loaded package."""

    def open_resource(self, resource: StrPath) -> io.BufferedReader:
        # Resolve relative to the package root and open in binary mode.
        return self.files().joinpath(resource).open('rb')

    def resource_path(self, resource: Any) -> NoReturn:
        # Traversable resources need not exist on the file system.
        raise FileNotFoundError(resource)

    def is_resource(self, path: StrPath) -> bool:
        return self.files().joinpath(path).is_file()

    def contents(self) -> Iterator[str]:
        return (entry.name for entry in self.files().iterdir())
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/py.typed ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/readers.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import pathlib
3
+ import operator
4
+
5
+ from . import abc
6
+
7
+ from ._itertools import unique_everseen
8
+ from ._compat import ZipPath
9
+
10
+
11
def remove_duplicates(items):
    """Yield *items* in order, dropping repeats (first occurrence wins)."""
    return iter(collections.OrderedDict.fromkeys(items))
13
+
14
+
15
class FileReader(abc.TraversableResources):
    """TraversableResources for a package stored as plain files on disk."""

    def __init__(self, loader):
        # Resources live in the directory containing the module file.
        self.path = pathlib.Path(loader.path).parent

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path
29
+
30
+
31
class ZipReader(abc.TraversableResources):
    """TraversableResources for a package imported from a zip archive."""

    def __init__(self, loader, module):
        _, _, name = module.rpartition('.')
        # Normalize the loader prefix to posix separators and anchor it
        # at this package's directory inside the archive.
        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
        self.archive = loader.archive

    def open_resource(self, resource):
        try:
            return super().open_resource(resource)
        except KeyError as exc:
            # zipfile reports missing members with KeyError; translate to
            # the FileNotFoundError that callers of this API expect.
            raise FileNotFoundError(exc.args[0])

    def is_resource(self, path):
        # workaround for `zipfile.Path.is_file` returning true
        # for non-existent paths.
        target = self.files().joinpath(path)
        return target.is_file() and target.exists()

    def files(self):
        return ZipPath(self.archive, self.prefix)
51
+
52
+
53
class MultiplexedPath(abc.Traversable):
    """
    Given a series of Traversable objects, implement a merged
    version of the interface across all objects. Useful for
    namespace packages which may be multihomed at a single
    name.
    """

    def __init__(self, *paths):
        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
        if not self._paths:
            message = 'MultiplexedPath must contain at least one path'
            raise FileNotFoundError(message)
        if not all(path.is_dir() for path in self._paths):
            raise NotADirectoryError('MultiplexedPath only supports directories')

    def iterdir(self):
        # Merge children across all homes; for duplicate names the entry
        # from the earliest home wins.
        children = (entry for home in self._paths for entry in home.iterdir())
        return unique_everseen(children, key=operator.attrgetter('name'))

    def read_bytes(self):
        raise FileNotFoundError(f'{self} is not a file')

    def read_text(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def joinpath(self, *descendants):
        try:
            return super().joinpath(*descendants)
        except abc.TraversalError:
            # One of the paths did not resolve (a directory does not exist).
            # Just return something that will not exist.
            return self._paths[0].joinpath(*descendants)

    def open(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    @property
    def name(self):
        return self._paths[0].name

    def __repr__(self):
        described = ', '.join(f"'{path}'" for path in self._paths)
        return f'MultiplexedPath({described})'
103
+
104
+
105
class NamespaceReader(abc.TraversableResources):
    """TraversableResources for a (possibly multihomed) namespace package."""

    def __init__(self, namespace_path):
        # Guard against being handed something other than a namespace
        # package's _NamespacePath search locations.
        if 'NamespacePath' not in str(namespace_path):
            raise ValueError('Invalid path')
        self.path = MultiplexedPath(*list(namespace_path))

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/importlib_resources/simple.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Interface adapters for low-level readers.
3
+ """
4
+
5
+ import abc
6
+ import io
7
+ import itertools
8
+ from typing import BinaryIO, List
9
+
10
+ from .abc import Traversable, TraversableResources
11
+
12
+
13
class SimpleReader(abc.ABC):
    """
    The minimum, low-level interface required from a resource
    provider.
    """

    @property
    @abc.abstractmethod
    def package(self) -> str:
        """
        The name of the package for which this reader loads resources.
        """

    @abc.abstractmethod
    def children(self) -> List['SimpleReader']:
        """
        Obtain an iterable of SimpleReader for available
        child containers (e.g. directories).
        """

    @abc.abstractmethod
    def resources(self) -> List[str]:
        """
        Obtain available named resources for this virtual package.
        """

    @abc.abstractmethod
    def open_binary(self, resource: str) -> BinaryIO:
        """
        Obtain a File-like for a named resource.
        """

    @property
    def name(self):
        # The final dotted segment of the package name.
        return self.package.rpartition('.')[-1]
48
+
49
+
50
class ResourceContainer(Traversable):
    """
    Traversable container for a package's resources via its reader.
    """

    def __init__(self, reader: SimpleReader):
        self.reader = reader

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def iterdir(self):
        # NOTE(review): `self.reader.resources` is declared as a *method*
        # on SimpleReader, yet it is iterated here without being called —
        # confirm whether concrete readers expose it as a property/list.
        file_entries = (
            ResourceHandle(self, name) for name in self.reader.resources
        )
        dir_entries = (
            ResourceContainer(child) for child in self.reader.children()
        )
        return itertools.chain(file_entries, dir_entries)

    def open(self, *args, **kwargs):
        raise IsADirectoryError()
71
+
72
+
73
class ResourceHandle(Traversable):
    """
    Handle to a named resource in a ResourceReader.
    """

    def __init__(self, parent: ResourceContainer, name: str):
        self.parent = parent
        self.name = name  # type: ignore

    def is_file(self):
        return True

    def is_dir(self):
        return False

    def open(self, mode='r', *args, **kwargs):
        """
        Open the resource: binary when 'b' is in *mode*, otherwise wrapped
        in a text layer (extra args are forwarded to io.TextIOWrapper).
        """
        stream = self.parent.reader.open_binary(self.name)
        if 'b' not in mode:
            # Bug fix: the binary stream must be passed as the buffer for
            # the text wrapper; previously it was dropped, so TextIOWrapper
            # was constructed without a buffer (TypeError) and the data
            # stream was lost.
            stream = io.TextIOWrapper(stream, *args, **kwargs)
        return stream

    def joinpath(self, name):
        raise RuntimeError("Cannot traverse into a resource")
96
+
97
+
98
class TraversableReader(TraversableResources, SimpleReader):
    """
    A TraversableResources based on SimpleReader. Resource providers
    may derive from this class to provide the TraversableResources
    interface by supplying the SimpleReader interface.
    """

    def files(self):
        # Present this reader's virtual package as a Traversable tree.
        return ResourceContainer(self)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ """More routines for operating on iterables, beyond itertools"""
2
+
3
+ from .more import * # noqa
4
+ from .recipes import * # noqa
5
+
6
+ __version__ = '9.1.0'
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (308 Bytes). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/recipes.cpython-38.pyc ADDED
Binary file (26.7 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/recipes.py ADDED
@@ -0,0 +1,930 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Imported from the recipes section of the itertools documentation.
2
+
3
+ All functions taken from the recipes section of the itertools library docs
4
+ [1]_.
5
+ Some backward-compatible usability improvements have been made.
6
+
7
+ .. [1] http://docs.python.org/library/itertools.html#recipes
8
+
9
+ """
10
+ import math
11
+ import operator
12
+ import warnings
13
+
14
+ from collections import deque
15
+ from collections.abc import Sized
16
+ from functools import reduce
17
+ from itertools import (
18
+ chain,
19
+ combinations,
20
+ compress,
21
+ count,
22
+ cycle,
23
+ groupby,
24
+ islice,
25
+ product,
26
+ repeat,
27
+ starmap,
28
+ tee,
29
+ zip_longest,
30
+ )
31
+ from random import randrange, sample, choice
32
+ from sys import hexversion
33
+
34
+ __all__ = [
35
+ 'all_equal',
36
+ 'batched',
37
+ 'before_and_after',
38
+ 'consume',
39
+ 'convolve',
40
+ 'dotproduct',
41
+ 'first_true',
42
+ 'factor',
43
+ 'flatten',
44
+ 'grouper',
45
+ 'iter_except',
46
+ 'iter_index',
47
+ 'matmul',
48
+ 'ncycles',
49
+ 'nth',
50
+ 'nth_combination',
51
+ 'padnone',
52
+ 'pad_none',
53
+ 'pairwise',
54
+ 'partition',
55
+ 'polynomial_from_roots',
56
+ 'powerset',
57
+ 'prepend',
58
+ 'quantify',
59
+ 'random_combination_with_replacement',
60
+ 'random_combination',
61
+ 'random_permutation',
62
+ 'random_product',
63
+ 'repeatfunc',
64
+ 'roundrobin',
65
+ 'sieve',
66
+ 'sliding_window',
67
+ 'subslices',
68
+ 'tabulate',
69
+ 'tail',
70
+ 'take',
71
+ 'transpose',
72
+ 'triplewise',
73
+ 'unique_everseen',
74
+ 'unique_justseen',
75
+ ]
76
+
77
+ _marker = object()
78
+
79
+
80
def take(n, iterable):
    """Return first *n* items of the iterable as a list.

    >>> take(3, range(10))
    [0, 1, 2]

    If there are fewer than *n* items in the iterable, all of them are
    returned.

    >>> take(10, range(3))
    [0, 1, 2]

    """
    prefix = islice(iterable, n)
    return list(prefix)
94
+
95
+
96
def tabulate(function, start=0):
    """Return an iterator over the results of ``func(start)``,
    ``func(start + 1)``, ``func(start + 2)``...

    *func* should be a function that accepts one integer argument.

    If *start* is not specified it defaults to 0. It will be incremented each
    time the iterator is advanced.

    >>> square = lambda x: x ** 2
    >>> iterator = tabulate(square, -3)
    >>> take(4, iterator)
    [9, 4, 1, 0]

    """
    # Lazily apply *function* to the unbounded count starting at *start*.
    return (function(index) for index in count(start))
112
+
113
+
114
def tail(n, iterable):
    """Return an iterator over the last *n* items of *iterable*.

    >>> t = tail(3, 'ABCDEFG')
    >>> list(t)
    ['E', 'F', 'G']

    """
    # Sized inputs can be sliced directly; otherwise a bounded deque keeps
    # only the final n items. Non-iterables raise TypeError from either
    # islice or deque, which is why there is no explicit Iterable check.
    if isinstance(iterable, Sized):
        start = max(0, len(iterable) - n)
        yield from islice(iterable, start, None)
    else:
        yield from iter(deque(iterable, maxlen=n))
130
+
131
+
132
def consume(iterator, n=None):
    """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
    entirely.

    Efficiently exhausts an iterator without returning values. Defaults to
    consuming the whole iterator, but an optional second argument may be
    provided to limit consumption.

    >>> i = (x for x in range(10))
    >>> next(i)
    0
    >>> consume(i, 3)
    >>> next(i)
    4
    >>> consume(i)
    >>> next(i)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    StopIteration

    If the iterator has fewer items remaining than the provided limit, the
    whole iterator will be consumed.

    >>> i = (x for x in range(3))
    >>> consume(i, 5)
    >>> next(i)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    StopIteration

    """
    # Use functions that consume iterators at C speed.
    if n is None:
        # Feed the entire iterator into a zero-length deque.
        deque(iterator, maxlen=0)
        return
    # Advance to the empty slice starting at position n.
    next(islice(iterator, n, n), None)
170
+
171
+
172
def nth(iterable, n, default=None):
    """Returns the nth item or a default value.

    >>> l = range(10)
    >>> nth(l, 3)
    3
    >>> nth(l, 20, "zebra")
    'zebra'

    """
    # Skip the first n items, then take the next one (or *default*).
    remainder = islice(iterable, n, None)
    return next(remainder, default)
183
+
184
+
185
def all_equal(iterable):
    """
    Returns ``True`` if all the elements are equal to each other.

    >>> all_equal('aaaa')
    True
    >>> all_equal('aaab')
    False

    """
    # A uniform (or empty) iterable produces at most one group, so asking
    # for a second group must come up empty.
    grouped = groupby(iterable)
    return bool(next(grouped, True)) and not next(grouped, False)
197
+
198
+
199
def quantify(iterable, pred=bool):
    """Return how many times the predicate is true.

    >>> quantify([True, False, True])
    2

    """
    # Summing the predicate's return values (not counting truthy results)
    # matches the classic itertools recipe exactly.
    total = 0
    for item in iterable:
        total += pred(item)
    return total
207
+
208
+
209
def pad_none(iterable):
    """Returns the sequence of elements and then returns ``None`` indefinitely.

    >>> take(5, pad_none(range(3)))
    [0, 1, 2, None, None]

    Useful for emulating the behavior of the built-in :func:`map` function.

    See also :func:`padded`.

    """
    yield from iterable
    yield from repeat(None)


# Historical spelling, kept for backward compatibility.
padnone = pad_none
224
+
225
+
226
def ncycles(iterable, n):
    """Returns the sequence elements *n* times

    >>> list(ncycles(["a", "b"], 3))
    ['a', 'b', 'a', 'b', 'a', 'b']

    """
    # Materialize once so the input may be a one-shot iterator.
    saved = tuple(iterable)
    return chain.from_iterable(saved for _ in range(n))
234
+
235
+
236
def dotproduct(vec1, vec2):
    """Returns the dot product of the two iterables.

    >>> dotproduct([10, 10], [20, 20])
    400

    """
    # zip() stops at the shorter input, matching map(mul, vec1, vec2).
    return sum(a * b for a, b in zip(vec1, vec2))
244
+
245
+
246
def flatten(listOfLists):
    """Return an iterator flattening one level of nesting in a list of lists.

    >>> list(flatten([[0, 1], [2, 3]]))
    [0, 1, 2, 3]

    See also :func:`collapse`, which can flatten multiple levels of nesting.

    """
    # Lazy one-level flatten; equivalent to chain.from_iterable.
    return (item for sublist in listOfLists for item in sublist)
256
+
257
+
258
def repeatfunc(func, times=None, *args):
    """Call *func* with *args* repeatedly, returning an iterable over the
    results.

    If *times* is specified, the iterable will terminate after that many
    repetitions:

    >>> from operator import add
    >>> times = 4
    >>> args = 3, 5
    >>> list(repeatfunc(add, times, *args))
    [8, 8, 8, 8]

    If *times* is ``None`` the iterable will not terminate:

    >>> from random import randrange
    >>> take(6, repeatfunc(randrange, None, 1, 11)) # doctest:+SKIP
    [2, 4, 8, 1, 8, 4]

    """
    # repeat() yields the same args tuple forever (or *times* times); starmap
    # unpacks it into each call.
    argstream = repeat(args) if times is None else repeat(args, times)
    return starmap(func, argstream)
283
+
284
+
285
def _pairwise(iterable):
    """Returns an iterator of paired items, overlapping, from the original

    >>> take(4, pairwise(count()))
    [(0, 1), (1, 2), (2, 3), (3, 4)]

    On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.

    """
    # Duplicate the stream and advance one copy by a single element; zipping
    # the two then produces each item next to its successor.
    current, ahead = tee(iterable)
    next(ahead, None)
    for pair in zip(current, ahead):
        yield pair
297
+
298
+
299
# Prefer the C implementation (itertools.pairwise, added in Python 3.10)
# when it exists; otherwise fall back to the pure-Python recipe above.
try:
    from itertools import pairwise as itertools_pairwise
except ImportError:
    pairwise = _pairwise
else:

    # Wrap in a generator function (rather than aliasing directly) so the
    # public name has its own signature while reusing the recipe's docstring.
    def pairwise(iterable):
        yield from itertools_pairwise(iterable)

    pairwise.__doc__ = _pairwise.__doc__
309
+
310
+
311
class UnequalIterablesError(ValueError):
    """Raised when iterables that were required to have equal lengths do not."""

    def __init__(self, details=None):
        # *details*, if given, is a (first_length, index, length) triple
        # identifying which iterable's length disagreed with the first's.
        msg = 'Iterables have different lengths'
        if details is not None:
            first_len, index, length = details
            msg = '{}: index 0 has length {}; index {} has length {}'.format(
                msg, first_len, index, length
            )
        super().__init__(msg)
320
+
321
+
322
def _zip_equal_generator(iterables):
    # Zip with a private sentinel as the fill value; seeing the sentinel in a
    # combo means one iterable ran out before the others, i.e. unequal lengths.
    for combo in zip_longest(*iterables, fillvalue=_marker):
        for val in combo:
            if val is _marker:
                raise UnequalIterablesError()
        yield combo
328
+
329
+
330
def _zip_equal(*iterables):
    """Zip *iterables*, raising UnequalIterablesError on a length mismatch."""
    # Check whether the iterables are all the same size.
    try:
        first_size = len(iterables[0])
        for i, it in enumerate(iterables[1:], 1):
            size = len(it)
            if size != first_size:
                break
        else:
            # If we didn't break out, we can use the built-in zip.
            return zip(*iterables)

        # If we did break out, there was a mismatch.
        raise UnequalIterablesError(details=(first_size, i, size))
    # If any one of the iterables didn't have a length, start reading
    # them until one runs out.
    except TypeError:
        return _zip_equal_generator(iterables)
348
+
349
+
350
def grouper(iterable, n, incomplete='fill', fillvalue=None):
    """Group elements from *iterable* into fixed-length groups of length *n*.

    >>> list(grouper('ABCDEF', 3))
    [('A', 'B', 'C'), ('D', 'E', 'F')]

    The keyword arguments *incomplete* and *fillvalue* control what happens for
    iterables whose length is not a multiple of *n*.

    When *incomplete* is `'fill'`, the last group will contain instances of
    *fillvalue*.

    >>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x'))
    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]

    When *incomplete* is `'ignore'`, the last group will not be emitted.

    >>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x'))
    [('A', 'B', 'C'), ('D', 'E', 'F')]

    When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.

    """
    # n references to one shared iterator: zipping them peels off n items at
    # a time.
    iterators = [iter(iterable)] * n
    if incomplete == 'fill':
        return zip_longest(*iterators, fillvalue=fillvalue)
    elif incomplete == 'strict':
        return _zip_equal(*iterators)
    elif incomplete == 'ignore':
        return zip(*iterators)
    raise ValueError('Expected fill, strict, or ignore')
388
+
389
+
390
def roundrobin(*iterables):
    """Yields an item from each iterable, alternating between them.

    >>> list(roundrobin('ABC', 'D', 'EF'))
    ['A', 'D', 'E', 'B', 'F', 'C']

    This function produces the same output as :func:`interleave_longest`, but
    may perform better for some inputs (in particular when the number of
    iterables is small).

    """
    # Recipe credited to George Sakkis. Cycle over each iterator's bound
    # __next__; when one is exhausted, rebuild the cycle from the survivors
    # (islice over the old cycle skips the iterator that just raised).
    pending = len(iterables)
    nexts = cycle(iter(it).__next__ for it in iterables)
    while pending:
        try:
            # Renamed from `next` so the builtin next() is not shadowed.
            for get_next in nexts:
                yield get_next()
        except StopIteration:
            pending -= 1
            nexts = cycle(islice(nexts, pending))
411
+
412
+
413
def partition(pred, iterable):
    """
    Returns a 2-tuple of iterables derived from the input iterable.
    The first yields the items that have ``pred(item) == False``.
    The second yields the items that have ``pred(item) == True``.

    >>> is_odd = lambda x: x % 2 != 0
    >>> even_items, odd_items = partition(is_odd, range(10))
    >>> list(even_items), list(odd_items)
    ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])

    If *pred* is None, :func:`bool` is used.

    """
    predicate = bool if pred is None else pred

    # Evaluate the predicate once per item; tee shares the cached results
    # between the two output streams.
    flagged = ((predicate(item), item) for item in iterable)
    false_branch, true_branch = tee(flagged)
    falses = (item for (flag, item) in false_branch if not flag)
    trues = (item for (flag, item) in true_branch if flag)
    return falses, trues
442
+
443
+
444
def powerset(iterable):
    """Yields all possible subsets of the iterable.

    >>> list(powerset([1, 2, 3]))
    [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]

    :func:`powerset` will operate on iterables that aren't :class:`set`
    instances, so repeated elements in the input will produce repeated elements
    in the output. Use :func:`unique_everseen` on the input to avoid generating
    duplicates.

    """
    # Emit all size-0 subsets, then size-1, and so on up to the full set.
    pool = list(iterable)
    return chain.from_iterable(
        combinations(pool, size) for size in range(len(pool) + 1)
    )
465
+
466
+
467
def unique_everseen(iterable, key=None):
    """
    Yield unique elements, preserving order.

    >>> list(unique_everseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D']
    >>> list(unique_everseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'D']

    Sequences with a mix of hashable and unhashable items can be used.
    The function will be slower (i.e., `O(n^2)`) for unhashable items.

    Remember that ``list`` objects are unhashable - you can use the *key*
    parameter to transform the list to a tuple (which is hashable) to
    avoid a slowdown.

    >>> iterable = ([1, 2], [2, 3], [1, 2])
    >>> list(unique_everseen(iterable))  # Slow
    [[1, 2], [2, 3]]
    >>> list(unique_everseen(iterable, key=tuple))  # Faster
    [[1, 2], [2, 3]]

    Similarly, you may want to convert unhashable ``set`` objects with
    ``key=frozenset``. For ``dict`` objects,
    ``key=lambda x: frozenset(x.items())`` can be used.

    """
    hashed_seen = set()
    add_hashed = hashed_seen.add
    unhashed_seen = []
    add_unhashed = unhashed_seen.append
    have_key = key is not None

    for element in iterable:
        value = key(element) if have_key else element
        try:
            # Fast O(1) path for hashable keys.
            if value not in hashed_seen:
                add_hashed(value)
                yield element
        except TypeError:
            # Unhashable keys fall back to a linear scan of a list.
            if value not in unhashed_seen:
                add_unhashed(value)
                yield element
510
+
511
+
512
def unique_justseen(iterable, key=None):
    """Yields elements in order, ignoring serial duplicates

    >>> list(unique_justseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D', 'A', 'B']
    >>> list(unique_justseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'A', 'D']

    """
    # Take the first element from each run of consecutive equal-by-key items.
    return (next(group) for _, group in groupby(iterable, key))
522
+
523
+
524
+ def iter_except(func, exception, first=None):
525
+ """Yields results from a function repeatedly until an exception is raised.
526
+
527
+ Converts a call-until-exception interface to an iterator interface.
528
+ Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
529
+ to end the loop.
530
+
531
+ >>> l = [0, 1, 2]
532
+ >>> list(iter_except(l.pop, IndexError))
533
+ [2, 1, 0]
534
+
535
+ Multiple exceptions can be specified as a stopping condition:
536
+
537
+ >>> l = [1, 2, 3, '...', 4, 5, 6]
538
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
539
+ [7, 6, 5]
540
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
541
+ [4, 3, 2]
542
+ >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
543
+ []
544
+
545
+ """
546
+ try:
547
+ if first is not None:
548
+ yield first()
549
+ while 1:
550
+ yield func()
551
+ except exception:
552
+ pass
553
+
554
+
555
def first_true(iterable, default=None, pred=None):
    """
    Returns the first true value in the iterable.

    If no true value is found, returns *default*

    If *pred* is not None, returns the first item for which
    ``pred(item) == True`` .

    >>> first_true(range(10))
    1
    >>> first_true(range(10), pred=lambda x: x > 5)
    6
    >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
    'missing'

    """
    # filter(None, ...) keeps truthy items, matching next(filter(...), default).
    for item in filter(pred, iterable):
        return item
    return default
573
+
574
+
575
def random_product(*args, repeat=1):
    """Draw an item at random from each of the input iterables.

    >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
    ('c', 3, 'Z')

    If *repeat* is provided as a keyword argument, that many items will be
    drawn from each iterable.

    This is equivalent to taking a random selection from
    ``itertools.product(*args, repeat=repeat)``.

    """
    # Materialize each iterable once, duplicate the whole list *repeat*
    # times, then pick one element from every pool.
    pools = [tuple(arg) for arg in args] * repeat
    return tuple(map(choice, pools))
593
+
594
+
595
def random_permutation(iterable, r=None):
    """Return a random *r* length permutation of the elements in *iterable*.

    If *r* is not specified or is ``None``, then *r* defaults to the length of
    *iterable*.

    >>> random_permutation(range(5)) # doctest:+SKIP
    (3, 4, 0, 1, 2)

    This is equivalent to taking a random selection from
    ``itertools.permutations(iterable, r)``.

    """
    pool = tuple(iterable)
    size = len(pool) if r is None else r
    # sample() draws without replacement, which is exactly a permutation.
    return tuple(sample(pool, size))
611
+
612
+
613
def random_combination(iterable, r):
    """Return a random *r* length subsequence of the elements in *iterable*.

    >>> random_combination(range(5), 3) # doctest:+SKIP
    (2, 3, 4)

    This is equivalent to taking a random selection from
    ``itertools.combinations(iterable, r)``.

    """
    pool = tuple(iterable)
    # Sorting the sampled indexes keeps the original element order, as
    # combinations() would.
    chosen = sorted(sample(range(len(pool)), r))
    return tuple(pool[i] for i in chosen)
627
+
628
+
629
def random_combination_with_replacement(iterable, r):
    """Return a random *r* length subsequence of elements in *iterable*,
    allowing individual elements to be repeated.

    >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
    (0, 0, 1, 2, 2)

    This is equivalent to taking a random selection from
    ``itertools.combinations_with_replacement(iterable, r)``.

    """
    pool = tuple(iterable)
    size = len(pool)
    # Independent draws allow repeats; sorting restores combination order.
    chosen = sorted(randrange(size) for _ in range(r))
    return tuple(pool[i] for i in chosen)
644
+
645
+
646
def nth_combination(iterable, r, index):
    """Equivalent to ``list(combinations(iterable, r))[index]``.

    The subsequences of *iterable* that are of length *r* can be ordered
    lexicographically. :func:`nth_combination` computes the subsequence at
    sort position *index* directly, without computing the previous
    subsequences.

    >>> nth_combination(range(5), 3, 5)
    (0, 3, 4)

    ``ValueError`` will be raised If *r* is negative or greater than the length
    of *iterable*.
    ``IndexError`` will be raised if the given *index* is invalid.
    """
    pool = tuple(iterable)
    n = len(pool)
    if (r < 0) or (r > n):
        raise ValueError

    # c ends up as C(n, r); computed via the symmetric k = min(r, n - r) so
    # the loop runs as few iterations as possible.
    c = 1
    k = min(r, n - r)
    for i in range(1, k + 1):
        c = c * (n - k + i) // i

    # Negative indexes count back from the last combination, like sequences.
    if index < 0:
        index += c

    if (index < 0) or (index >= c):
        raise IndexError

    result = []
    while r:
        # c becomes the count of combinations that share the current leading
        # element; skip whole blocks of that size until *index* lands inside
        # one, then emit that block's leading element.
        c, n, r = c * r // n, n - 1, r - 1
        while index >= c:
            index -= c
            c, n = c * (n - r) // n, n - 1
        result.append(pool[-1 - n])

    return tuple(result)
686
+
687
+
688
def prepend(value, iterator):
    """Yield *value*, followed by the elements in *iterator*.

    >>> value = '0'
    >>> iterator = ['1', '2', '3']
    >>> list(prepend(value, iterator))
    ['0', '1', '2', '3']

    To prepend multiple values, see :func:`itertools.chain`
    or :func:`value_chain`.

    """
    yield value
    yield from iterator
701
+
702
+
703
def convolve(signal, kernel):
    """Convolve the iterable *signal* with the iterable *kernel*.

    >>> signal = (1, 2, 3, 4, 5)
    >>> kernel = [3, 2, 1]
    >>> list(convolve(signal, kernel))
    [3, 8, 14, 20, 26, 14, 5]

    Note: the input arguments are not interchangeable, as the *kernel*
    is immediately consumed and stored.

    """
    # Reverse the kernel once so each output is a straight elementwise
    # product with the sliding window.
    kernel = tuple(kernel)[::-1]
    n = len(kernel)
    # A zero-filled window of length n; old values fall off as new arrive.
    window = deque([0], maxlen=n) * n
    # Pad the signal with n - 1 trailing zeros so the kernel slides fully
    # past the end of the signal.
    for x in chain(signal, repeat(0, n - 1)):
        window.append(x)
        yield sum(map(operator.mul, kernel, window))
721
+
722
+
723
def before_and_after(predicate, it):
    """A variant of :func:`takewhile` that allows complete access to the
    remainder of the iterator.

    >>> it = iter('ABCdEfGhI')
    >>> all_upper, remainder = before_and_after(str.isupper, it)
    >>> ''.join(all_upper)
    'ABC'
    >>> ''.join(remainder) # takewhile() would lose the 'd'
    'dEfGhI'

    Note that the first iterator must be fully consumed before the second
    iterator can generate valid results.
    """
    it = iter(it)
    # Holds the first element that fails *predicate*, so the remainder
    # iterator can replay it instead of losing it.
    transition = []

    def true_iterator():
        for elem in it:
            if predicate(elem):
                yield elem
            else:
                transition.append(elem)
                return

    # Note: this is different from itertools recipes to allow nesting
    # before_and_after remainders into before_and_after again. See tests
    # for an example.
    remainder_iterator = chain(transition, it)

    return true_iterator(), remainder_iterator
754
+
755
+
756
def triplewise(iterable):
    """Return overlapping triplets from *iterable*.

    >>> list(triplewise('ABCDE'))
    [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]

    """
    # pairwise(pairwise(x)) yields ((a, b), (b, c)); unpacking gives (a, b, c).
    for (a, _), (b, c) in pairwise(pairwise(iterable)):
        yield a, b, c
765
+
766
+
767
def sliding_window(iterable, n):
    """Return a sliding window of width *n* over *iterable*.

    >>> list(sliding_window(range(6), 4))
    [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]

    If *iterable* has fewer than *n* items, then nothing is yielded:

    >>> list(sliding_window(range(3), 4))
    []

    For a variant with more features, see :func:`windowed`.
    """
    iterator = iter(iterable)
    # Prime the window with the first n items; maxlen makes each later
    # append push the oldest item out automatically.
    window = deque(islice(iterator, n), maxlen=n)
    if len(window) == n:
        yield tuple(window)
        for item in iterator:
            window.append(item)
            yield tuple(window)
787
+
788
+
789
def subslices(iterable):
    """Return all contiguous non-empty subslices of *iterable*.

    >>> list(subslices('ABC'))
    [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']]

    This is similar to :func:`substrings`, but emits items in a different
    order.
    """
    seq = list(iterable)
    # Every (start, stop) pair with start < stop names one non-empty slice.
    bounds = combinations(range(len(seq) + 1), 2)
    return (seq[start:stop] for start, stop in bounds)
801
+
802
+
803
def polynomial_from_roots(roots):
    """Compute a polynomial's coefficients from its roots.

    >>> roots = [5, -4, 3]  # (x - 5) * (x + 4) * (x - 3)
    >>> polynomial_from_roots(roots)  # x^3 - 4 * x^2 - 17 * x + 60
    [1, -4, -17, 60]
    """
    # math.prod exists on 3.8+; keep a reduce() fallback for older pythons.
    prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1))
    # By Vieta's formulas, the k-th coefficient is the sum of all k-fold
    # products of the negated roots.
    negated = [-root for root in roots]
    coefficients = []
    for k in range(len(negated) + 1):
        coefficients.append(sum(map(prod, combinations(negated, k))))
    return coefficients
816
+
817
+
818
def iter_index(iterable, value, start=0):
    """Yield the index of each place in *iterable* that *value* occurs,
    beginning with index *start*.

    See :func:`locate` for a more general means of finding the indexes
    associated with particular values.

    >>> list(iter_index('AABCADEAF', 'A'))
    [0, 1, 4, 7]
    """
    try:
        seq_index = iterable.index
    except AttributeError:
        # Generic iterables: scan element by element.
        for index, element in enumerate(islice(iterable, start, None), start):
            if element is value or element == value:
                yield index
    else:
        # Sequences: let .index() do the searching at C speed, resuming
        # just past each hit until it raises ValueError.
        position = start - 1
        try:
            while True:
                position = seq_index(value, position + 1)
                yield position
        except ValueError:
            pass
845
+
846
+
847
def sieve(n):
    """Yield the primes less than n.

    >>> list(sieve(30))
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    """
    # math.isqrt exists on 3.8+; fall back to float sqrt for older pythons.
    isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
    # Flag array: odd indexes start at 1 (candidate prime), even at 0.
    data = bytearray((0, 1)) * (n // 2)
    data[:3] = 0, 0, 0
    limit = isqrt(n) + 1
    for p in compress(range(limit), data):
        # Clear the odd multiples of p, starting at p*p.
        data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
    # Restore 2, the only even prime, then yield the surviving indexes.
    data[2] = 1
    return iter_index(data, 1) if n > 2 else iter([])
861
+
862
+
863
def batched(iterable, n):
    """Batch data into lists of length *n*. The last batch may be shorter.

    >>> list(batched('ABCDEFG', 3))
    [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]

    This recipe is from the ``itertools`` docs. This library also provides
    :func:`chunked`, which has a different implementation.
    """
    # Deprecated on 3.12+, where itertools.batched is built in.
    if hexversion >= 0x30C00A0:  # Python 3.12.0a0
        warnings.warn(
            (
                'batched will be removed in a future version of '
                'more-itertools. Use the standard library '
                'itertools.batched function instead'
            ),
            DeprecationWarning,
        )

    iterator = iter(iterable)
    while True:
        chunk = list(islice(iterator, n))
        if not chunk:
            return
        yield chunk
888
+
889
+
890
def transpose(it):
    """Swap the rows and columns of the input.

    >>> list(transpose([(1, 2, 3), (11, 22, 33)]))
    [(1, 11), (2, 22), (3, 33)]

    The caller should ensure that the dimensions of the input are compatible.
    """
    # TODO: when 3.9 goes end-of-life, add strict=True to this.
    return zip(*it)
900
+
901
+
902
def matmul(m1, m2):
    """Multiply two matrices.
    >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
    [[49, 80], [41, 60]]

    The caller should ensure that the dimensions of the input matrices are
    compatible with each other.
    """
    # Dot every row of m1 with every column of m2 (columns come from
    # transpose), then regroup the flat stream into rows of the result,
    # which has len(m2[0]) columns.
    n = len(m2[0])
    return batched(starmap(dotproduct, product(m1, transpose(m2))), n)
912
+
913
+
914
def factor(n):
    """Yield the prime factors of n.
    >>> list(factor(360))
    [2, 2, 2, 3, 3, 5]
    """
    # math.isqrt exists on 3.8+; fall back to float sqrt for older pythons.
    isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
    # Trial-divide by each prime up to sqrt(n), dividing n down as we go.
    for prime in sieve(isqrt(n) + 1):
        while True:
            quotient, remainder = divmod(n, prime)
            if remainder:
                break
            yield prime
            n = quotient
        if n == 1:
            return
    # Anything left greater than 1 is itself a prime factor.
    if n >= 2:
        yield n
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

# Distribution metadata constants for the vendored `packaging` library.
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"

__version__ = "23.1"

__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"

__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "2014-2019 %s" % __author__
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (497 Bytes). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/_elffile.cpython-38.pyc ADDED
Binary file (3.33 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-38.pyc ADDED
Binary file (5.67 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-38.pyc ADDED
Binary file (3.16 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/_tokenizer.cpython-38.pyc ADDED
Binary file (5.7 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/metadata.cpython-38.pyc ADDED
Binary file (6.74 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-38.pyc ADDED
Binary file (2.84 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/tags.cpython-38.pyc ADDED
Binary file (13.2 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-38.pyc ADDED
Binary file (3.68 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-38.pyc ADDED
Binary file (14.1 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_elffile.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ELF file parser.
3
+
4
+ This provides a class ``ELFFile`` that parses an ELF executable in a similar
5
+ interface to ``ZipFile``. Only the read interface is implemented.
6
+
7
+ Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
8
+ ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
9
+ """
10
+
11
+ import enum
12
+ import os
13
+ import struct
14
+ from typing import IO, Optional, Tuple
15
+
16
+
17
class ELFInvalid(ValueError):
    """Raised when a file cannot be parsed as a valid ELF executable."""
19
+
20
+
21
class EIClass(enum.IntEnum):
    # Values of the EI_CLASS identification byte: 32- vs 64-bit object format.
    C32 = 1
    C64 = 2
24
+
25
+
26
class EIData(enum.IntEnum):
    # Values of the EI_DATA identification byte: byte order of the file.
    Lsb = 1  # little-endian
    Msb = 2  # big-endian
29
+
30
+
31
class EMachine(enum.IntEnum):
    # e_machine values: target architecture of the executable.
    I386 = 3
    S390 = 22
    Arm = 40
    X8664 = 62
    AArc64 = 183  # sic: upstream packaging spells AArch64 this way
37
+
38
+
39
class ELFFile:
    """
    Representation of an ELF executable.

    Parses the identification bytes and the ELF header from *f* on
    construction; raises ELFInvalid if the stream is not a recognizable
    ELF file.
    """

    def __init__(self, f: IO[bytes]) -> None:
        self._f = f

        # The first 16 bytes (e_ident) identify the file as ELF and give
        # its bitness and endianness.
        try:
            ident = self._read("16B")
        except struct.error:
            raise ELFInvalid("unable to parse identification")
        magic = bytes(ident[:4])
        if magic != b"\x7fELF":
            raise ELFInvalid(f"invalid magic: {magic!r}")

        self.capacity = ident[4]  # Format for program header (bitness).
        self.encoding = ident[5]  # Data structure encoding (endianness).

        try:
            # e_fmt: Format for program header.
            # p_fmt: Format for section header.
            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
            e_fmt, self._p_fmt, self._p_idx = {
                (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)),  # 32-bit LSB.
                (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
                (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)),  # 64-bit LSB.
                (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
            }[(self.capacity, self.encoding)]
        except KeyError:
            raise ELFInvalid(
                f"unrecognized capacity ({self.capacity}) or "
                f"encoding ({self.encoding})"
            )

        try:
            (
                _,
                self.machine,  # Architecture type.
                _,
                _,
                self._e_phoff,  # Offset of program header.
                _,
                self.flags,  # Processor-specific flags.
                _,
                self._e_phentsize,  # Size of section.
                self._e_phnum,  # Number of sections.
            ) = self._read(e_fmt)
        except struct.error as e:
            raise ELFInvalid("unable to parse machine and section information") from e

    def _read(self, fmt: str) -> Tuple[int, ...]:
        # Read exactly as many bytes as *fmt* describes and unpack them.
        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))

    @property
    def interpreter(self) -> Optional[str]:
        """
        The path recorded in the ``PT_INTERP`` section header.
        """
        # Scan each program header for the PT_INTERP (type 3) entry, then
        # read the NUL-terminated interpreter path it points at.
        for index in range(self._e_phnum):
            self._f.seek(self._e_phoff + self._e_phentsize * index)
            try:
                data = self._read(self._p_fmt)
            except struct.error:
                continue
            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
                continue
            self._f.seek(data[self._p_idx[1]])
            return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
        return None
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_manylinux.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import contextlib
3
+ import functools
4
+ import os
5
+ import re
6
+ import sys
7
+ import warnings
8
+ from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple
9
+
10
+ from ._elffile import EIClass, EIData, ELFFile, EMachine
11
+
12
# EF_ARM flag constants from the ARM ELF ABI, used below to detect an
# ABI-version-5, hard-float ARM binary.
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400
15
+
16
+
17
# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
# as the type for `path` until then.
@contextlib.contextmanager
def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
    # Yield an ELFFile for *path*, or None if the file is missing,
    # unreadable, or not a valid ELF binary (ELFInvalid is a ValueError
    # subclass, so it is covered by the except clause).
    try:
        with open(path, "rb") as f:
            yield ELFFile(f)
    except (OSError, TypeError, ValueError):
        yield None
26
+
27
+
28
def _is_linux_armhf(executable: str) -> bool:
    """Return True if *executable* is a 32-bit LSB ARM hard-float ELF."""
    # hard-float ABI can be detected from the ELF header of the running
    # process
    # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
    with _parse_elf(executable) as f:
        return (
            f is not None
            and f.capacity == EIClass.C32
            and f.encoding == EIData.Lsb
            and f.machine == EMachine.Arm
            and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
            and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
        )
41
+
42
+
43
def _is_linux_i686(executable: str) -> bool:
    """Return True if *executable* is a 32-bit LSB x86 (i386) ELF."""
    with _parse_elf(executable) as f:
        return (
            f is not None
            and f.capacity == EIClass.C32
            and f.encoding == EIData.Lsb
            and f.machine == EMachine.I386
        )
51
+
52
+
53
def _have_compatible_abi(executable: str, arch: str) -> bool:
    """Return True if *executable*'s ABI is compatible with wheels for *arch*."""
    # armv7l and i686 require inspecting the ELF header of the interpreter;
    # the 64-bit architectures are accepted by name alone.
    if arch == "armv7l":
        return _is_linux_armhf(executable)
    if arch == "i686":
        return _is_linux_i686(executable)
    return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
59
+
60
+
61
# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
# Maps glibc major version -> last known minor version of that series.
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
67
+
68
+
69
class _GLibCVersion(NamedTuple):
    # (major, minor) pair parsed from a "glibc 2.X"-style version string.
    major: int
    minor: int
72
+
73
+
74
def _glibc_version_string_confstr() -> Optional[str]:
    """
    Primary implementation of glibc_version_string using os.confstr.

    Returns a version string such as "2.17", or None when it cannot be read.
    """
    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
    # to be broken or missing. This strategy is used in the standard library
    # platform module.
    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
    try:
        # Should be a string like "glibc 2.17".
        version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION")
        assert version_string is not None
        _, version = version_string.rsplit()
    except (AssertionError, AttributeError, OSError, ValueError):
        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
        return None
    return version
91
+
92
+
93
def _glibc_version_string_ctypes() -> Optional[str]:
    """
    Fallback implementation of glibc_version_string using ctypes.

    Returns a string like "2.17", or None when ctypes is unavailable, the
    process is not dynamically linked, or the libc is not glibc.
    """
    try:
        import ctypes
    except ImportError:
        return None

    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
    # manpage says, "If filename is NULL, then the returned handle is for the
    # main program". This way we can let the linker do the work to figure out
    # which libc our process is actually using.
    #
    # We must also handle the special case where the executable is not a
    # dynamically linked executable. This can occur when using musl libc,
    # for example. In this situation, dlopen() will error, leading to an
    # OSError. Interestingly, at least in the case of musl, there is no
    # errno set on the OSError. The single string argument used to construct
    # OSError comes from libc itself and is therefore not portable to
    # hard code here. In any case, failure to call dlopen() means we
    # can proceed, so we bail on our attempt.
    try:
        process_namespace = ctypes.CDLL(None)
    except OSError:
        return None

    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to
        # glibc.
        return None

    # Call gnu_get_libc_version, which returns a string like "2.5"
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str: str = gnu_get_libc_version()
    # py2 / py3 compatibility:
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")

    return version_str
135
+
136
+
137
def _glibc_version_string() -> Optional[str]:
    """Returns glibc version string, or None if not using glibc."""
    # Prefer the fast os.confstr probe; fall back to the ctypes probe.
    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
140
+
141
+
142
+ def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
143
+ """Parse glibc version.
144
+
145
+ We use a regexp instead of str.split because we want to discard any
146
+ random junk that might come after the minor version -- this might happen
147
+ in patched/forked versions of glibc (e.g. Linaro's version of glibc
148
+ uses version strings like "2.20-2014.11"). See gh-3588.
149
+ """
150
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
151
+ if not m:
152
+ warnings.warn(
153
+ f"Expected glibc version with 2 components major.minor,"
154
+ f" got: {version_str}",
155
+ RuntimeWarning,
156
+ )
157
+ return -1, -1
158
+ return int(m.group("major")), int(m.group("minor"))
159
+
160
+
161
@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
    """Return the running glibc version as (major, minor).

    Returns (-1, -1) when no glibc is detected; that sentinel compares
    less than any real version tuple. Cached since the answer cannot
    change within a process.
    """
    version_str = _glibc_version_string()
    if version_str is None:
        return (-1, -1)
    return _parse_glibc_version(version_str)
167
+
168
+
169
# From PEP 513, PEP 600
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
    """Decide whether the platform is compatible with a manylinux *version*.

    First checks the detected glibc against *version*; then honours the
    optional ``_manylinux`` override module described by PEP 513/600:
    a ``manylinux_compatible(major, minor, arch)`` callable takes
    precedence, otherwise the legacy per-tag boolean attributes
    (``manylinux1_compatible`` etc.) are consulted for the three legacy
    version points. NOTE(review): the ``name`` parameter is unused in
    this body.
    """
    sys_glibc = _get_glibc_version()
    if sys_glibc < version:
        return False
    # Check for presence of _manylinux module.
    try:
        import _manylinux  # noqa
    except ImportError:
        # No override module installed: glibc check alone decides.
        return True
    if hasattr(_manylinux, "manylinux_compatible"):
        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
        if result is not None:
            return bool(result)
        # None means "no opinion": fall back to accepting.
        return True
    if version == _GLibCVersion(2, 5):
        if hasattr(_manylinux, "manylinux1_compatible"):
            return bool(_manylinux.manylinux1_compatible)
    if version == _GLibCVersion(2, 12):
        if hasattr(_manylinux, "manylinux2010_compatible"):
            return bool(_manylinux.manylinux2010_compatible)
    if version == _GLibCVersion(2, 17):
        if hasattr(_manylinux, "manylinux2014_compatible"):
            return bool(_manylinux.manylinux2014_compatible)
    return True
194
+
195
+
196
# Map of the glibc version points that have a legacy (pre-PEP 600) alias
# tag, keyed by (major, minor).
_LEGACY_MANYLINUX_MAP = {
    # CentOS 7 w/ glibc 2.17 (PEP 599)
    (2, 17): "manylinux2014",
    # CentOS 6 w/ glibc 2.12 (PEP 571)
    (2, 12): "manylinux2010",
    # CentOS 5 w/ glibc 2.5 (PEP 513)
    (2, 5): "manylinux1",
}
204
+
205
+
206
def platform_tags(linux: str, arch: str) -> Iterator[str]:
    """Yield every manylinux tag compatible with the current platform.

    :param linux: A platform string containing ``linux`` (e.g.
        ``linux_x86_64``); ``linux`` is replaced with each manylinux tag.
    :param arch: The architecture part of the platform tag.

    Yields newest-compatible first, from the detected glibc down to the
    oldest supported version, interleaving legacy aliases
    (manylinux1/2010/2014) at their glibc version points.
    """
    if not _have_compatible_abi(sys.executable, arch):
        return
    # Oldest glibc to be supported regardless of architecture is (2, 17).
    too_old_glibc2 = _GLibCVersion(2, 16)
    if arch in {"x86_64", "i686"}:
        # On x86/i686 also oldest glibc to be supported is (2, 5).
        too_old_glibc2 = _GLibCVersion(2, 4)
    current_glibc = _GLibCVersion(*_get_glibc_version())
    glibc_max_list = [current_glibc]
    # We can assume compatibility across glibc major versions.
    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
    #
    # Build a list of maximum glibc versions so that we can
    # output the canonical list of all glibc from current_glibc
    # down to too_old_glibc2, including all intermediary versions.
    for glibc_major in range(current_glibc.major - 1, 1, -1):
        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
    for glibc_max in glibc_max_list:
        if glibc_max.major == too_old_glibc2.major:
            min_minor = too_old_glibc2.minor
        else:
            # For other glibc major versions oldest supported is (x, 0).
            min_minor = -1
        for glibc_minor in range(glibc_max.minor, min_minor, -1):
            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
            tag = "manylinux_{}_{}".format(*glibc_version)
            if _is_compatible(tag, arch, glibc_version):
                yield linux.replace("linux", tag)
            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
            if glibc_version in _LEGACY_MANYLINUX_MAP:
                legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
                if _is_compatible(legacy_tag, arch, glibc_version):
                    yield linux.replace("linux", legacy_tag)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_musllinux.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """PEP 656 support.
2
+
3
+ This module implements logic to detect if the currently running Python is
4
+ linked against musl, and what musl version is used.
5
+ """
6
+
7
+ import functools
8
+ import re
9
+ import subprocess
10
+ import sys
11
+ from typing import Iterator, NamedTuple, Optional
12
+
13
+ from ._elffile import ELFFile
14
+
15
+
16
+ class _MuslVersion(NamedTuple):
17
+ major: int
18
+ minor: int
19
+
20
+
21
+ def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
22
+ lines = [n for n in (n.strip() for n in output.splitlines()) if n]
23
+ if len(lines) < 2 or lines[0][:4] != "musl":
24
+ return None
25
+ m = re.match(r"Version (\d+)\.(\d+)", lines[1])
26
+ if not m:
27
+ return None
28
+ return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
29
+
30
+
31
@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
    """Detect currently-running musl runtime version.

    This is done by checking the specified executable's dynamic linking
    information, and invoking the loader to parse its output for a version
    string. If the loader is musl, the output would be something like::

        musl libc (x86_64)
        Version 1.2.2
        Dynamic Program Loader

    Returns None when the executable is not a parseable ELF binary or its
    interpreter is not musl. Cached per executable path.
    """
    try:
        with open(executable, "rb") as f:
            ld = ELFFile(f).interpreter
    except (OSError, TypeError, ValueError):
        return None
    if ld is None or "musl" not in ld:
        return None
    # Running the musl loader with no arguments prints its version banner
    # to stderr.
    proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
    return _parse_musl_version(proc.stderr)
52
+
53
+
54
def platform_tags(arch: str) -> Iterator[str]:
    """Generate musllinux tags compatible to the current platform.

    :param arch: Should be the part of platform tag after the ``linux_``
        prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
        prerequisite for the current platform to be musllinux-compatible.

    :returns: An iterator of compatible musllinux tags.
    """
    detected = _get_musl_version(sys.executable)
    if detected is None:
        # Python is not dynamically linked against musl: nothing to yield.
        return
    # Newest compatible minor first, down to musllinux_X_0.
    for minor in reversed(range(0, detected.minor + 1)):
        yield f"musllinux_{detected.major}_{minor}_{arch}"
68
+
69
+
70
if __name__ == "__main__":  # pragma: no cover
    # Manual smoke test: print the detected platform, the musl version of
    # the running interpreter, and the resulting musllinux tags.
    import sysconfig

    plat = sysconfig.get_platform()
    assert plat.startswith("linux-"), "not linux"

    print("plat:", plat)
    print("musl:", _get_musl_version(sys.executable))
    print("tags:", end=" ")
    # The arch is the platform suffix with "." and "-" normalized to "_".
    for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
        print(t, end="\n      ")
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_parser.py ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Handwritten parser of dependency specifiers.
2
+
3
+ The docstring for each __parse_* function contains ENBF-inspired grammar representing
4
+ the implementation.
5
+ """
6
+
7
+ import ast
8
+ from typing import Any, List, NamedTuple, Optional, Tuple, Union
9
+
10
+ from ._tokenizer import DEFAULT_RULES, Tokenizer
11
+
12
+
13
class Node:
    """Base class for leaf nodes of a parsed marker expression.

    Wraps a single string value; subclasses define how the value is
    serialized back into marker syntax.
    """

    def __init__(self, value: str) -> None:
        self.value = value

    def __str__(self) -> str:
        return self.value

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}('{self}')>"

    def serialize(self) -> str:
        # Subclasses must provide their own serialization.
        raise NotImplementedError


class Variable(Node):
    """An environment marker variable, e.g. ``python_version``."""

    def serialize(self) -> str:
        return str(self)


class Value(Node):
    """A literal marker value; serialized re-quoted."""

    def serialize(self) -> str:
        return f'"{self}"'


class Op(Node):
    """A marker comparison operator, e.g. ``==`` or ``not in``."""

    def serialize(self) -> str:
        return str(self)
40
+
41
+
42
MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
# mypy does not support recursive type definition
# https://github.com/python/mypy/issues/731
MarkerAtom = Any
MarkerList = List[Any]


class ParsedRequirement(NamedTuple):
    """Structured result of parsing a dependency specifier string."""

    name: str  # project name as written (not normalized)
    url: str  # direct-reference URL, or "" when none was given
    extras: List[str]  # requested extras, in source order
    specifier: str  # raw version-specifier text, or ""
    marker: Optional[MarkerList]  # parsed environment marker, if any
58
+
59
+
60
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
    """Parse a full dependency specifier string into a ParsedRequirement."""
    return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
65
+
66
+
67
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
    """
    requirement = WS? IDENTIFIER WS? extras WS? requirement_details
    """
    tokenizer.consume("WS")

    name_token = tokenizer.expect(
        "IDENTIFIER", expected="package name at the start of dependency specifier"
    )
    name = name_token.text
    tokenizer.consume("WS")

    extras = _parse_extras(tokenizer)
    tokenizer.consume("WS")

    url, specifier, marker = _parse_requirement_details(tokenizer)
    # Anything left over after the details is a syntax error.
    tokenizer.expect("END", expected="end of dependency specifier")

    return ParsedRequirement(name, url, extras, specifier, marker)
86
+
87
+
88
def _parse_requirement_details(
    tokenizer: Tokenizer,
) -> Tuple[str, str, Optional[MarkerList]]:
    """
    requirement_details = AT URL (WS requirement_marker?)?
                        | specifier WS? (requirement_marker)?

    Returns (url, specifier, marker); exactly one of url/specifier is
    non-empty, and marker is None when no semicolon part is present.
    """

    specifier = ""
    url = ""
    marker = None

    if tokenizer.check("AT"):
        # Direct reference: "name @ URL [; marker]".
        tokenizer.read()
        tokenizer.consume("WS")

        url_start = tokenizer.position
        url = tokenizer.expect("URL", expected="URL after @").text
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        # A marker after a URL must be separated by whitespace (the URL
        # token itself would otherwise swallow the semicolon).
        tokenizer.expect("WS", expected="whitespace after URL")

        # The input might end after whitespace.
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer, span_start=url_start, after="URL and whitespace"
        )
    else:
        # Plain requirement: "name [specifier] [; marker]".
        specifier_start = tokenizer.position
        specifier = _parse_specifier(tokenizer)
        tokenizer.consume("WS")

        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer,
            span_start=specifier_start,
            after=(
                "version specifier"
                if specifier
                else "name and no valid version specifier"
            ),
        )

    return (url, specifier, marker)
137
+
138
+
139
def _parse_requirement_marker(
    tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
    """
    requirement_marker = SEMICOLON marker WS?

    *span_start* and *after* only shape the error message when the
    expected semicolon is missing.
    """

    if not tokenizer.check("SEMICOLON"):
        tokenizer.raise_syntax_error(
            f"Expected end or semicolon (after {after})",
            span_start=span_start,
        )
    tokenizer.read()

    marker = _parse_marker(tokenizer)
    tokenizer.consume("WS")

    return marker
157
+
158
+
159
def _parse_extras(tokenizer: Tokenizer) -> List[str]:
    """
    extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?

    Returns an empty list when no bracketed extras are present.
    """
    if not tokenizer.check("LEFT_BRACKET", peek=True):
        return []

    # enclosing_tokens consumes the brackets and raises a syntax error if
    # the closing one is missing.
    with tokenizer.enclosing_tokens(
        "LEFT_BRACKET",
        "RIGHT_BRACKET",
        around="extras",
    ):
        tokenizer.consume("WS")
        extras = _parse_extras_list(tokenizer)
        tokenizer.consume("WS")

    return extras
176
+
177
+
178
def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
    """
    extras_list = identifier (wsp* ',' wsp* identifier)*
    """
    extras: List[str] = []

    if not tokenizer.check("IDENTIFIER"):
        return extras

    extras.append(tokenizer.read().text)

    while True:
        tokenizer.consume("WS")
        if tokenizer.check("IDENTIFIER", peek=True):
            # Two identifiers in a row means the separating comma is missing.
            tokenizer.raise_syntax_error("Expected comma between extra names")
        elif not tokenizer.check("COMMA"):
            break

        tokenizer.read()
        tokenizer.consume("WS")

        extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
        extras.append(extra_token.text)

    return extras
203
+
204
+
205
def _parse_specifier(tokenizer: Tokenizer) -> str:
    """
    specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
              | WS? version_many WS?

    Returns the raw specifier text (possibly ""); the optional enclosing
    parentheses are consumed but not included in the result.
    """
    with tokenizer.enclosing_tokens(
        "LEFT_PARENTHESIS",
        "RIGHT_PARENTHESIS",
        around="version specifier",
    ):
        tokenizer.consume("WS")
        parsed_specifiers = _parse_version_many(tokenizer)
        tokenizer.consume("WS")

    return parsed_specifiers
220
+
221
+
222
def _parse_version_many(tokenizer: Tokenizer) -> str:
    """
    version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?

    Accumulates the raw text of comma-separated version specifiers,
    rejecting trailing ".*" / local-label suffixes that the SPECIFIER
    rule itself did not consume (those are only legal with == / !=).
    """
    parsed_specifiers = ""
    while tokenizer.check("SPECIFIER"):
        span_start = tokenizer.position
        parsed_specifiers += tokenizer.read().text
        if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                ".* suffix can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position + 1,
            )
        if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                "Local version label can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position,
            )
        tokenizer.consume("WS")
        if not tokenizer.check("COMMA"):
            break
        # Keep the comma in the accumulated raw text.
        parsed_specifiers += tokenizer.read().text
        tokenizer.consume("WS")

    return parsed_specifiers
249
+
250
+
251
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
    """Parse an environment marker string into its nested list form."""
    return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES))
256
+
257
+
258
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
    """
    marker = marker_atom (BOOLOP marker_atom)+

    Returns a flat list alternating atoms and boolean-operator strings,
    e.g. [atom, "and", atom, "or", atom].
    """
    expression = [_parse_marker_atom(tokenizer)]
    while tokenizer.check("BOOLOP"):
        token = tokenizer.read()
        expr_right = _parse_marker_atom(tokenizer)
        expression.extend((token.text, expr_right))
    return expression
268
+
269
+
270
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
    """
    marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
                | WS? marker_item WS?

    A parenthesized sub-expression comes back as a nested MarkerList;
    otherwise a single (var, op, var) item.
    """

    tokenizer.consume("WS")
    if tokenizer.check("LEFT_PARENTHESIS", peek=True):
        with tokenizer.enclosing_tokens(
            "LEFT_PARENTHESIS",
            "RIGHT_PARENTHESIS",
            around="marker expression",
        ):
            tokenizer.consume("WS")
            marker: MarkerAtom = _parse_marker(tokenizer)
            tokenizer.consume("WS")
    else:
        marker = _parse_marker_item(tokenizer)
    tokenizer.consume("WS")
    return marker
290
+
291
+
292
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
    """
    marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
    """
    tokenizer.consume("WS")
    marker_var_left = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    marker_op = _parse_marker_op(tokenizer)
    tokenizer.consume("WS")
    marker_var_right = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    return (marker_var_left, marker_op, marker_var_right)
304
+
305
+
306
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
    """
    marker_var = VARIABLE | QUOTED_STRING

    Returns a Variable node (dotted names normalized to underscores) or a
    Value node for a quoted literal; raises a syntax error otherwise.
    """
    if tokenizer.check("VARIABLE"):
        # e.g. "os.name" is treated the same as "os_name".
        return process_env_var(tokenizer.read().text.replace(".", "_"))
    elif tokenizer.check("QUOTED_STRING"):
        return process_python_str(tokenizer.read().text)
    else:
        # raise_syntax_error never returns.
        tokenizer.raise_syntax_error(
            message="Expected a marker variable or quoted string"
        )
318
+
319
+
320
def process_env_var(env_var: str) -> Variable:
    """Wrap a marker variable name in a ``Variable`` node.

    ``python_implementation`` is an alias of
    ``platform_python_implementation`` and is collapsed into the latter;
    every other name passes through unchanged.
    """
    if env_var in ("platform_python_implementation", "python_implementation"):
        return Variable("platform_python_implementation")
    return Variable(env_var)
328
+
329
+
330
def process_python_str(python_str: str) -> Value:
    """Evaluate a quoted marker literal and wrap it in a ``Value`` node."""
    return Value(str(ast.literal_eval(python_str)))
333
+
334
+
335
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
    """
    marker_op = IN | NOT IN | OP
    """
    if tokenizer.check("IN"):
        tokenizer.read()
        return Op("in")
    elif tokenizer.check("NOT"):
        # "not" must be followed by whitespace and "in".
        tokenizer.read()
        tokenizer.expect("WS", expected="whitespace after 'not'")
        tokenizer.expect("IN", expected="'in' after 'not'")
        return Op("not in")
    elif tokenizer.check("OP"):
        return Op(tokenizer.read().text)
    else:
        # raise_syntax_error never returns; "return" satisfies type checkers.
        return tokenizer.raise_syntax_error(
            "Expected marker operator, one of "
            "<=, <, !=, ==, >=, >, ~=, ===, in, not in"
        )
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_structures.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+
6
class InfinityType:
    """Sentinel that compares strictly greater than every other value.

    Used as an upper bound in version ordering; equality holds only
    against other instances of this class, and negation yields the
    ``NegativeInfinity`` singleton.
    """

    def __repr__(self) -> str:
        return "Infinity"

    def __hash__(self) -> int:
        return hash(repr(self))

    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    # Ordering: nothing exceeds Infinity, and Infinity exceeds everything.
    def __lt__(self, other: object) -> bool:
        return False

    def __le__(self, other: object) -> bool:
        return False

    def __gt__(self, other: object) -> bool:
        return True

    def __ge__(self, other: object) -> bool:
        return True

    def __neg__(self: object) -> "NegativeInfinityType":
        return NegativeInfinity


# Module-level singleton; compare against this, not the class.
Infinity = InfinityType()
33
+
34
+
35
+ class NegativeInfinityType:
36
+ def __repr__(self) -> str:
37
+ return "-Infinity"
38
+
39
+ def __hash__(self) -> int:
40
+ return hash(repr(self))
41
+
42
+ def __lt__(self, other: object) -> bool:
43
+ return True
44
+
45
+ def __le__(self, other: object) -> bool:
46
+ return True
47
+
48
+ def __eq__(self, other: object) -> bool:
49
+ return isinstance(other, self.__class__)
50
+
51
+ def __gt__(self, other: object) -> bool:
52
+ return False
53
+
54
+ def __ge__(self, other: object) -> bool:
55
+ return False
56
+
57
+ def __neg__(self: object) -> InfinityType:
58
+ return Infinity
59
+
60
+
61
+ NegativeInfinity = NegativeInfinityType()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/_tokenizer.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import re
3
+ from dataclasses import dataclass
4
+ from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union
5
+
6
+ from .specifiers import Specifier
7
+
8
+
9
@dataclass
class Token:
    """A single lexeme produced by the tokenizer."""

    name: str  # name of the matching rule in the tokenizer's rule table
    text: str  # exact matched substring
    position: int  # start offset of ``text`` within the source string
14
+
15
+
16
class ParserSyntaxError(Exception):
    """The provided source text could not be parsed correctly."""

    def __init__(
        self,
        message: str,
        *,
        source: str,
        span: Tuple[int, int],
    ) -> None:
        # Keep the raw pieces so callers can build their own rendering.
        self.span = span
        self.message = message
        self.source = source
        super().__init__()

    def __str__(self) -> str:
        # Underline the offending span of the source with "~~~^".
        start, end = self.span
        caret_line = "".join([" " * start, "~" * (end - start), "^"])
        return "\n    ".join((self.message, self.source, caret_line))
35
+
36
+
37
# Token-name -> pattern table consumed by Tokenizer (strings are compiled
# there). Order is irrelevant: the Tokenizer matches by explicitly
# requested rule name, not by scanning this table.
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
    "LEFT_PARENTHESIS": r"\(",
    "RIGHT_PARENTHESIS": r"\)",
    "LEFT_BRACKET": r"\[",
    "RIGHT_BRACKET": r"\]",
    "SEMICOLON": r";",
    "COMMA": r",",
    "QUOTED_STRING": re.compile(
        r"""
            (
                ('[^']*')
                |
                ("[^"]*")
            )
        """,
        re.VERBOSE,
    ),
    "OP": r"(===|==|~=|!=|<=|>=|<|>)",
    "BOOLOP": r"\b(or|and)\b",
    "IN": r"\bin\b",
    "NOT": r"\bnot\b",
    "VARIABLE": re.compile(
        r"""
            \b(
                python_version
                |python_full_version
                |os[._]name
                |sys[._]platform
                |platform_(release|system)
                |platform[._](version|machine|python_implementation)
                |python_implementation
                |implementation_(name|version)
                |extra
            )\b
        """,
        re.VERBOSE,
    ),
    # Built from the Specifier class so the tokenizer and the specifier
    # parser agree on what a version specifier looks like.
    "SPECIFIER": re.compile(
        Specifier._operator_regex_str + Specifier._version_regex_str,
        re.VERBOSE | re.IGNORECASE,
    ),
    "AT": r"\@",
    "URL": r"[^ \t]+",
    "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
    "VERSION_PREFIX_TRAIL": r"\.\*",
    "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
    "WS": r"[ \t]+",
    "END": r"$",
}
86
+
87
+
88
class Tokenizer:
    """Context-sensitive token parsing.

    Provides methods to examine the input stream to check whether the next token
    matches. At most one token is buffered at a time (``next_token``); a
    successful non-peek ``check`` loads it, and ``read`` consumes it.
    """

    def __init__(
        self,
        source: str,
        *,
        rules: "Dict[str, Union[str, re.Pattern[str]]]",
    ) -> None:
        self.source = source
        # Compile every rule up front; re.compile is a no-op on patterns
        # that are already compiled.
        self.rules: Dict[str, re.Pattern[str]] = {
            name: re.compile(pattern) for name, pattern in rules.items()
        }
        self.next_token: Optional[Token] = None
        self.position = 0

    def consume(self, name: str) -> None:
        """Move beyond provided token name, if at current position."""
        if self.check(name):
            self.read()

    def check(self, name: str, *, peek: bool = False) -> bool:
        """Check whether the next token has the provided name.

        By default, if the check succeeds, the token *must* be read before
        another check. If `peek` is set to `True`, the token is not loaded and
        would need to be checked again.
        """
        assert (
            self.next_token is None
        ), f"Cannot check for {name!r}, already have {self.next_token!r}"
        assert name in self.rules, f"Unknown token name: {name!r}"

        expression = self.rules[name]

        match = expression.match(self.source, self.position)
        if match is None:
            return False
        if not peek:
            self.next_token = Token(name, match[0], self.position)
        return True

    def expect(self, name: str, *, expected: str) -> Token:
        """Expect a certain token name next, failing with a syntax error otherwise.

        The token is *not* read.
        """
        if not self.check(name):
            raise self.raise_syntax_error(f"Expected {expected}")
        return self.read()

    def read(self) -> Token:
        """Consume the next token and return it."""
        token = self.next_token
        assert token is not None

        self.position += len(token.text)
        self.next_token = None

        return token

    def raise_syntax_error(
        self,
        message: str,
        *,
        span_start: Optional[int] = None,
        span_end: Optional[int] = None,
    ) -> NoReturn:
        """Raise ParserSyntaxError at the given position.

        The span defaults to the current position at either end when not
        given explicitly.
        """
        span = (
            self.position if span_start is None else span_start,
            self.position if span_end is None else span_end,
        )
        raise ParserSyntaxError(
            message,
            source=self.source,
            span=span,
        )

    @contextlib.contextmanager
    def enclosing_tokens(
        self, open_token: str, close_token: str, *, around: str
    ) -> Iterator[None]:
        """Consume an optional *open_token* before the body and, if it was
        present, require the matching *close_token* afterwards.

        When the opening token is absent, the closing token is not
        required either.
        """
        if self.check(open_token):
            open_position = self.position
            self.read()
        else:
            open_position = None

        yield

        if open_position is None:
            return

        if not self.check(close_token):
            self.raise_syntax_error(
                f"Expected matching {close_token} for {open_token}, after {around}",
                span_start=open_position,
            )

        self.read()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/markers.py ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is dual licensed under the terms of the Apache License, Version
2
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3
+ # for complete details.
4
+
5
+ import operator
6
+ import os
7
+ import platform
8
+ import sys
9
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
10
+
11
+ from ._parser import (
12
+ MarkerAtom,
13
+ MarkerList,
14
+ Op,
15
+ Value,
16
+ Variable,
17
+ parse_marker as _parse_marker,
18
+ )
19
+ from ._tokenizer import ParserSyntaxError
20
+ from .specifiers import InvalidSpecifier, Specifier
21
+ from .utils import canonicalize_name
22
+
23
__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]

# Signature shared by the comparison callables stored in ``_operators``:
# both operands arrive as (already stringified) marker values.
Operator = Callable[[str, str], bool]
32
+
33
+
34
class InvalidMarker(ValueError):
    """Raised when a marker string does not conform to PEP 508 syntax."""
38
+
39
+
40
class UndefinedComparison(ValueError):
    """Raised when a comparison operator is applied to unsupported values."""
44
+
45
+
46
class UndefinedEnvironmentName(ValueError):
    """Raised when a marker references a name absent from the environment."""
51
+
52
+
53
+ def _normalize_extra_values(results: Any) -> Any:
54
+ """
55
+ Normalize extra values.
56
+ """
57
+ if isinstance(results[0], tuple):
58
+ lhs, op, rhs = results[0]
59
+ if isinstance(lhs, Variable) and lhs.value == "extra":
60
+ normalized_extra = canonicalize_name(rhs.value)
61
+ rhs = Value(normalized_extra)
62
+ elif isinstance(rhs, Variable) and rhs.value == "extra":
63
+ normalized_extra = canonicalize_name(lhs.value)
64
+ lhs = Value(normalized_extra)
65
+ results[0] = lhs, op, rhs
66
+ return results
67
+
68
+
69
def _format_marker(
    marker: Union[List[str], MarkerAtom, str], first: bool = True
) -> str:
    """Serialize a parsed marker (sub)expression back to its string form.

    *first* is True only for the outermost call; nested lists are wrapped
    in parentheses so operator grouping survives a round-trip.  The
    annotation is ``bool`` (not ``Optional[bool]`` as before): None was
    never accepted nor handled by the body, so the old annotation was
    simply wrong.
    """
    assert isinstance(marker, (list, tuple, str))

    # Sometimes we have a structure like [[...]]: a single-item list whose
    # item is itself a list.  Unwrap it here so we don't emit extraneous
    # parentheses around the whole expression.
    if (
        isinstance(marker, list)
        and len(marker) == 1
        and isinstance(marker[0], (list, tuple))
    ):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        # An (lhs, op, rhs) atom: each node knows how to serialize itself.
        return " ".join([m.serialize() for m in marker])
    else:
        # A bare boolean connective such as "and" / "or".
        return marker
96
+
97
+
98
# Fallback comparison implementations keyed by the serialized operator.
# _eval_op consults this table only when the operands don't form a valid
# PEP 440 version specifier.
_operators: Dict[str, Operator] = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
108
+
109
+
110
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
    """Evaluate ``lhs <op> rhs``, preferring PEP 440 specifier semantics.

    When ``op + rhs`` parses as a version specifier, containment (with
    prereleases allowed) decides the result; otherwise fall back to the
    plain string comparison table, raising UndefinedComparison for
    operators with no fallback.
    """
    try:
        spec = Specifier(op.serialize() + rhs)
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs, prereleases=True)

    oper: Optional[Operator] = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")

    return oper(lhs, rhs)
123
+
124
+
125
+ def _normalize(*values: str, key: str) -> Tuple[str, ...]:
126
+ # PEP 685 – Comparison of extra names for optional distribution dependencies
127
+ # https://peps.python.org/pep-0685/
128
+ # > When comparing extra names, tools MUST normalize the names being
129
+ # > compared using the semantics outlined in PEP 503 for names
130
+ if key == "extra":
131
+ return tuple(canonicalize_name(v) for v in values)
132
+
133
+ # other environment markers don't have such standards
134
+ return values
135
+
136
+
137
def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
    """Evaluate a parsed marker list as OR-separated groups of AND-ed terms.

    Each inner list in *or_groups* accumulates one run of "and"-joined
    results; an "or" token starts a new group.  The expression is true
    when any group is entirely true.
    """
    or_groups: List[List[bool]] = [[]]

    for item in markers:
        assert isinstance(item, (list, tuple, str))

        if isinstance(item, list):
            # Parenthesized sub-expression: evaluate recursively.
            or_groups[-1].append(_evaluate_markers(item, environment))
        elif isinstance(item, tuple):
            lhs, op, rhs = item

            # Exactly one side names an environment variable; resolve it
            # and compare against the literal on the other side.
            if isinstance(lhs, Variable):
                environment_key = lhs.value
                lhs_value, rhs_value = environment[environment_key], rhs.value
            else:
                environment_key = rhs.value
                lhs_value, rhs_value = lhs.value, environment[environment_key]

            lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
            or_groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert item in ["and", "or"]
            if item == "or":
                or_groups.append([])

    return any(all(group) for group in or_groups)
165
+
166
+
167
def format_full_version(info: "sys._version_info") -> str:
    """Render a version_info-like object, e.g. ``3.8.10`` or ``3.11.2c1``."""
    parts = [f"{info.major}.{info.minor}.{info.micro}"]
    if info.releaselevel != "final":
        # Non-final releases append the level's first letter plus the
        # serial, e.g. releaselevel="candidate", serial=1 -> "c1".
        parts.append(info.releaselevel[0] + str(info.serial))
    return "".join(parts)
173
+
174
+
175
def default_environment() -> Dict[str, str]:
    """Return the PEP 508 marker environment for the running interpreter."""
    # Inline rendering of sys.implementation.version (same logic as
    # format_full_version): "major.minor.micro" plus e.g. "c1" for
    # non-final release levels.
    info = sys.implementation.version
    iver = f"{info.major}.{info.minor}.{info.micro}"
    if info.releaselevel != "final":
        iver += info.releaselevel[0] + str(info.serial)

    return {
        "implementation_name": sys.implementation.name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
191
+
192
+
193
class Marker:
    """A parsed PEP 508 environment-marker expression."""

    def __init__(self, marker: str) -> None:
        # Note: packaging.requirements.Requirement creates Marker objects
        # without calling this constructor; any logic added here must be
        # mirrored/adapted there.
        try:
            self._markers = _normalize_extra_values(_parse_marker(marker))
            # `_markers` is a recursive structure:
            #     MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
            #
            # For example:
            #     python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
            #
            # parses into:
            #     [
            #         (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
            #         'and',
            #         [
            #             (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
            #             'or',
            #             (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
            #         ]
            #     ]
        except ParserSyntaxError as e:
            # Re-raise under the public, documented exception type.
            raise InvalidMarker(str(e)) from e

    def __str__(self) -> str:
        return _format_marker(self._markers)

    def __repr__(self) -> str:
        return f"<Marker('{self}')>"

    def __hash__(self) -> int:
        # Hash on the canonical serialized form, consistent with __eq__.
        return hash((self.__class__.__name__, str(self)))

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, Marker):
            return str(self) == str(other)
        return NotImplemented

    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        env = default_environment()
        env["extra"] = ""
        if environment is not None:
            env.update(environment)
            # The API used to allow setting extra to None; keep accepting
            # that for backwards compatibility.
            if env["extra"] is None:
                env["extra"] = ""

        return _evaluate_markers(self._markers, env)