vijayakumaran92 commited on
Commit
fb0de2a
·
verified ·
1 Parent(s): 65e7152

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__init__.py +21 -0
  2. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-310.pyc +0 -0
  3. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/base.cpython-310.pyc +0 -0
  4. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-310.pyc +0 -0
  5. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-310.pyc +0 -0
  6. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-310.pyc +0 -0
  7. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/base.py +53 -0
  8. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py +29 -0
  9. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/sdist.py +158 -0
  10. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/wheel.py +42 -0
  11. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__init__.py +1 -0
  12. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__pycache__/__init__.cpython-310.pyc +0 -0
  13. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__pycache__/collector.cpython-310.pyc +0 -0
  14. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-310.pyc +0 -0
  15. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__pycache__/sources.cpython-310.pyc +0 -0
  16. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/collector.py +494 -0
  17. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/package_finder.py +1050 -0
  18. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/sources.py +284 -0
  19. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__init__.py +1 -0
  20. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/__init__.cpython-310.pyc +0 -0
  21. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/candidate.cpython-310.pyc +0 -0
  22. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-310.pyc +0 -0
  23. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/format_control.cpython-310.pyc +0 -0
  24. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/index.cpython-310.pyc +0 -0
  25. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-310.pyc +0 -0
  26. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/link.cpython-310.pyc +0 -0
  27. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/pylock.cpython-310.pyc +0 -0
  28. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/scheme.cpython-310.pyc +0 -0
  29. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-310.pyc +0 -0
  30. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-310.pyc +0 -0
  31. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/target_python.cpython-310.pyc +0 -0
  32. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/wheel.cpython-310.pyc +0 -0
  33. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/candidate.py +25 -0
  34. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py +224 -0
  35. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/format_control.py +78 -0
  36. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/index.py +28 -0
  37. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/installation_report.py +56 -0
  38. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/link.py +608 -0
  39. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/pylock.py +183 -0
  40. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/scheme.py +25 -0
  41. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/search_scope.py +127 -0
  42. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/selection_prefs.py +53 -0
  43. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/target_python.py +121 -0
  44. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/wheel.py +139 -0
  45. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__init__.py +1 -0
  46. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/__init__.cpython-310.pyc +0 -0
  47. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/auth.cpython-310.pyc +0 -0
  48. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/cache.cpython-310.pyc +0 -0
  49. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/download.cpython-310.pyc +0 -0
  50. ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-310.pyc +0 -0
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pip._internal.distributions.base import AbstractDistribution
2
+ from pip._internal.distributions.sdist import SourceDistribution
3
+ from pip._internal.distributions.wheel import WheelDistribution
4
+ from pip._internal.req.req_install import InstallRequirement
5
+
6
+
7
+ def make_distribution_for_install_requirement(
8
+ install_req: InstallRequirement,
9
+ ) -> AbstractDistribution:
10
+ """Returns a Distribution for the given InstallRequirement"""
11
+ # Editable requirements will always be source distributions. They use the
12
+ # legacy logic until we create a modern standard for them.
13
+ if install_req.editable:
14
+ return SourceDistribution(install_req)
15
+
16
+ # If it's a wheel, it's a WheelDistribution
17
+ if install_req.is_wheel:
18
+ return WheelDistribution(install_req)
19
+
20
+ # Otherwise, a SourceDistribution
21
+ return SourceDistribution(install_req)
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (810 Bytes). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/base.cpython-310.pyc ADDED
Binary file (2.52 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/installed.cpython-310.pyc ADDED
Binary file (1.49 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/sdist.cpython-310.pyc ADDED
Binary file (5.33 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/__pycache__/wheel.cpython-310.pyc ADDED
Binary file (1.89 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/base.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ from typing import TYPE_CHECKING, Optional
3
+
4
+ from pip._internal.metadata.base import BaseDistribution
5
+ from pip._internal.req import InstallRequirement
6
+
7
+ if TYPE_CHECKING:
8
+ from pip._internal.index.package_finder import PackageFinder
9
+
10
+
11
+ class AbstractDistribution(metaclass=abc.ABCMeta):
12
+ """A base class for handling installable artifacts.
13
+
14
+ The requirements for anything installable are as follows:
15
+
16
+ - we must be able to determine the requirement name
17
+ (or we can't correctly handle the non-upgrade case).
18
+
19
+ - for packages with setup requirements, we must also be able
20
+ to determine their requirements without installing additional
21
+ packages (for the same reason as run-time dependencies)
22
+
23
+ - we must be able to create a Distribution object exposing the
24
+ above metadata.
25
+
26
+ - if we need to do work in the build tracker, we must be able to generate a unique
27
+ string to identify the requirement in the build tracker.
28
+ """
29
+
30
+ def __init__(self, req: InstallRequirement) -> None:
31
+ super().__init__()
32
+ self.req = req
33
+
34
+ @abc.abstractproperty
35
+ def build_tracker_id(self) -> Optional[str]:
36
+ """A string that uniquely identifies this requirement to the build tracker.
37
+
38
+ If None, then this dist has no work to do in the build tracker, and
39
+ ``.prepare_distribution_metadata()`` will not be called."""
40
+ raise NotImplementedError()
41
+
42
+ @abc.abstractmethod
43
+ def get_metadata_distribution(self) -> BaseDistribution:
44
+ raise NotImplementedError()
45
+
46
+ @abc.abstractmethod
47
+ def prepare_distribution_metadata(
48
+ self,
49
+ finder: "PackageFinder",
50
+ build_isolation: bool,
51
+ check_build_deps: bool,
52
+ ) -> None:
53
+ raise NotImplementedError()
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/installed.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from pip._internal.distributions.base import AbstractDistribution
4
+ from pip._internal.index.package_finder import PackageFinder
5
+ from pip._internal.metadata import BaseDistribution
6
+
7
+
8
+ class InstalledDistribution(AbstractDistribution):
9
+ """Represents an installed package.
10
+
11
+ This does not need any preparation as the required information has already
12
+ been computed.
13
+ """
14
+
15
+ @property
16
+ def build_tracker_id(self) -> Optional[str]:
17
+ return None
18
+
19
+ def get_metadata_distribution(self) -> BaseDistribution:
20
+ assert self.req.satisfied_by is not None, "not actually installed"
21
+ return self.req.satisfied_by
22
+
23
+ def prepare_distribution_metadata(
24
+ self,
25
+ finder: PackageFinder,
26
+ build_isolation: bool,
27
+ check_build_deps: bool,
28
+ ) -> None:
29
+ pass
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/sdist.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from typing import TYPE_CHECKING, Iterable, Optional, Set, Tuple
3
+
4
+ from pip._internal.build_env import BuildEnvironment
5
+ from pip._internal.distributions.base import AbstractDistribution
6
+ from pip._internal.exceptions import InstallationError
7
+ from pip._internal.metadata import BaseDistribution
8
+ from pip._internal.utils.subprocess import runner_with_spinner_message
9
+
10
+ if TYPE_CHECKING:
11
+ from pip._internal.index.package_finder import PackageFinder
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
+ class SourceDistribution(AbstractDistribution):
17
+ """Represents a source distribution.
18
+
19
+ The preparation step for these needs metadata for the packages to be
20
+ generated, either using PEP 517 or using the legacy `setup.py egg_info`.
21
+ """
22
+
23
+ @property
24
+ def build_tracker_id(self) -> Optional[str]:
25
+ """Identify this requirement uniquely by its link."""
26
+ assert self.req.link
27
+ return self.req.link.url_without_fragment
28
+
29
+ def get_metadata_distribution(self) -> BaseDistribution:
30
+ return self.req.get_dist()
31
+
32
+ def prepare_distribution_metadata(
33
+ self,
34
+ finder: "PackageFinder",
35
+ build_isolation: bool,
36
+ check_build_deps: bool,
37
+ ) -> None:
38
+ # Load pyproject.toml, to determine whether PEP 517 is to be used
39
+ self.req.load_pyproject_toml()
40
+
41
+ # Set up the build isolation, if this requirement should be isolated
42
+ should_isolate = self.req.use_pep517 and build_isolation
43
+ if should_isolate:
44
+ # Setup an isolated environment and install the build backend static
45
+ # requirements in it.
46
+ self._prepare_build_backend(finder)
47
+ # Check that if the requirement is editable, it either supports PEP 660 or
48
+ # has a setup.py or a setup.cfg. This cannot be done earlier because we need
49
+ # to setup the build backend to verify it supports build_editable, nor can
50
+ # it be done later, because we want to avoid installing build requirements
51
+ # needlessly. Doing it here also works around setuptools generating
52
+ # UNKNOWN.egg-info when running get_requires_for_build_wheel on a directory
53
+ # without setup.py nor setup.cfg.
54
+ self.req.isolated_editable_sanity_check()
55
+ # Install the dynamic build requirements.
56
+ self._install_build_reqs(finder)
57
+ # Check if the current environment provides build dependencies
58
+ should_check_deps = self.req.use_pep517 and check_build_deps
59
+ if should_check_deps:
60
+ pyproject_requires = self.req.pyproject_requires
61
+ assert pyproject_requires is not None
62
+ conflicting, missing = self.req.build_env.check_requirements(
63
+ pyproject_requires
64
+ )
65
+ if conflicting:
66
+ self._raise_conflicts("the backend dependencies", conflicting)
67
+ if missing:
68
+ self._raise_missing_reqs(missing)
69
+ self.req.prepare_metadata()
70
+
71
+ def _prepare_build_backend(self, finder: "PackageFinder") -> None:
72
+ # Isolate in a BuildEnvironment and install the build-time
73
+ # requirements.
74
+ pyproject_requires = self.req.pyproject_requires
75
+ assert pyproject_requires is not None
76
+
77
+ self.req.build_env = BuildEnvironment()
78
+ self.req.build_env.install_requirements(
79
+ finder, pyproject_requires, "overlay", kind="build dependencies"
80
+ )
81
+ conflicting, missing = self.req.build_env.check_requirements(
82
+ self.req.requirements_to_check
83
+ )
84
+ if conflicting:
85
+ self._raise_conflicts("PEP 517/518 supported requirements", conflicting)
86
+ if missing:
87
+ logger.warning(
88
+ "Missing build requirements in pyproject.toml for %s.",
89
+ self.req,
90
+ )
91
+ logger.warning(
92
+ "The project does not specify a build backend, and "
93
+ "pip cannot fall back to setuptools without %s.",
94
+ " and ".join(map(repr, sorted(missing))),
95
+ )
96
+
97
+ def _get_build_requires_wheel(self) -> Iterable[str]:
98
+ with self.req.build_env:
99
+ runner = runner_with_spinner_message("Getting requirements to build wheel")
100
+ backend = self.req.pep517_backend
101
+ assert backend is not None
102
+ with backend.subprocess_runner(runner):
103
+ return backend.get_requires_for_build_wheel()
104
+
105
+ def _get_build_requires_editable(self) -> Iterable[str]:
106
+ with self.req.build_env:
107
+ runner = runner_with_spinner_message(
108
+ "Getting requirements to build editable"
109
+ )
110
+ backend = self.req.pep517_backend
111
+ assert backend is not None
112
+ with backend.subprocess_runner(runner):
113
+ return backend.get_requires_for_build_editable()
114
+
115
+ def _install_build_reqs(self, finder: "PackageFinder") -> None:
116
+ # Install any extra build dependencies that the backend requests.
117
+ # This must be done in a second pass, as the pyproject.toml
118
+ # dependencies must be installed before we can call the backend.
119
+ if (
120
+ self.req.editable
121
+ and self.req.permit_editable_wheels
122
+ and self.req.supports_pyproject_editable
123
+ ):
124
+ build_reqs = self._get_build_requires_editable()
125
+ else:
126
+ build_reqs = self._get_build_requires_wheel()
127
+ conflicting, missing = self.req.build_env.check_requirements(build_reqs)
128
+ if conflicting:
129
+ self._raise_conflicts("the backend dependencies", conflicting)
130
+ self.req.build_env.install_requirements(
131
+ finder, missing, "normal", kind="backend dependencies"
132
+ )
133
+
134
+ def _raise_conflicts(
135
+ self, conflicting_with: str, conflicting_reqs: Set[Tuple[str, str]]
136
+ ) -> None:
137
+ format_string = (
138
+ "Some build dependencies for {requirement} "
139
+ "conflict with {conflicting_with}: {description}."
140
+ )
141
+ error_message = format_string.format(
142
+ requirement=self.req,
143
+ conflicting_with=conflicting_with,
144
+ description=", ".join(
145
+ f"{installed} is incompatible with {wanted}"
146
+ for installed, wanted in sorted(conflicting_reqs)
147
+ ),
148
+ )
149
+ raise InstallationError(error_message)
150
+
151
+ def _raise_missing_reqs(self, missing: Set[str]) -> None:
152
+ format_string = (
153
+ "Some build dependencies for {requirement} are missing: {missing}."
154
+ )
155
+ error_message = format_string.format(
156
+ requirement=self.req, missing=", ".join(map(repr, sorted(missing)))
157
+ )
158
+ raise InstallationError(error_message)
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/distributions/wheel.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING, Optional
2
+
3
+ from pip._vendor.packaging.utils import canonicalize_name
4
+
5
+ from pip._internal.distributions.base import AbstractDistribution
6
+ from pip._internal.metadata import (
7
+ BaseDistribution,
8
+ FilesystemWheel,
9
+ get_wheel_distribution,
10
+ )
11
+
12
+ if TYPE_CHECKING:
13
+ from pip._internal.index.package_finder import PackageFinder
14
+
15
+
16
+ class WheelDistribution(AbstractDistribution):
17
+ """Represents a wheel distribution.
18
+
19
+ This does not need any preparation as wheels can be directly unpacked.
20
+ """
21
+
22
+ @property
23
+ def build_tracker_id(self) -> Optional[str]:
24
+ return None
25
+
26
+ def get_metadata_distribution(self) -> BaseDistribution:
27
+ """Loads the metadata from the wheel file into memory and returns a
28
+ Distribution that uses it, not relying on the wheel file or
29
+ requirement.
30
+ """
31
+ assert self.req.local_file_path, "Set as part of preparation during download"
32
+ assert self.req.name, "Wheels are never unnamed"
33
+ wheel = FilesystemWheel(self.req.local_file_path)
34
+ return get_wheel_distribution(wheel, canonicalize_name(self.req.name))
35
+
36
+ def prepare_distribution_metadata(
37
+ self,
38
+ finder: "PackageFinder",
39
+ build_isolation: bool,
40
+ check_build_deps: bool,
41
+ ) -> None:
42
+ pass
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Index interaction code"""
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (236 Bytes). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__pycache__/collector.cpython-310.pyc ADDED
Binary file (15.1 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__pycache__/package_finder.cpython-310.pyc ADDED
Binary file (29.6 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/__pycache__/sources.cpython-310.pyc ADDED
Binary file (8.89 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/collector.py ADDED
@@ -0,0 +1,494 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The main purpose of this module is to expose LinkCollector.collect_sources().
3
+ """
4
+
5
+ import collections
6
+ import email.message
7
+ import functools
8
+ import itertools
9
+ import json
10
+ import logging
11
+ import os
12
+ import urllib.parse
13
+ import urllib.request
14
+ from dataclasses import dataclass
15
+ from html.parser import HTMLParser
16
+ from optparse import Values
17
+ from typing import (
18
+ Callable,
19
+ Dict,
20
+ Iterable,
21
+ List,
22
+ MutableMapping,
23
+ NamedTuple,
24
+ Optional,
25
+ Protocol,
26
+ Sequence,
27
+ Tuple,
28
+ Union,
29
+ )
30
+
31
+ from pip._vendor import requests
32
+ from pip._vendor.requests import Response
33
+ from pip._vendor.requests.exceptions import RetryError, SSLError
34
+
35
+ from pip._internal.exceptions import NetworkConnectionError
36
+ from pip._internal.models.link import Link
37
+ from pip._internal.models.search_scope import SearchScope
38
+ from pip._internal.network.session import PipSession
39
+ from pip._internal.network.utils import raise_for_status
40
+ from pip._internal.utils.filetypes import is_archive_file
41
+ from pip._internal.utils.misc import redact_auth_from_url
42
+ from pip._internal.vcs import vcs
43
+
44
+ from .sources import CandidatesFromPage, LinkSource, build_source
45
+
46
+ logger = logging.getLogger(__name__)
47
+
48
+ ResponseHeaders = MutableMapping[str, str]
49
+
50
+
51
+ def _match_vcs_scheme(url: str) -> Optional[str]:
52
+ """Look for VCS schemes in the URL.
53
+
54
+ Returns the matched VCS scheme, or None if there's no match.
55
+ """
56
+ for scheme in vcs.schemes:
57
+ if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
58
+ return scheme
59
+ return None
60
+
61
+
62
+ class _NotAPIContent(Exception):
63
+ def __init__(self, content_type: str, request_desc: str) -> None:
64
+ super().__init__(content_type, request_desc)
65
+ self.content_type = content_type
66
+ self.request_desc = request_desc
67
+
68
+
69
+ def _ensure_api_header(response: Response) -> None:
70
+ """
71
+ Check the Content-Type header to ensure the response contains a Simple
72
+ API Response.
73
+
74
+ Raises `_NotAPIContent` if the content type is not a valid content-type.
75
+ """
76
+ content_type = response.headers.get("Content-Type", "Unknown")
77
+
78
+ content_type_l = content_type.lower()
79
+ if content_type_l.startswith(
80
+ (
81
+ "text/html",
82
+ "application/vnd.pypi.simple.v1+html",
83
+ "application/vnd.pypi.simple.v1+json",
84
+ )
85
+ ):
86
+ return
87
+
88
+ raise _NotAPIContent(content_type, response.request.method)
89
+
90
+
91
+ class _NotHTTP(Exception):
92
+ pass
93
+
94
+
95
+ def _ensure_api_response(url: str, session: PipSession) -> None:
96
+ """
97
+ Send a HEAD request to the URL, and ensure the response contains a simple
98
+ API Response.
99
+
100
+ Raises `_NotHTTP` if the URL is not available for a HEAD request, or
101
+ `_NotAPIContent` if the content type is not a valid content type.
102
+ """
103
+ scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
104
+ if scheme not in {"http", "https"}:
105
+ raise _NotHTTP()
106
+
107
+ resp = session.head(url, allow_redirects=True)
108
+ raise_for_status(resp)
109
+
110
+ _ensure_api_header(resp)
111
+
112
+
113
+ def _get_simple_response(url: str, session: PipSession) -> Response:
114
+ """Access an Simple API response with GET, and return the response.
115
+
116
+ This consists of three parts:
117
+
118
+ 1. If the URL looks suspiciously like an archive, send a HEAD first to
119
+ check the Content-Type is HTML or Simple API, to avoid downloading a
120
+ large file. Raise `_NotHTTP` if the content type cannot be determined, or
121
+ `_NotAPIContent` if it is not HTML or a Simple API.
122
+ 2. Actually perform the request. Raise HTTP exceptions on network failures.
123
+ 3. Check the Content-Type header to make sure we got a Simple API response,
124
+ and raise `_NotAPIContent` otherwise.
125
+ """
126
+ if is_archive_file(Link(url).filename):
127
+ _ensure_api_response(url, session=session)
128
+
129
+ logger.debug("Getting page %s", redact_auth_from_url(url))
130
+
131
+ resp = session.get(
132
+ url,
133
+ headers={
134
+ "Accept": ", ".join(
135
+ [
136
+ "application/vnd.pypi.simple.v1+json",
137
+ "application/vnd.pypi.simple.v1+html; q=0.1",
138
+ "text/html; q=0.01",
139
+ ]
140
+ ),
141
+ # We don't want to blindly returned cached data for
142
+ # /simple/, because authors generally expecting that
143
+ # twine upload && pip install will function, but if
144
+ # they've done a pip install in the last ~10 minutes
145
+ # it won't. Thus by setting this to zero we will not
146
+ # blindly use any cached data, however the benefit of
147
+ # using max-age=0 instead of no-cache, is that we will
148
+ # still support conditional requests, so we will still
149
+ # minimize traffic sent in cases where the page hasn't
150
+ # changed at all, we will just always incur the round
151
+ # trip for the conditional GET now instead of only
152
+ # once per 10 minutes.
153
+ # For more information, please see pypa/pip#5670.
154
+ "Cache-Control": "max-age=0",
155
+ },
156
+ )
157
+ raise_for_status(resp)
158
+
159
+ # The check for archives above only works if the url ends with
160
+ # something that looks like an archive. However that is not a
161
+ # requirement of an url. Unless we issue a HEAD request on every
162
+ # url we cannot know ahead of time for sure if something is a
163
+ # Simple API response or not. However we can check after we've
164
+ # downloaded it.
165
+ _ensure_api_header(resp)
166
+
167
+ logger.debug(
168
+ "Fetched page %s as %s",
169
+ redact_auth_from_url(url),
170
+ resp.headers.get("Content-Type", "Unknown"),
171
+ )
172
+
173
+ return resp
174
+
175
+
176
+ def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
177
+ """Determine if we have any encoding information in our headers."""
178
+ if headers and "Content-Type" in headers:
179
+ m = email.message.Message()
180
+ m["content-type"] = headers["Content-Type"]
181
+ charset = m.get_param("charset")
182
+ if charset:
183
+ return str(charset)
184
+ return None
185
+
186
+
187
+ class CacheablePageContent:
188
+ def __init__(self, page: "IndexContent") -> None:
189
+ assert page.cache_link_parsing
190
+ self.page = page
191
+
192
+ def __eq__(self, other: object) -> bool:
193
+ return isinstance(other, type(self)) and self.page.url == other.page.url
194
+
195
+ def __hash__(self) -> int:
196
+ return hash(self.page.url)
197
+
198
+
199
+ class ParseLinks(Protocol):
200
+ def __call__(self, page: "IndexContent") -> Iterable[Link]: ...
201
+
202
+
203
+ def with_cached_index_content(fn: ParseLinks) -> ParseLinks:
204
+ """
205
+ Given a function that parses an Iterable[Link] from an IndexContent, cache the
206
+ function's result (keyed by CacheablePageContent), unless the IndexContent
207
+ `page` has `page.cache_link_parsing == False`.
208
+ """
209
+
210
+ @functools.lru_cache(maxsize=None)
211
+ def wrapper(cacheable_page: CacheablePageContent) -> List[Link]:
212
+ return list(fn(cacheable_page.page))
213
+
214
+ @functools.wraps(fn)
215
+ def wrapper_wrapper(page: "IndexContent") -> List[Link]:
216
+ if page.cache_link_parsing:
217
+ return wrapper(CacheablePageContent(page))
218
+ return list(fn(page))
219
+
220
+ return wrapper_wrapper
221
+
222
+
223
+ @with_cached_index_content
224
+ def parse_links(page: "IndexContent") -> Iterable[Link]:
225
+ """
226
+ Parse a Simple API's Index Content, and yield its anchor elements as Link objects.
227
+ """
228
+
229
+ content_type_l = page.content_type.lower()
230
+ if content_type_l.startswith("application/vnd.pypi.simple.v1+json"):
231
+ data = json.loads(page.content)
232
+ for file in data.get("files", []):
233
+ link = Link.from_json(file, page.url)
234
+ if link is None:
235
+ continue
236
+ yield link
237
+ return
238
+
239
+ parser = HTMLLinkParser(page.url)
240
+ encoding = page.encoding or "utf-8"
241
+ parser.feed(page.content.decode(encoding))
242
+
243
+ url = page.url
244
+ base_url = parser.base_url or url
245
+ for anchor in parser.anchors:
246
+ link = Link.from_element(anchor, page_url=url, base_url=base_url)
247
+ if link is None:
248
+ continue
249
+ yield link
250
+
251
+
252
+ @dataclass(frozen=True)
253
+ class IndexContent:
254
+ """Represents one response (or page), along with its URL.
255
+
256
+ :param encoding: the encoding to decode the given content.
257
+ :param url: the URL from which the HTML was downloaded.
258
+ :param cache_link_parsing: whether links parsed from this page's url
259
+ should be cached. PyPI index urls should
260
+ have this set to False, for example.
261
+ """
262
+
263
+ content: bytes
264
+ content_type: str
265
+ encoding: Optional[str]
266
+ url: str
267
+ cache_link_parsing: bool = True
268
+
269
+ def __str__(self) -> str:
270
+ return redact_auth_from_url(self.url)
271
+
272
+
273
+ class HTMLLinkParser(HTMLParser):
274
+ """
275
+ HTMLParser that keeps the first base HREF and a list of all anchor
276
+ elements' attributes.
277
+ """
278
+
279
+ def __init__(self, url: str) -> None:
280
+ super().__init__(convert_charrefs=True)
281
+
282
+ self.url: str = url
283
+ self.base_url: Optional[str] = None
284
+ self.anchors: List[Dict[str, Optional[str]]] = []
285
+
286
+ def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
287
+ if tag == "base" and self.base_url is None:
288
+ href = self.get_href(attrs)
289
+ if href is not None:
290
+ self.base_url = href
291
+ elif tag == "a":
292
+ self.anchors.append(dict(attrs))
293
+
294
+ def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
295
+ for name, value in attrs:
296
+ if name == "href":
297
+ return value
298
+ return None
299
+
300
+
301
+ def _handle_get_simple_fail(
302
+ link: Link,
303
+ reason: Union[str, Exception],
304
+ meth: Optional[Callable[..., None]] = None,
305
+ ) -> None:
306
+ if meth is None:
307
+ meth = logger.debug
308
+ meth("Could not fetch URL %s: %s - skipping", link, reason)
309
+
310
+
311
+ def _make_index_content(
312
+ response: Response, cache_link_parsing: bool = True
313
+ ) -> IndexContent:
314
+ encoding = _get_encoding_from_headers(response.headers)
315
+ return IndexContent(
316
+ response.content,
317
+ response.headers["Content-Type"],
318
+ encoding=encoding,
319
+ url=response.url,
320
+ cache_link_parsing=cache_link_parsing,
321
+ )
322
+
323
+
324
def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]:
    """Fetch and return the index page for *link*, or None if it can't be had.

    All failure modes (VCS URL, archive URL, content-type mismatch, network
    errors) are logged and swallowed; only a successful fetch yields content.
    """
    # Drop any URL fragment before fetching.
    url = link.url.split("#", 1)[0]

    # Check for VCS schemes that do not support lookup as web pages.
    vcs_scheme = _match_vcs_scheme(url)
    if vcs_scheme:
        logger.warning(
            "Cannot look at %s URL %s because it does not support lookup as web pages.",
            vcs_scheme,
            link,
        )
        return None

    # Tack index.html onto file:// URLs that point to directories
    scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
    if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
        # add trailing slash if not present so urljoin doesn't trim
        # final segment
        if not url.endswith("/"):
            url += "/"
        # TODO: In the future, it would be nice if pip supported PEP 691
        # style responses in the file:// URLs, however there's no
        # standard file extension for application/vnd.pypi.simple.v1+json
        # so we'll need to come up with something on our own.
        url = urllib.parse.urljoin(url, "index.html")
        logger.debug(" file: URL is directory, getting %s", url)

    try:
        resp = _get_simple_response(url, session=session)
    except _NotHTTP:
        # Non-HTTP(S) URL that could not be probed with a HEAD request.
        logger.warning(
            "Skipping page %s because it looks like an archive, and cannot "
            "be checked by a HTTP HEAD request.",
            link,
        )
    except _NotAPIContent as exc:
        # Server answered with a Content-Type we don't know how to parse.
        logger.warning(
            "Skipping page %s because the %s request got Content-Type: %s. "
            "The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
            "application/vnd.pypi.simple.v1+html, and text/html",
            link,
            exc.request_desc,
            exc.content_type,
        )
    except NetworkConnectionError as exc:
        _handle_get_simple_fail(link, exc)
    except RetryError as exc:
        _handle_get_simple_fail(link, exc)
    except SSLError as exc:
        # SSL problems are surfaced at info level, not just debug.
        reason = "There was a problem confirming the ssl certificate: "
        reason += str(exc)
        _handle_get_simple_fail(link, reason, meth=logger.info)
    except requests.ConnectionError as exc:
        _handle_get_simple_fail(link, f"connection error: {exc}")
    except requests.Timeout:
        _handle_get_simple_fail(link, "timed out")
    else:
        # Success path: wrap the response for the caller's parser.
        return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
    return None
383
+
384
+
385
class CollectedSources(NamedTuple):
    """Link sources gathered for one project by LinkCollector.collect_sources()."""

    # Sources built from the find-links locations (expanded directories allowed).
    find_links: Sequence[Optional[LinkSource]]
    # Sources built from the configured index URLs.
    index_urls: Sequence[Optional[LinkSource]]
388
+
389
+
390
class LinkCollector:
    """
    Gathers Link objects from every configured location (index URLs and
    find-links entries), issuing network requests as required.

    The main entry point is collect_sources().
    """

    def __init__(
        self,
        session: PipSession,
        search_scope: SearchScope,
    ) -> None:
        self.search_scope = search_scope
        self.session = session

    @classmethod
    def create(
        cls,
        session: PipSession,
        options: Values,
        suppress_no_index: bool = False,
    ) -> "LinkCollector":
        """Build a LinkCollector from parsed command-line options.

        :param session: The Session to use to make requests.
        :param suppress_no_index: Whether to ignore the --no-index option
            when constructing the SearchScope object.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index and not suppress_no_index:
            redacted = ",".join(redact_auth_from_url(url) for url in index_urls)
            logger.debug("Ignoring indexes: %s", redacted)
            index_urls = []

        # find_links may be None on the options object; normalize to a list.
        find_links = options.find_links or []

        search_scope = SearchScope.create(
            find_links=find_links,
            index_urls=index_urls,
            no_index=options.no_index,
        )
        return LinkCollector(
            session=session,
            search_scope=search_scope,
        )

    @property
    def find_links(self) -> List[str]:
        return self.search_scope.find_links

    def fetch_response(self, location: Link) -> Optional[IndexContent]:
        """
        Fetch an HTML page containing package links.
        """
        return _get_index_content(location, session=self.session)

    def collect_sources(
        self,
        project_name: str,
        candidates_from_page: CandidatesFromPage,
    ) -> CollectedSources:
        def _sources_for(locations, *, expand_dir, cache_link_parsing):
            # OrderedDict keyed by URL: duplicate locations collapse into one.
            deduplicated = collections.OrderedDict(
                build_source(
                    location,
                    candidates_from_page=candidates_from_page,
                    page_validator=self.session.is_secure_origin,
                    expand_dir=expand_dir,
                    cache_link_parsing=cache_link_parsing,
                    project_name=project_name,
                )
                for location in locations
            )
            return deduplicated.values()

        index_url_sources = _sources_for(
            self.search_scope.get_index_urls_locations(project_name),
            expand_dir=False,
            cache_link_parsing=False,
        )
        find_links_sources = _sources_for(
            self.find_links,
            expand_dir=True,
            cache_link_parsing=True,
        )

        if logger.isEnabledFor(logging.DEBUG):
            located = [
                f"* {source.link}"
                for source in itertools.chain(find_links_sources, index_url_sources)
                if source is not None and source.link is not None
            ]
            header = (
                f"{len(located)} location(s) to search "
                f"for versions of {project_name}:"
            )
            logger.debug("\n".join([header] + located))

        return CollectedSources(
            find_links=list(find_links_sources),
            index_urls=list(index_url_sources),
        )
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/package_finder.py ADDED
@@ -0,0 +1,1050 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Routines related to PyPI, indexes"""
2
+
3
+ import enum
4
+ import functools
5
+ import itertools
6
+ import logging
7
+ import re
8
+ from dataclasses import dataclass
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Dict,
12
+ FrozenSet,
13
+ Iterable,
14
+ List,
15
+ Optional,
16
+ Set,
17
+ Tuple,
18
+ Union,
19
+ )
20
+
21
+ from pip._vendor.packaging import specifiers
22
+ from pip._vendor.packaging.tags import Tag
23
+ from pip._vendor.packaging.utils import canonicalize_name
24
+ from pip._vendor.packaging.version import InvalidVersion, _BaseVersion
25
+ from pip._vendor.packaging.version import parse as parse_version
26
+
27
+ from pip._internal.exceptions import (
28
+ BestVersionAlreadyInstalled,
29
+ DistributionNotFound,
30
+ InvalidWheelFilename,
31
+ UnsupportedWheel,
32
+ )
33
+ from pip._internal.index.collector import LinkCollector, parse_links
34
+ from pip._internal.models.candidate import InstallationCandidate
35
+ from pip._internal.models.format_control import FormatControl
36
+ from pip._internal.models.link import Link
37
+ from pip._internal.models.search_scope import SearchScope
38
+ from pip._internal.models.selection_prefs import SelectionPreferences
39
+ from pip._internal.models.target_python import TargetPython
40
+ from pip._internal.models.wheel import Wheel
41
+ from pip._internal.req import InstallRequirement
42
+ from pip._internal.utils._log import getLogger
43
+ from pip._internal.utils.filetypes import WHEEL_EXTENSION
44
+ from pip._internal.utils.hashes import Hashes
45
+ from pip._internal.utils.logging import indent_log
46
+ from pip._internal.utils.misc import build_netloc
47
+ from pip._internal.utils.packaging import check_requires_python
48
+ from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
49
+
50
+ if TYPE_CHECKING:
51
+ from pip._vendor.typing_extensions import TypeGuard
52
+
53
+ __all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
54
+
55
+
56
+ logger = getLogger(__name__)
57
+
58
+ BuildTag = Union[Tuple[()], Tuple[int, str]]
59
+ CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
60
+
61
+
62
def _check_link_requires_python(
    link: Link,
    version_info: Tuple[int, int, int],
    ignore_requires_python: bool = False,
) -> bool:
    """
    Return whether the given Python version is compatible with a link's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.
    """
    try:
        is_compatible = check_requires_python(
            link.requires_python,
            version_info=version_info,
        )
    except specifiers.InvalidSpecifier:
        # An unparseable Requires-Python is treated as no constraint at all.
        logger.debug(
            "Ignoring invalid Requires-Python (%r) for link: %s",
            link.requires_python,
            link,
        )
        return True

    if is_compatible:
        return True

    version = ".".join(map(str, version_info))
    if not ignore_requires_python:
        logger.verbose(
            "Link requires a different Python (%s not in: %r): %s",
            version,
            link.requires_python,
            link,
        )
        return False

    # Caller asked to ignore the mismatch: log it, but accept the link.
    logger.debug(
        "Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
        version,
        link.requires_python,
        link,
    )
    return True
107
+
108
+
109
class LinkType(enum.Enum):
    """Outcome categories produced by LinkEvaluator.evaluate_link()."""

    candidate = enum.auto()
    different_project = enum.auto()
    yanked = enum.auto()
    format_unsupported = enum.auto()
    format_invalid = enum.auto()
    platform_mismatch = enum.auto()
    requires_python_mismatch = enum.auto()
117
+
118
+
119
class LinkEvaluator:
    """
    Responsible for evaluating links for a particular project.
    """

    # Matches a trailing "-pyX[.Y]" marker embedded in a version string.
    _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")

    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    def __init__(
        self,
        project_name: str,
        canonical_name: str,
        formats: FrozenSet[str],
        target_python: TargetPython,
        allow_yanked: bool,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        :param project_name: The user supplied package name.
        :param canonical_name: The canonical package name.
        :param formats: The formats allowed for this package. Should be a set
            with 'binary' or 'source' or both in it.
        :param target_python: The target Python interpreter to use when
            evaluating link compatibility. This is used, for example, to
            check wheel compatibility, as well as when checking the Python
            version, e.g. the Python version embedded in a link filename
            (or egg fragment) and against an HTML link's optional PEP 503
            "data-requires-python" attribute.
        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param ignore_requires_python: Whether to ignore incompatible
            PEP 503 "data-requires-python" values in HTML links. Defaults
            to False.
        """
        if ignore_requires_python is None:
            ignore_requires_python = False

        self._allow_yanked = allow_yanked
        self._canonical_name = canonical_name
        self._ignore_requires_python = ignore_requires_python
        self._formats = formats
        self._target_python = target_python

        self.project_name = project_name

    def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
        """
        Determine whether a link is a candidate for installation.

        :return: A tuple (result, detail), where *result* is an enum
            representing whether the evaluation found a candidate, or the reason
            why one is not found. If a candidate is found, *detail* will be the
            candidate's version string; if one is not found, it contains the
            reason the link fails to qualify.
        """
        version = None
        # Checks are ordered cheapest-first; each early return carries its reason.
        if link.is_yanked and not self._allow_yanked:
            reason = link.yanked_reason or "<none given>"
            return (LinkType.yanked, f"yanked for reason: {reason}")

        if link.egg_fragment:
            # An explicit egg fragment names the project/version directly.
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                return (LinkType.format_unsupported, "not a file")
            if ext not in SUPPORTED_EXTENSIONS:
                return (
                    LinkType.format_unsupported,
                    f"unsupported archive format: {ext}",
                )
            if "binary" not in self._formats and ext == WHEEL_EXTENSION:
                reason = f"No binaries permitted for {self.project_name}"
                return (LinkType.format_unsupported, reason)
            if "macosx10" in link.path and ext == ".zip":
                return (LinkType.format_unsupported, "macosx10 one")
            if ext == WHEEL_EXTENSION:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    return (
                        LinkType.format_invalid,
                        "invalid wheel filename",
                    )
                if canonicalize_name(wheel.name) != self._canonical_name:
                    reason = f"wrong project name (not {self.project_name})"
                    return (LinkType.different_project, reason)

                supported_tags = self._target_python.get_unsorted_tags()
                if not wheel.supported(supported_tags):
                    # Include the wheel's tags in the reason string to
                    # simplify troubleshooting compatibility issues.
                    file_tags = ", ".join(wheel.get_formatted_file_tags())
                    reason = (
                        f"none of the wheel's tags ({file_tags}) are compatible "
                        f"(run pip debug --verbose to show compatible tags)"
                    )
                    return (LinkType.platform_mismatch, reason)

                # Wheels carry their version in the filename.
                version = wheel.version

        # This should be up by the self.ok_binary check, but see issue 2700.
        if "source" not in self._formats and ext != WHEEL_EXTENSION:
            reason = f"No sources permitted for {self.project_name}"
            return (LinkType.format_unsupported, reason)

        if not version:
            # Fall back to parsing the version out of the filename/egg fragment.
            version = _extract_version_from_fragment(
                egg_info,
                self._canonical_name,
            )
        if not version:
            reason = f"Missing project version for {self.project_name}"
            return (LinkType.format_invalid, reason)

        match = self._py_version_re.search(version)
        if match:
            # Strip the "-pyX.Y" suffix and verify it matches the target Python.
            version = version[: match.start()]
            py_version = match.group(1)
            if py_version != self._target_python.py_version:
                return (
                    LinkType.platform_mismatch,
                    "Python version is incorrect",
                )

        supports_python = _check_link_requires_python(
            link,
            version_info=self._target_python.py_version_info,
            ignore_requires_python=self._ignore_requires_python,
        )
        if not supports_python:
            reason = f"{version} Requires-Python {link.requires_python}"
            return (LinkType.requires_python_mismatch, reason)

        logger.debug("Found link %s, version: %s", link, version)

        return (LinkType.candidate, version)
260
+
261
+
262
def filter_unallowed_hashes(
    candidates: List[InstallationCandidate],
    hashes: Optional[Hashes],
    project_name: str,
) -> List[InstallationCandidate]:
    """
    Return a new candidate list with hash-mismatching entries filtered out.

    If at least one candidate has an allowed hash, then all candidates with
    either an allowed hash or no hash specified are returned. Otherwise,
    the given candidates are returned unchanged.

    Keeping hashless candidates alongside matches lets a warning be logged
    when a more-preferred candidate has no hash; returning everything when
    nothing matches lets pip report the hash of the candidate that would
    otherwise have been installed, so the user can update their
    requirements file.
    """
    if not hashes:
        logger.debug(
            "Given no hashes to check %s links for project %r: "
            "discarding no candidates",
            len(candidates),
            project_name,
        )
        # Return a copy so callers never share the input list.
        return list(candidates)

    matches_or_no_digest: List[InstallationCandidate] = []
    non_matches: List[InstallationCandidate] = []  # kept for logging only
    match_count = 0
    for candidate in candidates:
        link = candidate.link
        if link.has_hash and not link.is_hash_allowed(hashes=hashes):
            non_matches.append(candidate)
            continue
        if link.has_hash:
            match_count += 1
        matches_or_no_digest.append(candidate)

    # No matches at all: fall back to a copy of the original list.
    filtered = matches_or_no_digest if match_count else list(candidates)

    if len(filtered) == len(candidates):
        discard_message = "discarding no candidates"
    else:
        discarded_links = "\n  ".join(str(candidate.link) for candidate in non_matches)
        discard_message = f"discarding {len(non_matches)} non-matches:\n  {discarded_links}"

    logger.debug(
        "Checked %s links for project %r against %s hashes "
        "(%s matches, %s no digest): %s",
        len(candidates),
        project_name,
        hashes.digest_count,
        match_count,
        len(matches_or_no_digest) - match_count,
        discard_message,
    )

    return filtered
334
+
335
+
336
@dataclass
class CandidatePreferences:
    """Bundles the knobs used when filtering and sorting InstallationCandidates."""

    # Rank all wheels above all source distributions when sorting.
    prefer_binary: bool = False
    # Consider pre-release versions even without an explicit specifier.
    allow_all_prereleases: bool = False
345
+
346
+
347
@dataclass(frozen=True)
class BestCandidateResult:
    """A collection of candidates, returned by `PackageFinder.find_best_candidate`.

    This class is only intended to be instantiated by CandidateEvaluator's
    `compute_best_candidate()` method.

    :param all_candidates: A sequence of all available candidates found.
    :param applicable_candidates: The applicable candidates.
    :param best_candidate: The most preferred candidate found, or None
        if no applicable candidates were found.
    """

    all_candidates: List[InstallationCandidate]
    applicable_candidates: List[InstallationCandidate]
    best_candidate: Optional[InstallationCandidate]

    def __post_init__(self) -> None:
        # Internal consistency checks: applicable candidates must come from
        # the full set, and the best candidate (when present) from the
        # applicable ones.
        assert set(self.applicable_candidates) <= set(self.all_candidates)

        if self.best_candidate is None:
            assert not self.applicable_candidates
        else:
            assert self.best_candidate in self.applicable_candidates
371
+
372
+
373
class CandidateEvaluator:
    """
    Responsible for filtering and sorting candidates for installation based
    on what tags are valid.
    """

    @classmethod
    def create(
        cls,
        project_name: str,
        target_python: Optional[TargetPython] = None,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> "CandidateEvaluator":
        """Create a CandidateEvaluator object.

        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :param hashes: An optional collection of allowed hashes.
        """
        if target_python is None:
            target_python = TargetPython()
        if specifier is None:
            # An empty SpecifierSet accepts any version.
            specifier = specifiers.SpecifierSet()

        supported_tags = target_python.get_sorted_tags()

        return cls(
            project_name=project_name,
            supported_tags=supported_tags,
            specifier=specifier,
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
            hashes=hashes,
        )

    def __init__(
        self,
        project_name: str,
        supported_tags: List[Tag],
        specifier: specifiers.BaseSpecifier,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        hashes: Optional[Hashes] = None,
    ) -> None:
        """
        :param supported_tags: The PEP 425 tags supported by the target
            Python in order of preference (most preferred first).
        """
        self._allow_all_prereleases = allow_all_prereleases
        self._hashes = hashes
        self._prefer_binary = prefer_binary
        self._project_name = project_name
        self._specifier = specifier
        self._supported_tags = supported_tags
        # Since the index of the tag in the _supported_tags list is used
        # as a priority, precompute a map from tag to index/priority to be
        # used in wheel.find_most_preferred_tag.
        self._wheel_tag_preferences = {
            tag: idx for idx, tag in enumerate(supported_tags)
        }

    def get_applicable_candidates(
        self,
        candidates: List[InstallationCandidate],
    ) -> List[InstallationCandidate]:
        """
        Return the applicable candidates from a list of candidates.
        """
        # Using None infers from the specifier instead.
        allow_prereleases = self._allow_all_prereleases or None
        specifier = self._specifier

        # We turn the version object into a str here because otherwise
        # when we're debundled but setuptools isn't, Python will see
        # packaging.version.Version and
        # pkg_resources._vendor.packaging.version.Version as different
        # types. This way we'll use a str as a common data interchange
        # format. If we stop using the pkg_resources provided specifier
        # and start using our own, we can drop the cast to str().
        candidates_and_versions = [(c, str(c.version)) for c in candidates]
        versions = set(
            specifier.filter(
                (v for _, v in candidates_and_versions),
                prereleases=allow_prereleases,
            )
        )

        applicable_candidates = [c for c, v in candidates_and_versions if v in versions]
        filtered_applicable_candidates = filter_unallowed_hashes(
            candidates=applicable_candidates,
            hashes=self._hashes,
            project_name=self._project_name,
        )

        # Sorted ascending by preference: the best candidate comes last.
        return sorted(filtered_applicable_candidates, key=self._sort_key)

    def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.

        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.

        The preference is as follows:

        First and foremost, candidates with allowed (matching) hashes are
        always preferred over candidates without matching hashes. This is
        because e.g. if the only candidate with an allowed hash is yanked,
        we still want to use that candidate.

        Second, excepting hash considerations, candidates that have been
        yanked (in the sense of PEP 592) are always less preferred than
        candidates that haven't been yanked. Then:

        If not finding wheels, they are sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min(self._supported_tags)
          3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.

        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version, would have to be considered equal
        """
        valid_tags = self._supported_tags
        support_num = len(valid_tags)
        build_tag: BuildTag = ()
        binary_preference = 0
        link = candidate.link
        if link.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(link.filename)
            try:
                # Negated so that a more-preferred (lower-index) tag sorts higher.
                pri = -(
                    wheel.find_most_preferred_tag(
                        valid_tags, self._wheel_tag_preferences
                    )
                )
            except ValueError:
                raise UnsupportedWheel(
                    f"{wheel.filename} is not a supported wheel for this platform. It "
                    "can't be sorted."
                )
            if self._prefer_binary:
                binary_preference = 1
            build_tag = wheel.build_tag
        else:  # sdist
            # Sorts below every wheel's priority.
            pri = -(support_num)
        has_allowed_hash = int(link.is_hash_allowed(self._hashes))
        yank_value = -1 * int(link.is_yanked)  # -1 for yanked.
        return (
            has_allowed_hash,
            yank_value,
            binary_preference,
            candidate.version,
            pri,
            build_tag,
        )

    def sort_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> Optional[InstallationCandidate]:
        """
        Return the best candidate per the instance's sort order, or None if
        no candidate is acceptable.
        """
        if not candidates:
            return None
        best_candidate = max(candidates, key=self._sort_key)
        return best_candidate

    def compute_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> BestCandidateResult:
        """
        Compute and return a `BestCandidateResult` instance.
        """
        applicable_candidates = self.get_applicable_candidates(candidates)

        best_candidate = self.sort_best_candidate(applicable_candidates)

        return BestCandidateResult(
            candidates,
            applicable_candidates=applicable_candidates,
            best_candidate=best_candidate,
        )
570
+
571
+
572
+ class PackageFinder:
573
+ """This finds packages.
574
+
575
+ This is meant to match easy_install's technique for looking for
576
+ packages, by reading pages and looking for appropriate links.
577
+ """
578
+
579
    def __init__(
        self,
        link_collector: LinkCollector,
        target_python: TargetPython,
        allow_yanked: bool,
        format_control: Optional[FormatControl] = None,
        candidate_prefs: Optional[CandidatePreferences] = None,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        This constructor is primarily meant to be used by the create() class
        method and from tests.

        :param format_control: A FormatControl object, used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param candidate_prefs: Options to use when creating a
            CandidateEvaluator object.
        """
        if candidate_prefs is None:
            candidate_prefs = CandidatePreferences()

        # Default: a FormatControl with no per-package restrictions listed.
        format_control = format_control or FormatControl(set(), set())

        self._allow_yanked = allow_yanked
        self._candidate_prefs = candidate_prefs
        self._ignore_requires_python = ignore_requires_python
        self._link_collector = link_collector
        self._target_python = target_python

        self.format_control = format_control

        # These are boring links that have already been logged somehow.
        self._logged_links: Set[Tuple[Link, LinkType, str]] = set()

        # Cache of the result of finding candidates
        self._all_candidates: Dict[str, List[InstallationCandidate]] = {}
        self._best_candidates: Dict[
            Tuple[str, Optional[specifiers.BaseSpecifier], Optional[Hashes]],
            BestCandidateResult,
        ] = {}
620
+
621
+ # Don't include an allow_yanked default value to make sure each call
622
+ # site considers whether yanked releases are allowed. This also causes
623
+ # that decision to be made explicit in the calling code, which helps
624
+ # people when reading the code.
625
+ @classmethod
626
+ def create(
627
+ cls,
628
+ link_collector: LinkCollector,
629
+ selection_prefs: SelectionPreferences,
630
+ target_python: Optional[TargetPython] = None,
631
+ ) -> "PackageFinder":
632
+ """Create a PackageFinder.
633
+
634
+ :param selection_prefs: The candidate selection preferences, as a
635
+ SelectionPreferences object.
636
+ :param target_python: The target Python interpreter to use when
637
+ checking compatibility. If None (the default), a TargetPython
638
+ object will be constructed from the running Python.
639
+ """
640
+ if target_python is None:
641
+ target_python = TargetPython()
642
+
643
+ candidate_prefs = CandidatePreferences(
644
+ prefer_binary=selection_prefs.prefer_binary,
645
+ allow_all_prereleases=selection_prefs.allow_all_prereleases,
646
+ )
647
+
648
+ return cls(
649
+ candidate_prefs=candidate_prefs,
650
+ link_collector=link_collector,
651
+ target_python=target_python,
652
+ allow_yanked=selection_prefs.allow_yanked,
653
+ format_control=selection_prefs.format_control,
654
+ ignore_requires_python=selection_prefs.ignore_requires_python,
655
+ )
656
+
657
    @property
    def target_python(self) -> TargetPython:
        # The interpreter/platform description used for compatibility checks.
        return self._target_python

    @property
    def search_scope(self) -> SearchScope:
        # The scope is owned by the link collector; this is a pass-through.
        return self._link_collector.search_scope

    @search_scope.setter
    def search_scope(self, search_scope: SearchScope) -> None:
        self._link_collector.search_scope = search_scope

    @property
    def find_links(self) -> List[str]:
        return self._link_collector.find_links

    @property
    def index_urls(self) -> List[str]:
        return self.search_scope.index_urls
676
+
677
    @property
    def proxy(self) -> Optional[str]:
        # Delegates to the pip session's configured proxy, if any.
        return self._link_collector.session.pip_proxy

    @property
    def trusted_hosts(self) -> Iterable[str]:
        # Render each trusted (host, port) origin back into "host:port" form.
        for host_port in self._link_collector.session.pip_trusted_origins:
            yield build_netloc(*host_port)

    @property
    def custom_cert(self) -> Optional[str]:
        # session.verify is either a boolean (use default bundle/no SSL
        # verification) or a string path to a custom CA bundle to use. We only
        # care about the latter.
        verify = self._link_collector.session.verify
        return verify if isinstance(verify, str) else None

    @property
    def client_cert(self) -> Optional[str]:
        # A tuple here would mean separate cert/key files, which pip rejects.
        cert = self._link_collector.session.cert
        assert not isinstance(cert, tuple), "pip only supports PEM client certs"
        return cert
699
+
700
    @property
    def allow_all_prereleases(self) -> bool:
        # Read-only view of the candidate-preference flag.
        return self._candidate_prefs.allow_all_prereleases

    def set_allow_all_prereleases(self) -> None:
        # One-way switch: there is no API here to turn the flag back off.
        self._candidate_prefs.allow_all_prereleases = True

    @property
    def prefer_binary(self) -> bool:
        return self._candidate_prefs.prefer_binary

    def set_prefer_binary(self) -> None:
        # One-way switch: there is no API here to turn the flag back off.
        self._candidate_prefs.prefer_binary = True
713
+
714
+ def requires_python_skipped_reasons(self) -> List[str]:
715
+ reasons = {
716
+ detail
717
+ for _, result, detail in self._logged_links
718
+ if result == LinkType.requires_python_mismatch
719
+ }
720
+ return sorted(reasons)
721
+
722
+ def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
723
+ canonical_name = canonicalize_name(project_name)
724
+ formats = self.format_control.get_allowed_formats(canonical_name)
725
+
726
+ return LinkEvaluator(
727
+ project_name=project_name,
728
+ canonical_name=canonical_name,
729
+ formats=formats,
730
+ target_python=self._target_python,
731
+ allow_yanked=self._allow_yanked,
732
+ ignore_requires_python=self._ignore_requires_python,
733
+ )
734
+
735
+ def _sort_links(self, links: Iterable[Link]) -> List[Link]:
736
+ """
737
+ Returns elements of links in order, non-egg links first, egg links
738
+ second, while eliminating duplicates
739
+ """
740
+ eggs, no_eggs = [], []
741
+ seen: Set[Link] = set()
742
+ for link in links:
743
+ if link not in seen:
744
+ seen.add(link)
745
+ if link.egg_fragment:
746
+ eggs.append(link)
747
+ else:
748
+ no_eggs.append(link)
749
+ return no_eggs + eggs
750
+
751
+ def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
752
+ entry = (link, result, detail)
753
+ if entry not in self._logged_links:
754
+ # Put the link at the end so the reason is more visible and because
755
+ # the link string is usually very long.
756
+ logger.debug("Skipping link: %s: %s", detail, link)
757
+ self._logged_links.add(entry)
758
+
759
+ def get_install_candidate(
760
+ self, link_evaluator: LinkEvaluator, link: Link
761
+ ) -> Optional[InstallationCandidate]:
762
+ """
763
+ If the link is a candidate for install, convert it to an
764
+ InstallationCandidate and return it. Otherwise, return None.
765
+ """
766
+ result, detail = link_evaluator.evaluate_link(link)
767
+ if result != LinkType.candidate:
768
+ self._log_skipped_link(link, result, detail)
769
+ return None
770
+
771
+ try:
772
+ return InstallationCandidate(
773
+ name=link_evaluator.project_name,
774
+ link=link,
775
+ version=detail,
776
+ )
777
+ except InvalidVersion:
778
+ return None
779
+
780
+ def evaluate_links(
781
+ self, link_evaluator: LinkEvaluator, links: Iterable[Link]
782
+ ) -> List[InstallationCandidate]:
783
+ """
784
+ Convert links that are candidates to InstallationCandidate objects.
785
+ """
786
+ candidates = []
787
+ for link in self._sort_links(links):
788
+ candidate = self.get_install_candidate(link_evaluator, link)
789
+ if candidate is not None:
790
+ candidates.append(candidate)
791
+
792
+ return candidates
793
+
794
+ def process_project_url(
795
+ self, project_url: Link, link_evaluator: LinkEvaluator
796
+ ) -> List[InstallationCandidate]:
797
+ logger.debug(
798
+ "Fetching project page and analyzing links: %s",
799
+ project_url,
800
+ )
801
+ index_response = self._link_collector.fetch_response(project_url)
802
+ if index_response is None:
803
+ return []
804
+
805
+ page_links = list(parse_links(index_response))
806
+
807
+ with indent_log():
808
+ package_links = self.evaluate_links(
809
+ link_evaluator,
810
+ links=page_links,
811
+ )
812
+
813
+ return package_links
814
+
815
    def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
        """Find all available InstallationCandidate for project_name

        This checks index_urls and find_links.
        All versions found are returned as an InstallationCandidate list.

        See LinkEvaluator.evaluate_link() for details on which files
        are accepted.
        """
        # Per-project memoization: repeated resolutions reuse the first scan.
        if project_name in self._all_candidates:
            return self._all_candidates[project_name]

        link_evaluator = self.make_link_evaluator(project_name)

        collected_sources = self._link_collector.collect_sources(
            project_name=project_name,
            candidates_from_page=functools.partial(
                self.process_project_url,
                link_evaluator=link_evaluator,
            ),
        )

        # Candidates coming from index/HTML pages.
        page_candidates_it = itertools.chain.from_iterable(
            source.page_candidates()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        page_candidates = list(page_candidates_it)

        # Links pointing directly at archive files (e.g. from --find-links).
        file_links_it = itertools.chain.from_iterable(
            source.file_links()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        file_candidates = self.evaluate_links(
            link_evaluator,
            sorted(file_links_it, reverse=True),
        )

        if logger.isEnabledFor(logging.DEBUG) and file_candidates:
            paths = []
            for candidate in file_candidates:
                assert candidate.link.url  # we need to have a URL
                try:
                    paths.append(candidate.link.file_path)
                except Exception:
                    paths.append(candidate.link.url)  # it's not a local file

            logger.debug("Local files found: %s", ", ".join(paths))

        # This is an intentional priority ordering
        self._all_candidates[project_name] = file_candidates + page_candidates

        return self._all_candidates[project_name]
871
+
872
+ def make_candidate_evaluator(
873
+ self,
874
+ project_name: str,
875
+ specifier: Optional[specifiers.BaseSpecifier] = None,
876
+ hashes: Optional[Hashes] = None,
877
+ ) -> CandidateEvaluator:
878
+ """Create a CandidateEvaluator object to use."""
879
+ candidate_prefs = self._candidate_prefs
880
+ return CandidateEvaluator.create(
881
+ project_name=project_name,
882
+ target_python=self._target_python,
883
+ prefer_binary=candidate_prefs.prefer_binary,
884
+ allow_all_prereleases=candidate_prefs.allow_all_prereleases,
885
+ specifier=specifier,
886
+ hashes=hashes,
887
+ )
888
+
889
+ def find_best_candidate(
890
+ self,
891
+ project_name: str,
892
+ specifier: Optional[specifiers.BaseSpecifier] = None,
893
+ hashes: Optional[Hashes] = None,
894
+ ) -> BestCandidateResult:
895
+ """Find matches for the given project and specifier.
896
+
897
+ :param specifier: An optional object implementing `filter`
898
+ (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
899
+ versions.
900
+
901
+ :return: A `BestCandidateResult` instance.
902
+ """
903
+ if (project_name, specifier, hashes) in self._best_candidates:
904
+ return self._best_candidates[project_name, specifier, hashes]
905
+
906
+ candidates = self.find_all_candidates(project_name)
907
+ candidate_evaluator = self.make_candidate_evaluator(
908
+ project_name=project_name,
909
+ specifier=specifier,
910
+ hashes=hashes,
911
+ )
912
+ self._best_candidates[project_name, specifier, hashes] = (
913
+ candidate_evaluator.compute_best_candidate(candidates)
914
+ )
915
+
916
+ return self._best_candidates[project_name, specifier, hashes]
917
+
918
    def find_requirement(
        self, req: InstallRequirement, upgrade: bool
    ) -> Optional[InstallationCandidate]:
        """Try to find a Link matching req

        Expects req, an InstallRequirement and upgrade, a boolean
        Returns a InstallationCandidate if found,
        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
        """
        name = req.name
        assert name is not None, "find_requirement() called with no name"

        # Only hashes pinned in requirement files are trusted here.
        hashes = req.hashes(trust_internet=False)
        best_candidate_result = self.find_best_candidate(
            name,
            specifier=req.specifier,
            hashes=hashes,
        )
        best_candidate = best_candidate_result.best_candidate

        installed_version: Optional[_BaseVersion] = None
        if req.satisfied_by is not None:
            installed_version = req.satisfied_by.version

        def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
            # This repeated parse_version and str() conversion is needed to
            # handle different vendoring sources from pip and pkg_resources.
            # If we stop using the pkg_resources provided specifier and start
            # using our own, we can drop the cast to str().
            return (
                ", ".join(
                    sorted(
                        {str(c.version) for c in cand_iter},
                        key=parse_version,
                    )
                )
                or "none"
            )

        # Nothing installed and nothing found: hard failure.
        if installed_version is None and best_candidate is None:
            logger.critical(
                "Could not find a version that satisfies the requirement %s "
                "(from versions: %s)",
                req,
                _format_versions(best_candidate_result.all_candidates),
            )

            raise DistributionNotFound(f"No matching distribution found for {req}")

        def _should_install_candidate(
            candidate: Optional[InstallationCandidate],
        ) -> "TypeGuard[InstallationCandidate]":
            # True when the found candidate beats whatever is installed.
            if installed_version is None:
                return True
            if best_candidate is None:
                return False
            return best_candidate.version > installed_version

        if not upgrade and installed_version is not None:
            if _should_install_candidate(best_candidate):
                logger.debug(
                    "Existing installed version (%s) satisfies requirement "
                    "(most up-to-date version is %s)",
                    installed_version,
                    best_candidate.version,
                )
            else:
                logger.debug(
                    "Existing installed version (%s) is most up-to-date and "
                    "satisfies requirement",
                    installed_version,
                )
            # Not upgrading: keep the installed version either way.
            return None

        if _should_install_candidate(best_candidate):
            logger.debug(
                "Using version %s (newest of versions: %s)",
                best_candidate.version,
                _format_versions(best_candidate_result.applicable_candidates),
            )
            return best_candidate

        # We have an existing version, and its the best version
        logger.debug(
            "Installed version (%s) is most up-to-date (past versions: %s)",
            installed_version,
            _format_versions(best_candidate_result.applicable_candidates),
        )
        raise BestVersionAlreadyInstalled
1007
+
1008
+
1009
def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
    """Find the separator's index based on the package's canonical name.

    :param fragment: A <package>+<version> filename "fragment" (stem) or
        egg fragment.
    :param canonical_name: The package's canonical name.

    This function is needed since the canonicalized name does not necessarily
    have the same length as the egg info's name part. An example::

        >>> fragment = 'foo__bar-1.0'
        >>> canonical_name = 'foo-bar'
        >>> _find_name_version_sep(fragment, canonical_name)
        8
    """
    # Name and version are separated by exactly one dash. Try every dash in
    # the fragment: the first whose prefix canonicalizes to the project name
    # is the separator.
    for index, char in enumerate(fragment):
        if char != "-":
            continue
        if canonicalize_name(fragment[:index]) == canonical_name:
            return index
    raise ValueError(f"{fragment} does not match {canonical_name}")
1033
+
1034
+
1035
def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
    """Parse the version string from a <package>+<version> filename
    "fragment" (stem) or egg fragment.

    :param fragment: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.

    Returns None when the fragment does not match the project name or the
    version part is empty.
    """
    try:
        sep_index = _find_name_version_sep(fragment, canonical_name)
    except ValueError:
        return None
    version = fragment[sep_index + 1 :]
    return version or None
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/index/sources.py ADDED
@@ -0,0 +1,284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import mimetypes
3
+ import os
4
+ from collections import defaultdict
5
+ from typing import Callable, Dict, Iterable, List, Optional, Tuple
6
+
7
+ from pip._vendor.packaging.utils import (
8
+ InvalidSdistFilename,
9
+ InvalidWheelFilename,
10
+ canonicalize_name,
11
+ parse_sdist_filename,
12
+ parse_wheel_filename,
13
+ )
14
+
15
+ from pip._internal.models.candidate import InstallationCandidate
16
+ from pip._internal.models.link import Link
17
+ from pip._internal.utils.urls import path_to_url, url_to_path
18
+ from pip._internal.vcs import is_url
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+ FoundCandidates = Iterable[InstallationCandidate]
23
+ FoundLinks = Iterable[Link]
24
+ CandidatesFromPage = Callable[[Link], Iterable[InstallationCandidate]]
25
+ PageValidator = Callable[[Link], bool]
26
+
27
+
28
class LinkSource:
    """Abstract base for one source of installation links (a page, a
    single file, or a directory); concrete subclasses follow below."""

    @property
    def link(self) -> Optional[Link]:
        """Returns the underlying link, if there's one."""
        raise NotImplementedError()

    def page_candidates(self) -> FoundCandidates:
        """Candidates found by parsing an archive listing HTML file."""
        raise NotImplementedError()

    def file_links(self) -> FoundLinks:
        """Links found by specifying archives directly."""
        raise NotImplementedError()
41
+
42
+
43
+ def _is_html_file(file_url: str) -> bool:
44
+ return mimetypes.guess_type(file_url, strict=False)[0] == "text/html"
45
+
46
+
47
class _FlatDirectoryToUrls:
    """Scans directory and caches results"""

    def __init__(self, path: str) -> None:
        self._path = path
        self._page_candidates: List[str] = []
        self._project_name_to_urls: Dict[str, List[str]] = defaultdict(list)
        self._scanned_directory = False

    def _scan_directory(self) -> None:
        """Walk the directory once, splitting entries into HTML pages and
        per-project archive URLs."""
        for dir_entry in os.scandir(self._path):
            entry_url = path_to_url(dir_entry.path)
            if _is_html_file(entry_url):
                self._page_candidates.append(entry_url)
                continue

            # Only files named like a wheel or an sdist are worth keeping
            # as package archives.
            try:
                project_filename = parse_wheel_filename(dir_entry.name)[0]
            except InvalidWheelFilename:
                try:
                    project_filename = parse_sdist_filename(dir_entry.name)[0]
                except InvalidSdistFilename:
                    continue

            self._project_name_to_urls[project_filename].append(entry_url)
        self._scanned_directory = True

    @property
    def page_candidates(self) -> List[str]:
        """URLs of HTML files in the directory (lazy, scanned once)."""
        if not self._scanned_directory:
            self._scan_directory()
        return self._page_candidates

    @property
    def project_name_to_urls(self) -> Dict[str, List[str]]:
        """Archive URLs grouped by canonical project name (lazy)."""
        if not self._scanned_directory:
            self._scan_directory()
        return self._project_name_to_urls
92
+
93
+
94
class _FlatDirectorySource(LinkSource):
    """Link source specified by ``--find-links=<path-to-dir>``.

    This looks the content of the directory, and returns:

    * ``page_candidates``: Links listed on each HTML file in the directory.
    * ``file_candidates``: Archives in the directory.
    """

    # Class-level cache: one directory scan per path, shared by instances.
    _paths_to_urls: Dict[str, _FlatDirectoryToUrls] = {}

    def __init__(
        self,
        candidates_from_page: CandidatesFromPage,
        path: str,
        project_name: str,
    ) -> None:
        self._candidates_from_page = candidates_from_page
        self._project_name = canonicalize_name(project_name)

        # Reuse an existing scanner for this path when available.
        cached = self._paths_to_urls.get(path)
        if cached is None:
            cached = _FlatDirectoryToUrls(path=path)
            self._paths_to_urls[path] = cached
        self._path_to_urls = cached

    @property
    def link(self) -> Optional[Link]:
        return None

    def page_candidates(self) -> FoundCandidates:
        for page_url in self._path_to_urls.page_candidates:
            yield from self._candidates_from_page(Link(page_url))

    def file_links(self) -> FoundLinks:
        for file_url in self._path_to_urls.project_name_to_urls[self._project_name]:
            yield Link(file_url)
132
+
133
+
134
class _LocalFileSource(LinkSource):
    """``--find-links=<path-or-url>`` or ``--[extra-]index-url=<path-or-url>``.

    If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to
    the option, it is converted to a URL first. This returns:

    * ``page_candidates``: Links listed on an HTML file.
    * ``file_candidates``: The non-HTML file.
    """

    def __init__(
        self,
        candidates_from_page: CandidatesFromPage,
        link: Link,
    ) -> None:
        self._candidates_from_page = candidates_from_page
        self._link = link

    @property
    def link(self) -> Optional[Link]:
        return self._link

    def page_candidates(self) -> FoundCandidates:
        # Only an HTML file is treated as a listing page.
        if _is_html_file(self._link.url):
            yield from self._candidates_from_page(self._link)

    def file_links(self) -> FoundLinks:
        # A non-HTML file is taken to be the archive itself.
        if not _is_html_file(self._link.url):
            yield self._link
165
+
166
+
167
class _RemoteFileSource(LinkSource):
    """``--find-links=<url>`` or ``--[extra-]index-url=<url>``.

    This returns:

    * ``page_candidates``: Links listed on an HTML file.
    * ``file_candidates``: The non-HTML file.
    """

    def __init__(
        self,
        candidates_from_page: CandidatesFromPage,
        page_validator: PageValidator,
        link: Link,
    ) -> None:
        self._candidates_from_page = candidates_from_page
        self._page_validator = page_validator
        self._link = link

    @property
    def link(self) -> Optional[Link]:
        return self._link

    def page_candidates(self) -> FoundCandidates:
        # The validator decides whether this URL may be fetched as a page.
        if self._page_validator(self._link):
            yield from self._candidates_from_page(self._link)

    def file_links(self) -> FoundLinks:
        yield self._link
197
+
198
+
199
class _IndexDirectorySource(LinkSource):
    """``--[extra-]index-url=<path-to-directory>``.

    This is treated like a remote URL; ``candidates_from_page`` contains logic
    for this by appending ``index.html`` to the link.
    """

    def __init__(
        self,
        candidates_from_page: CandidatesFromPage,
        link: Link,
    ) -> None:
        self._candidates_from_page = candidates_from_page
        self._link = link

    @property
    def link(self) -> Optional[Link]:
        return self._link

    def page_candidates(self) -> FoundCandidates:
        yield from self._candidates_from_page(self._link)

    def file_links(self) -> FoundLinks:
        # An index directory never contributes direct archive links.
        return ()
223
+
224
+
225
def build_source(
    location: str,
    *,
    candidates_from_page: CandidatesFromPage,
    page_validator: PageValidator,
    expand_dir: bool,
    cache_link_parsing: bool,
    project_name: str,
) -> Tuple[Optional[str], Optional[LinkSource]]:
    """Classify *location* and construct the matching LinkSource.

    Returns ``(url, source)``; either element is None when the location is
    unusable (a warning is logged in that case).
    """
    path: Optional[str] = None
    url: Optional[str] = None
    if os.path.exists(location):  # Is a local path.
        url = path_to_url(location)
        path = location
    elif location.startswith("file:"):  # A file: URL.
        url = location
        path = url_to_path(location)
    elif is_url(location):
        url = location

    if url is None:
        msg = (
            "Location '%s' is ignored: "
            "it is either a non-existing path or lacks a specific scheme."
        )
        logger.warning(msg, location)
        return (None, None)

    # No local path: a genuinely remote URL.
    if path is None:
        source: LinkSource = _RemoteFileSource(
            candidates_from_page=candidates_from_page,
            page_validator=page_validator,
            link=Link(url, cache_link_parsing=cache_link_parsing),
        )
        return (url, source)

    if os.path.isdir(path):
        # --find-links directories are expanded; index directories are not.
        if expand_dir:
            source = _FlatDirectorySource(
                candidates_from_page=candidates_from_page,
                path=path,
                project_name=project_name,
            )
        else:
            source = _IndexDirectorySource(
                candidates_from_page=candidates_from_page,
                link=Link(url, cache_link_parsing=cache_link_parsing),
            )
        return (url, source)

    if os.path.isfile(path):
        source = _LocalFileSource(
            candidates_from_page=candidates_from_page,
            link=Link(url, cache_link_parsing=cache_link_parsing),
        )
        return (url, source)

    logger.warning(
        "Location '%s' is ignored: it is neither a file nor a directory.",
        location,
    )
    return (url, None)
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """A package that contains models that represent entities."""
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (270 Bytes). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/candidate.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/direct_url.cpython-310.pyc ADDED
Binary file (7.41 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/format_control.cpython-310.pyc ADDED
Binary file (2.74 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/index.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/installation_report.cpython-310.pyc ADDED
Binary file (1.76 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/link.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/pylock.cpython-310.pyc ADDED
Binary file (5.16 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/scheme.cpython-310.pyc ADDED
Binary file (942 Bytes). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/search_scope.cpython-310.pyc ADDED
Binary file (3.48 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/selection_prefs.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/target_python.cpython-310.pyc ADDED
Binary file (3.82 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/__pycache__/wheel.cpython-310.pyc ADDED
Binary file (5.51 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/candidate.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+
3
+ from pip._vendor.packaging.version import Version
4
+ from pip._vendor.packaging.version import parse as parse_version
5
+
6
+ from pip._internal.models.link import Link
7
+
8
+
9
@dataclass(frozen=True)
class InstallationCandidate:
    """Represents a potential "candidate" for installation."""

    __slots__ = ["name", "version", "link"]

    # name: the project name this candidate was evaluated for
    # version: the parsed packaging Version
    # link: the Link the candidate was found at
    name: str
    version: Version
    link: Link

    def __init__(self, name: str, version: str, link: Link) -> None:
        # Frozen dataclass: assign through object.__setattr__ to bypass the
        # blocked __setattr__; version is parsed from its string form here.
        object.__setattr__(self, "name", name)
        object.__setattr__(self, "version", parse_version(version))
        object.__setattr__(self, "link", link)

    def __str__(self) -> str:
        return f"{self.name!r} candidate (version {self.version} at {self.link})"
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/direct_url.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """PEP 610"""
2
+
3
+ import json
4
+ import re
5
+ import urllib.parse
6
+ from dataclasses import dataclass
7
+ from typing import Any, ClassVar, Dict, Iterable, Optional, Type, TypeVar, Union
8
+
9
+ __all__ = [
10
+ "DirectUrl",
11
+ "DirectUrlValidationError",
12
+ "DirInfo",
13
+ "ArchiveInfo",
14
+ "VcsInfo",
15
+ ]
16
+
17
+ T = TypeVar("T")
18
+
19
+ DIRECT_URL_METADATA_NAME = "direct_url.json"
20
+ ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$")
21
+
22
+
23
class DirectUrlValidationError(Exception):
    """Raised when a ``direct_url.json`` structure fails validation."""
25
+
26
+
27
def _get(
    d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
) -> Optional[T]:
    """Get value from dictionary and verify expected type."""
    try:
        value = d[key]
    except KeyError:
        return default
    if not isinstance(value, expected_type):
        raise DirectUrlValidationError(
            f"{value!r} has unexpected type for {key} (expected {expected_type})"
        )
    return value
39
+
40
+
41
def _get_required(
    d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
) -> T:
    """Like ``_get``, but a missing or None value raises
    DirectUrlValidationError instead of being returned."""
    value = _get(d, expected_type, key, default)
    if value is None:
        raise DirectUrlValidationError(f"{key} must have a value")
    return value
48
+
49
+
50
+ def _exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType":
51
+ infos = [info for info in infos if info is not None]
52
+ if not infos:
53
+ raise DirectUrlValidationError(
54
+ "missing one of archive_info, dir_info, vcs_info"
55
+ )
56
+ if len(infos) > 1:
57
+ raise DirectUrlValidationError(
58
+ "more than one of archive_info, dir_info, vcs_info"
59
+ )
60
+ assert infos[0] is not None
61
+ return infos[0]
62
+
63
+
64
+ def _filter_none(**kwargs: Any) -> Dict[str, Any]:
65
+ """Make dict excluding None values."""
66
+ return {k: v for k, v in kwargs.items() if v is not None}
67
+
68
+
69
@dataclass
class VcsInfo:
    """PEP 610 ``vcs_info``: the pinned VCS origin of an installation."""

    # JSON key under which this info block is serialized.
    name: ClassVar = "vcs_info"

    vcs: str
    commit_id: str
    requested_revision: Optional[str] = None

    @classmethod
    def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]:
        # None input (key absent from the JSON) propagates as None.
        if d is None:
            return None
        return cls(
            vcs=_get_required(d, str, "vcs"),
            commit_id=_get_required(d, str, "commit_id"),
            requested_revision=_get(d, str, "requested_revision"),
        )

    def _to_dict(self) -> Dict[str, Any]:
        # requested_revision is omitted when None.
        return _filter_none(
            vcs=self.vcs,
            requested_revision=self.requested_revision,
            commit_id=self.commit_id,
        )
93
+
94
+
95
class ArchiveInfo:
    """PEP 610 ``archive_info``: hash metadata for an archive URL."""

    # JSON key under which this info block is serialized.
    name = "archive_info"

    def __init__(
        self,
        hash: Optional[str] = None,
        hashes: Optional[Dict[str, str]] = None,
    ) -> None:
        # set hashes before hash, since the hash setter will further populate hashes
        self.hashes = hashes
        self.hash = hash

    @property
    def hash(self) -> Optional[str]:
        return self._hash

    @hash.setter
    def hash(self, value: Optional[str]) -> None:
        if value is not None:
            # Auto-populate the hashes key to upgrade to the new format automatically.
            # We don't back-populate the legacy hash key from hashes.
            if "=" not in value:
                raise DirectUrlValidationError(
                    f"invalid archive_info.hash format: {value!r}"
                )
            hash_name, _, hash_value = value.partition("=")
            if self.hashes is None:
                self.hashes = {hash_name: hash_value}
            elif hash_name not in self.hashes:
                # Copy before mutating so a caller-supplied dict is untouched.
                self.hashes = self.hashes.copy()
                self.hashes[hash_name] = hash_value
        self._hash = value

    @classmethod
    def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["ArchiveInfo"]:
        if d is None:
            return None
        return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes"))

    def _to_dict(self) -> Dict[str, Any]:
        return _filter_none(hash=self.hash, hashes=self.hashes)
137
+
138
+
139
@dataclass
class DirInfo:
    """PEP 610 ``dir_info``: a local directory origin."""

    # JSON key under which this info block is serialized.
    name: ClassVar = "dir_info"

    editable: bool = False

    @classmethod
    def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]:
        # None input (key absent from the JSON) propagates as None.
        if d is None:
            return None
        return cls(editable=_get_required(d, bool, "editable", default=False))

    def _to_dict(self) -> Dict[str, Any]:
        # Serialize editable only when True (False becomes None → filtered).
        return _filter_none(editable=self.editable or None)
153
+
154
+
155
+ InfoType = Union[ArchiveInfo, DirInfo, VcsInfo]
156
+
157
+
158
@dataclass
class DirectUrl:
    """PEP 610 ``direct_url.json`` model: a URL plus exactly one of
    archive_info / dir_info / vcs_info."""

    url: str
    info: InfoType
    subdirectory: Optional[str] = None

    def _remove_auth_from_netloc(self, netloc: str) -> str:
        # Strip "user:pass@" from the netloc, except for the conventional
        # "git@" user on git URLs and ${ENV_VAR}-style placeholders.
        if "@" not in netloc:
            return netloc
        user_pass, netloc_no_user_pass = netloc.split("@", 1)
        if (
            isinstance(self.info, VcsInfo)
            and self.info.vcs == "git"
            and user_pass == "git"
        ):
            return netloc
        if ENV_VAR_RE.match(user_pass):
            return netloc
        return netloc_no_user_pass

    @property
    def redacted_url(self) -> str:
        """url with user:password part removed unless it is formed with
        environment variables as specified in PEP 610, or it is ``git``
        in the case of a git URL.
        """
        purl = urllib.parse.urlsplit(self.url)
        netloc = self._remove_auth_from_netloc(purl.netloc)
        surl = urllib.parse.urlunsplit(
            (purl.scheme, netloc, purl.path, purl.query, purl.fragment)
        )
        return surl

    def validate(self) -> None:
        # Round-trip through the dict form; from_dict raises
        # DirectUrlValidationError on any structural problem.
        self.from_dict(self.to_dict())

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl":
        """Build a DirectUrl from a parsed JSON dict, validating that
        exactly one info key is present."""
        return DirectUrl(
            url=_get_required(d, str, "url"),
            subdirectory=_get(d, str, "subdirectory"),
            info=_exactly_one_of(
                [
                    ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
                    DirInfo._from_dict(_get(d, dict, "dir_info")),
                    VcsInfo._from_dict(_get(d, dict, "vcs_info")),
                ]
            ),
        )

    def to_dict(self) -> Dict[str, Any]:
        # The serialized URL is always the redacted one.
        res = _filter_none(
            url=self.redacted_url,
            subdirectory=self.subdirectory,
        )
        res[self.info.name] = self.info._to_dict()
        return res

    @classmethod
    def from_json(cls, s: str) -> "DirectUrl":
        return cls.from_dict(json.loads(s))

    def to_json(self) -> str:
        return json.dumps(self.to_dict(), sort_keys=True)

    def is_local_editable(self) -> bool:
        # True only for a local directory installed in editable mode.
        return isinstance(self.info, DirInfo) and self.info.editable
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/format_control.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import FrozenSet, Optional, Set
2
+
3
+ from pip._vendor.packaging.utils import canonicalize_name
4
+
5
+ from pip._internal.exceptions import CommandError
6
+
7
+
8
class FormatControl:
    """Helper for managing formats from which a package can be installed."""

    __slots__ = ["no_binary", "only_binary"]

    def __init__(
        self,
        no_binary: Optional[Set[str]] = None,
        only_binary: Optional[Set[str]] = None,
    ) -> None:
        # Default each set independently; never share one default object.
        self.no_binary = set() if no_binary is None else no_binary
        self.only_binary = set() if only_binary is None else only_binary

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, self.__class__):
            return NotImplemented
        if self.__slots__ != other.__slots__:
            return False
        return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.no_binary}, {self.only_binary})"

    @staticmethod
    def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None:
        """Apply one --no-binary / --only-binary value to *target*,
        removing contradicted names from *other*."""
        if value.startswith("-"):
            raise CommandError(
                "--no-binary / --only-binary option requires 1 argument."
            )
        new = value.split(",")
        while ":all:" in new:
            other.clear()
            target.clear()
            target.add(":all:")
            del new[: new.index(":all:") + 1]
            # Without a none, we want to discard everything as :all: covers it
            if ":none:" not in new:
                return
        for name in new:
            if name == ":none:":
                target.clear()
                continue
            name = canonicalize_name(name)
            other.discard(name)
            target.add(name)

    def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]:
        """Return the subset of {"binary", "source"} allowed for a project."""
        # Per-project entries take precedence over the :all: wildcards.
        allowed = {"binary", "source"}
        if canonical_name in self.only_binary:
            allowed.discard("source")
        elif canonical_name in self.no_binary:
            allowed.discard("binary")
        elif ":all:" in self.only_binary:
            allowed.discard("source")
        elif ":all:" in self.no_binary:
            allowed.discard("binary")
        return frozenset(allowed)

    def disallow_binaries(self) -> None:
        """Forbid wheels for every project (equivalent to --no-binary :all:)."""
        self.handle_mutual_excludes(
            ":all:",
            self.no_binary,
            self.only_binary,
        )
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/index.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import urllib.parse
2
+
3
+
4
+ class PackageIndex:
5
+ """Represents a Package Index and provides easier access to endpoints"""
6
+
7
+ __slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"]
8
+
9
+ def __init__(self, url: str, file_storage_domain: str) -> None:
10
+ super().__init__()
11
+ self.url = url
12
+ self.netloc = urllib.parse.urlsplit(url).netloc
13
+ self.simple_url = self._url_for_path("simple")
14
+ self.pypi_url = self._url_for_path("pypi")
15
+
16
+ # This is part of a temporary hack used to block installs of PyPI
17
+ # packages which depend on external urls only necessary until PyPI can
18
+ # block such packages themselves
19
+ self.file_storage_domain = file_storage_domain
20
+
21
+ def _url_for_path(self, path: str) -> str:
22
+ return urllib.parse.urljoin(self.url, path)
23
+
24
+
25
+ PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org")
26
+ TestPyPI = PackageIndex(
27
+ "https://test.pypi.org/", file_storage_domain="test-files.pythonhosted.org"
28
+ )
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/installation_report.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Sequence
2
+
3
+ from pip._vendor.packaging.markers import default_environment
4
+
5
+ from pip import __version__
6
+ from pip._internal.req.req_install import InstallRequirement
7
+
8
+
9
+ class InstallationReport:
10
+ def __init__(self, install_requirements: Sequence[InstallRequirement]):
11
+ self._install_requirements = install_requirements
12
+
13
+ @classmethod
14
+ def _install_req_to_dict(cls, ireq: InstallRequirement) -> Dict[str, Any]:
15
+ assert ireq.download_info, f"No download_info for {ireq}"
16
+ res = {
17
+ # PEP 610 json for the download URL. download_info.archive_info.hashes may
18
+ # be absent when the requirement was installed from the wheel cache
19
+ # and the cache entry was populated by an older pip version that did not
20
+ # record origin.json.
21
+ "download_info": ireq.download_info.to_dict(),
22
+ # is_direct is true if the requirement was a direct URL reference (which
23
+ # includes editable requirements), and false if the requirement was
24
+ # downloaded from a PEP 503 index or --find-links.
25
+ "is_direct": ireq.is_direct,
26
+ # is_yanked is true if the requirement was yanked from the index, but
27
+ # was still selected by pip to conform to PEP 592.
28
+ "is_yanked": ireq.link.is_yanked if ireq.link else False,
29
+ # requested is true if the requirement was specified by the user (aka
30
+ # top level requirement), and false if it was installed as a dependency of a
31
+ # requirement. https://peps.python.org/pep-0376/#requested
32
+ "requested": ireq.user_supplied,
33
+ # PEP 566 json encoding for metadata
34
+ # https://www.python.org/dev/peps/pep-0566/#json-compatible-metadata
35
+ "metadata": ireq.get_dist().metadata_dict,
36
+ }
37
+ if ireq.user_supplied and ireq.extras:
38
+ # For top level requirements, the list of requested extras, if any.
39
+ res["requested_extras"] = sorted(ireq.extras)
40
+ return res
41
+
42
+ def to_dict(self) -> Dict[str, Any]:
43
+ return {
44
+ "version": "1",
45
+ "pip_version": __version__,
46
+ "install": [
47
+ self._install_req_to_dict(ireq) for ireq in self._install_requirements
48
+ ],
49
+ # https://peps.python.org/pep-0508/#environment-markers
50
+ # TODO: currently, the resolver uses the default environment to evaluate
51
+ # environment markers, so that is what we report here. In the future, it
52
+ # should also take into account options such as --python-version or
53
+ # --platform, perhaps under the form of an environment_override field?
54
+ # https://github.com/pypa/pip/issues/11198
55
+ "environment": default_environment(),
56
+ }
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/link.py ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import itertools
3
+ import logging
4
+ import os
5
+ import posixpath
6
+ import re
7
+ import urllib.parse
8
+ from dataclasses import dataclass
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Dict,
13
+ List,
14
+ Mapping,
15
+ NamedTuple,
16
+ Optional,
17
+ Tuple,
18
+ Union,
19
+ )
20
+
21
+ from pip._internal.utils.deprecation import deprecated
22
+ from pip._internal.utils.filetypes import WHEEL_EXTENSION
23
+ from pip._internal.utils.hashes import Hashes
24
+ from pip._internal.utils.misc import (
25
+ pairwise,
26
+ redact_auth_from_url,
27
+ split_auth_from_netloc,
28
+ splitext,
29
+ )
30
+ from pip._internal.utils.urls import path_to_url, url_to_path
31
+
32
+ if TYPE_CHECKING:
33
+ from pip._internal.index.collector import IndexContent
34
+
35
+ logger = logging.getLogger(__name__)
36
+
37
+
38
+ # Order matters, earlier hashes have a precedence over later hashes for what
39
+ # we will pick to use.
40
+ _SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5")
41
+
42
+
43
+ @dataclass(frozen=True)
44
+ class LinkHash:
45
+ """Links to content may have embedded hash values. This class parses those.
46
+
47
+ `name` must be any member of `_SUPPORTED_HASHES`.
48
+
49
+ This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to
50
+ be JSON-serializable to conform to PEP 610, this class contains the logic for
51
+ parsing a hash name and value for correctness, and then checking whether that hash
52
+ conforms to a schema with `.is_hash_allowed()`."""
53
+
54
+ name: str
55
+ value: str
56
+
57
+ _hash_url_fragment_re = re.compile(
58
+ # NB: we do not validate that the second group (.*) is a valid hex
59
+ # digest. Instead, we simply keep that string in this class, and then check it
60
+ # against Hashes when hash-checking is needed. This is easier to debug than
61
+ # proactively discarding an invalid hex digest, as we handle incorrect hashes
62
+ # and malformed hashes in the same place.
63
+ r"[#&]({choices})=([^&]*)".format(
64
+ choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES)
65
+ ),
66
+ )
67
+
68
+ def __post_init__(self) -> None:
69
+ assert self.name in _SUPPORTED_HASHES
70
+
71
+ @classmethod
72
+ @functools.lru_cache(maxsize=None)
73
+ def find_hash_url_fragment(cls, url: str) -> Optional["LinkHash"]:
74
+ """Search a string for a checksum algorithm name and encoded output value."""
75
+ match = cls._hash_url_fragment_re.search(url)
76
+ if match is None:
77
+ return None
78
+ name, value = match.groups()
79
+ return cls(name=name, value=value)
80
+
81
+ def as_dict(self) -> Dict[str, str]:
82
+ return {self.name: self.value}
83
+
84
+ def as_hashes(self) -> Hashes:
85
+ """Return a Hashes instance which checks only for the current hash."""
86
+ return Hashes({self.name: [self.value]})
87
+
88
+ def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
89
+ """
90
+ Return True if the current hash is allowed by `hashes`.
91
+ """
92
+ if hashes is None:
93
+ return False
94
+ return hashes.is_hash_allowed(self.name, hex_digest=self.value)
95
+
96
+
97
+ @dataclass(frozen=True)
98
+ class MetadataFile:
99
+ """Information about a core metadata file associated with a distribution."""
100
+
101
+ hashes: Optional[Dict[str, str]]
102
+
103
+ def __post_init__(self) -> None:
104
+ if self.hashes is not None:
105
+ assert all(name in _SUPPORTED_HASHES for name in self.hashes)
106
+
107
+
108
+ def supported_hashes(hashes: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]:
109
+ # Remove any unsupported hash types from the mapping. If this leaves no
110
+ # supported hashes, return None
111
+ if hashes is None:
112
+ return None
113
+ hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES}
114
+ if not hashes:
115
+ return None
116
+ return hashes
117
+
118
+
119
+ def _clean_url_path_part(part: str) -> str:
120
+ """
121
+ Clean a "part" of a URL path (i.e. after splitting on "@" characters).
122
+ """
123
+ # We unquote prior to quoting to make sure nothing is double quoted.
124
+ return urllib.parse.quote(urllib.parse.unquote(part))
125
+
126
+
127
+ def _clean_file_url_path(part: str) -> str:
128
+ """
129
+ Clean the first part of a URL path that corresponds to a local
130
+ filesystem path (i.e. the first part after splitting on "@" characters).
131
+ """
132
+ # We unquote prior to quoting to make sure nothing is double quoted.
133
+ # Also, on Windows the path part might contain a drive letter which
134
+ # should not be quoted. On Linux where drive letters do not
135
+ # exist, the colon should be quoted. We rely on urllib.request
136
+ # to do the right thing here.
137
+ return urllib.request.pathname2url(urllib.request.url2pathname(part))
138
+
139
+
140
+ # percent-encoded: /
141
+ _reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
142
+
143
+
144
+ def _clean_url_path(path: str, is_local_path: bool) -> str:
145
+ """
146
+ Clean the path portion of a URL.
147
+ """
148
+ if is_local_path:
149
+ clean_func = _clean_file_url_path
150
+ else:
151
+ clean_func = _clean_url_path_part
152
+
153
+ # Split on the reserved characters prior to cleaning so that
154
+ # revision strings in VCS URLs are properly preserved.
155
+ parts = _reserved_chars_re.split(path)
156
+
157
+ cleaned_parts = []
158
+ for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
159
+ cleaned_parts.append(clean_func(to_clean))
160
+ # Normalize %xx escapes (e.g. %2f -> %2F)
161
+ cleaned_parts.append(reserved.upper())
162
+
163
+ return "".join(cleaned_parts)
164
+
165
+
166
+ def _ensure_quoted_url(url: str) -> str:
167
+ """
168
+ Make sure a link is fully quoted.
169
+ For example, if ' ' occurs in the URL, it will be replaced with "%20",
170
+ and without double-quoting other characters.
171
+ """
172
+ # Split the URL into parts according to the general structure
173
+ # `scheme://netloc/path?query#fragment`.
174
+ result = urllib.parse.urlsplit(url)
175
+ # If the netloc is empty, then the URL refers to a local filesystem path.
176
+ is_local_path = not result.netloc
177
+ path = _clean_url_path(result.path, is_local_path=is_local_path)
178
+ return urllib.parse.urlunsplit(result._replace(path=path))
179
+
180
+
181
+ def _absolute_link_url(base_url: str, url: str) -> str:
182
+ """
183
+ A faster implementation of urllib.parse.urljoin with a shortcut
184
+ for absolute http/https URLs.
185
+ """
186
+ if url.startswith(("https://", "http://")):
187
+ return url
188
+ else:
189
+ return urllib.parse.urljoin(base_url, url)
190
+
191
+
192
+ @functools.total_ordering
193
+ class Link:
194
+ """Represents a parsed link from a Package Index's simple URL"""
195
+
196
+ __slots__ = [
197
+ "_parsed_url",
198
+ "_url",
199
+ "_path",
200
+ "_hashes",
201
+ "comes_from",
202
+ "requires_python",
203
+ "yanked_reason",
204
+ "metadata_file_data",
205
+ "cache_link_parsing",
206
+ "egg_fragment",
207
+ ]
208
+
209
+ def __init__(
210
+ self,
211
+ url: str,
212
+ comes_from: Optional[Union[str, "IndexContent"]] = None,
213
+ requires_python: Optional[str] = None,
214
+ yanked_reason: Optional[str] = None,
215
+ metadata_file_data: Optional[MetadataFile] = None,
216
+ cache_link_parsing: bool = True,
217
+ hashes: Optional[Mapping[str, str]] = None,
218
+ ) -> None:
219
+ """
220
+ :param url: url of the resource pointed to (href of the link)
221
+ :param comes_from: instance of IndexContent where the link was found,
222
+ or string.
223
+ :param requires_python: String containing the `Requires-Python`
224
+ metadata field, specified in PEP 345. This may be specified by
225
+ a data-requires-python attribute in the HTML link tag, as
226
+ described in PEP 503.
227
+ :param yanked_reason: the reason the file has been yanked, if the
228
+ file has been yanked, or None if the file hasn't been yanked.
229
+ This is the value of the "data-yanked" attribute, if present, in
230
+ a simple repository HTML link. If the file has been yanked but
231
+ no reason was provided, this should be the empty string. See
232
+ PEP 592 for more information and the specification.
233
+ :param metadata_file_data: the metadata attached to the file, or None if
234
+ no such metadata is provided. This argument, if not None, indicates
235
+ that a separate metadata file exists, and also optionally supplies
236
+ hashes for that file.
237
+ :param cache_link_parsing: A flag that is used elsewhere to determine
238
+ whether resources retrieved from this link should be cached. PyPI
239
+ URLs should generally have this set to False, for example.
240
+ :param hashes: A mapping of hash names to digests to allow us to
241
+ determine the validity of a download.
242
+ """
243
+
244
+ # The comes_from, requires_python, and metadata_file_data arguments are
245
+ # only used by classmethods of this class, and are not used in client
246
+ # code directly.
247
+
248
+ # url can be a UNC windows share
249
+ if url.startswith("\\\\"):
250
+ url = path_to_url(url)
251
+
252
+ self._parsed_url = urllib.parse.urlsplit(url)
253
+ # Store the url as a private attribute to prevent accidentally
254
+ # trying to set a new value.
255
+ self._url = url
256
+ # The .path property is hot, so calculate its value ahead of time.
257
+ self._path = urllib.parse.unquote(self._parsed_url.path)
258
+
259
+ link_hash = LinkHash.find_hash_url_fragment(url)
260
+ hashes_from_link = {} if link_hash is None else link_hash.as_dict()
261
+ if hashes is None:
262
+ self._hashes = hashes_from_link
263
+ else:
264
+ self._hashes = {**hashes, **hashes_from_link}
265
+
266
+ self.comes_from = comes_from
267
+ self.requires_python = requires_python if requires_python else None
268
+ self.yanked_reason = yanked_reason
269
+ self.metadata_file_data = metadata_file_data
270
+
271
+ self.cache_link_parsing = cache_link_parsing
272
+ self.egg_fragment = self._egg_fragment()
273
+
274
+ @classmethod
275
+ def from_json(
276
+ cls,
277
+ file_data: Dict[str, Any],
278
+ page_url: str,
279
+ ) -> Optional["Link"]:
280
+ """
281
+ Convert an pypi json document from a simple repository page into a Link.
282
+ """
283
+ file_url = file_data.get("url")
284
+ if file_url is None:
285
+ return None
286
+
287
+ url = _ensure_quoted_url(_absolute_link_url(page_url, file_url))
288
+ pyrequire = file_data.get("requires-python")
289
+ yanked_reason = file_data.get("yanked")
290
+ hashes = file_data.get("hashes", {})
291
+
292
+ # PEP 714: Indexes must use the name core-metadata, but
293
+ # clients should support the old name as a fallback for compatibility.
294
+ metadata_info = file_data.get("core-metadata")
295
+ if metadata_info is None:
296
+ metadata_info = file_data.get("dist-info-metadata")
297
+
298
+ # The metadata info value may be a boolean, or a dict of hashes.
299
+ if isinstance(metadata_info, dict):
300
+ # The file exists, and hashes have been supplied
301
+ metadata_file_data = MetadataFile(supported_hashes(metadata_info))
302
+ elif metadata_info:
303
+ # The file exists, but there are no hashes
304
+ metadata_file_data = MetadataFile(None)
305
+ else:
306
+ # False or not present: the file does not exist
307
+ metadata_file_data = None
308
+
309
+ # The Link.yanked_reason expects an empty string instead of a boolean.
310
+ if yanked_reason and not isinstance(yanked_reason, str):
311
+ yanked_reason = ""
312
+ # The Link.yanked_reason expects None instead of False.
313
+ elif not yanked_reason:
314
+ yanked_reason = None
315
+
316
+ return cls(
317
+ url,
318
+ comes_from=page_url,
319
+ requires_python=pyrequire,
320
+ yanked_reason=yanked_reason,
321
+ hashes=hashes,
322
+ metadata_file_data=metadata_file_data,
323
+ )
324
+
325
+ @classmethod
326
+ def from_element(
327
+ cls,
328
+ anchor_attribs: Dict[str, Optional[str]],
329
+ page_url: str,
330
+ base_url: str,
331
+ ) -> Optional["Link"]:
332
+ """
333
+ Convert an anchor element's attributes in a simple repository page to a Link.
334
+ """
335
+ href = anchor_attribs.get("href")
336
+ if not href:
337
+ return None
338
+
339
+ url = _ensure_quoted_url(_absolute_link_url(base_url, href))
340
+ pyrequire = anchor_attribs.get("data-requires-python")
341
+ yanked_reason = anchor_attribs.get("data-yanked")
342
+
343
+ # PEP 714: Indexes must use the name data-core-metadata, but
344
+ # clients should support the old name as a fallback for compatibility.
345
+ metadata_info = anchor_attribs.get("data-core-metadata")
346
+ if metadata_info is None:
347
+ metadata_info = anchor_attribs.get("data-dist-info-metadata")
348
+ # The metadata info value may be the string "true", or a string of
349
+ # the form "hashname=hashval"
350
+ if metadata_info == "true":
351
+ # The file exists, but there are no hashes
352
+ metadata_file_data = MetadataFile(None)
353
+ elif metadata_info is None:
354
+ # The file does not exist
355
+ metadata_file_data = None
356
+ else:
357
+ # The file exists, and hashes have been supplied
358
+ hashname, sep, hashval = metadata_info.partition("=")
359
+ if sep == "=":
360
+ metadata_file_data = MetadataFile(supported_hashes({hashname: hashval}))
361
+ else:
362
+ # Error - data is wrong. Treat as no hashes supplied.
363
+ logger.debug(
364
+ "Index returned invalid data-dist-info-metadata value: %s",
365
+ metadata_info,
366
+ )
367
+ metadata_file_data = MetadataFile(None)
368
+
369
+ return cls(
370
+ url,
371
+ comes_from=page_url,
372
+ requires_python=pyrequire,
373
+ yanked_reason=yanked_reason,
374
+ metadata_file_data=metadata_file_data,
375
+ )
376
+
377
+ def __str__(self) -> str:
378
+ if self.requires_python:
379
+ rp = f" (requires-python:{self.requires_python})"
380
+ else:
381
+ rp = ""
382
+ if self.comes_from:
383
+ return f"{self.redacted_url} (from {self.comes_from}){rp}"
384
+ else:
385
+ return self.redacted_url
386
+
387
+ def __repr__(self) -> str:
388
+ return f"<Link {self}>"
389
+
390
+ def __hash__(self) -> int:
391
+ return hash(self.url)
392
+
393
+ def __eq__(self, other: Any) -> bool:
394
+ if not isinstance(other, Link):
395
+ return NotImplemented
396
+ return self.url == other.url
397
+
398
+ def __lt__(self, other: Any) -> bool:
399
+ if not isinstance(other, Link):
400
+ return NotImplemented
401
+ return self.url < other.url
402
+
403
+ @property
404
+ def url(self) -> str:
405
+ return self._url
406
+
407
+ @property
408
+ def redacted_url(self) -> str:
409
+ return redact_auth_from_url(self.url)
410
+
411
+ @property
412
+ def filename(self) -> str:
413
+ path = self.path.rstrip("/")
414
+ name = posixpath.basename(path)
415
+ if not name:
416
+ # Make sure we don't leak auth information if the netloc
417
+ # includes a username and password.
418
+ netloc, user_pass = split_auth_from_netloc(self.netloc)
419
+ return netloc
420
+
421
+ name = urllib.parse.unquote(name)
422
+ assert name, f"URL {self._url!r} produced no filename"
423
+ return name
424
+
425
+ @property
426
+ def file_path(self) -> str:
427
+ return url_to_path(self.url)
428
+
429
+ @property
430
+ def scheme(self) -> str:
431
+ return self._parsed_url.scheme
432
+
433
+ @property
434
+ def netloc(self) -> str:
435
+ """
436
+ This can contain auth information.
437
+ """
438
+ return self._parsed_url.netloc
439
+
440
+ @property
441
+ def path(self) -> str:
442
+ return self._path
443
+
444
+ def splitext(self) -> Tuple[str, str]:
445
+ return splitext(posixpath.basename(self.path.rstrip("/")))
446
+
447
+ @property
448
+ def ext(self) -> str:
449
+ return self.splitext()[1]
450
+
451
+ @property
452
+ def url_without_fragment(self) -> str:
453
+ scheme, netloc, path, query, fragment = self._parsed_url
454
+ return urllib.parse.urlunsplit((scheme, netloc, path, query, ""))
455
+
456
+ _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")
457
+
458
+ # Per PEP 508.
459
+ _project_name_re = re.compile(
460
+ r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
461
+ )
462
+
463
+ def _egg_fragment(self) -> Optional[str]:
464
+ match = self._egg_fragment_re.search(self._url)
465
+ if not match:
466
+ return None
467
+
468
+ # An egg fragment looks like a PEP 508 project name, along with
469
+ # an optional extras specifier. Anything else is invalid.
470
+ project_name = match.group(1)
471
+ if not self._project_name_re.match(project_name):
472
+ deprecated(
473
+ reason=f"{self} contains an egg fragment with a non-PEP 508 name.",
474
+ replacement="to use the req @ url syntax, and remove the egg fragment",
475
+ gone_in="25.2",
476
+ issue=13157,
477
+ )
478
+
479
+ return project_name
480
+
481
+ _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")
482
+
483
+ @property
484
+ def subdirectory_fragment(self) -> Optional[str]:
485
+ match = self._subdirectory_fragment_re.search(self._url)
486
+ if not match:
487
+ return None
488
+ return match.group(1)
489
+
490
+ def metadata_link(self) -> Optional["Link"]:
491
+ """Return a link to the associated core metadata file (if any)."""
492
+ if self.metadata_file_data is None:
493
+ return None
494
+ metadata_url = f"{self.url_without_fragment}.metadata"
495
+ if self.metadata_file_data.hashes is None:
496
+ return Link(metadata_url)
497
+ return Link(metadata_url, hashes=self.metadata_file_data.hashes)
498
+
499
+ def as_hashes(self) -> Hashes:
500
+ return Hashes({k: [v] for k, v in self._hashes.items()})
501
+
502
+ @property
503
+ def hash(self) -> Optional[str]:
504
+ return next(iter(self._hashes.values()), None)
505
+
506
+ @property
507
+ def hash_name(self) -> Optional[str]:
508
+ return next(iter(self._hashes), None)
509
+
510
+ @property
511
+ def show_url(self) -> str:
512
+ return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])
513
+
514
+ @property
515
+ def is_file(self) -> bool:
516
+ return self.scheme == "file"
517
+
518
+ def is_existing_dir(self) -> bool:
519
+ return self.is_file and os.path.isdir(self.file_path)
520
+
521
+ @property
522
+ def is_wheel(self) -> bool:
523
+ return self.ext == WHEEL_EXTENSION
524
+
525
+ @property
526
+ def is_vcs(self) -> bool:
527
+ from pip._internal.vcs import vcs
528
+
529
+ return self.scheme in vcs.all_schemes
530
+
531
+ @property
532
+ def is_yanked(self) -> bool:
533
+ return self.yanked_reason is not None
534
+
535
+ @property
536
+ def has_hash(self) -> bool:
537
+ return bool(self._hashes)
538
+
539
+ def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
540
+ """
541
+ Return True if the link has a hash and it is allowed by `hashes`.
542
+ """
543
+ if hashes is None:
544
+ return False
545
+ return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items())
546
+
547
+
548
+ class _CleanResult(NamedTuple):
549
+ """Convert link for equivalency check.
550
+
551
+ This is used in the resolver to check whether two URL-specified requirements
552
+ likely point to the same distribution and can be considered equivalent. This
553
+ equivalency logic avoids comparing URLs literally, which can be too strict
554
+ (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpecting to users.
555
+
556
+ Currently this does three things:
557
+
558
+ 1. Drop the basic auth part. This is technically wrong since a server can
559
+ serve different content based on auth, but if it does that, it is even
560
+ impossible to guarantee two URLs without auth are equivalent, since
561
+ the user can input different auth information when prompted. So the
562
+ practical solution is to assume the auth doesn't affect the response.
563
+ 2. Parse the query to avoid the ordering issue. Note that ordering under the
564
+ same key in the query are NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are
565
+ still considered different.
566
+ 3. Explicitly drop most of the fragment part, except ``subdirectory=`` and
567
+ hash values, since it should have no impact the downloaded content. Note
568
+ that this drops the "egg=" part historically used to denote the requested
569
+ project (and extras), which is wrong in the strictest sense, but too many
570
+ people are supplying it inconsistently to cause superfluous resolution
571
+ conflicts, so we choose to also ignore them.
572
+ """
573
+
574
+ parsed: urllib.parse.SplitResult
575
+ query: Dict[str, List[str]]
576
+ subdirectory: str
577
+ hashes: Dict[str, str]
578
+
579
+
580
+ def _clean_link(link: Link) -> _CleanResult:
581
+ parsed = link._parsed_url
582
+ netloc = parsed.netloc.rsplit("@", 1)[-1]
583
+ # According to RFC 8089, an empty host in file: means localhost.
584
+ if parsed.scheme == "file" and not netloc:
585
+ netloc = "localhost"
586
+ fragment = urllib.parse.parse_qs(parsed.fragment)
587
+ if "egg" in fragment:
588
+ logger.debug("Ignoring egg= fragment in %s", link)
589
+ try:
590
+ # If there are multiple subdirectory values, use the first one.
591
+ # This matches the behavior of Link.subdirectory_fragment.
592
+ subdirectory = fragment["subdirectory"][0]
593
+ except (IndexError, KeyError):
594
+ subdirectory = ""
595
+ # If there are multiple hash values under the same algorithm, use the
596
+ # first one. This matches the behavior of Link.hash_value.
597
+ hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment}
598
+ return _CleanResult(
599
+ parsed=parsed._replace(netloc=netloc, query="", fragment=""),
600
+ query=urllib.parse.parse_qs(parsed.query),
601
+ subdirectory=subdirectory,
602
+ hashes=hashes,
603
+ )
604
+
605
+
606
+ @functools.lru_cache(maxsize=None)
607
+ def links_equivalent(link1: Link, link2: Link) -> bool:
608
+ return _clean_link(link1) == _clean_link(link2)
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/pylock.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ import re
3
+ from dataclasses import dataclass
4
+ from pathlib import Path
5
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
6
+
7
+ from pip._vendor import tomli_w
8
+ from pip._vendor.typing_extensions import Self
9
+
10
+ from pip._internal.models.direct_url import ArchiveInfo, DirInfo, VcsInfo
11
+ from pip._internal.models.link import Link
12
+ from pip._internal.req.req_install import InstallRequirement
13
+ from pip._internal.utils.urls import url_to_path
14
+
15
+ PYLOCK_FILE_NAME_RE = re.compile(r"^pylock\.([^.]+)\.toml$")
16
+
17
+
18
+ def is_valid_pylock_file_name(path: Path) -> bool:
19
+ return path.name == "pylock.toml" or bool(re.match(PYLOCK_FILE_NAME_RE, path.name))
20
+
21
+
22
+ def _toml_dict_factory(data: List[Tuple[str, Any]]) -> Dict[str, Any]:
23
+ return {key.replace("_", "-"): value for key, value in data if value is not None}
24
+
25
+
26
+ @dataclass
27
+ class PackageVcs:
28
+ type: str
29
+ url: Optional[str]
30
+ # (not supported) path: Optional[str]
31
+ requested_revision: Optional[str]
32
+ commit_id: str
33
+ subdirectory: Optional[str]
34
+
35
+
36
+ @dataclass
37
+ class PackageDirectory:
38
+ path: str
39
+ editable: Optional[bool]
40
+ subdirectory: Optional[str]
41
+
42
+
43
+ @dataclass
44
+ class PackageArchive:
45
+ url: Optional[str]
46
+ # (not supported) path: Optional[str]
47
+ # (not supported) size: Optional[int]
48
+ # (not supported) upload_time: Optional[datetime]
49
+ hashes: Dict[str, str]
50
+ subdirectory: Optional[str]
51
+
52
+
53
+ @dataclass
54
+ class PackageSdist:
55
+ name: str
56
+ # (not supported) upload_time: Optional[datetime]
57
+ url: Optional[str]
58
+ # (not supported) path: Optional[str]
59
+ # (not supported) size: Optional[int]
60
+ hashes: Dict[str, str]
61
+
62
+
63
+ @dataclass
64
+ class PackageWheel:
65
+ name: str
66
+ # (not supported) upload_time: Optional[datetime]
67
+ url: Optional[str]
68
+ # (not supported) path: Optional[str]
69
+ # (not supported) size: Optional[int]
70
+ hashes: Dict[str, str]
71
+
72
+
73
+ @dataclass
74
+ class Package:
75
+ name: str
76
+ version: Optional[str] = None
77
+ # (not supported) marker: Optional[str]
78
+ # (not supported) requires_python: Optional[str]
79
+ # (not supported) dependencies
80
+ vcs: Optional[PackageVcs] = None
81
+ directory: Optional[PackageDirectory] = None
82
+ archive: Optional[PackageArchive] = None
83
+ # (not supported) index: Optional[str]
84
+ sdist: Optional[PackageSdist] = None
85
+ wheels: Optional[List[PackageWheel]] = None
86
+ # (not supported) attestation_identities: Optional[List[Dict[str, Any]]]
87
+ # (not supported) tool: Optional[Dict[str, Any]]
88
+
89
+ @classmethod
90
+ def from_install_requirement(cls, ireq: InstallRequirement, base_dir: Path) -> Self:
91
+ base_dir = base_dir.resolve()
92
+ dist = ireq.get_dist()
93
+ download_info = ireq.download_info
94
+ assert download_info
95
+ package = cls(name=dist.canonical_name)
96
+ if ireq.is_direct:
97
+ if isinstance(download_info.info, VcsInfo):
98
+ package.vcs = PackageVcs(
99
+ type=download_info.info.vcs,
100
+ url=download_info.url,
101
+ requested_revision=download_info.info.requested_revision,
102
+ commit_id=download_info.info.commit_id,
103
+ subdirectory=download_info.subdirectory,
104
+ )
105
+ elif isinstance(download_info.info, DirInfo):
106
+ package.directory = PackageDirectory(
107
+ path=(
108
+ Path(url_to_path(download_info.url))
109
+ .resolve()
110
+ .relative_to(base_dir)
111
+ .as_posix()
112
+ ),
113
+ editable=(
114
+ download_info.info.editable
115
+ if download_info.info.editable
116
+ else None
117
+ ),
118
+ subdirectory=download_info.subdirectory,
119
+ )
120
+ elif isinstance(download_info.info, ArchiveInfo):
121
+ if not download_info.info.hashes:
122
+ raise NotImplementedError()
123
+ package.archive = PackageArchive(
124
+ url=download_info.url,
125
+ hashes=download_info.info.hashes,
126
+ subdirectory=download_info.subdirectory,
127
+ )
128
+ else:
129
+ # should never happen
130
+ raise NotImplementedError()
131
+ else:
132
+ package.version = str(dist.version)
133
+ if isinstance(download_info.info, ArchiveInfo):
134
+ if not download_info.info.hashes:
135
+ raise NotImplementedError()
136
+ link = Link(download_info.url)
137
+ if link.is_wheel:
138
+ package.wheels = [
139
+ PackageWheel(
140
+ name=link.filename,
141
+ url=download_info.url,
142
+ hashes=download_info.info.hashes,
143
+ )
144
+ ]
145
+ else:
146
+ package.sdist = PackageSdist(
147
+ name=link.filename,
148
+ url=download_info.url,
149
+ hashes=download_info.info.hashes,
150
+ )
151
+ else:
152
+ # should never happen
153
+ raise NotImplementedError()
154
+ return package
155
+
156
+
157
+ @dataclass
158
+ class Pylock:
159
+ lock_version: str = "1.0"
160
+ # (not supported) environments: Optional[List[str]]
161
+ # (not supported) requires_python: Optional[str]
162
+ # (not supported) extras: List[str] = []
163
+ # (not supported) dependency_groups: List[str] = []
164
+ created_by: str = "pip"
165
+ packages: List[Package] = dataclasses.field(default_factory=list)
166
+ # (not supported) tool: Optional[Dict[str, Any]]
167
+
168
+ def as_toml(self) -> str:
169
+ return tomli_w.dumps(dataclasses.asdict(self, dict_factory=_toml_dict_factory))
170
+
171
+ @classmethod
172
+ def from_install_requirements(
173
+ cls, install_requirements: Iterable[InstallRequirement], base_dir: Path
174
+ ) -> Self:
175
+ return cls(
176
+ packages=sorted(
177
+ (
178
+ Package.from_install_requirement(ireq, base_dir)
179
+ for ireq in install_requirements
180
+ ),
181
+ key=lambda p: p.name,
182
+ )
183
+ )
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/scheme.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Types describing installation schemes.

See https://docs.python.org/3/install/index.html#alternate-installation
for a general overview of the available schemes and when each applies.
"""

from dataclasses import dataclass

# The canonical ordering of scheme entries; also reused as __slots__ below.
SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"]


@dataclass(frozen=True)
class Scheme:
    """The base directories under which a Python package's artifacts
    (libraries, headers, scripts, data files) are installed.
    """

    __slots__ = SCHEME_KEYS

    platlib: str
    purelib: str
    headers: str
    scripts: str
    data: str
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/search_scope.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import logging
3
+ import os
4
+ import posixpath
5
+ import urllib.parse
6
+ from dataclasses import dataclass
7
+ from typing import List
8
+
9
+ from pip._vendor.packaging.utils import canonicalize_name
10
+
11
+ from pip._internal.models.index import PyPI
12
+ from pip._internal.utils.compat import has_tls
13
+ from pip._internal.utils.misc import normalize_path, redact_auth_from_url
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
@dataclass(frozen=True)
class SearchScope:
    """
    The set of locations (indexes and find-links) pip is configured to
    search for distributions.
    """

    __slots__ = ["find_links", "index_urls", "no_index"]

    find_links: List[str]
    index_urls: List[str]
    no_index: bool

    @classmethod
    def create(
        cls,
        find_links: List[str],
        index_urls: List[str],
        no_index: bool,
    ) -> "SearchScope":
        """
        Build a SearchScope, normalizing ``find_links`` entries first.
        """
        built_find_links: List[str] = []
        for link in find_links:
            # A "~"-prefixed entry may be a local path under a home
            # directory. Deliberately conservative: only use the expanded
            # form when it actually exists on disk.
            if link.startswith("~"):
                expanded = normalize_path(link)
                if os.path.exists(expanded):
                    link = expanded
            built_find_links.append(link)

        # Without the ssl module, any https location is unreachable;
        # warn once if one is configured.
        if not has_tls():
            all_locations = itertools.chain(index_urls, built_find_links)
            if any(
                urllib.parse.urlparse(location).scheme == "https"
                for location in all_locations
            ):
                logger.warning(
                    "pip is configured with locations that require "
                    "TLS/SSL, however the ssl module in Python is not "
                    "available."
                )

        return cls(
            find_links=built_find_links,
            index_urls=index_urls,
            no_index=no_index,
        )

    def get_formatted_locations(self) -> str:
        """Return a human-readable, credential-redacted summary of the
        configured indexes and find-links locations."""
        lines = []
        if self.index_urls and self.index_urls != [PyPI.simple_url]:
            redacted_index_urls = []
            for url in self.index_urls:
                redacted = redact_auth_from_url(url)

                # A URL missing both scheme and netloc is almost certainly
                # invalid. Python's URL parsing is too lenient to reliably
                # reject malformed URLs (see bpo-20271, bpo-23505), so this
                # check is deliberately crude.
                parsed = urllib.parse.urlsplit(redacted)
                if not parsed.scheme and not parsed.netloc:
                    logger.warning(
                        'The index url "%s" seems invalid, please provide a scheme.',
                        redacted,
                    )

                redacted_index_urls.append(redacted)

            lines.append(
                "Looking in indexes: {}".format(", ".join(redacted_index_urls))
            )

        if self.find_links:
            redacted_links = ", ".join(
                redact_auth_from_url(url) for url in self.find_links
            )
            lines.append(f"Looking in links: {redacted_links}")
        return "\n".join(lines)

    def get_index_urls_locations(self, project_name: str) -> List[str]:
        """Return the per-project URLs derived from ``self.index_urls``.

        The canonicalized project name is appended to every configured
        index URL.
        """
        quoted_name = urllib.parse.quote(canonicalize_name(project_name))

        locations = []
        for url in self.index_urls:
            loc = posixpath.join(url, quoted_name)
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash. Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's
            # behavior.
            if not loc.endswith("/"):
                loc += "/"
            locations.append(loc)
        return locations
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/selection_prefs.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from pip._internal.models.format_control import FormatControl
4
+
5
+
6
# TODO: This needs Python 3.10's improved slots support for dataclasses
# to be converted into a dataclass.
class SelectionPreferences:
    """
    Holds the preferences that govern which candidate files pip may
    select for download and installation.
    """

    __slots__ = [
        "allow_yanked",
        "allow_all_prereleases",
        "format_control",
        "prefer_binary",
        "ignore_requires_python",
    ]

    # `allow_yanked` deliberately has no default value: every call site
    # must decide explicitly whether yanked releases are acceptable,
    # which keeps that decision visible to readers of the calling code.
    def __init__(
        self,
        allow_yanked: bool,
        allow_all_prereleases: bool = False,
        format_control: Optional[FormatControl] = None,
        prefer_binary: bool = False,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """Create a SelectionPreferences object.

        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param format_control: A FormatControl object or None. Used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param prefer_binary: Whether to prefer an old, but valid, binary
            dist over a new source dist.
        :param ignore_requires_python: Whether to ignore incompatible
            "Requires-Python" values in links. Defaults to False.
        """
        self.allow_yanked = allow_yanked
        self.allow_all_prereleases = allow_all_prereleases
        self.format_control = format_control
        self.prefer_binary = prefer_binary
        # Treat a missing value the same as an explicit False.
        self.ignore_requires_python = (
            False if ignore_requires_python is None else ignore_requires_python
        )
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/target_python.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ from typing import List, Optional, Set, Tuple
3
+
4
+ from pip._vendor.packaging.tags import Tag
5
+
6
+ from pip._internal.utils.compatibility_tags import get_supported, version_info_to_nodot
7
+ from pip._internal.utils.misc import normalize_version_info
8
+
9
+
10
class TargetPython:
    """
    Describes the Python interpreter that packages are being selected for
    (installed, downloaded, etc.) -- not necessarily the one running pip.
    """

    __slots__ = [
        "_given_py_version_info",
        "abis",
        "implementation",
        "platforms",
        "py_version",
        "py_version_info",
        "_valid_tags",
        "_valid_tags_set",
    ]

    def __init__(
        self,
        platforms: Optional[List[str]] = None,
        py_version_info: Optional[Tuple[int, ...]] = None,
        abis: Optional[List[str]] = None,
        implementation: Optional[str] = None,
    ) -> None:
        """
        :param platforms: A list of strings or None. If None, searches for
            packages that are supported by the current system. Otherwise, will
            find packages that can be built on the platforms passed in. These
            packages will only be downloaded for distribution: they will
            not be built locally.
        :param py_version_info: An optional tuple of ints representing the
            Python version information to use (e.g. `sys.version_info[:3]`).
            This can have length 1, 2, or 3 when provided.
        :param abis: A list of strings or None. This is passed to
            compatibility_tags.py's get_supported() function as is.
        :param implementation: A string or None. This is passed to
            compatibility_tags.py's get_supported() function as is.
        """
        # Remember exactly what the caller provided; get_supported() later
        # needs to distinguish "not given" from an explicit version.
        self._given_py_version_info = py_version_info

        if py_version_info is None:
            py_version_info = sys.version_info[:3]
        else:
            py_version_info = normalize_version_info(py_version_info)

        self.abis = abis
        self.implementation = implementation
        self.platforms = platforms
        self.py_version = ".".join(map(str, py_version_info[:2]))
        self.py_version_info = py_version_info

        # Lazily-filled caches for get_sorted_tags / get_unsorted_tags.
        self._valid_tags: Optional[List[Tag]] = None
        self._valid_tags_set: Optional[Set[Tag]] = None

    def format_given(self) -> str:
        """
        Format the explicitly-provided (non-None) attributes for display.
        """
        if self._given_py_version_info is None:
            display_version = None
        else:
            display_version = ".".join(
                str(part) for part in self._given_py_version_info
            )

        parts = []
        for key, value in (
            ("platforms", self.platforms),
            ("version_info", display_version),
            ("abis", self.abis),
            ("implementation", self.implementation),
        ):
            if value is not None:
                parts.append(f"{key}={value!r}")
        return " ".join(parts)

    def get_sorted_tags(self) -> List[Tag]:
        """
        Return the supported PEP 425 tags to check wheel candidates against,
        most preferred first. The result is computed once and cached.
        """
        if self._valid_tags is None:
            # Pass versions=None when no py_version_info was given, since
            # versions=None triggers get_supported()'s special default logic.
            given = self._given_py_version_info
            version = None if given is None else version_info_to_nodot(given)

            self._valid_tags = get_supported(
                version=version,
                platforms=self.platforms,
                abis=self.abis,
                impl=self.implementation,
            )

        return self._valid_tags

    def get_unsorted_tags(self) -> Set[Tag]:
        """Exactly the same tags as get_sorted_tags(), but as a set.

        This is important for performance (O(1) membership checks).
        """
        if self._valid_tags_set is None:
            self._valid_tags_set = set(self.get_sorted_tags())
        return self._valid_tags_set
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/models/wheel.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Represents a wheel file and provides access to the various parts of the
2
+ name that have meaning.
3
+ """
4
+
5
+ import re
6
+ from typing import Dict, Iterable, List, Optional
7
+
8
+ from pip._vendor.packaging.tags import Tag
9
+ from pip._vendor.packaging.utils import BuildTag, parse_wheel_filename
10
+ from pip._vendor.packaging.utils import (
11
+ InvalidWheelFilename as _PackagingInvalidWheelFilename,
12
+ )
13
+
14
+ from pip._internal.exceptions import InvalidWheelFilename
15
+ from pip._internal.utils.deprecation import deprecated
16
+
17
+
18
class Wheel:
    """A wheel file"""

    # Pre-normalisation filename pattern, kept to accept legacy wheels
    # whose names don't satisfy the current packaging spec.
    legacy_wheel_file_re = re.compile(
        r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))
        ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE,
    )

    def __init__(self, filename: str) -> None:
        self.filename = filename

        # Declared up front so both parsing paths below satisfy mypy.
        self.name: str
        self._build_tag: Optional[BuildTag] = None

        try:
            wheel_info = parse_wheel_filename(filename)
        except _PackagingInvalidWheelFilename as e:
            # Fall back to the legacy filename format; reject outright if
            # the name matches neither grammar.
            legacy_wheel_info = self.legacy_wheel_file_re.match(filename)
            if legacy_wheel_info is None:
                raise InvalidWheelFilename(e.args[0]) from None

            deprecated(
                reason=(
                    f"Wheel filename {filename!r} is not correctly normalised. "
                    "Future versions of pip will raise the following error:\n"
                    f"{e.args[0]}\n\n"
                ),
                replacement=(
                    "to rename the wheel to use a correctly normalised "
                    "name (this may require updating the version in "
                    "the project metadata)"
                ),
                gone_in="25.3",
                issue=12938,
            )

            self.name = legacy_wheel_info.group("name").replace("_", "-")
            self.version = legacy_wheel_info.group("ver").replace("_", "-")

            # Expand the dotted tag components into the full cross product
            # of PEP 425 tags the legacy filename encodes.
            self.file_tags = frozenset(
                Tag(interpreter=py, abi=abi, platform=plat)
                for py in legacy_wheel_info.group("pyver").split(".")
                for abi in legacy_wheel_info.group("abi").split(".")
                for plat in legacy_wheel_info.group("plat").split(".")
            )
        else:
            self.name, _version, self._build_tag, self.file_tags = wheel_info
            self.version = str(_version)

    @property
    def build_tag(self) -> BuildTag:
        if self._build_tag is None:
            # Only a legacy-named wheel reaches this point: the modern
            # parser always fills in _build_tag during __init__.
            legacy_wheel_info = self.legacy_wheel_file_re.match(self.filename)
            assert legacy_wheel_info is not None, "guaranteed by filename validation"
            build_tag = legacy_wheel_info.group("build")
            match = re.match(r"^(\d+)(.*)$", build_tag)
            assert match is not None, "guaranteed by filename validation"
            number, suffix = match.groups()
            self._build_tag = (int(number), suffix)
        return self._build_tag

    def get_formatted_file_tags(self) -> List[str]:
        """Return the wheel's tags as a sorted list of strings."""
        return sorted(map(str, self.file_tags))

    def support_index_min(self, tags: List[Tag]) -> int:
        """Return the lowest index that one of the wheel's file_tag combinations
        achieves in the given list of supported tags.

        For example, if there are 8 supported tags and one of the file tags
        is first in the list, then return 0.

        :param tags: the PEP 425 tags to check the wheel against, in order
            with most preferred first.

        :raises ValueError: If none of the wheel's file tags match one of
            the supported tags.
        """
        for index, tag in enumerate(tags):
            if tag in self.file_tags:
                return index
        raise ValueError()

    def find_most_preferred_tag(
        self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
    ) -> int:
        """Return the priority of the most preferred tag that one of the wheel's
        file tag combinations achieves, using the given tag_to_priority mapping
        (lower priority values are more-preferred).

        This is used in place of support_index_min in some cases in order to
        avoid an expensive linear scan of a large list of tags.

        :param tags: the PEP 425 tags to check the wheel against.
        :param tag_to_priority: a mapping from tag to priority of that tag, where
            lower is more preferred.

        :raises ValueError: If none of the wheel's file tags match one of
            the supported tags.
        """
        priorities = (
            tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
        )
        # min() raises ValueError on an empty sequence, i.e. no match.
        return min(priorities)

    def supported(self, tags: Iterable[Tag]) -> bool:
        """Return whether the wheel is compatible with one of the given tags.

        :param tags: the PEP 425 tags to check the wheel against.
        """
        return any(tag in self.file_tags for tag in tags)
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
"""Utilities that deal exclusively with networking."""
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (258 Bytes). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/auth.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/cache.cpython-310.pyc ADDED
Binary file (4.81 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/download.cpython-310.pyc ADDED
Binary file (8.48 kB). View file
 
ACE_plus/flashenv/lib/python3.10/site-packages/pip/_internal/network/__pycache__/lazy_wheel.cpython-310.pyc ADDED
Binary file (8.43 kB). View file