content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
import functools\nimport itertools\nimport logging\nimport os\nimport posixpath\nimport re\nimport urllib.parse\nfrom dataclasses import dataclass\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Mapping,\n NamedTuple,\n Optional,\n Tuple,\n Union,\n)\n\nfrom pip._internal.utils.deprecation import deprecated\nfrom pip._internal.utils.filetypes import WHEEL_EXTENSION\nfrom pip._internal.utils.hashes import Hashes\nfrom pip._internal.utils.misc import (\n pairwise,\n redact_auth_from_url,\n split_auth_from_netloc,\n splitext,\n)\nfrom pip._internal.utils.urls import path_to_url, url_to_path\n\nif TYPE_CHECKING:\n from pip._internal.index.collector import IndexContent\n\nlogger = logging.getLogger(__name__)\n\n\n# Order matters, earlier hashes have a precedence over later hashes for what\n# we will pick to use.\n_SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5")\n\n\n@dataclass(frozen=True)\nclass LinkHash:\n """Links to content may have embedded hash values. This class parses those.\n\n `name` must be any member of `_SUPPORTED_HASHES`.\n\n This class can be converted to and from `ArchiveInfo`. While ArchiveInfo intends to\n be JSON-serializable to conform to PEP 610, this class contains the logic for\n parsing a hash name and value for correctness, and then checking whether that hash\n conforms to a schema with `.is_hash_allowed()`."""\n\n name: str\n value: str\n\n _hash_url_fragment_re = re.compile(\n # NB: we do not validate that the second group (.*) is a valid hex\n # digest. Instead, we simply keep that string in this class, and then check it\n # against Hashes when hash-checking is needed. 
This is easier to debug than\n # proactively discarding an invalid hex digest, as we handle incorrect hashes\n # and malformed hashes in the same place.\n r"[#&]({choices})=([^&]*)".format(\n choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES)\n ),\n )\n\n def __post_init__(self) -> None:\n assert self.name in _SUPPORTED_HASHES\n\n @classmethod\n @functools.lru_cache(maxsize=None)\n def find_hash_url_fragment(cls, url: str) -> Optional["LinkHash"]:\n """Search a string for a checksum algorithm name and encoded output value."""\n match = cls._hash_url_fragment_re.search(url)\n if match is None:\n return None\n name, value = match.groups()\n return cls(name=name, value=value)\n\n def as_dict(self) -> Dict[str, str]:\n return {self.name: self.value}\n\n def as_hashes(self) -> Hashes:\n """Return a Hashes instance which checks only for the current hash."""\n return Hashes({self.name: [self.value]})\n\n def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:\n """\n Return True if the current hash is allowed by `hashes`.\n """\n if hashes is None:\n return False\n return hashes.is_hash_allowed(self.name, hex_digest=self.value)\n\n\n@dataclass(frozen=True)\nclass MetadataFile:\n """Information about a core metadata file associated with a distribution."""\n\n hashes: Optional[Dict[str, str]]\n\n def __post_init__(self) -> None:\n if self.hashes is not None:\n assert all(name in _SUPPORTED_HASHES for name in self.hashes)\n\n\ndef supported_hashes(hashes: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]:\n # Remove any unsupported hash types from the mapping. If this leaves no\n # supported hashes, return None\n if hashes is None:\n return None\n hashes = {n: v for n, v in hashes.items() if n in _SUPPORTED_HASHES}\n if not hashes:\n return None\n return hashes\n\n\ndef _clean_url_path_part(part: str) -> str:\n """\n Clean a "part" of a URL path (i.e. 
after splitting on "@" characters).\n """\n # We unquote prior to quoting to make sure nothing is double quoted.\n return urllib.parse.quote(urllib.parse.unquote(part))\n\n\ndef _clean_file_url_path(part: str) -> str:\n """\n Clean the first part of a URL path that corresponds to a local\n filesystem path (i.e. the first part after splitting on "@" characters).\n """\n # We unquote prior to quoting to make sure nothing is double quoted.\n # Also, on Windows the path part might contain a drive letter which\n # should not be quoted. On Linux where drive letters do not\n # exist, the colon should be quoted. We rely on urllib.request\n # to do the right thing here.\n return urllib.request.pathname2url(urllib.request.url2pathname(part))\n\n\n# percent-encoded: /\n_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)\n\n\ndef _clean_url_path(path: str, is_local_path: bool) -> str:\n """\n Clean the path portion of a URL.\n """\n if is_local_path:\n clean_func = _clean_file_url_path\n else:\n clean_func = _clean_url_path_part\n\n # Split on the reserved characters prior to cleaning so that\n # revision strings in VCS URLs are properly preserved.\n parts = _reserved_chars_re.split(path)\n\n cleaned_parts = []\n for to_clean, reserved in pairwise(itertools.chain(parts, [""])):\n cleaned_parts.append(clean_func(to_clean))\n # Normalize %xx escapes (e.g. 
%2f -> %2F)\n cleaned_parts.append(reserved.upper())\n\n return "".join(cleaned_parts)\n\n\ndef _ensure_quoted_url(url: str) -> str:\n """\n Make sure a link is fully quoted.\n For example, if ' ' occurs in the URL, it will be replaced with "%20",\n and without double-quoting other characters.\n """\n # Split the URL into parts according to the general structure\n # `scheme://netloc/path?query#fragment`.\n result = urllib.parse.urlsplit(url)\n # If the netloc is empty, then the URL refers to a local filesystem path.\n is_local_path = not result.netloc\n path = _clean_url_path(result.path, is_local_path=is_local_path)\n return urllib.parse.urlunsplit(result._replace(path=path))\n\n\ndef _absolute_link_url(base_url: str, url: str) -> str:\n """\n A faster implementation of urllib.parse.urljoin with a shortcut\n for absolute http/https URLs.\n """\n if url.startswith(("https://", "http://")):\n return url\n else:\n return urllib.parse.urljoin(base_url, url)\n\n\n@functools.total_ordering\nclass Link:\n """Represents a parsed link from a Package Index's simple URL"""\n\n __slots__ = [\n "_parsed_url",\n "_url",\n "_path",\n "_hashes",\n "comes_from",\n "requires_python",\n "yanked_reason",\n "metadata_file_data",\n "cache_link_parsing",\n "egg_fragment",\n ]\n\n def __init__(\n self,\n url: str,\n comes_from: Optional[Union[str, "IndexContent"]] = None,\n requires_python: Optional[str] = None,\n yanked_reason: Optional[str] = None,\n metadata_file_data: Optional[MetadataFile] = None,\n cache_link_parsing: bool = True,\n hashes: Optional[Mapping[str, str]] = None,\n ) -> None:\n """\n :param url: url of the resource pointed to (href of the link)\n :param comes_from: instance of IndexContent where the link was found,\n or string.\n :param requires_python: String containing the `Requires-Python`\n metadata field, specified in PEP 345. 
This may be specified by\n a data-requires-python attribute in the HTML link tag, as\n described in PEP 503.\n :param yanked_reason: the reason the file has been yanked, if the\n file has been yanked, or None if the file hasn't been yanked.\n This is the value of the "data-yanked" attribute, if present, in\n a simple repository HTML link. If the file has been yanked but\n no reason was provided, this should be the empty string. See\n PEP 592 for more information and the specification.\n :param metadata_file_data: the metadata attached to the file, or None if\n no such metadata is provided. This argument, if not None, indicates\n that a separate metadata file exists, and also optionally supplies\n hashes for that file.\n :param cache_link_parsing: A flag that is used elsewhere to determine\n whether resources retrieved from this link should be cached. PyPI\n URLs should generally have this set to False, for example.\n :param hashes: A mapping of hash names to digests to allow us to\n determine the validity of a download.\n """\n\n # The comes_from, requires_python, and metadata_file_data arguments are\n # only used by classmethods of this class, and are not used in client\n # code directly.\n\n # url can be a UNC windows share\n if url.startswith("\\\\"):\n url = path_to_url(url)\n\n self._parsed_url = urllib.parse.urlsplit(url)\n # Store the url as a private attribute to prevent accidentally\n # trying to set a new value.\n self._url = url\n # The .path property is hot, so calculate its value ahead of time.\n self._path = urllib.parse.unquote(self._parsed_url.path)\n\n link_hash = LinkHash.find_hash_url_fragment(url)\n hashes_from_link = {} if link_hash is None else link_hash.as_dict()\n if hashes is None:\n self._hashes = hashes_from_link\n else:\n self._hashes = {**hashes, **hashes_from_link}\n\n self.comes_from = comes_from\n self.requires_python = requires_python if requires_python else None\n self.yanked_reason = yanked_reason\n self.metadata_file_data = 
metadata_file_data\n\n self.cache_link_parsing = cache_link_parsing\n self.egg_fragment = self._egg_fragment()\n\n @classmethod\n def from_json(\n cls,\n file_data: Dict[str, Any],\n page_url: str,\n ) -> Optional["Link"]:\n """\n Convert an pypi json document from a simple repository page into a Link.\n """\n file_url = file_data.get("url")\n if file_url is None:\n return None\n\n url = _ensure_quoted_url(_absolute_link_url(page_url, file_url))\n pyrequire = file_data.get("requires-python")\n yanked_reason = file_data.get("yanked")\n hashes = file_data.get("hashes", {})\n\n # PEP 714: Indexes must use the name core-metadata, but\n # clients should support the old name as a fallback for compatibility.\n metadata_info = file_data.get("core-metadata")\n if metadata_info is None:\n metadata_info = file_data.get("dist-info-metadata")\n\n # The metadata info value may be a boolean, or a dict of hashes.\n if isinstance(metadata_info, dict):\n # The file exists, and hashes have been supplied\n metadata_file_data = MetadataFile(supported_hashes(metadata_info))\n elif metadata_info:\n # The file exists, but there are no hashes\n metadata_file_data = MetadataFile(None)\n else:\n # False or not present: the file does not exist\n metadata_file_data = None\n\n # The Link.yanked_reason expects an empty string instead of a boolean.\n if yanked_reason and not isinstance(yanked_reason, str):\n yanked_reason = ""\n # The Link.yanked_reason expects None instead of False.\n elif not yanked_reason:\n yanked_reason = None\n\n return cls(\n url,\n comes_from=page_url,\n requires_python=pyrequire,\n yanked_reason=yanked_reason,\n hashes=hashes,\n metadata_file_data=metadata_file_data,\n )\n\n @classmethod\n def from_element(\n cls,\n anchor_attribs: Dict[str, Optional[str]],\n page_url: str,\n base_url: str,\n ) -> Optional["Link"]:\n """\n Convert an anchor element's attributes in a simple repository page to a Link.\n """\n href = anchor_attribs.get("href")\n if not href:\n return 
None\n\n url = _ensure_quoted_url(_absolute_link_url(base_url, href))\n pyrequire = anchor_attribs.get("data-requires-python")\n yanked_reason = anchor_attribs.get("data-yanked")\n\n # PEP 714: Indexes must use the name data-core-metadata, but\n # clients should support the old name as a fallback for compatibility.\n metadata_info = anchor_attribs.get("data-core-metadata")\n if metadata_info is None:\n metadata_info = anchor_attribs.get("data-dist-info-metadata")\n # The metadata info value may be the string "true", or a string of\n # the form "hashname=hashval"\n if metadata_info == "true":\n # The file exists, but there are no hashes\n metadata_file_data = MetadataFile(None)\n elif metadata_info is None:\n # The file does not exist\n metadata_file_data = None\n else:\n # The file exists, and hashes have been supplied\n hashname, sep, hashval = metadata_info.partition("=")\n if sep == "=":\n metadata_file_data = MetadataFile(supported_hashes({hashname: hashval}))\n else:\n # Error - data is wrong. 
Treat as no hashes supplied.\n logger.debug(\n "Index returned invalid data-dist-info-metadata value: %s",\n metadata_info,\n )\n metadata_file_data = MetadataFile(None)\n\n return cls(\n url,\n comes_from=page_url,\n requires_python=pyrequire,\n yanked_reason=yanked_reason,\n metadata_file_data=metadata_file_data,\n )\n\n def __str__(self) -> str:\n if self.requires_python:\n rp = f" (requires-python:{self.requires_python})"\n else:\n rp = ""\n if self.comes_from:\n return f"{self.redacted_url} (from {self.comes_from}){rp}"\n else:\n return self.redacted_url\n\n def __repr__(self) -> str:\n return f"<Link {self}>"\n\n def __hash__(self) -> int:\n return hash(self.url)\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Link):\n return NotImplemented\n return self.url == other.url\n\n def __lt__(self, other: Any) -> bool:\n if not isinstance(other, Link):\n return NotImplemented\n return self.url < other.url\n\n @property\n def url(self) -> str:\n return self._url\n\n @property\n def redacted_url(self) -> str:\n return redact_auth_from_url(self.url)\n\n @property\n def filename(self) -> str:\n path = self.path.rstrip("/")\n name = posixpath.basename(path)\n if not name:\n # Make sure we don't leak auth information if the netloc\n # includes a username and password.\n netloc, user_pass = split_auth_from_netloc(self.netloc)\n return netloc\n\n name = urllib.parse.unquote(name)\n assert name, f"URL {self._url!r} produced no filename"\n return name\n\n @property\n def file_path(self) -> str:\n return url_to_path(self.url)\n\n @property\n def scheme(self) -> str:\n return self._parsed_url.scheme\n\n @property\n def netloc(self) -> str:\n """\n This can contain auth information.\n """\n return self._parsed_url.netloc\n\n @property\n def path(self) -> str:\n return self._path\n\n def splitext(self) -> Tuple[str, str]:\n return splitext(posixpath.basename(self.path.rstrip("/")))\n\n @property\n def ext(self) -> str:\n return self.splitext()[1]\n\n 
@property\n def url_without_fragment(self) -> str:\n scheme, netloc, path, query, fragment = self._parsed_url\n return urllib.parse.urlunsplit((scheme, netloc, path, query, ""))\n\n _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")\n\n # Per PEP 508.\n _project_name_re = re.compile(\n r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE\n )\n\n def _egg_fragment(self) -> Optional[str]:\n match = self._egg_fragment_re.search(self._url)\n if not match:\n return None\n\n # An egg fragment looks like a PEP 508 project name, along with\n # an optional extras specifier. Anything else is invalid.\n project_name = match.group(1)\n if not self._project_name_re.match(project_name):\n deprecated(\n reason=f"{self} contains an egg fragment with a non-PEP 508 name.",\n replacement="to use the req @ url syntax, and remove the egg fragment",\n gone_in="25.2",\n issue=13157,\n )\n\n return project_name\n\n _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")\n\n @property\n def subdirectory_fragment(self) -> Optional[str]:\n match = self._subdirectory_fragment_re.search(self._url)\n if not match:\n return None\n return match.group(1)\n\n def metadata_link(self) -> Optional["Link"]:\n """Return a link to the associated core metadata file (if any)."""\n if self.metadata_file_data is None:\n return None\n metadata_url = f"{self.url_without_fragment}.metadata"\n if self.metadata_file_data.hashes is None:\n return Link(metadata_url)\n return Link(metadata_url, hashes=self.metadata_file_data.hashes)\n\n def as_hashes(self) -> Hashes:\n return Hashes({k: [v] for k, v in self._hashes.items()})\n\n @property\n def hash(self) -> Optional[str]:\n return next(iter(self._hashes.values()), None)\n\n @property\n def hash_name(self) -> Optional[str]:\n return next(iter(self._hashes), None)\n\n @property\n def show_url(self) -> str:\n return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])\n\n @property\n def is_file(self) -> bool:\n return self.scheme == 
"file"\n\n def is_existing_dir(self) -> bool:\n return self.is_file and os.path.isdir(self.file_path)\n\n @property\n def is_wheel(self) -> bool:\n return self.ext == WHEEL_EXTENSION\n\n @property\n def is_vcs(self) -> bool:\n from pip._internal.vcs import vcs\n\n return self.scheme in vcs.all_schemes\n\n @property\n def is_yanked(self) -> bool:\n return self.yanked_reason is not None\n\n @property\n def has_hash(self) -> bool:\n return bool(self._hashes)\n\n def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:\n """\n Return True if the link has a hash and it is allowed by `hashes`.\n """\n if hashes is None:\n return False\n return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items())\n\n\nclass _CleanResult(NamedTuple):\n """Convert link for equivalency check.\n\n This is used in the resolver to check whether two URL-specified requirements\n likely point to the same distribution and can be considered equivalent. This\n equivalency logic avoids comparing URLs literally, which can be too strict\n (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpecting to users.\n\n Currently this does three things:\n\n 1. Drop the basic auth part. This is technically wrong since a server can\n serve different content based on auth, but if it does that, it is even\n impossible to guarantee two URLs without auth are equivalent, since\n the user can input different auth information when prompted. So the\n practical solution is to assume the auth doesn't affect the response.\n 2. Parse the query to avoid the ordering issue. Note that ordering under the\n same key in the query are NOT cleaned; i.e. "a=1&a=2" and "a=2&a=1" are\n still considered different.\n 3. Explicitly drop most of the fragment part, except ``subdirectory=`` and\n hash values, since it should have no impact the downloaded content. 
Note\n that this drops the "egg=" part historically used to denote the requested\n project (and extras), which is wrong in the strictest sense, but too many\n people are supplying it inconsistently to cause superfluous resolution\n conflicts, so we choose to also ignore them.\n """\n\n parsed: urllib.parse.SplitResult\n query: Dict[str, List[str]]\n subdirectory: str\n hashes: Dict[str, str]\n\n\ndef _clean_link(link: Link) -> _CleanResult:\n parsed = link._parsed_url\n netloc = parsed.netloc.rsplit("@", 1)[-1]\n # According to RFC 8089, an empty host in file: means localhost.\n if parsed.scheme == "file" and not netloc:\n netloc = "localhost"\n fragment = urllib.parse.parse_qs(parsed.fragment)\n if "egg" in fragment:\n logger.debug("Ignoring egg= fragment in %s", link)\n try:\n # If there are multiple subdirectory values, use the first one.\n # This matches the behavior of Link.subdirectory_fragment.\n subdirectory = fragment["subdirectory"][0]\n except (IndexError, KeyError):\n subdirectory = ""\n # If there are multiple hash values under the same algorithm, use the\n # first one. This matches the behavior of Link.hash_value.\n hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment}\n return _CleanResult(\n parsed=parsed._replace(netloc=netloc, query="", fragment=""),\n query=urllib.parse.parse_qs(parsed.query),\n subdirectory=subdirectory,\n hashes=hashes,\n )\n\n\n@functools.lru_cache(maxsize=None)\ndef links_equivalent(link1: Link, link2: Link) -> bool:\n return _clean_link(link1) == _clean_link(link2)\n
.venv\Lib\site-packages\pip\_internal\models\link.py
link.py
Python
21,511
0.95
0.199013
0.108911
vue-tools
867
2025-02-26T03:38:05.190596
MIT
false
7cebe49493da6a7c44b1fed0aafc3631
import dataclasses\nimport re\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterable, List, Optional, Tuple\n\nfrom pip._vendor import tomli_w\nfrom pip._vendor.typing_extensions import Self\n\nfrom pip._internal.models.direct_url import ArchiveInfo, DirInfo, VcsInfo\nfrom pip._internal.models.link import Link\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._internal.utils.urls import url_to_path\n\nPYLOCK_FILE_NAME_RE = re.compile(r"^pylock\.([^.]+)\.toml$")\n\n\ndef is_valid_pylock_file_name(path: Path) -> bool:\n return path.name == "pylock.toml" or bool(re.match(PYLOCK_FILE_NAME_RE, path.name))\n\n\ndef _toml_dict_factory(data: List[Tuple[str, Any]]) -> Dict[str, Any]:\n return {key.replace("_", "-"): value for key, value in data if value is not None}\n\n\n@dataclass\nclass PackageVcs:\n type: str\n url: Optional[str]\n # (not supported) path: Optional[str]\n requested_revision: Optional[str]\n commit_id: str\n subdirectory: Optional[str]\n\n\n@dataclass\nclass PackageDirectory:\n path: str\n editable: Optional[bool]\n subdirectory: Optional[str]\n\n\n@dataclass\nclass PackageArchive:\n url: Optional[str]\n # (not supported) path: Optional[str]\n # (not supported) size: Optional[int]\n # (not supported) upload_time: Optional[datetime]\n hashes: Dict[str, str]\n subdirectory: Optional[str]\n\n\n@dataclass\nclass PackageSdist:\n name: str\n # (not supported) upload_time: Optional[datetime]\n url: Optional[str]\n # (not supported) path: Optional[str]\n # (not supported) size: Optional[int]\n hashes: Dict[str, str]\n\n\n@dataclass\nclass PackageWheel:\n name: str\n # (not supported) upload_time: Optional[datetime]\n url: Optional[str]\n # (not supported) path: Optional[str]\n # (not supported) size: Optional[int]\n hashes: Dict[str, str]\n\n\n@dataclass\nclass Package:\n name: str\n version: Optional[str] = None\n # (not supported) marker: Optional[str]\n # (not supported) requires_python: 
Optional[str]\n # (not supported) dependencies\n vcs: Optional[PackageVcs] = None\n directory: Optional[PackageDirectory] = None\n archive: Optional[PackageArchive] = None\n # (not supported) index: Optional[str]\n sdist: Optional[PackageSdist] = None\n wheels: Optional[List[PackageWheel]] = None\n # (not supported) attestation_identities: Optional[List[Dict[str, Any]]]\n # (not supported) tool: Optional[Dict[str, Any]]\n\n @classmethod\n def from_install_requirement(cls, ireq: InstallRequirement, base_dir: Path) -> Self:\n base_dir = base_dir.resolve()\n dist = ireq.get_dist()\n download_info = ireq.download_info\n assert download_info\n package = cls(name=dist.canonical_name)\n if ireq.is_direct:\n if isinstance(download_info.info, VcsInfo):\n package.vcs = PackageVcs(\n type=download_info.info.vcs,\n url=download_info.url,\n requested_revision=download_info.info.requested_revision,\n commit_id=download_info.info.commit_id,\n subdirectory=download_info.subdirectory,\n )\n elif isinstance(download_info.info, DirInfo):\n package.directory = PackageDirectory(\n path=(\n Path(url_to_path(download_info.url))\n .resolve()\n .relative_to(base_dir)\n .as_posix()\n ),\n editable=(\n download_info.info.editable\n if download_info.info.editable\n else None\n ),\n subdirectory=download_info.subdirectory,\n )\n elif isinstance(download_info.info, ArchiveInfo):\n if not download_info.info.hashes:\n raise NotImplementedError()\n package.archive = PackageArchive(\n url=download_info.url,\n hashes=download_info.info.hashes,\n subdirectory=download_info.subdirectory,\n )\n else:\n # should never happen\n raise NotImplementedError()\n else:\n package.version = str(dist.version)\n if isinstance(download_info.info, ArchiveInfo):\n if not download_info.info.hashes:\n raise NotImplementedError()\n link = Link(download_info.url)\n if link.is_wheel:\n package.wheels = [\n PackageWheel(\n name=link.filename,\n url=download_info.url,\n hashes=download_info.info.hashes,\n )\n ]\n else:\n 
package.sdist = PackageSdist(\n name=link.filename,\n url=download_info.url,\n hashes=download_info.info.hashes,\n )\n else:\n # should never happen\n raise NotImplementedError()\n return package\n\n\n@dataclass\nclass Pylock:\n lock_version: str = "1.0"\n # (not supported) environments: Optional[List[str]]\n # (not supported) requires_python: Optional[str]\n # (not supported) extras: List[str] = []\n # (not supported) dependency_groups: List[str] = []\n created_by: str = "pip"\n packages: List[Package] = dataclasses.field(default_factory=list)\n # (not supported) tool: Optional[Dict[str, Any]]\n\n def as_toml(self) -> str:\n return tomli_w.dumps(dataclasses.asdict(self, dict_factory=_toml_dict_factory))\n\n @classmethod\n def from_install_requirements(\n cls, install_requirements: Iterable[InstallRequirement], base_dir: Path\n ) -> Self:\n return cls(\n packages=sorted(\n (\n Package.from_install_requirement(ireq, base_dir)\n for ireq in install_requirements\n ),\n key=lambda p: p.name,\n )\n )\n
.venv\Lib\site-packages\pip\_internal\models\pylock.py
pylock.py
Python
6,196
0.95
0.120219
0.144654
node-utils
852
2024-02-10T14:11:17.358238
Apache-2.0
false
9b86c5e64cfb11185275531f3ca64af1
"""\nFor types associated with installation schemes.\n\nFor a general overview of available schemes and their context, see\nhttps://docs.python.org/3/install/index.html#alternate-installation.\n"""\n\nfrom dataclasses import dataclass\n\nSCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"]\n\n\n@dataclass(frozen=True)\nclass Scheme:\n """A Scheme holds paths which are used as the base directories for\n artifacts associated with a Python package.\n """\n\n __slots__ = SCHEME_KEYS\n\n platlib: str\n purelib: str\n headers: str\n scripts: str\n data: str\n
.venv\Lib\site-packages\pip\_internal\models\scheme.py
scheme.py
Python
575
0.95
0.08
0
react-lib
686
2023-11-12T23:45:17.914680
Apache-2.0
false
f866549721be296f523dac33e08edcb4
import itertools\nimport logging\nimport os\nimport posixpath\nimport urllib.parse\nfrom dataclasses import dataclass\nfrom typing import List\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.models.index import PyPI\nfrom pip._internal.utils.compat import has_tls\nfrom pip._internal.utils.misc import normalize_path, redact_auth_from_url\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass(frozen=True)\nclass SearchScope:\n """\n Encapsulates the locations that pip is configured to search.\n """\n\n __slots__ = ["find_links", "index_urls", "no_index"]\n\n find_links: List[str]\n index_urls: List[str]\n no_index: bool\n\n @classmethod\n def create(\n cls,\n find_links: List[str],\n index_urls: List[str],\n no_index: bool,\n ) -> "SearchScope":\n """\n Create a SearchScope object after normalizing the `find_links`.\n """\n # Build find_links. If an argument starts with ~, it may be\n # a local file relative to a home directory. So try normalizing\n # it and if it exists, use the normalized version.\n # This is deliberately conservative - it might be fine just to\n # blindly normalize anything starting with a ~...\n built_find_links: List[str] = []\n for link in find_links:\n if link.startswith("~"):\n new_link = normalize_path(link)\n if os.path.exists(new_link):\n link = new_link\n built_find_links.append(link)\n\n # If we don't have TLS enabled, then WARN if anyplace we're looking\n # relies on TLS.\n if not has_tls():\n for link in itertools.chain(index_urls, built_find_links):\n parsed = urllib.parse.urlparse(link)\n if parsed.scheme == "https":\n logger.warning(\n "pip is configured with locations that require "\n "TLS/SSL, however the ssl module in Python is not "\n "available."\n )\n break\n\n return cls(\n find_links=built_find_links,\n index_urls=index_urls,\n no_index=no_index,\n )\n\n def get_formatted_locations(self) -> str:\n lines = []\n redacted_index_urls = []\n if self.index_urls and self.index_urls != 
[PyPI.simple_url]:\n for url in self.index_urls:\n redacted_index_url = redact_auth_from_url(url)\n\n # Parse the URL\n purl = urllib.parse.urlsplit(redacted_index_url)\n\n # URL is generally invalid if scheme and netloc is missing\n # there are issues with Python and URL parsing, so this test\n # is a bit crude. See bpo-20271, bpo-23505. Python doesn't\n # always parse invalid URLs correctly - it should raise\n # exceptions for malformed URLs\n if not purl.scheme and not purl.netloc:\n logger.warning(\n 'The index url "%s" seems invalid, please provide a scheme.',\n redacted_index_url,\n )\n\n redacted_index_urls.append(redacted_index_url)\n\n lines.append(\n "Looking in indexes: {}".format(", ".join(redacted_index_urls))\n )\n\n if self.find_links:\n lines.append(\n "Looking in links: {}".format(\n ", ".join(redact_auth_from_url(url) for url in self.find_links)\n )\n )\n return "\n".join(lines)\n\n def get_index_urls_locations(self, project_name: str) -> List[str]:\n """Returns the locations found via self.index_urls\n\n Checks the url_name on the main (first in the list) index and\n use this url_name to produce all locations\n """\n\n def mkurl_pypi_url(url: str) -> str:\n loc = posixpath.join(\n url, urllib.parse.quote(canonicalize_name(project_name))\n )\n # For maximum compatibility with easy_install, ensure the path\n # ends in a trailing slash. Although this isn't in the spec\n # (and PyPI can handle it without the slash) some other index\n # implementations might break if they relied on easy_install's\n # behavior.\n if not loc.endswith("/"):\n loc = loc + "/"\n return loc\n\n return [mkurl_pypi_url(url) for url in self.index_urls]\n
.venv\Lib\site-packages\pip\_internal\models\search_scope.py
search_scope.py
Python
4,531
0.95
0.188976
0.168224
react-lib
782
2023-09-18T12:04:06.158366
GPL-3.0
false
5c0fe043789a18c85e1aca89bafe0173
from typing import Optional\n\nfrom pip._internal.models.format_control import FormatControl\n\n\n# TODO: This needs Python 3.10's improved slots support for dataclasses\n# to be converted into a dataclass.\nclass SelectionPreferences:\n """\n Encapsulates the candidate selection preferences for downloading\n and installing files.\n """\n\n __slots__ = [\n "allow_yanked",\n "allow_all_prereleases",\n "format_control",\n "prefer_binary",\n "ignore_requires_python",\n ]\n\n # Don't include an allow_yanked default value to make sure each call\n # site considers whether yanked releases are allowed. This also causes\n # that decision to be made explicit in the calling code, which helps\n # people when reading the code.\n def __init__(\n self,\n allow_yanked: bool,\n allow_all_prereleases: bool = False,\n format_control: Optional[FormatControl] = None,\n prefer_binary: bool = False,\n ignore_requires_python: Optional[bool] = None,\n ) -> None:\n """Create a SelectionPreferences object.\n\n :param allow_yanked: Whether files marked as yanked (in the sense\n of PEP 592) are permitted to be candidates for install.\n :param format_control: A FormatControl object or None. Used to control\n the selection of source packages / binary packages when consulting\n the index and links.\n :param prefer_binary: Whether to prefer an old, but valid, binary\n dist over a new source dist.\n :param ignore_requires_python: Whether to ignore incompatible\n "Requires-Python" values in links. Defaults to False.\n """\n if ignore_requires_python is None:\n ignore_requires_python = False\n\n self.allow_yanked = allow_yanked\n self.allow_all_prereleases = allow_all_prereleases\n self.format_control = format_control\n self.prefer_binary = prefer_binary\n self.ignore_requires_python = ignore_requires_python\n
.venv\Lib\site-packages\pip\_internal\models\selection_prefs.py
selection_prefs.py
Python
2,015
0.95
0.113208
0.130435
vue-tools
289
2024-01-25T08:13:22.955968
MIT
false
8e302535ea3e86c2599571aac77b9aba
import sys\nfrom typing import List, Optional, Set, Tuple\n\nfrom pip._vendor.packaging.tags import Tag\n\nfrom pip._internal.utils.compatibility_tags import get_supported, version_info_to_nodot\nfrom pip._internal.utils.misc import normalize_version_info\n\n\nclass TargetPython:\n """\n Encapsulates the properties of a Python interpreter one is targeting\n for a package install, download, etc.\n """\n\n __slots__ = [\n "_given_py_version_info",\n "abis",\n "implementation",\n "platforms",\n "py_version",\n "py_version_info",\n "_valid_tags",\n "_valid_tags_set",\n ]\n\n def __init__(\n self,\n platforms: Optional[List[str]] = None,\n py_version_info: Optional[Tuple[int, ...]] = None,\n abis: Optional[List[str]] = None,\n implementation: Optional[str] = None,\n ) -> None:\n """\n :param platforms: A list of strings or None. If None, searches for\n packages that are supported by the current system. Otherwise, will\n find packages that can be built on the platforms passed in. These\n packages will only be downloaded for distribution: they will\n not be built locally.\n :param py_version_info: An optional tuple of ints representing the\n Python version information to use (e.g. `sys.version_info[:3]`).\n This can have length 1, 2, or 3 when provided.\n :param abis: A list of strings or None. This is passed to\n compatibility_tags.py's get_supported() function as is.\n :param implementation: A string or None. 
This is passed to\n compatibility_tags.py's get_supported() function as is.\n """\n # Store the given py_version_info for when we call get_supported().\n self._given_py_version_info = py_version_info\n\n if py_version_info is None:\n py_version_info = sys.version_info[:3]\n else:\n py_version_info = normalize_version_info(py_version_info)\n\n py_version = ".".join(map(str, py_version_info[:2]))\n\n self.abis = abis\n self.implementation = implementation\n self.platforms = platforms\n self.py_version = py_version\n self.py_version_info = py_version_info\n\n # This is used to cache the return value of get_(un)sorted_tags.\n self._valid_tags: Optional[List[Tag]] = None\n self._valid_tags_set: Optional[Set[Tag]] = None\n\n def format_given(self) -> str:\n """\n Format the given, non-None attributes for display.\n """\n display_version = None\n if self._given_py_version_info is not None:\n display_version = ".".join(\n str(part) for part in self._given_py_version_info\n )\n\n key_values = [\n ("platforms", self.platforms),\n ("version_info", display_version),\n ("abis", self.abis),\n ("implementation", self.implementation),\n ]\n return " ".join(\n f"{key}={value!r}" for key, value in key_values if value is not None\n )\n\n def get_sorted_tags(self) -> List[Tag]:\n """\n Return the supported PEP 425 tags to check wheel candidates against.\n\n The tags are returned in order of preference (most preferred first).\n """\n if self._valid_tags is None:\n # Pass versions=None if no py_version_info was given since\n # versions=None uses special default logic.\n py_version_info = self._given_py_version_info\n if py_version_info is None:\n version = None\n else:\n version = version_info_to_nodot(py_version_info)\n\n tags = get_supported(\n version=version,\n platforms=self.platforms,\n abis=self.abis,\n impl=self.implementation,\n )\n self._valid_tags = tags\n\n return self._valid_tags\n\n def get_unsorted_tags(self) -> Set[Tag]:\n """Exactly the same as get_sorted_tags, but 
returns a set.\n\n This is important for performance.\n """\n if self._valid_tags_set is None:\n self._valid_tags_set = set(self.get_sorted_tags())\n\n return self._valid_tags_set\n
.venv\Lib\site-packages\pip\_internal\models\target_python.py
target_python.py
Python
4,271
0.95
0.181818
0.039216
react-lib
93
2024-03-15T15:34:46.646415
BSD-3-Clause
false
ed86670d1c14018f47a04a6b8f531bfb
"""Represents a wheel file and provides access to the various parts of the\nname that have meaning.\n"""\n\nimport re\nfrom typing import Dict, Iterable, List, Optional\n\nfrom pip._vendor.packaging.tags import Tag\nfrom pip._vendor.packaging.utils import BuildTag, parse_wheel_filename\nfrom pip._vendor.packaging.utils import (\n InvalidWheelFilename as _PackagingInvalidWheelFilename,\n)\n\nfrom pip._internal.exceptions import InvalidWheelFilename\nfrom pip._internal.utils.deprecation import deprecated\n\n\nclass Wheel:\n """A wheel file"""\n\n legacy_wheel_file_re = re.compile(\n r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))\n ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)\n \.whl|\.dist-info)$""",\n re.VERBOSE,\n )\n\n def __init__(self, filename: str) -> None:\n self.filename = filename\n\n # To make mypy happy specify type hints that can come from either\n # parse_wheel_filename or the legacy_wheel_file_re match.\n self.name: str\n self._build_tag: Optional[BuildTag] = None\n\n try:\n wheel_info = parse_wheel_filename(filename)\n self.name, _version, self._build_tag, self.file_tags = wheel_info\n self.version = str(_version)\n except _PackagingInvalidWheelFilename as e:\n # Check if the wheel filename is in the legacy format\n legacy_wheel_info = self.legacy_wheel_file_re.match(filename)\n if not legacy_wheel_info:\n raise InvalidWheelFilename(e.args[0]) from None\n\n deprecated(\n reason=(\n f"Wheel filename {filename!r} is not correctly normalised. 
"\n "Future versions of pip will raise the following error:\n"\n f"{e.args[0]}\n\n"\n ),\n replacement=(\n "to rename the wheel to use a correctly normalised "\n "name (this may require updating the version in "\n "the project metadata)"\n ),\n gone_in="25.3",\n issue=12938,\n )\n\n self.name = legacy_wheel_info.group("name").replace("_", "-")\n self.version = legacy_wheel_info.group("ver").replace("_", "-")\n\n # Generate the file tags from the legacy wheel filename\n pyversions = legacy_wheel_info.group("pyver").split(".")\n abis = legacy_wheel_info.group("abi").split(".")\n plats = legacy_wheel_info.group("plat").split(".")\n self.file_tags = frozenset(\n Tag(interpreter=py, abi=abi, platform=plat)\n for py in pyversions\n for abi in abis\n for plat in plats\n )\n\n @property\n def build_tag(self) -> BuildTag:\n if self._build_tag is not None:\n return self._build_tag\n\n # Parse the build tag from the legacy wheel filename\n legacy_wheel_info = self.legacy_wheel_file_re.match(self.filename)\n assert legacy_wheel_info is not None, "guaranteed by filename validation"\n build_tag = legacy_wheel_info.group("build")\n match = re.match(r"^(\d+)(.*)$", build_tag)\n assert match is not None, "guaranteed by filename validation"\n build_tag_groups = match.groups()\n self._build_tag = (int(build_tag_groups[0]), build_tag_groups[1])\n\n return self._build_tag\n\n def get_formatted_file_tags(self) -> List[str]:\n """Return the wheel's tags as a sorted list of strings."""\n return sorted(str(tag) for tag in self.file_tags)\n\n def support_index_min(self, tags: List[Tag]) -> int:\n """Return the lowest index that one of the wheel's file_tag combinations\n achieves in the given list of supported tags.\n\n For example, if there are 8 supported tags and one of the file tags\n is first in the list, then return 0.\n\n :param tags: the PEP 425 tags to check the wheel against, in order\n with most preferred first.\n\n :raises ValueError: If none of the wheel's file tags match one 
of\n the supported tags.\n """\n try:\n return next(i for i, t in enumerate(tags) if t in self.file_tags)\n except StopIteration:\n raise ValueError()\n\n def find_most_preferred_tag(\n self, tags: List[Tag], tag_to_priority: Dict[Tag, int]\n ) -> int:\n """Return the priority of the most preferred tag that one of the wheel's file\n tag combinations achieves in the given list of supported tags using the given\n tag_to_priority mapping, where lower priorities are more-preferred.\n\n This is used in place of support_index_min in some cases in order to avoid\n an expensive linear scan of a large list of tags.\n\n :param tags: the PEP 425 tags to check the wheel against.\n :param tag_to_priority: a mapping from tag to priority of that tag, where\n lower is more preferred.\n\n :raises ValueError: If none of the wheel's file tags match one of\n the supported tags.\n """\n return min(\n tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority\n )\n\n def supported(self, tags: Iterable[Tag]) -> bool:\n """Return whether the wheel is compatible with one of the given tags.\n\n :param tags: the PEP 425 tags to check the wheel against.\n """\n return not self.file_tags.isdisjoint(tags)\n
.venv\Lib\site-packages\pip\_internal\models\wheel.py
wheel.py
Python
5,506
0.95
0.151079
0.044248
awesome-app
994
2024-08-20T20:33:46.511688
GPL-3.0
false
85b62fff83ab801b23a51f47b6bdfbef
"""A package that contains models that represent entities."""\n
.venv\Lib\site-packages\pip\_internal\models\__init__.py
__init__.py
Python
62
0.5
0
0
node-utils
220
2023-09-28T21:28:56.070655
MIT
false
c083fceab96ce46c1e7473b499297f32
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\candidate.cpython-313.pyc
candidate.cpython-313.pyc
Other
1,664
0.7
0.0625
0
node-utils
494
2023-11-27T13:10:59.303191
MIT
false
24ef297de02127135ddcea7e4c2d6578
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\direct_url.cpython-313.pyc
direct_url.cpython-313.pyc
Other
11,005
0.95
0.007813
0
node-utils
135
2024-10-09T05:45:53.472294
GPL-3.0
false
64deb7305a48cc27f4995cf29dd5e438
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\format_control.cpython-313.pyc
format_control.cpython-313.pyc
Other
4,260
0.95
0.03125
0
react-lib
869
2023-11-13T02:53:06.149349
GPL-3.0
false
b798373e55971af7d386f05b6b8a8025
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\index.cpython-313.pyc
index.cpython-313.pyc
Other
1,767
0.8
0
0
python-kit
536
2023-10-19T07:59:54.991320
Apache-2.0
false
f09bec3229bfeb23af4d0295cb185612
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\installation_report.cpython-313.pyc
installation_report.cpython-313.pyc
Other
2,381
0.95
0.029412
0
python-kit
206
2024-03-03T17:40:03.525663
Apache-2.0
false
0764700e61f8839095f5121b6f7b9d6c
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\link.cpython-313.pyc
link.cpython-313.pyc
Other
27,164
0.95
0.07483
0
node-utils
903
2024-07-19T11:58:52.705148
BSD-3-Clause
false
2918949fa9bac753a70ff534e6c5d54c
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\pylock.cpython-313.pyc
pylock.cpython-313.pyc
Other
8,360
0.95
0
0.020408
vue-tools
329
2023-12-09T09:17:22.011549
Apache-2.0
false
dc1164fd0f104bb2def40ce6472d7f3c
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\scheme.cpython-313.pyc
scheme.cpython-313.pyc
Other
1,069
0.8
0.045455
0
node-utils
35
2025-05-24T07:22:12.574820
Apache-2.0
false
53c4c9889836fbaac2e318e1fcd1a53d
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\search_scope.cpython-313.pyc
search_scope.cpython-313.pyc
Other
5,113
0.95
0
0
react-lib
951
2024-08-20T03:27:18.178959
GPL-3.0
false
d008e311c0fe21fb99475a4f1867a8c4
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\selection_prefs.cpython-313.pyc
selection_prefs.cpython-313.pyc
Other
1,832
0.95
0.057143
0
awesome-app
720
2024-08-20T07:08:52.916440
BSD-3-Clause
false
e537b671c5ec12216665d6701a73fcb1
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\target_python.cpython-313.pyc
target_python.cpython-313.pyc
Other
4,837
0.95
0.09589
0
vue-tools
997
2024-11-10T09:46:37.198626
Apache-2.0
false
eb17266338197ea46de0347b006f2f55
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\wheel.cpython-313.pyc
wheel.cpython-313.pyc
Other
7,480
0.95
0.012048
0.013889
react-lib
488
2025-04-07T21:43:45.174174
GPL-3.0
false
b02c040b1c538d4d91173cfb85f62943
\n\n
.venv\Lib\site-packages\pip\_internal\models\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
265
0.7
0
0
vue-tools
94
2024-08-29T10:05:37.224682
Apache-2.0
false
d34bf8b8a076a33348677f5761faf9da
"""Network Authentication Helpers\n\nContains interface (MultiDomainBasicAuth) and associated glue code for\nproviding credentials in the context of network requests.\n"""\n\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sysconfig\nimport typing\nimport urllib.parse\nfrom abc import ABC, abstractmethod\nfrom functools import lru_cache\nfrom os.path import commonprefix\nfrom pathlib import Path\nfrom typing import Any, Dict, List, NamedTuple, Optional, Tuple\n\nfrom pip._vendor.requests.auth import AuthBase, HTTPBasicAuth\nfrom pip._vendor.requests.models import Request, Response\nfrom pip._vendor.requests.utils import get_netrc_auth\n\nfrom pip._internal.utils.logging import getLogger\nfrom pip._internal.utils.misc import (\n ask,\n ask_input,\n ask_password,\n remove_auth_from_url,\n split_auth_netloc_from_url,\n)\nfrom pip._internal.vcs.versioncontrol import AuthInfo\n\nlogger = getLogger(__name__)\n\nKEYRING_DISABLED = False\n\n\nclass Credentials(NamedTuple):\n url: str\n username: str\n password: str\n\n\nclass KeyRingBaseProvider(ABC):\n """Keyring base provider interface"""\n\n has_keyring: bool\n\n @abstractmethod\n def get_auth_info(\n self, url: str, username: Optional[str]\n ) -> Optional[AuthInfo]: ...\n\n @abstractmethod\n def save_auth_info(self, url: str, username: str, password: str) -> None: ...\n\n\nclass KeyRingNullProvider(KeyRingBaseProvider):\n """Keyring null provider"""\n\n has_keyring = False\n\n def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:\n return None\n\n def save_auth_info(self, url: str, username: str, password: str) -> None:\n return None\n\n\nclass KeyRingPythonProvider(KeyRingBaseProvider):\n """Keyring interface which uses locally imported `keyring`"""\n\n has_keyring = True\n\n def __init__(self) -> None:\n import keyring\n\n self.keyring = keyring\n\n def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:\n # Support keyring's get_credential 
interface which supports getting\n # credentials without a username. This is only available for\n # keyring>=15.2.0.\n if hasattr(self.keyring, "get_credential"):\n logger.debug("Getting credentials from keyring for %s", url)\n cred = self.keyring.get_credential(url, username)\n if cred is not None:\n return cred.username, cred.password\n return None\n\n if username is not None:\n logger.debug("Getting password from keyring for %s", url)\n password = self.keyring.get_password(url, username)\n if password:\n return username, password\n return None\n\n def save_auth_info(self, url: str, username: str, password: str) -> None:\n self.keyring.set_password(url, username, password)\n\n\nclass KeyRingCliProvider(KeyRingBaseProvider):\n """Provider which uses `keyring` cli\n\n Instead of calling the keyring package installed alongside pip\n we call keyring on the command line which will enable pip to\n use which ever installation of keyring is available first in\n PATH.\n """\n\n has_keyring = True\n\n def __init__(self, cmd: str) -> None:\n self.keyring = cmd\n\n def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:\n # This is the default implementation of keyring.get_credential\n # https://github.com/jaraco/keyring/blob/97689324abcf01bd1793d49063e7ca01e03d7d07/keyring/backend.py#L134-L139\n if username is not None:\n password = self._get_password(url, username)\n if password is not None:\n return username, password\n return None\n\n def save_auth_info(self, url: str, username: str, password: str) -> None:\n return self._set_password(url, username, password)\n\n def _get_password(self, service_name: str, username: str) -> Optional[str]:\n """Mirror the implementation of keyring.get_password using cli"""\n if self.keyring is None:\n return None\n\n cmd = [self.keyring, "get", service_name, username]\n env = os.environ.copy()\n env["PYTHONIOENCODING"] = "utf-8"\n res = subprocess.run(\n cmd,\n stdin=subprocess.DEVNULL,\n stdout=subprocess.PIPE,\n 
env=env,\n )\n if res.returncode:\n return None\n return res.stdout.decode("utf-8").strip(os.linesep)\n\n def _set_password(self, service_name: str, username: str, password: str) -> None:\n """Mirror the implementation of keyring.set_password using cli"""\n if self.keyring is None:\n return None\n env = os.environ.copy()\n env["PYTHONIOENCODING"] = "utf-8"\n subprocess.run(\n [self.keyring, "set", service_name, username],\n input=f"{password}{os.linesep}".encode(),\n env=env,\n check=True,\n )\n return None\n\n\n@lru_cache(maxsize=None)\ndef get_keyring_provider(provider: str) -> KeyRingBaseProvider:\n logger.verbose("Keyring provider requested: %s", provider)\n\n # keyring has previously failed and been disabled\n if KEYRING_DISABLED:\n provider = "disabled"\n if provider in ["import", "auto"]:\n try:\n impl = KeyRingPythonProvider()\n logger.verbose("Keyring provider set: import")\n return impl\n except ImportError:\n pass\n except Exception as exc:\n # In the event of an unexpected exception\n # we should warn the user\n msg = "Installed copy of keyring fails with exception %s"\n if provider == "auto":\n msg = msg + ", trying to find a keyring executable as a fallback"\n logger.warning(msg, exc, exc_info=logger.isEnabledFor(logging.DEBUG))\n if provider in ["subprocess", "auto"]:\n cli = shutil.which("keyring")\n if cli and cli.startswith(sysconfig.get_path("scripts")):\n # all code within this function is stolen from shutil.which implementation\n @typing.no_type_check\n def PATH_as_shutil_which_determines_it() -> str:\n path = os.environ.get("PATH", None)\n if path is None:\n try:\n path = os.confstr("CS_PATH")\n except (AttributeError, ValueError):\n # os.confstr() or CS_PATH is not available\n path = os.defpath\n # bpo-35755: Don't use os.defpath if the PATH environment variable is\n # set to an empty string\n\n return path\n\n scripts = Path(sysconfig.get_path("scripts"))\n\n paths = []\n for path in PATH_as_shutil_which_determines_it().split(os.pathsep):\n 
p = Path(path)\n try:\n if not p.samefile(scripts):\n paths.append(path)\n except FileNotFoundError:\n pass\n\n path = os.pathsep.join(paths)\n\n cli = shutil.which("keyring", path=path)\n\n if cli:\n logger.verbose("Keyring provider set: subprocess with executable %s", cli)\n return KeyRingCliProvider(cli)\n\n logger.verbose("Keyring provider set: disabled")\n return KeyRingNullProvider()\n\n\nclass MultiDomainBasicAuth(AuthBase):\n def __init__(\n self,\n prompting: bool = True,\n index_urls: Optional[List[str]] = None,\n keyring_provider: str = "auto",\n ) -> None:\n self.prompting = prompting\n self.index_urls = index_urls\n self.keyring_provider = keyring_provider # type: ignore[assignment]\n self.passwords: Dict[str, AuthInfo] = {}\n # When the user is prompted to enter credentials and keyring is\n # available, we will offer to save them. If the user accepts,\n # this value is set to the credentials they entered. After the\n # request authenticates, the caller should call\n # ``save_credentials`` to save these.\n self._credentials_to_save: Optional[Credentials] = None\n\n @property\n def keyring_provider(self) -> KeyRingBaseProvider:\n return get_keyring_provider(self._keyring_provider)\n\n @keyring_provider.setter\n def keyring_provider(self, provider: str) -> None:\n # The free function get_keyring_provider has been decorated with\n # functools.cache. 
If an exception occurs in get_keyring_auth that\n # cache will be cleared and keyring disabled, take that into account\n # if you want to remove this indirection.\n self._keyring_provider = provider\n\n @property\n def use_keyring(self) -> bool:\n # We won't use keyring when --no-input is passed unless\n # a specific provider is requested because it might require\n # user interaction\n return self.prompting or self._keyring_provider not in ["auto", "disabled"]\n\n def _get_keyring_auth(\n self,\n url: Optional[str],\n username: Optional[str],\n ) -> Optional[AuthInfo]:\n """Return the tuple auth for a given url from keyring."""\n # Do nothing if no url was provided\n if not url:\n return None\n\n try:\n return self.keyring_provider.get_auth_info(url, username)\n except Exception as exc:\n # Log the full exception (with stacktrace) at debug, so it'll only\n # show up when running in verbose mode.\n logger.debug("Keyring is skipped due to an exception", exc_info=True)\n # Always log a shortened version of the exception.\n logger.warning(\n "Keyring is skipped due to an exception: %s",\n str(exc),\n )\n global KEYRING_DISABLED\n KEYRING_DISABLED = True\n get_keyring_provider.cache_clear()\n return None\n\n def _get_index_url(self, url: str) -> Optional[str]:\n """Return the original index URL matching the requested URL.\n\n Cached or dynamically generated credentials may work against\n the original index URL rather than just the netloc.\n\n The provided url should have had its username and password\n removed already. 
If the original index url had credentials then\n they will be included in the return value.\n\n Returns None if no matching index was found, or if --no-index\n was specified by the user.\n """\n if not url or not self.index_urls:\n return None\n\n url = remove_auth_from_url(url).rstrip("/") + "/"\n parsed_url = urllib.parse.urlsplit(url)\n\n candidates = []\n\n for index in self.index_urls:\n index = index.rstrip("/") + "/"\n parsed_index = urllib.parse.urlsplit(remove_auth_from_url(index))\n if parsed_url == parsed_index:\n return index\n\n if parsed_url.netloc != parsed_index.netloc:\n continue\n\n candidate = urllib.parse.urlsplit(index)\n candidates.append(candidate)\n\n if not candidates:\n return None\n\n candidates.sort(\n reverse=True,\n key=lambda candidate: commonprefix(\n [\n parsed_url.path,\n candidate.path,\n ]\n ).rfind("/"),\n )\n\n return urllib.parse.urlunsplit(candidates[0])\n\n def _get_new_credentials(\n self,\n original_url: str,\n *,\n allow_netrc: bool = True,\n allow_keyring: bool = False,\n ) -> AuthInfo:\n """Find and return credentials for the specified URL."""\n # Split the credentials and netloc from the url.\n url, netloc, url_user_password = split_auth_netloc_from_url(\n original_url,\n )\n\n # Start with the credentials embedded in the url\n username, password = url_user_password\n if username is not None and password is not None:\n logger.debug("Found credentials in url for %s", netloc)\n return url_user_password\n\n # Find a matching index url for this request\n index_url = self._get_index_url(url)\n if index_url:\n # Split the credentials from the url.\n index_info = split_auth_netloc_from_url(index_url)\n if index_info:\n index_url, _, index_url_user_password = index_info\n logger.debug("Found index url %s", index_url)\n\n # If an index URL was found, try its embedded credentials\n if index_url and index_url_user_password[0] is not None:\n username, password = index_url_user_password\n if username is not None and password is not 
None:\n logger.debug("Found credentials in index url for %s", netloc)\n return index_url_user_password\n\n # Get creds from netrc if we still don't have them\n if allow_netrc:\n netrc_auth = get_netrc_auth(original_url)\n if netrc_auth:\n logger.debug("Found credentials in netrc for %s", netloc)\n return netrc_auth\n\n # If we don't have a password and keyring is available, use it.\n if allow_keyring:\n # The index url is more specific than the netloc, so try it first\n # fmt: off\n kr_auth = (\n self._get_keyring_auth(index_url, username) or\n self._get_keyring_auth(netloc, username)\n )\n # fmt: on\n if kr_auth:\n logger.debug("Found credentials in keyring for %s", netloc)\n return kr_auth\n\n return username, password\n\n def _get_url_and_credentials(\n self, original_url: str\n ) -> Tuple[str, Optional[str], Optional[str]]:\n """Return the credentials to use for the provided URL.\n\n If allowed, netrc and keyring may be used to obtain the\n correct credentials.\n\n Returns (url_without_credentials, username, password). 
Note\n that even if the original URL contains credentials, this\n function may return a different username and password.\n """\n url, netloc, _ = split_auth_netloc_from_url(original_url)\n\n # Try to get credentials from original url\n username, password = self._get_new_credentials(original_url)\n\n # If credentials not found, use any stored credentials for this netloc.\n # Do this if either the username or the password is missing.\n # This accounts for the situation in which the user has specified\n # the username in the index url, but the password comes from keyring.\n if (username is None or password is None) and netloc in self.passwords:\n un, pw = self.passwords[netloc]\n # It is possible that the cached credentials are for a different username,\n # in which case the cache should be ignored.\n if username is None or username == un:\n username, password = un, pw\n\n if username is not None or password is not None:\n # Convert the username and password if they're None, so that\n # this netloc will show up as "cached" in the conditional above.\n # Further, HTTPBasicAuth doesn't accept None, so it makes sense to\n # cache the value that is going to be used.\n username = username or ""\n password = password or ""\n\n # Store any acquired credentials.\n self.passwords[netloc] = (username, password)\n\n assert (\n # Credentials were found\n (username is not None and password is not None)\n # Credentials were not found\n or (username is None and password is None)\n ), f"Could not load credentials from url: {original_url}"\n\n return url, username, password\n\n def __call__(self, req: Request) -> Request:\n # Get credentials for this request\n url, username, password = self._get_url_and_credentials(req.url)\n\n # Set the url of the request to the url without any credentials\n req.url = url\n\n if username is not None and password is not None:\n # Send the basic auth with this request\n req = HTTPBasicAuth(username, password)(req)\n\n # Attach a hook to handle 401 
responses\n req.register_hook("response", self.handle_401)\n\n return req\n\n # Factored out to allow for easy patching in tests\n def _prompt_for_password(\n self, netloc: str\n ) -> Tuple[Optional[str], Optional[str], bool]:\n username = ask_input(f"User for {netloc}: ") if self.prompting else None\n if not username:\n return None, None, False\n if self.use_keyring:\n auth = self._get_keyring_auth(netloc, username)\n if auth and auth[0] is not None and auth[1] is not None:\n return auth[0], auth[1], False\n password = ask_password("Password: ")\n return username, password, True\n\n # Factored out to allow for easy patching in tests\n def _should_save_password_to_keyring(self) -> bool:\n if (\n not self.prompting\n or not self.use_keyring\n or not self.keyring_provider.has_keyring\n ):\n return False\n return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"\n\n def handle_401(self, resp: Response, **kwargs: Any) -> Response:\n # We only care about 401 responses, anything else we want to just\n # pass through the actual response\n if resp.status_code != 401:\n return resp\n\n username, password = None, None\n\n # Query the keyring for credentials:\n if self.use_keyring:\n username, password = self._get_new_credentials(\n resp.url,\n allow_netrc=False,\n allow_keyring=True,\n )\n\n # We are not able to prompt the user so simply return the response\n if not self.prompting and not username and not password:\n return resp\n\n parsed = urllib.parse.urlparse(resp.url)\n\n # Prompt the user for a new username and password\n save = False\n if not username and not password:\n username, password, save = self._prompt_for_password(parsed.netloc)\n\n # Store the new username and password to use for future requests\n self._credentials_to_save = None\n if username is not None and password is not None:\n self.passwords[parsed.netloc] = (username, password)\n\n # Prompt to save the password to keyring\n if save and self._should_save_password_to_keyring():\n 
self._credentials_to_save = Credentials(\n url=parsed.netloc,\n username=username,\n password=password,\n )\n\n # Consume content and release the original connection to allow our new\n # request to reuse the same one.\n # The result of the assignment isn't used, it's just needed to consume\n # the content.\n _ = resp.content\n resp.raw.release_conn()\n\n # Add our new username and password to the request\n req = HTTPBasicAuth(username or "", password or "")(resp.request)\n req.register_hook("response", self.warn_on_401)\n\n # On successful request, save the credentials that were used to\n # keyring. (Note that if the user responded "no" above, this member\n # is not set and nothing will be saved.)\n if self._credentials_to_save:\n req.register_hook("response", self.save_credentials)\n\n # Send our new request\n new_resp = resp.connection.send(req, **kwargs)\n new_resp.history.append(resp)\n\n return new_resp\n\n def warn_on_401(self, resp: Response, **kwargs: Any) -> None:\n """Response callback to warn about incorrect credentials."""\n if resp.status_code == 401:\n logger.warning(\n "401 Error, Credentials not correct for %s",\n resp.request.url,\n )\n\n def save_credentials(self, resp: Response, **kwargs: Any) -> None:\n """Response callback to save credentials on success."""\n assert (\n self.keyring_provider.has_keyring\n ), "should never reach here without keyring"\n\n creds = self._credentials_to_save\n self._credentials_to_save = None\n if creds and resp.status_code < 400:\n try:\n logger.info("Saving credentials to keyring")\n self.keyring_provider.save_auth_info(\n creds.url, creds.username, creds.password\n )\n except Exception:\n logger.exception("Failed to save credentials")\n
.venv\Lib\site-packages\pip\_internal\network\auth.py
auth.py
Python
20,809
0.95
0.226148
0.162338
react-lib
707
2025-05-01T13:40:34.455024
MIT
false
00eda2949ac78d384259b18cf19e0f6c
"""HTTP cache implementation."""\n\nimport os\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom typing import BinaryIO, Generator, Optional, Union\n\nfrom pip._vendor.cachecontrol.cache import SeparateBodyBaseCache\nfrom pip._vendor.cachecontrol.caches import SeparateBodyFileCache\nfrom pip._vendor.requests.models import Response\n\nfrom pip._internal.utils.filesystem import adjacent_tmp_file, replace\nfrom pip._internal.utils.misc import ensure_dir\n\n\ndef is_from_cache(response: Response) -> bool:\n return getattr(response, "from_cache", False)\n\n\n@contextmanager\ndef suppressed_cache_errors() -> Generator[None, None, None]:\n """If we can't access the cache then we can just skip caching and process\n requests as if caching wasn't enabled.\n """\n try:\n yield\n except OSError:\n pass\n\n\nclass SafeFileCache(SeparateBodyBaseCache):\n """\n A file based cache which is safe to use even when the target directory may\n not be accessible or writable.\n\n There is a race condition when two processes try to write and/or read the\n same entry at the same time, since each entry consists of two separate\n files (https://github.com/psf/cachecontrol/issues/324). We therefore have\n additional logic that makes sure that both files to be present before\n returning an entry; this fixes the read side of the race condition.\n\n For the write side, we assume that the server will only ever return the\n same data for the same URL, which ought to be the case for files pip is\n downloading. PyPI does not have a mechanism to swap out a wheel for\n another wheel, for example. 
If this assumption is not true, the\n CacheControl issue will need to be fixed.\n """\n\n def __init__(self, directory: str) -> None:\n assert directory is not None, "Cache directory must not be None."\n super().__init__()\n self.directory = directory\n\n def _get_cache_path(self, name: str) -> str:\n # From cachecontrol.caches.file_cache.FileCache._fn, brought into our\n # class for backwards-compatibility and to avoid using a non-public\n # method.\n hashed = SeparateBodyFileCache.encode(name)\n parts = list(hashed[:5]) + [hashed]\n return os.path.join(self.directory, *parts)\n\n def get(self, key: str) -> Optional[bytes]:\n # The cache entry is only valid if both metadata and body exist.\n metadata_path = self._get_cache_path(key)\n body_path = metadata_path + ".body"\n if not (os.path.exists(metadata_path) and os.path.exists(body_path)):\n return None\n with suppressed_cache_errors():\n with open(metadata_path, "rb") as f:\n return f.read()\n\n def _write(self, path: str, data: bytes) -> None:\n with suppressed_cache_errors():\n ensure_dir(os.path.dirname(path))\n\n with adjacent_tmp_file(path) as f:\n f.write(data)\n # Inherit the read/write permissions of the cache directory\n # to enable multi-user cache use-cases.\n mode = (\n os.stat(self.directory).st_mode\n & 0o666 # select read/write permissions of cache directory\n | 0o600 # set owner read/write permissions\n )\n # Change permissions only if there is no risk of following a symlink.\n if os.chmod in os.supports_fd:\n os.chmod(f.fileno(), mode)\n elif os.chmod in os.supports_follow_symlinks:\n os.chmod(f.name, mode, follow_symlinks=False)\n\n replace(f.name, path)\n\n def set(\n self, key: str, value: bytes, expires: Union[int, datetime, None] = None\n ) -> None:\n path = self._get_cache_path(key)\n self._write(path, value)\n\n def delete(self, key: str) -> None:\n path = self._get_cache_path(key)\n with suppressed_cache_errors():\n os.remove(path)\n with suppressed_cache_errors():\n os.remove(path + 
".body")\n\n def get_body(self, key: str) -> Optional[BinaryIO]:\n # The cache entry is only valid if both metadata and body exist.\n metadata_path = self._get_cache_path(key)\n body_path = metadata_path + ".body"\n if not (os.path.exists(metadata_path) and os.path.exists(body_path)):\n return None\n with suppressed_cache_errors():\n return open(body_path, "rb")\n\n def set_body(self, key: str, body: bytes) -> None:\n path = self._get_cache_path(key) + ".body"\n self._write(path, body)\n
.venv\Lib\site-packages\pip\_internal\network\cache.py
cache.py
Python
4,613
0.95
0.222222
0.083333
react-lib
938
2024-08-12T16:28:29.899443
BSD-3-Clause
false
3125994547fe52384c1aedb14527c1e0
"""Download files with progress indicators."""\n\nimport email.message\nimport logging\nimport mimetypes\nimport os\nfrom http import HTTPStatus\nfrom typing import BinaryIO, Iterable, Optional, Tuple\n\nfrom pip._vendor.requests.models import Response\nfrom pip._vendor.urllib3.exceptions import ReadTimeoutError\n\nfrom pip._internal.cli.progress_bars import get_download_progress_renderer\nfrom pip._internal.exceptions import IncompleteDownloadError, NetworkConnectionError\nfrom pip._internal.models.index import PyPI\nfrom pip._internal.models.link import Link\nfrom pip._internal.network.cache import is_from_cache\nfrom pip._internal.network.session import PipSession\nfrom pip._internal.network.utils import HEADERS, raise_for_status, response_chunks\nfrom pip._internal.utils.misc import format_size, redact_auth_from_url, splitext\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_http_response_size(resp: Response) -> Optional[int]:\n try:\n return int(resp.headers["content-length"])\n except (ValueError, KeyError, TypeError):\n return None\n\n\ndef _get_http_response_etag_or_last_modified(resp: Response) -> Optional[str]:\n """\n Return either the ETag or Last-Modified header (or None if neither exists).\n The return value can be used in an If-Range header.\n """\n return resp.headers.get("etag", resp.headers.get("last-modified"))\n\n\ndef _prepare_download(\n resp: Response,\n link: Link,\n progress_bar: str,\n total_length: Optional[int],\n range_start: Optional[int] = 0,\n) -> Iterable[bytes]:\n if link.netloc == PyPI.file_storage_domain:\n url = link.show_url\n else:\n url = link.url_without_fragment\n\n logged_url = redact_auth_from_url(url)\n\n if total_length:\n if range_start:\n logged_url = (\n f"{logged_url} ({format_size(range_start)}/{format_size(total_length)})"\n )\n else:\n logged_url = f"{logged_url} ({format_size(total_length)})"\n\n if is_from_cache(resp):\n logger.info("Using cached %s", logged_url)\n elif range_start:\n logger.info("Resuming 
download %s", logged_url)\n else:\n logger.info("Downloading %s", logged_url)\n\n if logger.getEffectiveLevel() > logging.INFO:\n show_progress = False\n elif is_from_cache(resp):\n show_progress = False\n elif not total_length:\n show_progress = True\n elif total_length > (512 * 1024):\n show_progress = True\n else:\n show_progress = False\n\n chunks = response_chunks(resp)\n\n if not show_progress:\n return chunks\n\n renderer = get_download_progress_renderer(\n bar_type=progress_bar, size=total_length, initial_progress=range_start\n )\n return renderer(chunks)\n\n\ndef sanitize_content_filename(filename: str) -> str:\n """\n Sanitize the "filename" value from a Content-Disposition header.\n """\n return os.path.basename(filename)\n\n\ndef parse_content_disposition(content_disposition: str, default_filename: str) -> str:\n """\n Parse the "filename" value from a Content-Disposition header, and\n return the default filename if the result is empty.\n """\n m = email.message.Message()\n m["content-type"] = content_disposition\n filename = m.get_param("filename")\n if filename:\n # We need to sanitize the filename to prevent directory traversal\n # in case the filename contains ".." 
path parts.\n filename = sanitize_content_filename(str(filename))\n return filename or default_filename\n\n\ndef _get_http_response_filename(resp: Response, link: Link) -> str:\n """Get an ideal filename from the given HTTP response, falling back to\n the link filename if not provided.\n """\n filename = link.filename # fallback\n # Have a look at the Content-Disposition header for a better guess\n content_disposition = resp.headers.get("content-disposition")\n if content_disposition:\n filename = parse_content_disposition(content_disposition, filename)\n ext: Optional[str] = splitext(filename)[1]\n if not ext:\n ext = mimetypes.guess_extension(resp.headers.get("content-type", ""))\n if ext:\n filename += ext\n if not ext and link.url != resp.url:\n ext = os.path.splitext(resp.url)[1]\n if ext:\n filename += ext\n return filename\n\n\ndef _http_get_download(\n session: PipSession,\n link: Link,\n range_start: Optional[int] = 0,\n if_range: Optional[str] = None,\n) -> Response:\n target_url = link.url.split("#", 1)[0]\n headers = HEADERS.copy()\n # request a partial download\n if range_start:\n headers["Range"] = f"bytes={range_start}-"\n # make sure the file hasn't changed\n if if_range:\n headers["If-Range"] = if_range\n try:\n resp = session.get(target_url, headers=headers, stream=True)\n raise_for_status(resp)\n except NetworkConnectionError as e:\n assert e.response is not None\n logger.critical("HTTP error %s while getting %s", e.response.status_code, link)\n raise\n return resp\n\n\nclass Downloader:\n def __init__(\n self,\n session: PipSession,\n progress_bar: str,\n resume_retries: int,\n ) -> None:\n assert (\n resume_retries >= 0\n ), "Number of max resume retries must be bigger or equal to zero"\n self._session = session\n self._progress_bar = progress_bar\n self._resume_retries = resume_retries\n\n def __call__(self, link: Link, location: str) -> Tuple[str, str]:\n """Download the file given by link into location."""\n resp = 
_http_get_download(self._session, link)\n # NOTE: The original download size needs to be passed down everywhere\n # so if the download is resumed (with a HTTP Range request) the progress\n # bar will report the right size.\n total_length = _get_http_response_size(resp)\n content_type = resp.headers.get("Content-Type", "")\n\n filename = _get_http_response_filename(resp, link)\n filepath = os.path.join(location, filename)\n\n with open(filepath, "wb") as content_file:\n bytes_received = self._process_response(\n resp, link, content_file, 0, total_length\n )\n # If possible, check for an incomplete download and attempt resuming.\n if total_length and bytes_received < total_length:\n self._attempt_resume(\n resp, link, content_file, total_length, bytes_received\n )\n\n return filepath, content_type\n\n def _process_response(\n self,\n resp: Response,\n link: Link,\n content_file: BinaryIO,\n bytes_received: int,\n total_length: Optional[int],\n ) -> int:\n """Process the response and write the chunks to the file."""\n chunks = _prepare_download(\n resp, link, self._progress_bar, total_length, range_start=bytes_received\n )\n return self._write_chunks_to_file(\n chunks, content_file, allow_partial=bool(total_length)\n )\n\n def _write_chunks_to_file(\n self, chunks: Iterable[bytes], content_file: BinaryIO, *, allow_partial: bool\n ) -> int:\n """Write the chunks to the file and return the number of bytes received."""\n bytes_received = 0\n try:\n for chunk in chunks:\n bytes_received += len(chunk)\n content_file.write(chunk)\n except ReadTimeoutError as e:\n # If partial downloads are OK (the download will be retried), don't bail.\n if not allow_partial:\n raise e\n\n # Ensuring bytes_received is returned to attempt resume\n logger.warning("Connection timed out while downloading.")\n\n return bytes_received\n\n def _attempt_resume(\n self,\n resp: Response,\n link: Link,\n content_file: BinaryIO,\n total_length: Optional[int],\n bytes_received: int,\n ) -> None:\n 
"""Attempt to resume the download if connection was dropped."""\n etag_or_last_modified = _get_http_response_etag_or_last_modified(resp)\n\n attempts_left = self._resume_retries\n while total_length and attempts_left and bytes_received < total_length:\n attempts_left -= 1\n\n logger.warning(\n "Attempting to resume incomplete download (%s/%s, attempt %d)",\n format_size(bytes_received),\n format_size(total_length),\n (self._resume_retries - attempts_left),\n )\n\n try:\n # Try to resume the download using a HTTP range request.\n resume_resp = _http_get_download(\n self._session,\n link,\n range_start=bytes_received,\n if_range=etag_or_last_modified,\n )\n\n # Fallback: if the server responded with 200 (i.e., the file has\n # since been modified or range requests are unsupported) or any\n # other unexpected status, restart the download from the beginning.\n must_restart = resume_resp.status_code != HTTPStatus.PARTIAL_CONTENT\n if must_restart:\n bytes_received, total_length, etag_or_last_modified = (\n self._reset_download_state(resume_resp, content_file)\n )\n\n bytes_received += self._process_response(\n resume_resp, link, content_file, bytes_received, total_length\n )\n except (ConnectionError, ReadTimeoutError, OSError):\n continue\n\n # No more resume attempts. 
Raise an error if the download is still incomplete.\n if total_length and bytes_received < total_length:\n os.remove(content_file.name)\n raise IncompleteDownloadError(\n link, bytes_received, total_length, retries=self._resume_retries\n )\n\n def _reset_download_state(\n self,\n resp: Response,\n content_file: BinaryIO,\n ) -> Tuple[int, Optional[int], Optional[str]]:\n """Reset the download state to restart downloading from the beginning."""\n content_file.seek(0)\n content_file.truncate()\n bytes_received = 0\n total_length = _get_http_response_size(resp)\n etag_or_last_modified = _get_http_response_etag_or_last_modified(resp)\n\n return bytes_received, total_length, etag_or_last_modified\n\n\nclass BatchDownloader:\n def __init__(\n self,\n session: PipSession,\n progress_bar: str,\n resume_retries: int,\n ) -> None:\n self._downloader = Downloader(session, progress_bar, resume_retries)\n\n def __call__(\n self, links: Iterable[Link], location: str\n ) -> Iterable[Tuple[Link, Tuple[str, str]]]:\n """Download the files given by links into location."""\n for link in links:\n filepath, content_type = self._downloader(link, location)\n yield link, (filepath, content_type)\n
.venv\Lib\site-packages\pip\_internal\network\download.py
download.py
Python
11,078
0.95
0.16879
0.059925
python-kit
897
2023-09-09T22:26:02.201775
MIT
false
cccc62ac3546d5cbc829b523f87735a7
"""Lazy ZIP over HTTP"""\n\n__all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"]\n\nfrom bisect import bisect_left, bisect_right\nfrom contextlib import contextmanager\nfrom tempfile import NamedTemporaryFile\nfrom typing import Any, Dict, Generator, List, Optional, Tuple\nfrom zipfile import BadZipFile, ZipFile\n\nfrom pip._vendor.packaging.utils import canonicalize_name\nfrom pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response\n\nfrom pip._internal.metadata import BaseDistribution, MemoryWheel, get_wheel_distribution\nfrom pip._internal.network.session import PipSession\nfrom pip._internal.network.utils import HEADERS, raise_for_status, response_chunks\n\n\nclass HTTPRangeRequestUnsupported(Exception):\n pass\n\n\ndef dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution:\n """Return a distribution object from the given wheel URL.\n\n This uses HTTP range requests to only fetch the portion of the wheel\n containing metadata, just enough for the object to be constructed.\n If such requests are not supported, HTTPRangeRequestUnsupported\n is raised.\n """\n with LazyZipOverHTTP(url, session) as zf:\n # For read-only ZIP files, ZipFile only needs methods read,\n # seek, seekable and tell, not the whole IO protocol.\n wheel = MemoryWheel(zf.name, zf) # type: ignore\n # After context manager exit, wheel.name\n # is an invalid file by intention.\n return get_wheel_distribution(wheel, canonicalize_name(name))\n\n\nclass LazyZipOverHTTP:\n """File-like object mapped to a ZIP file over HTTP.\n\n This uses HTTP range requests to lazily fetch the file's content,\n which is supposed to be fed to ZipFile. 
If such requests are not\n supported by the server, raise HTTPRangeRequestUnsupported\n during initialization.\n """\n\n def __init__(\n self, url: str, session: PipSession, chunk_size: int = CONTENT_CHUNK_SIZE\n ) -> None:\n head = session.head(url, headers=HEADERS)\n raise_for_status(head)\n assert head.status_code == 200\n self._session, self._url, self._chunk_size = session, url, chunk_size\n self._length = int(head.headers["Content-Length"])\n self._file = NamedTemporaryFile()\n self.truncate(self._length)\n self._left: List[int] = []\n self._right: List[int] = []\n if "bytes" not in head.headers.get("Accept-Ranges", "none"):\n raise HTTPRangeRequestUnsupported("range request is not supported")\n self._check_zip()\n\n @property\n def mode(self) -> str:\n """Opening mode, which is always rb."""\n return "rb"\n\n @property\n def name(self) -> str:\n """Path to the underlying file."""\n return self._file.name\n\n def seekable(self) -> bool:\n """Return whether random access is supported, which is True."""\n return True\n\n def close(self) -> None:\n """Close the file."""\n self._file.close()\n\n @property\n def closed(self) -> bool:\n """Whether the file is closed."""\n return self._file.closed\n\n def read(self, size: int = -1) -> bytes:\n """Read up to size bytes from the object and return them.\n\n As a convenience, if size is unspecified or -1,\n all bytes until EOF are returned. 
Fewer than\n size bytes may be returned if EOF is reached.\n """\n download_size = max(size, self._chunk_size)\n start, length = self.tell(), self._length\n stop = length if size < 0 else min(start + download_size, length)\n start = max(0, stop - download_size)\n self._download(start, stop - 1)\n return self._file.read(size)\n\n def readable(self) -> bool:\n """Return whether the file is readable, which is True."""\n return True\n\n def seek(self, offset: int, whence: int = 0) -> int:\n """Change stream position and return the new absolute position.\n\n Seek to offset relative position indicated by whence:\n * 0: Start of stream (the default). pos should be >= 0;\n * 1: Current position - pos may be negative;\n * 2: End of stream - pos usually negative.\n """\n return self._file.seek(offset, whence)\n\n def tell(self) -> int:\n """Return the current position."""\n return self._file.tell()\n\n def truncate(self, size: Optional[int] = None) -> int:\n """Resize the stream to the given size in bytes.\n\n If size is unspecified resize to the current position.\n The current stream position isn't changed.\n\n Return the new file size.\n """\n return self._file.truncate(size)\n\n def writable(self) -> bool:\n """Return False."""\n return False\n\n def __enter__(self) -> "LazyZipOverHTTP":\n self._file.__enter__()\n return self\n\n def __exit__(self, *exc: Any) -> None:\n self._file.__exit__(*exc)\n\n @contextmanager\n def _stay(self) -> Generator[None, None, None]:\n """Return a context manager keeping the position.\n\n At the end of the block, seek back to original position.\n """\n pos = self.tell()\n try:\n yield\n finally:\n self.seek(pos)\n\n def _check_zip(self) -> None:\n """Check and download until the file is a valid ZIP."""\n end = self._length - 1\n for start in reversed(range(0, end, self._chunk_size)):\n self._download(start, end)\n with self._stay():\n try:\n # For read-only ZIP files, ZipFile only needs\n # methods read, seek, seekable and tell.\n 
ZipFile(self)\n except BadZipFile:\n pass\n else:\n break\n\n def _stream_response(\n self, start: int, end: int, base_headers: Dict[str, str] = HEADERS\n ) -> Response:\n """Return HTTP response to a range request from start to end."""\n headers = base_headers.copy()\n headers["Range"] = f"bytes={start}-{end}"\n # TODO: Get range requests to be correctly cached\n headers["Cache-Control"] = "no-cache"\n return self._session.get(self._url, headers=headers, stream=True)\n\n def _merge(\n self, start: int, end: int, left: int, right: int\n ) -> Generator[Tuple[int, int], None, None]:\n """Return a generator of intervals to be fetched.\n\n Args:\n start (int): Start of needed interval\n end (int): End of needed interval\n left (int): Index of first overlapping downloaded data\n right (int): Index after last overlapping downloaded data\n """\n lslice, rslice = self._left[left:right], self._right[left:right]\n i = start = min([start] + lslice[:1])\n end = max([end] + rslice[-1:])\n for j, k in zip(lslice, rslice):\n if j > i:\n yield i, j - 1\n i = k + 1\n if i <= end:\n yield i, end\n self._left[left:right], self._right[left:right] = [start], [end]\n\n def _download(self, start: int, end: int) -> None:\n """Download bytes from start to end inclusively."""\n with self._stay():\n left = bisect_left(self._right, start)\n right = bisect_right(self._left, end)\n for start, end in self._merge(start, end, left, right):\n response = self._stream_response(start, end)\n response.raise_for_status()\n self.seek(start)\n for chunk in response_chunks(response, self._chunk_size):\n self._file.write(chunk)\n
.venv\Lib\site-packages\pip\_internal\network\lazy_wheel.py
lazy_wheel.py
Python
7,622
0.95
0.166667
0.057803
python-kit
29
2024-09-03T08:37:25.236100
MIT
false
e480298b76c85a0d696ae5122655ed7e
"""PipSession and supporting code, containing all pip-specific\nnetwork request configuration and behavior.\n"""\n\nimport email.utils\nimport functools\nimport io\nimport ipaddress\nimport json\nimport logging\nimport mimetypes\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nimport urllib.parse\nimport warnings\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Generator,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nfrom pip._vendor import requests, urllib3\nfrom pip._vendor.cachecontrol import CacheControlAdapter as _BaseCacheControlAdapter\nfrom pip._vendor.requests.adapters import DEFAULT_POOLBLOCK, BaseAdapter\nfrom pip._vendor.requests.adapters import HTTPAdapter as _BaseHTTPAdapter\nfrom pip._vendor.requests.models import PreparedRequest, Response\nfrom pip._vendor.requests.structures import CaseInsensitiveDict\nfrom pip._vendor.urllib3.connectionpool import ConnectionPool\nfrom pip._vendor.urllib3.exceptions import InsecureRequestWarning\n\nfrom pip import __version__\nfrom pip._internal.metadata import get_default_environment\nfrom pip._internal.models.link import Link\nfrom pip._internal.network.auth import MultiDomainBasicAuth\nfrom pip._internal.network.cache import SafeFileCache\n\n# Import ssl from compat so the initial import occurs in only one place.\nfrom pip._internal.utils.compat import has_tls\nfrom pip._internal.utils.glibc import libc_ver\nfrom pip._internal.utils.misc import build_url_from_netloc, parse_netloc\nfrom pip._internal.utils.urls import url_to_path\n\nif TYPE_CHECKING:\n from ssl import SSLContext\n\n from pip._vendor.urllib3.poolmanager import PoolManager\n\n\nlogger = logging.getLogger(__name__)\n\nSecureOrigin = Tuple[str, str, Optional[Union[int, str]]]\n\n\n# Ignore warning raised when using --trusted-host.\nwarnings.filterwarnings("ignore", category=InsecureRequestWarning)\n\n\nSECURE_ORIGINS: List[SecureOrigin] = [\n # protocol, hostname, port\n # Taken from Chrome's list 
of secure origins (See: http://bit.ly/1qrySKC)\n ("https", "*", "*"),\n ("*", "localhost", "*"),\n ("*", "127.0.0.0/8", "*"),\n ("*", "::1/128", "*"),\n ("file", "*", None),\n # ssh is always secure.\n ("ssh", "*", "*"),\n]\n\n\n# These are environment variables present when running under various\n# CI systems. For each variable, some CI systems that use the variable\n# are indicated. The collection was chosen so that for each of a number\n# of popular systems, at least one of the environment variables is used.\n# This list is used to provide some indication of and lower bound for\n# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.\n# For more background, see: https://github.com/pypa/pip/issues/5499\nCI_ENVIRONMENT_VARIABLES = (\n # Azure Pipelines\n "BUILD_BUILDID",\n # Jenkins\n "BUILD_ID",\n # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI\n "CI",\n # Explicit environment variable.\n "PIP_IS_CI",\n)\n\n\ndef looks_like_ci() -> bool:\n """\n Return whether it looks like pip is running under CI.\n """\n # We don't use the method of checking for a tty (e.g. using isatty())\n # because some CI systems mimic a tty (e.g. Travis CI). 
Thus that\n # method doesn't provide definitive information in either direction.\n return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)\n\n\n@functools.lru_cache(maxsize=1)\ndef user_agent() -> str:\n """\n Return a string representing the user agent.\n """\n data: Dict[str, Any] = {\n "installer": {"name": "pip", "version": __version__},\n "python": platform.python_version(),\n "implementation": {\n "name": platform.python_implementation(),\n },\n }\n\n if data["implementation"]["name"] == "CPython":\n data["implementation"]["version"] = platform.python_version()\n elif data["implementation"]["name"] == "PyPy":\n pypy_version_info = sys.pypy_version_info # type: ignore\n if pypy_version_info.releaselevel == "final":\n pypy_version_info = pypy_version_info[:3]\n data["implementation"]["version"] = ".".join(\n [str(x) for x in pypy_version_info]\n )\n elif data["implementation"]["name"] == "Jython":\n # Complete Guess\n data["implementation"]["version"] = platform.python_version()\n elif data["implementation"]["name"] == "IronPython":\n # Complete Guess\n data["implementation"]["version"] = platform.python_version()\n\n if sys.platform.startswith("linux"):\n from pip._vendor import distro\n\n linux_distribution = distro.name(), distro.version(), distro.codename()\n distro_infos: Dict[str, Any] = dict(\n filter(\n lambda x: x[1],\n zip(["name", "version", "id"], linux_distribution),\n )\n )\n libc = dict(\n filter(\n lambda x: x[1],\n zip(["lib", "version"], libc_ver()),\n )\n )\n if libc:\n distro_infos["libc"] = libc\n if distro_infos:\n data["distro"] = distro_infos\n\n if sys.platform.startswith("darwin") and platform.mac_ver()[0]:\n data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}\n\n if platform.system():\n data.setdefault("system", {})["name"] = platform.system()\n\n if platform.release():\n data.setdefault("system", {})["release"] = platform.release()\n\n if platform.machine():\n data["cpu"] = platform.machine()\n\n if 
has_tls():\n import _ssl as ssl\n\n data["openssl_version"] = ssl.OPENSSL_VERSION\n\n setuptools_dist = get_default_environment().get_distribution("setuptools")\n if setuptools_dist is not None:\n data["setuptools_version"] = str(setuptools_dist.version)\n\n if shutil.which("rustc") is not None:\n # If for any reason `rustc --version` fails, silently ignore it\n try:\n rustc_output = subprocess.check_output(\n ["rustc", "--version"], stderr=subprocess.STDOUT, timeout=0.5\n )\n except Exception:\n pass\n else:\n if rustc_output.startswith(b"rustc "):\n # The format of `rustc --version` is:\n # `b'rustc 1.52.1 (9bc8c42bb 2021-05-09)\n'`\n # We extract just the middle (1.52.1) part\n data["rustc_version"] = rustc_output.split(b" ")[1].decode()\n\n # Use None rather than False so as not to give the impression that\n # pip knows it is not being run under CI. Rather, it is a null or\n # inconclusive result. Also, we include some value rather than no\n # value to make it easier to know that the check has been run.\n data["ci"] = True if looks_like_ci() else None\n\n user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")\n if user_data is not None:\n data["user_data"] = user_data\n\n return "{data[installer][name]}/{data[installer][version]} {json}".format(\n data=data,\n json=json.dumps(data, separators=(",", ":"), sort_keys=True),\n )\n\n\nclass LocalFSAdapter(BaseAdapter):\n def send(\n self,\n request: PreparedRequest,\n stream: bool = False,\n timeout: Optional[Union[float, Tuple[float, float]]] = None,\n verify: Union[bool, str] = True,\n cert: Optional[Union[str, Tuple[str, str]]] = None,\n proxies: Optional[Mapping[str, str]] = None,\n ) -> Response:\n pathname = url_to_path(request.url)\n\n resp = Response()\n resp.status_code = 200\n resp.url = request.url\n\n try:\n stats = os.stat(pathname)\n except OSError as exc:\n # format the exception raised as a io.BytesIO object,\n # to return a better error message:\n resp.status_code = 404\n resp.reason = 
type(exc).__name__\n resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode())\n else:\n modified = email.utils.formatdate(stats.st_mtime, usegmt=True)\n content_type = mimetypes.guess_type(pathname)[0] or "text/plain"\n resp.headers = CaseInsensitiveDict(\n {\n "Content-Type": content_type,\n "Content-Length": stats.st_size,\n "Last-Modified": modified,\n }\n )\n\n resp.raw = open(pathname, "rb")\n resp.close = resp.raw.close\n\n return resp\n\n def close(self) -> None:\n pass\n\n\nclass _SSLContextAdapterMixin:\n """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters.\n\n The additional argument is forwarded directly to the pool manager. This allows us\n to dynamically decide what SSL store to use at runtime, which is used to implement\n the optional ``truststore`` backend.\n """\n\n def __init__(\n self,\n *,\n ssl_context: Optional["SSLContext"] = None,\n **kwargs: Any,\n ) -> None:\n self._ssl_context = ssl_context\n super().__init__(**kwargs)\n\n def init_poolmanager(\n self,\n connections: int,\n maxsize: int,\n block: bool = DEFAULT_POOLBLOCK,\n **pool_kwargs: Any,\n ) -> "PoolManager":\n if self._ssl_context is not None:\n pool_kwargs.setdefault("ssl_context", self._ssl_context)\n return super().init_poolmanager( # type: ignore[misc]\n connections=connections,\n maxsize=maxsize,\n block=block,\n **pool_kwargs,\n )\n\n\nclass HTTPAdapter(_SSLContextAdapterMixin, _BaseHTTPAdapter):\n pass\n\n\nclass CacheControlAdapter(_SSLContextAdapterMixin, _BaseCacheControlAdapter):\n pass\n\n\nclass InsecureHTTPAdapter(HTTPAdapter):\n def cert_verify(\n self,\n conn: ConnectionPool,\n url: str,\n verify: Union[bool, str],\n cert: Optional[Union[str, Tuple[str, str]]],\n ) -> None:\n super().cert_verify(conn=conn, url=url, verify=False, cert=cert)\n\n\nclass InsecureCacheControlAdapter(CacheControlAdapter):\n def cert_verify(\n self,\n conn: ConnectionPool,\n url: str,\n verify: Union[bool, str],\n cert: Optional[Union[str, Tuple[str, str]]],\n ) -> 
None:\n super().cert_verify(conn=conn, url=url, verify=False, cert=cert)\n\n\nclass PipSession(requests.Session):\n timeout: Optional[int] = None\n\n def __init__(\n self,\n *args: Any,\n retries: int = 0,\n cache: Optional[str] = None,\n trusted_hosts: Sequence[str] = (),\n index_urls: Optional[List[str]] = None,\n ssl_context: Optional["SSLContext"] = None,\n **kwargs: Any,\n ) -> None:\n """\n :param trusted_hosts: Domains not to emit warnings for when not using\n HTTPS.\n """\n super().__init__(*args, **kwargs)\n\n # Namespace the attribute with "pip_" just in case to prevent\n # possible conflicts with the base class.\n self.pip_trusted_origins: List[Tuple[str, Optional[int]]] = []\n self.pip_proxy = None\n\n # Attach our User Agent to the request\n self.headers["User-Agent"] = user_agent()\n\n # Attach our Authentication handler to the session\n self.auth = MultiDomainBasicAuth(index_urls=index_urls)\n\n # Create our urllib3.Retry instance which will allow us to customize\n # how we handle retries.\n retries = urllib3.Retry(\n # Set the total number of retries that a particular request can\n # have.\n total=retries,\n # A 503 error from PyPI typically means that the Fastly -> Origin\n # connection got interrupted in some way. A 503 error in general\n # is typically considered a transient error so we'll go ahead and\n # retry it.\n # A 500 may indicate transient error in Amazon S3\n # A 502 may be a transient error from a CDN like CloudFlare or CloudFront\n # A 520 or 527 - may indicate transient error in CloudFlare\n status_forcelist=[500, 502, 503, 520, 527],\n # Add a small amount of back off between failed requests in\n # order to prevent hammering the service.\n backoff_factor=0.25,\n ) # type: ignore\n\n # Our Insecure HTTPAdapter disables HTTPS validation. 
It does not\n # support caching so we'll use it for all http:// URLs.\n # If caching is disabled, we will also use it for\n # https:// hosts that we've marked as ignoring\n # TLS errors for (trusted-hosts).\n insecure_adapter = InsecureHTTPAdapter(max_retries=retries)\n\n # We want to _only_ cache responses on securely fetched origins or when\n # the host is specified as trusted. We do this because\n # we can't validate the response of an insecurely/untrusted fetched\n # origin, and we don't want someone to be able to poison the cache and\n # require manual eviction from the cache to fix it.\n if cache:\n secure_adapter = CacheControlAdapter(\n cache=SafeFileCache(cache),\n max_retries=retries,\n ssl_context=ssl_context,\n )\n self._trusted_host_adapter = InsecureCacheControlAdapter(\n cache=SafeFileCache(cache),\n max_retries=retries,\n )\n else:\n secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context)\n self._trusted_host_adapter = insecure_adapter\n\n self.mount("https://", secure_adapter)\n self.mount("http://", insecure_adapter)\n\n # Enable file:// urls\n self.mount("file://", LocalFSAdapter())\n\n for host in trusted_hosts:\n self.add_trusted_host(host, suppress_logging=True)\n\n def update_index_urls(self, new_index_urls: List[str]) -> None:\n """\n :param new_index_urls: New index urls to update the authentication\n handler with.\n """\n self.auth.index_urls = new_index_urls\n\n def add_trusted_host(\n self, host: str, source: Optional[str] = None, suppress_logging: bool = False\n ) -> None:\n """\n :param host: It is okay to provide a host that has previously been\n added.\n :param source: An optional source string, for logging where the host\n string came from.\n """\n if not suppress_logging:\n msg = f"adding trusted host: {host!r}"\n if source is not None:\n msg += f" (from {source})"\n logger.info(msg)\n\n parsed_host, parsed_port = parse_netloc(host)\n if parsed_host is None:\n raise ValueError(f"Trusted host URL must include a 
host part: {host!r}")\n if (parsed_host, parsed_port) not in self.pip_trusted_origins:\n self.pip_trusted_origins.append((parsed_host, parsed_port))\n\n self.mount(\n build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter\n )\n self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter)\n if not parsed_port:\n self.mount(\n build_url_from_netloc(host, scheme="http") + ":",\n self._trusted_host_adapter,\n )\n # Mount wildcard ports for the same host.\n self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter)\n\n def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]:\n yield from SECURE_ORIGINS\n for host, port in self.pip_trusted_origins:\n yield ("*", host, "*" if port is None else port)\n\n def is_secure_origin(self, location: Link) -> bool:\n # Determine if this url used a secure transport mechanism\n parsed = urllib.parse.urlparse(str(location))\n origin_protocol, origin_host, origin_port = (\n parsed.scheme,\n parsed.hostname,\n parsed.port,\n )\n\n # The protocol to use to see if the protocol matches.\n # Don't count the repository type as part of the protocol: in\n # cases such as "git+ssh", only use "ssh". 
(I.e., Only verify against\n # the last scheme.)\n origin_protocol = origin_protocol.rsplit("+", 1)[-1]\n\n # Determine if our origin is a secure origin by looking through our\n # hardcoded list of secure origins, as well as any additional ones\n # configured on this PackageFinder instance.\n for secure_origin in self.iter_secure_origins():\n secure_protocol, secure_host, secure_port = secure_origin\n if origin_protocol != secure_protocol and secure_protocol != "*":\n continue\n\n try:\n addr = ipaddress.ip_address(origin_host or "")\n network = ipaddress.ip_network(secure_host)\n except ValueError:\n # We don't have both a valid address or a valid network, so\n # we'll check this origin against hostnames.\n if (\n origin_host\n and origin_host.lower() != secure_host.lower()\n and secure_host != "*"\n ):\n continue\n else:\n # We have a valid address and network, so see if the address\n # is contained within the network.\n if addr not in network:\n continue\n\n # Check to see if the port matches.\n if (\n origin_port != secure_port\n and secure_port != "*"\n and secure_port is not None\n ):\n continue\n\n # If we've gotten here, then this origin matches the current\n # secure origin and we should return True\n return True\n\n # If we've gotten to this point, then the origin isn't secure and we\n # will not accept it as a valid location to search. We will however\n # log a warning that we are ignoring it.\n logger.warning(\n "The repository located at %s is not a trusted or secure host and "\n "is being ignored. 
If this repository is available via HTTPS we "\n "recommend you use HTTPS instead, otherwise you may silence "\n "this warning and allow it anyway with '--trusted-host %s'.",\n origin_host,\n origin_host,\n )\n\n return False\n\n def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response:\n # Allow setting a default timeout on a session\n kwargs.setdefault("timeout", self.timeout)\n # Allow setting a default proxies on a session\n kwargs.setdefault("proxies", self.proxies)\n\n # Dispatch the actual request\n return super().request(method, url, *args, **kwargs)\n
.venv\Lib\site-packages\pip\_internal\network\session.py
session.py
Python
18,771
0.95
0.141491
0.196388
vue-tools
112
2024-12-30T18:52:29.544718
BSD-3-Clause
false
c5d471029f62c2a2c4188034a7eb9caa
from typing import Dict, Generator\n\nfrom pip._vendor.requests.models import Response\n\nfrom pip._internal.exceptions import NetworkConnectionError\n\n# The following comments and HTTP headers were originally added by\n# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03.\n#\n# We use Accept-Encoding: identity here because requests defaults to\n# accepting compressed responses. This breaks in a variety of ways\n# depending on how the server is configured.\n# - Some servers will notice that the file isn't a compressible file\n# and will leave the file alone and with an empty Content-Encoding\n# - Some servers will notice that the file is already compressed and\n# will leave the file alone, adding a Content-Encoding: gzip header\n# - Some servers won't notice anything at all and will take a file\n# that's already been compressed and compress it again, and set\n# the Content-Encoding: gzip header\n# By setting this to request only the identity encoding we're hoping\n# to eliminate the third case. Hopefully there does not exist a server\n# which when given a file will notice it is already compressed and that\n# you're not asking for a compressed file and will then decompress it\n# before sending because if that's the case I don't think it'll ever be\n# possible to make this work.\nHEADERS: Dict[str, str] = {"Accept-Encoding": "identity"}\n\nDOWNLOAD_CHUNK_SIZE = 256 * 1024\n\n\ndef raise_for_status(resp: Response) -> None:\n http_error_msg = ""\n if isinstance(resp.reason, bytes):\n # We attempt to decode utf-8 first because some servers\n # choose to localize their reason strings. 
If the string\n # isn't utf-8, we fall back to iso-8859-1 for all other\n # encodings.\n try:\n reason = resp.reason.decode("utf-8")\n except UnicodeDecodeError:\n reason = resp.reason.decode("iso-8859-1")\n else:\n reason = resp.reason\n\n if 400 <= resp.status_code < 500:\n http_error_msg = (\n f"{resp.status_code} Client Error: {reason} for url: {resp.url}"\n )\n\n elif 500 <= resp.status_code < 600:\n http_error_msg = (\n f"{resp.status_code} Server Error: {reason} for url: {resp.url}"\n )\n\n if http_error_msg:\n raise NetworkConnectionError(http_error_msg, response=resp)\n\n\ndef response_chunks(\n response: Response, chunk_size: int = DOWNLOAD_CHUNK_SIZE\n) -> Generator[bytes, None, None]:\n """Given a requests Response, provide the data chunks."""\n try:\n # Special case for urllib3.\n for chunk in response.raw.stream(\n chunk_size,\n # We use decode_content=False here because we don't\n # want urllib3 to mess with the raw bytes we get\n # from the server. If we decompress inside of\n # urllib3 then we cannot verify the checksum\n # because the checksum will be of the compressed\n # file. 
This breakage will only occur if the\n # server adds a Content-Encoding header, which\n # depends on how the server was configured:\n # - Some servers will notice that the file isn't a\n # compressible file and will leave the file alone\n # and with an empty Content-Encoding\n # - Some servers will notice that the file is\n # already compressed and will leave the file\n # alone and will add a Content-Encoding: gzip\n # header\n # - Some servers won't notice anything at all and\n # will take a file that's already been compressed\n # and compress it again and set the\n # Content-Encoding: gzip header\n #\n # By setting this not to decode automatically we\n # hope to eliminate problems with the second case.\n decode_content=False,\n ):\n yield chunk\n except AttributeError:\n # Standard file-like object.\n while True:\n chunk = response.raw.read(chunk_size)\n if not chunk:\n break\n yield chunk\n
.venv\Lib\site-packages\pip\_internal\network\utils.py
utils.py
Python
4,088
0.95
0.173469
0.54023
vue-tools
334
2024-12-29T02:04:43.261134
Apache-2.0
false
41ff339c2fbee741fea1ee45d552debc
"""xmlrpclib.Transport implementation"""\n\nimport logging\nimport urllib.parse\nimport xmlrpc.client\nfrom typing import TYPE_CHECKING, Tuple\n\nfrom pip._internal.exceptions import NetworkConnectionError\nfrom pip._internal.network.session import PipSession\nfrom pip._internal.network.utils import raise_for_status\n\nif TYPE_CHECKING:\n from xmlrpc.client import _HostType, _Marshallable\n\n from _typeshed import SizedBuffer\n\nlogger = logging.getLogger(__name__)\n\n\nclass PipXmlrpcTransport(xmlrpc.client.Transport):\n """Provide a `xmlrpclib.Transport` implementation via a `PipSession`\n object.\n """\n\n def __init__(\n self, index_url: str, session: PipSession, use_datetime: bool = False\n ) -> None:\n super().__init__(use_datetime)\n index_parts = urllib.parse.urlparse(index_url)\n self._scheme = index_parts.scheme\n self._session = session\n\n def request(\n self,\n host: "_HostType",\n handler: str,\n request_body: "SizedBuffer",\n verbose: bool = False,\n ) -> Tuple["_Marshallable", ...]:\n assert isinstance(host, str)\n parts = (self._scheme, host, handler, None, None, None)\n url = urllib.parse.urlunparse(parts)\n try:\n headers = {"Content-Type": "text/xml"}\n response = self._session.post(\n url,\n data=request_body,\n headers=headers,\n stream=True,\n )\n raise_for_status(response)\n self.verbose = verbose\n return self.parse_response(response.raw)\n except NetworkConnectionError as exc:\n assert exc.response\n logger.critical(\n "HTTP error %s while getting %s",\n exc.response.status_code,\n url,\n )\n raise\n
.venv\Lib\site-packages\pip\_internal\network\xmlrpc.py
xmlrpc.py
Python
1,837
0.85
0.098361
0
python-kit
847
2025-01-19T13:06:44.464671
MIT
false
08b3a8aa9a820701e0cf775b2748bb3e
"""Contains purely network-related utilities."""\n
.venv\Lib\site-packages\pip\_internal\network\__init__.py
__init__.py
Python
49
0.5
0
0
vue-tools
652
2024-04-10T08:31:33.126876
Apache-2.0
false
c656ec1e6bea5cd4cd5a639c1ba62dd5
\n\n
.venv\Lib\site-packages\pip\_internal\network\__pycache__\auth.cpython-313.pyc
auth.cpython-313.pyc
Other
22,564
0.95
0.090909
0
node-utils
969
2025-04-26T13:22:24.042223
GPL-3.0
false
52512db07847dbba820184b1cd342ce9
\n\n
.venv\Lib\site-packages\pip\_internal\network\__pycache__\cache.cpython-313.pyc
cache.cpython-313.pyc
Other
7,201
0.8
0.067416
0
node-utils
293
2025-01-29T15:15:42.261744
BSD-3-Clause
false
6f984fc0e9e5445e18d9b48a9d356f81
\n\n
.venv\Lib\site-packages\pip\_internal\network\__pycache__\download.cpython-313.pyc
download.cpython-313.pyc
Other
12,842
0.8
0.044776
0
node-utils
471
2024-08-31T16:32:09.653631
Apache-2.0
false
54c243859894744f359c96fe79acd22a
\n\n
.venv\Lib\site-packages\pip\_internal\network\__pycache__\lazy_wheel.cpython-313.pyc
lazy_wheel.cpython-313.pyc
Other
11,504
0.8
0.026087
0.029703
react-lib
323
2024-07-28T14:06:53.208971
GPL-3.0
false
7d5569f9a060d7f83cea6d355f66bce1
\n\n
.venv\Lib\site-packages\pip\_internal\network\__pycache__\session.cpython-313.pyc
session.cpython-313.pyc
Other
19,204
0.8
0.009302
0
python-kit
458
2025-05-15T17:35:35.978034
BSD-3-Clause
false
43cd20d48593ccb653888265bac4eabc
\n\n
.venv\Lib\site-packages\pip\_internal\network\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
2,297
0.8
0.033333
0
python-kit
441
2023-08-11T09:43:33.753109
BSD-3-Clause
false
09396fe1082fcd25e7e38781fa94ce14
\n\n
.venv\Lib\site-packages\pip\_internal\network\__pycache__\xmlrpc.cpython-313.pyc
xmlrpc.cpython-313.pyc
Other
3,043
0.8
0.027027
0
node-utils
842
2024-04-10T15:56:38.255926
MIT
false
31d02d1d7faca901a4ca70ab366e029f
\n\n
.venv\Lib\site-packages\pip\_internal\network\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
253
0.7
0
0
awesome-app
847
2023-08-10T16:39:38.165758
MIT
false
c21eff8e3e48c41c274cb7f7487ec46f
"""Validation of dependencies of packages"""\n\nimport logging\nfrom contextlib import suppress\nfrom email.parser import Parser\nfrom functools import reduce\nfrom typing import (\n Callable,\n Dict,\n FrozenSet,\n Generator,\n Iterable,\n List,\n NamedTuple,\n Optional,\n Set,\n Tuple,\n)\n\nfrom pip._vendor.packaging.requirements import Requirement\nfrom pip._vendor.packaging.tags import Tag, parse_tag\nfrom pip._vendor.packaging.utils import NormalizedName, canonicalize_name\nfrom pip._vendor.packaging.version import Version\n\nfrom pip._internal.distributions import make_distribution_for_install_requirement\nfrom pip._internal.metadata import get_default_environment\nfrom pip._internal.metadata.base import BaseDistribution\nfrom pip._internal.req.req_install import InstallRequirement\n\nlogger = logging.getLogger(__name__)\n\n\nclass PackageDetails(NamedTuple):\n version: Version\n dependencies: List[Requirement]\n\n\n# Shorthands\nPackageSet = Dict[NormalizedName, PackageDetails]\nMissing = Tuple[NormalizedName, Requirement]\nConflicting = Tuple[NormalizedName, Version, Requirement]\n\nMissingDict = Dict[NormalizedName, List[Missing]]\nConflictingDict = Dict[NormalizedName, List[Conflicting]]\nCheckResult = Tuple[MissingDict, ConflictingDict]\nConflictDetails = Tuple[PackageSet, CheckResult]\n\n\ndef create_package_set_from_installed() -> Tuple[PackageSet, bool]:\n """Converts a list of distributions into a PackageSet."""\n package_set = {}\n problems = False\n env = get_default_environment()\n for dist in env.iter_installed_distributions(local_only=False, skip=()):\n name = dist.canonical_name\n try:\n dependencies = list(dist.iter_dependencies())\n package_set[name] = PackageDetails(dist.version, dependencies)\n except (OSError, ValueError) as e:\n # Don't crash on unreadable or broken metadata.\n logger.warning("Error parsing dependencies of %s: %s", name, e)\n problems = True\n return package_set, problems\n\n\ndef check_package_set(\n package_set: 
PackageSet, should_ignore: Optional[Callable[[str], bool]] = None\n) -> CheckResult:\n """Check if a package set is consistent\n\n If should_ignore is passed, it should be a callable that takes a\n package name and returns a boolean.\n """\n\n missing = {}\n conflicting = {}\n\n for package_name, package_detail in package_set.items():\n # Info about dependencies of package_name\n missing_deps: Set[Missing] = set()\n conflicting_deps: Set[Conflicting] = set()\n\n if should_ignore and should_ignore(package_name):\n continue\n\n for req in package_detail.dependencies:\n name = canonicalize_name(req.name)\n\n # Check if it's missing\n if name not in package_set:\n missed = True\n if req.marker is not None:\n missed = req.marker.evaluate({"extra": ""})\n if missed:\n missing_deps.add((name, req))\n continue\n\n # Check if there's a conflict\n version = package_set[name].version\n if not req.specifier.contains(version, prereleases=True):\n conflicting_deps.add((name, version, req))\n\n if missing_deps:\n missing[package_name] = sorted(missing_deps, key=str)\n if conflicting_deps:\n conflicting[package_name] = sorted(conflicting_deps, key=str)\n\n return missing, conflicting\n\n\ndef check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails:\n """For checking if the dependency graph would be consistent after \\n installing given requirements\n """\n # Start from the current state\n package_set, _ = create_package_set_from_installed()\n # Install packages\n would_be_installed = _simulate_installation_of(to_install, package_set)\n\n # Only warn about directly-dependent packages; create a whitelist of them\n whitelist = _create_whitelist(would_be_installed, package_set)\n\n return (\n package_set,\n check_package_set(\n package_set, should_ignore=lambda name: name not in whitelist\n ),\n )\n\n\ndef check_unsupported(\n packages: Iterable[BaseDistribution],\n supported_tags: Iterable[Tag],\n) -> Generator[BaseDistribution, None, None]:\n for p in 
packages:\n with suppress(FileNotFoundError):\n wheel_file = p.read_text("WHEEL")\n wheel_tags: FrozenSet[Tag] = reduce(\n frozenset.union,\n map(parse_tag, Parser().parsestr(wheel_file).get_all("Tag", [])),\n frozenset(),\n )\n if wheel_tags.isdisjoint(supported_tags):\n yield p\n\n\ndef _simulate_installation_of(\n to_install: List[InstallRequirement], package_set: PackageSet\n) -> Set[NormalizedName]:\n """Computes the version of packages after installing to_install."""\n # Keep track of packages that were installed\n installed = set()\n\n # Modify it as installing requirement_set would (assuming no errors)\n for inst_req in to_install:\n abstract_dist = make_distribution_for_install_requirement(inst_req)\n dist = abstract_dist.get_metadata_distribution()\n name = dist.canonical_name\n package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies()))\n\n installed.add(name)\n\n return installed\n\n\ndef _create_whitelist(\n would_be_installed: Set[NormalizedName], package_set: PackageSet\n) -> Set[NormalizedName]:\n packages_affected = set(would_be_installed)\n\n for package_name in package_set:\n if package_name in packages_affected:\n continue\n\n for req in package_set[package_name].dependencies:\n if canonicalize_name(req.name) in packages_affected:\n packages_affected.add(package_name)\n break\n\n return packages_affected\n
.venv\Lib\site-packages\pip\_internal\operations\check.py
check.py
Python
5,911
0.95
0.161111
0.070423
awesome-app
626
2024-01-24T11:11:03.722853
BSD-3-Clause
false
939e464c0c585acc010e230725058305
import collections\nimport logging\nimport os\nfrom dataclasses import dataclass, field\nfrom typing import Container, Dict, Generator, Iterable, List, NamedTuple, Optional, Set\n\nfrom pip._vendor.packaging.utils import NormalizedName, canonicalize_name\nfrom pip._vendor.packaging.version import InvalidVersion\n\nfrom pip._internal.exceptions import BadCommand, InstallationError\nfrom pip._internal.metadata import BaseDistribution, get_environment\nfrom pip._internal.req.constructors import (\n install_req_from_editable,\n install_req_from_line,\n)\nfrom pip._internal.req.req_file import COMMENT_RE\nfrom pip._internal.utils.direct_url_helpers import direct_url_as_pep440_direct_reference\n\nlogger = logging.getLogger(__name__)\n\n\nclass _EditableInfo(NamedTuple):\n requirement: str\n comments: List[str]\n\n\ndef freeze(\n requirement: Optional[List[str]] = None,\n local_only: bool = False,\n user_only: bool = False,\n paths: Optional[List[str]] = None,\n isolated: bool = False,\n exclude_editable: bool = False,\n skip: Container[str] = (),\n) -> Generator[str, None, None]:\n installations: Dict[str, FrozenRequirement] = {}\n\n dists = get_environment(paths).iter_installed_distributions(\n local_only=local_only,\n skip=(),\n user_only=user_only,\n )\n for dist in dists:\n req = FrozenRequirement.from_dist(dist)\n if exclude_editable and req.editable:\n continue\n installations[req.canonical_name] = req\n\n if requirement:\n # the options that don't get turned into an InstallRequirement\n # should only be emitted once, even if the same option is in multiple\n # requirements files, so we need to keep track of what has been emitted\n # so that we don't emit it again if it's seen again\n emitted_options: Set[str] = set()\n # keep track of which files a requirement is in so that we can\n # give an accurate warning if a requirement appears multiple times.\n req_files: Dict[str, List[str]] = collections.defaultdict(list)\n for req_file_path in requirement:\n with 
open(req_file_path) as req_file:\n for line in req_file:\n if (\n not line.strip()\n or line.strip().startswith("#")\n or line.startswith(\n (\n "-r",\n "--requirement",\n "-f",\n "--find-links",\n "-i",\n "--index-url",\n "--pre",\n "--trusted-host",\n "--process-dependency-links",\n "--extra-index-url",\n "--use-feature",\n )\n )\n ):\n line = line.rstrip()\n if line not in emitted_options:\n emitted_options.add(line)\n yield line\n continue\n\n if line.startswith("-e") or line.startswith("--editable"):\n if line.startswith("-e"):\n line = line[2:].strip()\n else:\n line = line[len("--editable") :].strip().lstrip("=")\n line_req = install_req_from_editable(\n line,\n isolated=isolated,\n )\n else:\n line_req = install_req_from_line(\n COMMENT_RE.sub("", line).strip(),\n isolated=isolated,\n )\n\n if not line_req.name:\n logger.info(\n "Skipping line in requirement file [%s] because "\n "it's not clear what it would install: %s",\n req_file_path,\n line.strip(),\n )\n logger.info(\n " (add #egg=PackageName to the URL to avoid"\n " this warning)"\n )\n else:\n line_req_canonical_name = canonicalize_name(line_req.name)\n if line_req_canonical_name not in installations:\n # either it's not installed, or it is installed\n # but has been processed already\n if not req_files[line_req.name]:\n logger.warning(\n "Requirement file [%s] contains %s, but "\n "package %r is not installed",\n req_file_path,\n COMMENT_RE.sub("", line).strip(),\n line_req.name,\n )\n else:\n req_files[line_req.name].append(req_file_path)\n else:\n yield str(installations[line_req_canonical_name]).rstrip()\n del installations[line_req_canonical_name]\n req_files[line_req.name].append(req_file_path)\n\n # Warn about requirements that were included multiple times (in a\n # single requirements file or in different requirements files).\n for name, files in req_files.items():\n if len(files) > 1:\n logger.warning(\n "Requirement %s included multiple times [%s]",\n name,\n ", 
".join(sorted(set(files))),\n )\n\n yield ("## The following requirements were added by pip freeze:")\n for installation in sorted(installations.values(), key=lambda x: x.name.lower()):\n if installation.canonical_name not in skip:\n yield str(installation).rstrip()\n\n\ndef _format_as_name_version(dist: BaseDistribution) -> str:\n try:\n dist_version = dist.version\n except InvalidVersion:\n # legacy version\n return f"{dist.raw_name}==={dist.raw_version}"\n else:\n return f"{dist.raw_name}=={dist_version}"\n\n\ndef _get_editable_info(dist: BaseDistribution) -> _EditableInfo:\n """\n Compute and return values (req, comments) for use in\n FrozenRequirement.from_dist().\n """\n editable_project_location = dist.editable_project_location\n assert editable_project_location\n location = os.path.normcase(os.path.abspath(editable_project_location))\n\n from pip._internal.vcs import RemoteNotFoundError, RemoteNotValidError, vcs\n\n vcs_backend = vcs.get_backend_for_dir(location)\n\n if vcs_backend is None:\n display = _format_as_name_version(dist)\n logger.debug(\n 'No VCS found for editable requirement "%s" in: %r',\n display,\n location,\n )\n return _EditableInfo(\n requirement=location,\n comments=[f"# Editable install with no version control ({display})"],\n )\n\n vcs_name = type(vcs_backend).__name__\n\n try:\n req = vcs_backend.get_src_requirement(location, dist.raw_name)\n except RemoteNotFoundError:\n display = _format_as_name_version(dist)\n return _EditableInfo(\n requirement=location,\n comments=[f"# Editable {vcs_name} install with no remote ({display})"],\n )\n except RemoteNotValidError as ex:\n display = _format_as_name_version(dist)\n return _EditableInfo(\n requirement=location,\n comments=[\n f"# Editable {vcs_name} install ({display}) with either a deleted "\n f"local remote or invalid URI:",\n f"# '{ex.url}'",\n ],\n )\n except BadCommand:\n logger.warning(\n "cannot determine version of editable source in %s "\n "(%s command not found in path)",\n 
location,\n vcs_backend.name,\n )\n return _EditableInfo(requirement=location, comments=[])\n except InstallationError as exc:\n logger.warning("Error when trying to get requirement for VCS system %s", exc)\n else:\n return _EditableInfo(requirement=req, comments=[])\n\n logger.warning("Could not determine repository location of %s", location)\n\n return _EditableInfo(\n requirement=location,\n comments=["## !! Could not determine repository location"],\n )\n\n\n@dataclass(frozen=True)\nclass FrozenRequirement:\n name: str\n req: str\n editable: bool\n comments: Iterable[str] = field(default_factory=tuple)\n\n @property\n def canonical_name(self) -> NormalizedName:\n return canonicalize_name(self.name)\n\n @classmethod\n def from_dist(cls, dist: BaseDistribution) -> "FrozenRequirement":\n editable = dist.editable\n if editable:\n req, comments = _get_editable_info(dist)\n else:\n comments = []\n direct_url = dist.direct_url\n if direct_url:\n # if PEP 610 metadata is present, use it\n req = direct_url_as_pep440_direct_reference(direct_url, dist.raw_name)\n else:\n # name==version requirement\n req = _format_as_name_version(dist)\n\n return cls(dist.raw_name, req, editable, comments=comments)\n\n def __str__(self) -> str:\n req = self.req\n if self.editable:\n req = f"-e {req}"\n return "\n".join(list(self.comments) + [str(req)]) + "\n"\n
.venv\Lib\site-packages\pip\_internal\operations\freeze.py
freeze.py
Python
9,843
0.95
0.144531
0.057522
vue-tools
897
2024-01-15T05:23:29.253083
Apache-2.0
false
0cf1119e8ae7654782e0f6574dda13a3
"""Prepares a distribution for installation"""\n\n# The following comment should be removed at some point in the future.\n# mypy: strict-optional=False\n\nimport mimetypes\nimport os\nimport shutil\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Dict, Iterable, List, Optional\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.distributions import make_distribution_for_install_requirement\nfrom pip._internal.distributions.installed import InstalledDistribution\nfrom pip._internal.exceptions import (\n DirectoryUrlHashUnsupported,\n HashMismatch,\n HashUnpinned,\n InstallationError,\n MetadataInconsistent,\n NetworkConnectionError,\n VcsHashUnsupported,\n)\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.metadata import BaseDistribution, get_metadata_distribution\nfrom pip._internal.models.direct_url import ArchiveInfo\nfrom pip._internal.models.link import Link\nfrom pip._internal.models.wheel import Wheel\nfrom pip._internal.network.download import BatchDownloader, Downloader\nfrom pip._internal.network.lazy_wheel import (\n HTTPRangeRequestUnsupported,\n dist_from_wheel_url,\n)\nfrom pip._internal.network.session import PipSession\nfrom pip._internal.operations.build.build_tracker import BuildTracker\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._internal.utils._log import getLogger\nfrom pip._internal.utils.direct_url_helpers import (\n direct_url_for_editable,\n direct_url_from_link,\n)\nfrom pip._internal.utils.hashes import Hashes, MissingHashes\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.misc import (\n display_path,\n hash_file,\n hide_url,\n redact_auth_from_requirement,\n)\nfrom pip._internal.utils.temp_dir import TempDirectory\nfrom pip._internal.utils.unpacking import unpack_file\nfrom pip._internal.vcs import vcs\n\nlogger = getLogger(__name__)\n\n\ndef _get_prepared_distribution(\n req: 
InstallRequirement,\n build_tracker: BuildTracker,\n finder: PackageFinder,\n build_isolation: bool,\n check_build_deps: bool,\n) -> BaseDistribution:\n """Prepare a distribution for installation."""\n abstract_dist = make_distribution_for_install_requirement(req)\n tracker_id = abstract_dist.build_tracker_id\n if tracker_id is not None:\n with build_tracker.track(req, tracker_id):\n abstract_dist.prepare_distribution_metadata(\n finder, build_isolation, check_build_deps\n )\n return abstract_dist.get_metadata_distribution()\n\n\ndef unpack_vcs_link(link: Link, location: str, verbosity: int) -> None:\n vcs_backend = vcs.get_backend_for_scheme(link.scheme)\n assert vcs_backend is not None\n vcs_backend.unpack(location, url=hide_url(link.url), verbosity=verbosity)\n\n\n@dataclass\nclass File:\n path: str\n content_type: Optional[str] = None\n\n def __post_init__(self) -> None:\n if self.content_type is None:\n # Try to guess the file's MIME type. If the system MIME tables\n # can't be loaded, give up.\n try:\n self.content_type = mimetypes.guess_type(self.path)[0]\n except OSError:\n pass\n\n\ndef get_http_url(\n link: Link,\n download: Downloader,\n download_dir: Optional[str] = None,\n hashes: Optional[Hashes] = None,\n) -> File:\n temp_dir = TempDirectory(kind="unpack", globally_managed=True)\n # If a download dir is specified, is the file already downloaded there?\n already_downloaded_path = None\n if download_dir:\n already_downloaded_path = _check_download_dir(link, download_dir, hashes)\n\n if already_downloaded_path:\n from_path = already_downloaded_path\n content_type = None\n else:\n # let's download to a tmp dir\n from_path, content_type = download(link, temp_dir.path)\n if hashes:\n hashes.check_against_path(from_path)\n\n return File(from_path, content_type)\n\n\ndef get_file_url(\n link: Link, download_dir: Optional[str] = None, hashes: Optional[Hashes] = None\n) -> File:\n """Get file and optionally check its hash."""\n # If a download dir is 
specified, is the file already there and valid?\n already_downloaded_path = None\n if download_dir:\n already_downloaded_path = _check_download_dir(link, download_dir, hashes)\n\n if already_downloaded_path:\n from_path = already_downloaded_path\n else:\n from_path = link.file_path\n\n # If --require-hashes is off, `hashes` is either empty, the\n # link's embedded hash, or MissingHashes; it is required to\n # match. If --require-hashes is on, we are satisfied by any\n # hash in `hashes` matching: a URL-based or an option-based\n # one; no internet-sourced hash will be in `hashes`.\n if hashes:\n hashes.check_against_path(from_path)\n return File(from_path, None)\n\n\ndef unpack_url(\n link: Link,\n location: str,\n download: Downloader,\n verbosity: int,\n download_dir: Optional[str] = None,\n hashes: Optional[Hashes] = None,\n) -> Optional[File]:\n """Unpack link into location, downloading if required.\n\n :param hashes: A Hashes object, one of whose embedded hashes must match,\n or HashMismatch will be raised. If the Hashes is empty, no matches are\n required, and unhashable types of requirements (like VCS ones, which\n would ordinarily raise HashUnsupported) are allowed.\n """\n # non-editable vcs urls\n if link.is_vcs:\n unpack_vcs_link(link, location, verbosity=verbosity)\n return None\n\n assert not link.is_existing_dir()\n\n # file urls\n if link.is_file:\n file = get_file_url(link, download_dir, hashes=hashes)\n\n # http urls\n else:\n file = get_http_url(\n link,\n download,\n download_dir,\n hashes=hashes,\n )\n\n # unpack the archive to the build dir location. 
even when only downloading\n # archives, they have to be unpacked to parse dependencies, except wheels\n if not link.is_wheel:\n unpack_file(file.path, location, file.content_type)\n\n return file\n\n\ndef _check_download_dir(\n link: Link,\n download_dir: str,\n hashes: Optional[Hashes],\n warn_on_hash_mismatch: bool = True,\n) -> Optional[str]:\n """Check download_dir for previously downloaded file with correct hash\n If a correct file is found return its path else None\n """\n download_path = os.path.join(download_dir, link.filename)\n\n if not os.path.exists(download_path):\n return None\n\n # If already downloaded, does its hash match?\n logger.info("File was already downloaded %s", download_path)\n if hashes:\n try:\n hashes.check_against_path(download_path)\n except HashMismatch:\n if warn_on_hash_mismatch:\n logger.warning(\n "Previously-downloaded file %s has bad hash. Re-downloading.",\n download_path,\n )\n os.unlink(download_path)\n return None\n return download_path\n\n\nclass RequirementPreparer:\n """Prepares a Requirement"""\n\n def __init__(\n self,\n build_dir: str,\n download_dir: Optional[str],\n src_dir: str,\n build_isolation: bool,\n check_build_deps: bool,\n build_tracker: BuildTracker,\n session: PipSession,\n progress_bar: str,\n finder: PackageFinder,\n require_hashes: bool,\n use_user_site: bool,\n lazy_wheel: bool,\n verbosity: int,\n legacy_resolver: bool,\n resume_retries: int,\n ) -> None:\n super().__init__()\n\n self.src_dir = src_dir\n self.build_dir = build_dir\n self.build_tracker = build_tracker\n self._session = session\n self._download = Downloader(session, progress_bar, resume_retries)\n self._batch_download = BatchDownloader(session, progress_bar, resume_retries)\n self.finder = finder\n\n # Where still-packed archives should be written to. 
If None, they are\n # not saved, and are deleted immediately after unpacking.\n self.download_dir = download_dir\n\n # Is build isolation allowed?\n self.build_isolation = build_isolation\n\n # Should check build dependencies?\n self.check_build_deps = check_build_deps\n\n # Should hash-checking be required?\n self.require_hashes = require_hashes\n\n # Should install in user site-packages?\n self.use_user_site = use_user_site\n\n # Should wheels be downloaded lazily?\n self.use_lazy_wheel = lazy_wheel\n\n # How verbose should underlying tooling be?\n self.verbosity = verbosity\n\n # Are we using the legacy resolver?\n self.legacy_resolver = legacy_resolver\n\n # Memoized downloaded files, as mapping of url: path.\n self._downloaded: Dict[str, str] = {}\n\n # Previous "header" printed for a link-based InstallRequirement\n self._previous_requirement_header = ("", "")\n\n def _log_preparing_link(self, req: InstallRequirement) -> None:\n """Provide context for the requirement being prepared."""\n if req.link.is_file and not req.is_wheel_from_cache:\n message = "Processing %s"\n information = str(display_path(req.link.file_path))\n else:\n message = "Collecting %s"\n information = redact_auth_from_requirement(req.req) if req.req else str(req)\n\n # If we used req.req, inject requirement source if available (this\n # would already be included if we used req directly)\n if req.req and req.comes_from:\n if isinstance(req.comes_from, str):\n comes_from: Optional[str] = req.comes_from\n else:\n comes_from = req.comes_from.from_path()\n if comes_from:\n information += f" (from {comes_from})"\n\n if (message, information) != self._previous_requirement_header:\n self._previous_requirement_header = (message, information)\n logger.info(message, information)\n\n if req.is_wheel_from_cache:\n with indent_log():\n logger.info("Using cached %s", req.link.filename)\n\n def _ensure_link_req_src_dir(\n self, req: InstallRequirement, parallel_builds: bool\n ) -> None:\n """Ensure 
source_dir of a linked InstallRequirement."""\n # Since source_dir is only set for editable requirements.\n if req.link.is_wheel:\n # We don't need to unpack wheels, so no need for a source\n # directory.\n return\n assert req.source_dir is None\n if req.link.is_existing_dir():\n # build local directories in-tree\n req.source_dir = req.link.file_path\n return\n\n # We always delete unpacked sdists after pip runs.\n req.ensure_has_source_dir(\n self.build_dir,\n autodelete=True,\n parallel_builds=parallel_builds,\n )\n req.ensure_pristine_source_checkout()\n\n def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes:\n # By the time this is called, the requirement's link should have\n # been checked so we can tell what kind of requirements req is\n # and raise some more informative errors than otherwise.\n # (For example, we can raise VcsHashUnsupported for a VCS URL\n # rather than HashMissing.)\n if not self.require_hashes:\n return req.hashes(trust_internet=True)\n\n # We could check these first 2 conditions inside unpack_url\n # and save repetition of conditions, but then we would\n # report less-useful error messages for unhashable\n # requirements, complaining that there's no hash provided.\n if req.link.is_vcs:\n raise VcsHashUnsupported()\n if req.link.is_existing_dir():\n raise DirectoryUrlHashUnsupported()\n\n # Unpinned packages are asking for trouble when a new version\n # is uploaded. 
This isn't a security check, but it saves users\n # a surprising hash mismatch in the future.\n # file:/// URLs aren't pinnable, so don't complain about them\n # not being pinned.\n if not req.is_direct and not req.is_pinned:\n raise HashUnpinned()\n\n # If known-good hashes are missing for this requirement,\n # shim it with a facade object that will provoke hash\n # computation and then raise a HashMissing exception\n # showing the user what the hash should be.\n return req.hashes(trust_internet=False) or MissingHashes()\n\n def _fetch_metadata_only(\n self,\n req: InstallRequirement,\n ) -> Optional[BaseDistribution]:\n if self.legacy_resolver:\n logger.debug(\n "Metadata-only fetching is not used in the legacy resolver",\n )\n return None\n if self.require_hashes:\n logger.debug(\n "Metadata-only fetching is not used as hash checking is required",\n )\n return None\n # Try PEP 658 metadata first, then fall back to lazy wheel if unavailable.\n return self._fetch_metadata_using_link_data_attr(\n req\n ) or self._fetch_metadata_using_lazy_wheel(req.link)\n\n def _fetch_metadata_using_link_data_attr(\n self,\n req: InstallRequirement,\n ) -> Optional[BaseDistribution]:\n """Fetch metadata from the data-dist-info-metadata attribute, if possible."""\n # (1) Get the link to the metadata file, if provided by the backend.\n metadata_link = req.link.metadata_link()\n if metadata_link is None:\n return None\n assert req.req is not None\n logger.verbose(\n "Obtaining dependency information for %s from %s",\n req.req,\n metadata_link,\n )\n # (2) Download the contents of the METADATA file, separate from the dist itself.\n metadata_file = get_http_url(\n metadata_link,\n self._download,\n hashes=metadata_link.as_hashes(),\n )\n with open(metadata_file.path, "rb") as f:\n metadata_contents = f.read()\n # (3) Generate a dist just from those file contents.\n metadata_dist = get_metadata_distribution(\n metadata_contents,\n req.link.filename,\n req.req.name,\n )\n # (4) Ensure 
the Name: field from the METADATA file matches the name from the\n # install requirement.\n #\n # NB: raw_name will fall back to the name from the install requirement if\n # the Name: field is not present, but it's noted in the raw_name docstring\n # that that should NEVER happen anyway.\n if canonicalize_name(metadata_dist.raw_name) != canonicalize_name(req.req.name):\n raise MetadataInconsistent(\n req, "Name", req.req.name, metadata_dist.raw_name\n )\n return metadata_dist\n\n def _fetch_metadata_using_lazy_wheel(\n self,\n link: Link,\n ) -> Optional[BaseDistribution]:\n """Fetch metadata using lazy wheel, if possible."""\n # --use-feature=fast-deps must be provided.\n if not self.use_lazy_wheel:\n return None\n if link.is_file or not link.is_wheel:\n logger.debug(\n "Lazy wheel is not used as %r does not point to a remote wheel",\n link,\n )\n return None\n\n wheel = Wheel(link.filename)\n name = canonicalize_name(wheel.name)\n logger.info(\n "Obtaining dependency information from %s %s",\n name,\n wheel.version,\n )\n url = link.url.split("#", 1)[0]\n try:\n return dist_from_wheel_url(name, url, self._session)\n except HTTPRangeRequestUnsupported:\n logger.debug("%s does not support range requests", url)\n return None\n\n def _complete_partial_requirements(\n self,\n partially_downloaded_reqs: Iterable[InstallRequirement],\n parallel_builds: bool = False,\n ) -> None:\n """Download any requirements which were only fetched by metadata."""\n # Download to a temporary directory. These will be copied over as\n # needed for downstream 'download', 'wheel', and 'install' commands.\n temp_dir = TempDirectory(kind="unpack", globally_managed=True).path\n\n # Map each link to the requirement that owns it. 
This allows us to set\n # `req.local_file_path` on the appropriate requirement after passing\n # all the links at once into BatchDownloader.\n links_to_fully_download: Dict[Link, InstallRequirement] = {}\n for req in partially_downloaded_reqs:\n assert req.link\n links_to_fully_download[req.link] = req\n\n batch_download = self._batch_download(\n links_to_fully_download.keys(),\n temp_dir,\n )\n for link, (filepath, _) in batch_download:\n logger.debug("Downloading link %s to %s", link, filepath)\n req = links_to_fully_download[link]\n # Record the downloaded file path so wheel reqs can extract a Distribution\n # in .get_dist().\n req.local_file_path = filepath\n # Record that the file is downloaded so we don't do it again in\n # _prepare_linked_requirement().\n self._downloaded[req.link.url] = filepath\n\n # If this is an sdist, we need to unpack it after downloading, but the\n # .source_dir won't be set up until we are in _prepare_linked_requirement().\n # Add the downloaded archive to the install requirement to unpack after\n # preparing the source dir.\n if not req.is_wheel:\n req.needs_unpacked_archive(Path(filepath))\n\n # This step is necessary to ensure all lazy wheels are processed\n # successfully by the 'download', 'wheel', and 'install' commands.\n for req in partially_downloaded_reqs:\n self._prepare_linked_requirement(req, parallel_builds)\n\n def prepare_linked_requirement(\n self, req: InstallRequirement, parallel_builds: bool = False\n ) -> BaseDistribution:\n """Prepare a requirement to be obtained from req.link."""\n assert req.link\n self._log_preparing_link(req)\n with indent_log():\n # Check if the relevant file is already available\n # in the download directory\n file_path = None\n if self.download_dir is not None and req.link.is_wheel:\n hashes = self._get_linked_req_hashes(req)\n file_path = _check_download_dir(\n req.link,\n self.download_dir,\n hashes,\n # When a locally built wheel has been found in cache, we don't warn\n # about 
re-downloading when the already downloaded wheel hash does\n # not match. This is because the hash must be checked against the\n # original link, not the cached link. It that case the already\n # downloaded file will be removed and re-fetched from cache (which\n # implies a hash check against the cache entry's origin.json).\n warn_on_hash_mismatch=not req.is_wheel_from_cache,\n )\n\n if file_path is not None:\n # The file is already available, so mark it as downloaded\n self._downloaded[req.link.url] = file_path\n else:\n # The file is not available, attempt to fetch only metadata\n metadata_dist = self._fetch_metadata_only(req)\n if metadata_dist is not None:\n req.needs_more_preparation = True\n return metadata_dist\n\n # None of the optimizations worked, fully prepare the requirement\n return self._prepare_linked_requirement(req, parallel_builds)\n\n def prepare_linked_requirements_more(\n self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False\n ) -> None:\n """Prepare linked requirements more, if needed."""\n reqs = [req for req in reqs if req.needs_more_preparation]\n for req in reqs:\n # Determine if any of these requirements were already downloaded.\n if self.download_dir is not None and req.link.is_wheel:\n hashes = self._get_linked_req_hashes(req)\n file_path = _check_download_dir(req.link, self.download_dir, hashes)\n if file_path is not None:\n self._downloaded[req.link.url] = file_path\n req.needs_more_preparation = False\n\n # Prepare requirements we found were already downloaded for some\n # reason. 
The other downloads will be completed separately.\n partially_downloaded_reqs: List[InstallRequirement] = []\n for req in reqs:\n if req.needs_more_preparation:\n partially_downloaded_reqs.append(req)\n else:\n self._prepare_linked_requirement(req, parallel_builds)\n\n # TODO: separate this part out from RequirementPreparer when the v1\n # resolver can be removed!\n self._complete_partial_requirements(\n partially_downloaded_reqs,\n parallel_builds=parallel_builds,\n )\n\n def _prepare_linked_requirement(\n self, req: InstallRequirement, parallel_builds: bool\n ) -> BaseDistribution:\n assert req.link\n link = req.link\n\n hashes = self._get_linked_req_hashes(req)\n\n if hashes and req.is_wheel_from_cache:\n assert req.download_info is not None\n assert link.is_wheel\n assert link.is_file\n # We need to verify hashes, and we have found the requirement in the cache\n # of locally built wheels.\n if (\n isinstance(req.download_info.info, ArchiveInfo)\n and req.download_info.info.hashes\n and hashes.has_one_of(req.download_info.info.hashes)\n ):\n # At this point we know the requirement was built from a hashable source\n # artifact, and we verified that the cache entry's hash of the original\n # artifact matches one of the hashes we expect. 
We don't verify hashes\n # against the cached wheel, because the wheel is not the original.\n hashes = None\n else:\n logger.warning(\n "The hashes of the source archive found in cache entry "\n "don't match, ignoring cached built wheel "\n "and re-downloading source."\n )\n req.link = req.cached_wheel_source_link\n link = req.link\n\n self._ensure_link_req_src_dir(req, parallel_builds)\n\n if link.is_existing_dir():\n local_file = None\n elif link.url not in self._downloaded:\n try:\n local_file = unpack_url(\n link,\n req.source_dir,\n self._download,\n self.verbosity,\n self.download_dir,\n hashes,\n )\n except NetworkConnectionError as exc:\n raise InstallationError(\n f"Could not install requirement {req} because of HTTP "\n f"error {exc} for URL {link}"\n )\n else:\n file_path = self._downloaded[link.url]\n if hashes:\n hashes.check_against_path(file_path)\n local_file = File(file_path, content_type=None)\n\n # If download_info is set, we got it from the wheel cache.\n if req.download_info is None:\n # Editables don't go through this function (see\n # prepare_editable_requirement).\n assert not req.editable\n req.download_info = direct_url_from_link(link, req.source_dir)\n # Make sure we have a hash in download_info. If we got it as part of the\n # URL, it will have been verified and we can rely on it. 
Otherwise we\n # compute it from the downloaded file.\n # FIXME: https://github.com/pypa/pip/issues/11943\n if (\n isinstance(req.download_info.info, ArchiveInfo)\n and not req.download_info.info.hashes\n and local_file\n ):\n hash = hash_file(local_file.path)[0].hexdigest()\n # We populate info.hash for backward compatibility.\n # This will automatically populate info.hashes.\n req.download_info.info.hash = f"sha256={hash}"\n\n # For use in later processing,\n # preserve the file path on the requirement.\n if local_file:\n req.local_file_path = local_file.path\n\n dist = _get_prepared_distribution(\n req,\n self.build_tracker,\n self.finder,\n self.build_isolation,\n self.check_build_deps,\n )\n return dist\n\n def save_linked_requirement(self, req: InstallRequirement) -> None:\n assert self.download_dir is not None\n assert req.link is not None\n link = req.link\n if link.is_vcs or (link.is_existing_dir() and req.editable):\n # Make a .zip of the source_dir we already created.\n req.archive(self.download_dir)\n return\n\n if link.is_existing_dir():\n logger.debug(\n "Not copying link to destination directory "\n "since it is a directory: %s",\n link,\n )\n return\n if req.local_file_path is None:\n # No distribution was downloaded for this requirement.\n return\n\n download_location = os.path.join(self.download_dir, link.filename)\n if not os.path.exists(download_location):\n shutil.copy(req.local_file_path, download_location)\n download_path = display_path(download_location)\n logger.info("Saved %s", download_path)\n\n def prepare_editable_requirement(\n self,\n req: InstallRequirement,\n ) -> BaseDistribution:\n """Prepare an editable requirement."""\n assert req.editable, "cannot prepare a non-editable req as editable"\n\n logger.info("Obtaining %s", req)\n\n with indent_log():\n if self.require_hashes:\n raise InstallationError(\n f"The editable requirement {req} cannot be installed when "\n "requiring hashes, because there is no single file to "\n "hash."\n 
)\n req.ensure_has_source_dir(self.src_dir)\n req.update_editable()\n assert req.source_dir\n req.download_info = direct_url_for_editable(req.unpacked_source_directory)\n\n dist = _get_prepared_distribution(\n req,\n self.build_tracker,\n self.finder,\n self.build_isolation,\n self.check_build_deps,\n )\n\n req.check_if_exists(self.use_user_site)\n\n return dist\n\n def prepare_installed_requirement(\n self,\n req: InstallRequirement,\n skip_reason: str,\n ) -> BaseDistribution:\n """Prepare an already-installed requirement."""\n assert req.satisfied_by, "req should have been satisfied but isn't"\n assert skip_reason is not None, (\n "did not get skip reason skipped but req.satisfied_by "\n f"is set to {req.satisfied_by}"\n )\n logger.info(\n "Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version\n )\n with indent_log():\n if self.require_hashes:\n logger.debug(\n "Since it is already installed, we are trusting this "\n "package without checking its hash. To ensure a "\n "completely repeatable environment, install into an "\n "empty virtualenv."\n )\n return InstalledDistribution(req).get_metadata_distribution()\n
.venv\Lib\site-packages\pip\_internal\operations\prepare.py
prepare.py
Python
28,363
0.95
0.157395
0.177469
python-kit
661
2024-06-27T10:02:45.892378
Apache-2.0
false
a6b74c0ce85429dc3894936f68f7ae87
import contextlib\nimport hashlib\nimport logging\nimport os\nfrom types import TracebackType\nfrom typing import Dict, Generator, Optional, Type, Union\n\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._internal.utils.temp_dir import TempDirectory\n\nlogger = logging.getLogger(__name__)\n\n\n@contextlib.contextmanager\ndef update_env_context_manager(**changes: str) -> Generator[None, None, None]:\n target = os.environ\n\n # Save values from the target and change them.\n non_existent_marker = object()\n saved_values: Dict[str, Union[object, str]] = {}\n for name, new_value in changes.items():\n try:\n saved_values[name] = target[name]\n except KeyError:\n saved_values[name] = non_existent_marker\n target[name] = new_value\n\n try:\n yield\n finally:\n # Restore original values in the target.\n for name, original_value in saved_values.items():\n if original_value is non_existent_marker:\n del target[name]\n else:\n assert isinstance(original_value, str) # for mypy\n target[name] = original_value\n\n\n@contextlib.contextmanager\ndef get_build_tracker() -> Generator["BuildTracker", None, None]:\n root = os.environ.get("PIP_BUILD_TRACKER")\n with contextlib.ExitStack() as ctx:\n if root is None:\n root = ctx.enter_context(TempDirectory(kind="build-tracker")).path\n ctx.enter_context(update_env_context_manager(PIP_BUILD_TRACKER=root))\n logger.debug("Initialized build tracking at %s", root)\n\n with BuildTracker(root) as tracker:\n yield tracker\n\n\nclass TrackerId(str):\n """Uniquely identifying string provided to the build tracker."""\n\n\nclass BuildTracker:\n """Ensure that an sdist cannot request itself as a setup requirement.\n\n When an sdist is prepared, it identifies its setup requirements in the\n context of ``BuildTracker.track()``. 
If a requirement shows up recursively, this\n raises an exception.\n\n This stops fork bombs embedded in malicious packages."""\n\n def __init__(self, root: str) -> None:\n self._root = root\n self._entries: Dict[TrackerId, InstallRequirement] = {}\n logger.debug("Created build tracker: %s", self._root)\n\n def __enter__(self) -> "BuildTracker":\n logger.debug("Entered build tracker: %s", self._root)\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n self.cleanup()\n\n def _entry_path(self, key: TrackerId) -> str:\n hashed = hashlib.sha224(key.encode()).hexdigest()\n return os.path.join(self._root, hashed)\n\n def add(self, req: InstallRequirement, key: TrackerId) -> None:\n """Add an InstallRequirement to build tracking."""\n\n # Get the file to write information about this requirement.\n entry_path = self._entry_path(key)\n\n # Try reading from the file. If it exists and can be read from, a build\n # is already in progress, so a LookupError is raised.\n try:\n with open(entry_path) as fp:\n contents = fp.read()\n except FileNotFoundError:\n pass\n else:\n message = f"{req.link} is already being built: {contents}"\n raise LookupError(message)\n\n # If we're here, req should really not be building already.\n assert key not in self._entries\n\n # Start tracking this requirement.\n with open(entry_path, "w", encoding="utf-8") as fp:\n fp.write(str(req))\n self._entries[key] = req\n\n logger.debug("Added %s to build tracker %r", req, self._root)\n\n def remove(self, req: InstallRequirement, key: TrackerId) -> None:\n """Remove an InstallRequirement from build tracking."""\n\n # Delete the created file and the corresponding entry.\n os.unlink(self._entry_path(key))\n del self._entries[key]\n\n logger.debug("Removed %s from build tracker %r", req, self._root)\n\n def cleanup(self) -> None:\n for key, req in list(self._entries.items()):\n self.remove(req, 
key)\n\n logger.debug("Removed build tracker: %r", self._root)\n\n @contextlib.contextmanager\n def track(self, req: InstallRequirement, key: str) -> Generator[None, None, None]:\n """Ensure that `key` cannot install itself as a setup requirement.\n\n :raises LookupError: If `key` was already provided in a parent invocation of\n the context introduced by this method."""\n tracker_id = TrackerId(key)\n self.add(req, tracker_id)\n yield\n self.remove(req, tracker_id)\n
.venv\Lib\site-packages\pip\_internal\operations\build\build_tracker.py
build_tracker.py
Python
4,774
0.95
0.152174
0.075472
react-lib
852
2025-02-22T04:26:31.704746
MIT
false
e1c564b14c012ff5d12bc9c9f58db9b7
"""Metadata generation logic for source distributions."""\n\nimport os\n\nfrom pip._vendor.pyproject_hooks import BuildBackendHookCaller\n\nfrom pip._internal.build_env import BuildEnvironment\nfrom pip._internal.exceptions import (\n InstallationSubprocessError,\n MetadataGenerationFailed,\n)\nfrom pip._internal.utils.subprocess import runner_with_spinner_message\nfrom pip._internal.utils.temp_dir import TempDirectory\n\n\ndef generate_metadata(\n build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str\n) -> str:\n """Generate metadata using mechanisms described in PEP 517.\n\n Returns the generated metadata directory.\n """\n metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True)\n\n metadata_dir = metadata_tmpdir.path\n\n with build_env:\n # Note that BuildBackendHookCaller implements a fallback for\n # prepare_metadata_for_build_wheel, so we don't have to\n # consider the possibility that this hook doesn't exist.\n runner = runner_with_spinner_message("Preparing metadata (pyproject.toml)")\n with backend.subprocess_runner(runner):\n try:\n distinfo_dir = backend.prepare_metadata_for_build_wheel(metadata_dir)\n except InstallationSubprocessError as error:\n raise MetadataGenerationFailed(package_details=details) from error\n\n return os.path.join(metadata_dir, distinfo_dir)\n
.venv\Lib\site-packages\pip\_internal\operations\build\metadata.py
metadata.py
Python
1,421
0.95
0.105263
0.103448
react-lib
586
2023-09-22T03:36:46.731433
BSD-3-Clause
false
0563c2531e5bd70edcf774eaac9531a5
"""Metadata generation logic for source distributions."""\n\nimport os\n\nfrom pip._vendor.pyproject_hooks import BuildBackendHookCaller\n\nfrom pip._internal.build_env import BuildEnvironment\nfrom pip._internal.exceptions import (\n InstallationSubprocessError,\n MetadataGenerationFailed,\n)\nfrom pip._internal.utils.subprocess import runner_with_spinner_message\nfrom pip._internal.utils.temp_dir import TempDirectory\n\n\ndef generate_editable_metadata(\n build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str\n) -> str:\n """Generate metadata using mechanisms described in PEP 660.\n\n Returns the generated metadata directory.\n """\n metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True)\n\n metadata_dir = metadata_tmpdir.path\n\n with build_env:\n # Note that BuildBackendHookCaller implements a fallback for\n # prepare_metadata_for_build_wheel/editable, so we don't have to\n # consider the possibility that this hook doesn't exist.\n runner = runner_with_spinner_message(\n "Preparing editable metadata (pyproject.toml)"\n )\n with backend.subprocess_runner(runner):\n try:\n distinfo_dir = backend.prepare_metadata_for_build_editable(metadata_dir)\n except InstallationSubprocessError as error:\n raise MetadataGenerationFailed(package_details=details) from error\n\n assert distinfo_dir is not None\n return os.path.join(metadata_dir, distinfo_dir)\n
.venv\Lib\site-packages\pip\_internal\operations\build\metadata_editable.py
metadata_editable.py
Python
1,509
0.95
0.097561
0.09375
node-utils
931
2024-04-17T16:48:39.295861
Apache-2.0
false
98986e4bd25d67d55d3abe87616751c1
"""Metadata generation logic for legacy source distributions."""\n\nimport logging\nimport os\n\nfrom pip._internal.build_env import BuildEnvironment\nfrom pip._internal.cli.spinners import open_spinner\nfrom pip._internal.exceptions import (\n InstallationError,\n InstallationSubprocessError,\n MetadataGenerationFailed,\n)\nfrom pip._internal.utils.setuptools_build import make_setuptools_egg_info_args\nfrom pip._internal.utils.subprocess import call_subprocess\nfrom pip._internal.utils.temp_dir import TempDirectory\n\nlogger = logging.getLogger(__name__)\n\n\ndef _find_egg_info(directory: str) -> str:\n """Find an .egg-info subdirectory in `directory`."""\n filenames = [f for f in os.listdir(directory) if f.endswith(".egg-info")]\n\n if not filenames:\n raise InstallationError(f"No .egg-info directory found in {directory}")\n\n if len(filenames) > 1:\n raise InstallationError(\n f"More than one .egg-info directory found in {directory}"\n )\n\n return os.path.join(directory, filenames[0])\n\n\ndef generate_metadata(\n build_env: BuildEnvironment,\n setup_py_path: str,\n source_dir: str,\n isolated: bool,\n details: str,\n) -> str:\n """Generate metadata using setup.py-based defacto mechanisms.\n\n Returns the generated metadata directory.\n """\n logger.debug(\n "Running setup.py (path:%s) egg_info for package %s",\n setup_py_path,\n details,\n )\n\n egg_info_dir = TempDirectory(kind="pip-egg-info", globally_managed=True).path\n\n args = make_setuptools_egg_info_args(\n setup_py_path,\n egg_info_dir=egg_info_dir,\n no_user_config=isolated,\n )\n\n with build_env:\n with open_spinner("Preparing metadata (setup.py)") as spinner:\n try:\n call_subprocess(\n args,\n cwd=source_dir,\n command_desc="python setup.py egg_info",\n spinner=spinner,\n )\n except InstallationSubprocessError as error:\n raise MetadataGenerationFailed(package_details=details) from error\n\n # Return the .egg-info directory.\n return _find_egg_info(egg_info_dir)\n
.venv\Lib\site-packages\pip\_internal\operations\build\metadata_legacy.py
metadata_legacy.py
Python
2,189
0.95
0.123288
0.017241
vue-tools
333
2024-07-01T14:27:31.786722
Apache-2.0
false
5328e93f50d9248074ee562881dfc87a
import logging\nimport os\nfrom typing import Optional\n\nfrom pip._vendor.pyproject_hooks import BuildBackendHookCaller\n\nfrom pip._internal.utils.subprocess import runner_with_spinner_message\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_wheel_pep517(\n name: str,\n backend: BuildBackendHookCaller,\n metadata_directory: str,\n tempd: str,\n) -> Optional[str]:\n """Build one InstallRequirement using the PEP 517 build process.\n\n Returns path to wheel if successfully built. Otherwise, returns None.\n """\n assert metadata_directory is not None\n try:\n logger.debug("Destination directory: %s", tempd)\n\n runner = runner_with_spinner_message(\n f"Building wheel for {name} (pyproject.toml)"\n )\n with backend.subprocess_runner(runner):\n wheel_name = backend.build_wheel(\n tempd,\n metadata_directory=metadata_directory,\n )\n except Exception:\n logger.error("Failed building wheel for %s", name)\n return None\n return os.path.join(tempd, wheel_name)\n
.venv\Lib\site-packages\pip\_internal\operations\build\wheel.py
wheel.py
Python
1,075
0.85
0.135135
0
react-lib
848
2023-08-13T16:07:54.629219
BSD-3-Clause
false
bfd26e6b7d053beae312119df6233540
import logging\nimport os\nfrom typing import Optional\n\nfrom pip._vendor.pyproject_hooks import BuildBackendHookCaller, HookMissing\n\nfrom pip._internal.utils.subprocess import runner_with_spinner_message\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_wheel_editable(\n name: str,\n backend: BuildBackendHookCaller,\n metadata_directory: str,\n tempd: str,\n) -> Optional[str]:\n """Build one InstallRequirement using the PEP 660 build process.\n\n Returns path to wheel if successfully built. Otherwise, returns None.\n """\n assert metadata_directory is not None\n try:\n logger.debug("Destination directory: %s", tempd)\n\n runner = runner_with_spinner_message(\n f"Building editable for {name} (pyproject.toml)"\n )\n with backend.subprocess_runner(runner):\n try:\n wheel_name = backend.build_editable(\n tempd,\n metadata_directory=metadata_directory,\n )\n except HookMissing as e:\n logger.error(\n "Cannot build editable %s because the build "\n "backend does not have the %s hook",\n name,\n e,\n )\n return None\n except Exception:\n logger.error("Failed building editable for %s", name)\n return None\n return os.path.join(tempd, wheel_name)\n
.venv\Lib\site-packages\pip\_internal\operations\build\wheel_editable.py
wheel_editable.py
Python
1,417
0.85
0.130435
0
react-lib
486
2025-05-26T15:46:44.493062
BSD-3-Clause
false
d481fb9c7608f878a84fb81a8a7aa2d1
import logging\nimport os.path\nfrom typing import List, Optional\n\nfrom pip._internal.cli.spinners import open_spinner\nfrom pip._internal.utils.deprecation import deprecated\nfrom pip._internal.utils.setuptools_build import make_setuptools_bdist_wheel_args\nfrom pip._internal.utils.subprocess import call_subprocess, format_command_args\n\nlogger = logging.getLogger(__name__)\n\n\ndef format_command_result(\n command_args: List[str],\n command_output: str,\n) -> str:\n """Format command information for logging."""\n command_desc = format_command_args(command_args)\n text = f"Command arguments: {command_desc}\n"\n\n if not command_output:\n text += "Command output: None"\n elif logger.getEffectiveLevel() > logging.DEBUG:\n text += "Command output: [use --verbose to show]"\n else:\n if not command_output.endswith("\n"):\n command_output += "\n"\n text += f"Command output:\n{command_output}"\n\n return text\n\n\ndef get_legacy_build_wheel_path(\n names: List[str],\n temp_dir: str,\n name: str,\n command_args: List[str],\n command_output: str,\n) -> Optional[str]:\n """Return the path to the wheel in the temporary build directory."""\n # Sort for determinism.\n names = sorted(names)\n if not names:\n msg = f"Legacy build of wheel for {name!r} created no files.\n"\n msg += format_command_result(command_args, command_output)\n logger.warning(msg)\n return None\n\n if len(names) > 1:\n msg = (\n f"Legacy build of wheel for {name!r} created more than one file.\n"\n f"Filenames (choosing first): {names}\n"\n )\n msg += format_command_result(command_args, command_output)\n logger.warning(msg)\n\n return os.path.join(temp_dir, names[0])\n\n\ndef build_wheel_legacy(\n name: str,\n setup_py_path: str,\n source_dir: str,\n global_options: List[str],\n build_options: List[str],\n tempd: str,\n) -> Optional[str]:\n """Build one unpacked package using the "legacy" build process.\n\n Returns path to wheel if successfully built. 
Otherwise, returns None.\n """\n deprecated(\n reason=(\n f"Building {name!r} using the legacy setup.py bdist_wheel mechanism, "\n "which will be removed in a future version."\n ),\n replacement=(\n "to use the standardized build interface by "\n "setting the `--use-pep517` option, "\n "(possibly combined with `--no-build-isolation`), "\n f"or adding a `pyproject.toml` file to the source tree of {name!r}"\n ),\n gone_in="25.3",\n issue=6334,\n )\n\n wheel_args = make_setuptools_bdist_wheel_args(\n setup_py_path,\n global_options=global_options,\n build_options=build_options,\n destination_dir=tempd,\n )\n\n spin_message = f"Building wheel for {name} (setup.py)"\n with open_spinner(spin_message) as spinner:\n logger.debug("Destination directory: %s", tempd)\n\n try:\n output = call_subprocess(\n wheel_args,\n command_desc="python setup.py bdist_wheel",\n cwd=source_dir,\n spinner=spinner,\n )\n except Exception:\n spinner.finish("error")\n logger.error("Failed building wheel for %s", name)\n return None\n\n names = os.listdir(tempd)\n wheel_path = get_legacy_build_wheel_path(\n names=names,\n temp_dir=tempd,\n name=name,\n command_args=wheel_args,\n command_output=output,\n )\n return wheel_path\n
.venv\Lib\site-packages\pip\_internal\operations\build\wheel_legacy.py
wheel_legacy.py
Python
3,620
0.95
0.127119
0.009901
vue-tools
940
2025-06-05T09:11:25.703377
GPL-3.0
false
38b9ff1a156fab35e3138fceb22c31e6
\n\n
.venv\Lib\site-packages\pip\_internal\operations\build\__pycache__\build_tracker.cpython-313.pyc
build_tracker.cpython-313.pyc
Other
7,743
0.95
0
0
python-kit
608
2024-12-25T23:22:32.202824
GPL-3.0
false
40e91578d8d88c769f22f99a63875005
\n\n
.venv\Lib\site-packages\pip\_internal\operations\build\__pycache__\metadata.cpython-313.pyc
metadata.cpython-313.pyc
Other
1,858
0.8
0.033333
0
vue-tools
304
2024-02-18T21:35:08.354099
MIT
false
db90600a04a2b60ece1ad1ed4faddf32
\n\n
.venv\Lib\site-packages\pip\_internal\operations\build\__pycache__\metadata_editable.cpython-313.pyc
metadata_editable.cpython-313.pyc
Other
1,910
0.8
0.032258
0
node-utils
751
2024-07-01T13:18:36.649746
Apache-2.0
false
cda5dbc9dc8c0b46c3be2b30402f3ff6
\n\n
.venv\Lib\site-packages\pip\_internal\operations\build\__pycache__\metadata_legacy.cpython-313.pyc
metadata_legacy.cpython-313.pyc
Other
2,993
0.8
0.042553
0
awesome-app
35
2025-01-18T03:56:42.442456
Apache-2.0
false
b592db90db1b46aa4edf39e69ce8be80
\n\n
.venv\Lib\site-packages\pip\_internal\operations\build\__pycache__\wheel.cpython-313.pyc
wheel.cpython-313.pyc
Other
1,685
0.8
0.12
0.045455
node-utils
630
2024-11-09T10:18:01.314813
MIT
false
ffdeb22c36083fa09352af995d180de8
\n\n
.venv\Lib\site-packages\pip\_internal\operations\build\__pycache__\wheel_editable.cpython-313.pyc
wheel_editable.cpython-313.pyc
Other
2,026
0.8
0.09375
0.035714
awesome-app
394
2024-12-12T01:09:04.931073
GPL-3.0
false
3feeee874c40e760fa07c3b9895cce8e
\n\n
.venv\Lib\site-packages\pip\_internal\operations\build\__pycache__\wheel_legacy.cpython-313.pyc
wheel_legacy.cpython-313.pyc
Other
4,380
0.8
0.079365
0
awesome-app
336
2024-02-06T05:39:49.753692
MIT
false
376505513e594ce82f526ab1c15c8bbd
\n\n
.venv\Lib\site-packages\pip\_internal\operations\build\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
205
0.7
0
0
react-lib
479
2025-04-24T20:47:34.869813
BSD-3-Clause
false
540722368a3aecf5761dc8d8c49f8b19
"""Legacy editable installation process, i.e. `setup.py develop`."""\n\nimport logging\nfrom typing import Optional, Sequence\n\nfrom pip._internal.build_env import BuildEnvironment\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.setuptools_build import make_setuptools_develop_args\nfrom pip._internal.utils.subprocess import call_subprocess\n\nlogger = logging.getLogger(__name__)\n\n\ndef install_editable(\n *,\n global_options: Sequence[str],\n prefix: Optional[str],\n home: Optional[str],\n use_user_site: bool,\n name: str,\n setup_py_path: str,\n isolated: bool,\n build_env: BuildEnvironment,\n unpacked_source_directory: str,\n) -> None:\n """Install a package in editable mode. Most arguments are pass-through\n to setuptools.\n """\n logger.info("Running setup.py develop for %s", name)\n\n args = make_setuptools_develop_args(\n setup_py_path,\n global_options=global_options,\n no_user_config=isolated,\n prefix=prefix,\n home=home,\n use_user_site=use_user_site,\n )\n\n with indent_log():\n with build_env:\n call_subprocess(\n args,\n command_desc="python setup.py develop",\n cwd=unpacked_source_directory,\n )\n
.venv\Lib\site-packages\pip\_internal\operations\install\editable_legacy.py
editable_legacy.py
Python
1,282
0.85
0.043478
0.025641
react-lib
475
2025-02-19T08:41:51.939483
MIT
false
f5ac709afccedf27287fcc60c99f4d39
"""Support for installing and building the "wheel" binary package format."""\n\nimport collections\nimport compileall\nimport contextlib\nimport csv\nimport importlib\nimport logging\nimport os.path\nimport re\nimport shutil\nimport sys\nimport warnings\nfrom base64 import urlsafe_b64encode\nfrom email.message import Message\nfrom itertools import chain, filterfalse, starmap\nfrom typing import (\n IO,\n Any,\n BinaryIO,\n Callable,\n Dict,\n Generator,\n Iterable,\n Iterator,\n List,\n NewType,\n Optional,\n Protocol,\n Sequence,\n Set,\n Tuple,\n Union,\n cast,\n)\nfrom zipfile import ZipFile, ZipInfo\n\nfrom pip._vendor.distlib.scripts import ScriptMaker\nfrom pip._vendor.distlib.util import get_export_entry\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.exceptions import InstallationError\nfrom pip._internal.locations import get_major_minor_version\nfrom pip._internal.metadata import (\n BaseDistribution,\n FilesystemWheel,\n get_wheel_distribution,\n)\nfrom pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl\nfrom pip._internal.models.scheme import SCHEME_KEYS, Scheme\nfrom pip._internal.utils.filesystem import adjacent_tmp_file, replace\nfrom pip._internal.utils.misc import StreamWrapper, ensure_dir, hash_file, partition\nfrom pip._internal.utils.unpacking import (\n current_umask,\n is_within_directory,\n set_extracted_file_to_default_mode_plus_executable,\n zip_item_is_executable,\n)\nfrom pip._internal.utils.wheel import parse_wheel\n\n\nclass File(Protocol):\n src_record_path: "RecordPath"\n dest_path: str\n changed: bool\n\n def save(self) -> None:\n pass\n\n\nlogger = logging.getLogger(__name__)\n\nRecordPath = NewType("RecordPath", str)\nInstalledCSVRow = Tuple[RecordPath, str, Union[int, str]]\n\n\ndef rehash(path: str, blocksize: int = 1 << 20) -> Tuple[str, str]:\n """Return (encoded_digest, length) for path using hashlib.sha256()"""\n h, length = hash_file(path, blocksize)\n digest = 
"sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=")\n return (digest, str(length))\n\n\ndef csv_io_kwargs(mode: str) -> Dict[str, Any]:\n """Return keyword arguments to properly open a CSV file\n in the given mode.\n """\n return {"mode": mode, "newline": "", "encoding": "utf-8"}\n\n\ndef fix_script(path: str) -> bool:\n """Replace #!python with #!/path/to/python\n Return True if file was changed.\n """\n # XXX RECORD hashes will need to be updated\n assert os.path.isfile(path)\n\n with open(path, "rb") as script:\n firstline = script.readline()\n if not firstline.startswith(b"#!python"):\n return False\n exename = sys.executable.encode(sys.getfilesystemencoding())\n firstline = b"#!" + exename + os.linesep.encode("ascii")\n rest = script.read()\n with open(path, "wb") as script:\n script.write(firstline)\n script.write(rest)\n return True\n\n\ndef wheel_root_is_purelib(metadata: Message) -> bool:\n return metadata.get("Root-Is-Purelib", "").lower() == "true"\n\n\ndef get_entrypoints(dist: BaseDistribution) -> Tuple[Dict[str, str], Dict[str, str]]:\n console_scripts = {}\n gui_scripts = {}\n for entry_point in dist.iter_entry_points():\n if entry_point.group == "console_scripts":\n console_scripts[entry_point.name] = entry_point.value\n elif entry_point.group == "gui_scripts":\n gui_scripts[entry_point.name] = entry_point.value\n return console_scripts, gui_scripts\n\n\ndef message_about_scripts_not_on_PATH(scripts: Sequence[str]) -> Optional[str]:\n """Determine if any scripts are not on PATH and format a warning.\n Returns a warning message if one or more scripts are not on PATH,\n otherwise None.\n """\n if not scripts:\n return None\n\n # Group scripts by the path they were installed in\n grouped_by_dir: Dict[str, Set[str]] = collections.defaultdict(set)\n for destfile in scripts:\n parent_dir = os.path.dirname(destfile)\n script_name = os.path.basename(destfile)\n grouped_by_dir[parent_dir].add(script_name)\n\n # We don't want to warn for 
directories that are on PATH.\n not_warn_dirs = [\n os.path.normcase(os.path.normpath(i)).rstrip(os.sep)\n for i in os.environ.get("PATH", "").split(os.pathsep)\n ]\n # If an executable sits with sys.executable, we don't warn for it.\n # This covers the case of venv invocations without activating the venv.\n not_warn_dirs.append(\n os.path.normcase(os.path.normpath(os.path.dirname(sys.executable)))\n )\n warn_for: Dict[str, Set[str]] = {\n parent_dir: scripts\n for parent_dir, scripts in grouped_by_dir.items()\n if os.path.normcase(os.path.normpath(parent_dir)) not in not_warn_dirs\n }\n if not warn_for:\n return None\n\n # Format a message\n msg_lines = []\n for parent_dir, dir_scripts in warn_for.items():\n sorted_scripts: List[str] = sorted(dir_scripts)\n if len(sorted_scripts) == 1:\n start_text = f"script {sorted_scripts[0]} is"\n else:\n start_text = "scripts {} are".format(\n ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]\n )\n\n msg_lines.append(\n f"The {start_text} installed in '{parent_dir}' which is not on PATH."\n )\n\n last_line_fmt = (\n "Consider adding {} to PATH or, if you prefer "\n "to suppress this warning, use --no-warn-script-location."\n )\n if len(msg_lines) == 1:\n msg_lines.append(last_line_fmt.format("this directory"))\n else:\n msg_lines.append(last_line_fmt.format("these directories"))\n\n # Add a note if any directory starts with ~\n warn_for_tilde = any(\n i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i\n )\n if warn_for_tilde:\n tilde_warning_msg = (\n "NOTE: The current PATH contains path(s) starting with `~`, "\n "which may not be expanded by all applications."\n )\n msg_lines.append(tilde_warning_msg)\n\n # Returns the formatted multiline message\n return "\n".join(msg_lines)\n\n\ndef _normalized_outrows(\n outrows: Iterable[InstalledCSVRow],\n) -> List[Tuple[str, str, str]]:\n """Normalize the given rows of a RECORD file.\n\n Items in each row are converted into str. 
Rows are then sorted to make\n the value more predictable for tests.\n\n Each row is a 3-tuple (path, hash, size) and corresponds to a record of\n a RECORD file (see PEP 376 and PEP 427 for details). For the rows\n passed to this function, the size can be an integer as an int or string,\n or the empty string.\n """\n # Normally, there should only be one row per path, in which case the\n # second and third elements don't come into play when sorting.\n # However, in cases in the wild where a path might happen to occur twice,\n # we don't want the sort operation to trigger an error (but still want\n # determinism). Since the third element can be an int or string, we\n # coerce each element to a string to avoid a TypeError in this case.\n # For additional background, see--\n # https://github.com/pypa/pip/issues/5868\n return sorted(\n (record_path, hash_, str(size)) for record_path, hash_, size in outrows\n )\n\n\ndef _record_to_fs_path(record_path: RecordPath, lib_dir: str) -> str:\n return os.path.join(lib_dir, record_path)\n\n\ndef _fs_to_record_path(path: str, lib_dir: str) -> RecordPath:\n # On Windows, do not handle relative paths if they belong to different\n # logical disks\n if os.path.splitdrive(path)[0].lower() == os.path.splitdrive(lib_dir)[0].lower():\n path = os.path.relpath(path, lib_dir)\n\n path = path.replace(os.path.sep, "/")\n return cast("RecordPath", path)\n\n\ndef get_csv_rows_for_installed(\n old_csv_rows: List[List[str]],\n installed: Dict[RecordPath, RecordPath],\n changed: Set[RecordPath],\n generated: List[str],\n lib_dir: str,\n) -> List[InstalledCSVRow]:\n """\n :param installed: A map from archive RECORD path to installation RECORD\n path.\n """\n installed_rows: List[InstalledCSVRow] = []\n for row in old_csv_rows:\n if len(row) > 3:\n logger.warning("RECORD line has more than three elements: %s", row)\n old_record_path = cast("RecordPath", row[0])\n new_record_path = installed.pop(old_record_path, old_record_path)\n if new_record_path 
in changed:\n digest, length = rehash(_record_to_fs_path(new_record_path, lib_dir))\n else:\n digest = row[1] if len(row) > 1 else ""\n length = row[2] if len(row) > 2 else ""\n installed_rows.append((new_record_path, digest, length))\n for f in generated:\n path = _fs_to_record_path(f, lib_dir)\n digest, length = rehash(f)\n installed_rows.append((path, digest, length))\n return installed_rows + [\n (installed_record_path, "", "") for installed_record_path in installed.values()\n ]\n\n\ndef get_console_script_specs(console: Dict[str, str]) -> List[str]:\n """\n Given the mapping from entrypoint name to callable, return the relevant\n console script specs.\n """\n # Don't mutate caller's version\n console = console.copy()\n\n scripts_to_generate = []\n\n # Special case pip and setuptools to generate versioned wrappers\n #\n # The issue is that some projects (specifically, pip and setuptools) use\n # code in setup.py to create "versioned" entry points - pip2.7 on Python\n # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into\n # the wheel metadata at build time, and so if the wheel is installed with\n # a *different* version of Python the entry points will be wrong. The\n # correct fix for this is to enhance the metadata to be able to describe\n # such versioned entry points.\n # Currently, projects using versioned entry points will either have\n # incorrect versioned entry points, or they will not be able to distribute\n # "universal" wheels (i.e., they will need a wheel per Python version).\n #\n # Because setuptools and pip are bundled with _ensurepip and virtualenv,\n # we need to use universal wheels. 
As a workaround, we\n # override the versioned entry points in the wheel and generate the\n # correct ones.\n #\n # To add the level of hack in this section of code, in order to support\n # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment\n # variable which will control which version scripts get installed.\n #\n # ENSUREPIP_OPTIONS=altinstall\n # - Only pipX.Y and easy_install-X.Y will be generated and installed\n # ENSUREPIP_OPTIONS=install\n # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note\n # that this option is technically if ENSUREPIP_OPTIONS is set and is\n # not altinstall\n # DEFAULT\n # - The default behavior is to install pip, pipX, pipX.Y, easy_install\n # and easy_install-X.Y.\n pip_script = console.pop("pip", None)\n if pip_script:\n if "ENSUREPIP_OPTIONS" not in os.environ:\n scripts_to_generate.append("pip = " + pip_script)\n\n if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":\n scripts_to_generate.append(f"pip{sys.version_info[0]} = {pip_script}")\n\n scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}")\n # Delete any other versioned pip entry points\n pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)]\n for k in pip_ep:\n del console[k]\n easy_install_script = console.pop("easy_install", None)\n if easy_install_script:\n if "ENSUREPIP_OPTIONS" not in os.environ:\n scripts_to_generate.append("easy_install = " + easy_install_script)\n\n scripts_to_generate.append(\n f"easy_install-{get_major_minor_version()} = {easy_install_script}"\n )\n # Delete any other versioned easy_install entry points\n easy_install_ep = [\n k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k)\n ]\n for k in easy_install_ep:\n del console[k]\n\n # Generate the console entry points specified in the wheel\n scripts_to_generate.extend(starmap("{} = {}".format, console.items()))\n\n return scripts_to_generate\n\n\nclass ZipBackedFile:\n def __init__(\n self, 
src_record_path: RecordPath, dest_path: str, zip_file: ZipFile\n ) -> None:\n self.src_record_path = src_record_path\n self.dest_path = dest_path\n self._zip_file = zip_file\n self.changed = False\n\n def _getinfo(self) -> ZipInfo:\n return self._zip_file.getinfo(self.src_record_path)\n\n def save(self) -> None:\n # When we open the output file below, any existing file is truncated\n # before we start writing the new contents. This is fine in most\n # cases, but can cause a segfault if pip has loaded a shared\n # object (e.g. from pyopenssl through its vendored urllib3)\n # Since the shared object is mmap'd an attempt to call a\n # symbol in it will then cause a segfault. Unlinking the file\n # allows writing of new contents while allowing the process to\n # continue to use the old copy.\n if os.path.exists(self.dest_path):\n os.unlink(self.dest_path)\n\n zipinfo = self._getinfo()\n\n # optimization: the file is created by open(),\n # skip the decompression when there is 0 bytes to decompress.\n with open(self.dest_path, "wb") as dest:\n if zipinfo.file_size > 0:\n with self._zip_file.open(zipinfo) as f:\n blocksize = min(zipinfo.file_size, 1024 * 1024)\n shutil.copyfileobj(f, dest, blocksize)\n\n if zip_item_is_executable(zipinfo):\n set_extracted_file_to_default_mode_plus_executable(self.dest_path)\n\n\nclass ScriptFile:\n def __init__(self, file: "File") -> None:\n self._file = file\n self.src_record_path = self._file.src_record_path\n self.dest_path = self._file.dest_path\n self.changed = False\n\n def save(self) -> None:\n self._file.save()\n self.changed = fix_script(self.dest_path)\n\n\nclass MissingCallableSuffix(InstallationError):\n def __init__(self, entry_point: str) -> None:\n super().__init__(\n f"Invalid script entry point: {entry_point} - A callable "\n "suffix is required. 
Cf https://packaging.python.org/"\n "specifications/entry-points/#use-for-scripts for more "\n "information."\n )\n\n\ndef _raise_for_invalid_entrypoint(specification: str) -> None:\n entry = get_export_entry(specification)\n if entry is not None and entry.suffix is None:\n raise MissingCallableSuffix(str(entry))\n\n\nclass PipScriptMaker(ScriptMaker):\n def make(\n self, specification: str, options: Optional[Dict[str, Any]] = None\n ) -> List[str]:\n _raise_for_invalid_entrypoint(specification)\n return super().make(specification, options)\n\n\ndef _install_wheel( # noqa: C901, PLR0915 function is too long\n name: str,\n wheel_zip: ZipFile,\n wheel_path: str,\n scheme: Scheme,\n pycompile: bool = True,\n warn_script_location: bool = True,\n direct_url: Optional[DirectUrl] = None,\n requested: bool = False,\n) -> None:\n """Install a wheel.\n\n :param name: Name of the project to install\n :param wheel_zip: open ZipFile for wheel being installed\n :param scheme: Distutils scheme dictating the install directories\n :param req_description: String used in place of the requirement, for\n logging\n :param pycompile: Whether to byte-compile installed Python files\n :param warn_script_location: Whether to check that scripts are installed\n into a directory on PATH\n :raises UnsupportedWheel:\n * when the directory holds an unpacked wheel with incompatible\n Wheel-Version\n * when the .dist-info dir does not match the wheel\n """\n info_dir, metadata = parse_wheel(wheel_zip, name)\n\n if wheel_root_is_purelib(metadata):\n lib_dir = scheme.purelib\n else:\n lib_dir = scheme.platlib\n\n # Record details of the files moved\n # installed = files copied from the wheel to the destination\n # changed = files changed while installing (scripts #! 
line typically)\n # generated = files newly generated during the install (script wrappers)\n installed: Dict[RecordPath, RecordPath] = {}\n changed: Set[RecordPath] = set()\n generated: List[str] = []\n\n def record_installed(\n srcfile: RecordPath, destfile: str, modified: bool = False\n ) -> None:\n """Map archive RECORD paths to installation RECORD paths."""\n newpath = _fs_to_record_path(destfile, lib_dir)\n installed[srcfile] = newpath\n if modified:\n changed.add(newpath)\n\n def is_dir_path(path: RecordPath) -> bool:\n return path.endswith("/")\n\n def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None:\n if not is_within_directory(dest_dir_path, target_path):\n message = (\n "The wheel {!r} has a file {!r} trying to install"\n " outside the target directory {!r}"\n )\n raise InstallationError(\n message.format(wheel_path, target_path, dest_dir_path)\n )\n\n def root_scheme_file_maker(\n zip_file: ZipFile, dest: str\n ) -> Callable[[RecordPath], "File"]:\n def make_root_scheme_file(record_path: RecordPath) -> "File":\n normed_path = os.path.normpath(record_path)\n dest_path = os.path.join(dest, normed_path)\n assert_no_path_traversal(dest, dest_path)\n return ZipBackedFile(record_path, dest_path, zip_file)\n\n return make_root_scheme_file\n\n def data_scheme_file_maker(\n zip_file: ZipFile, scheme: Scheme\n ) -> Callable[[RecordPath], "File"]:\n scheme_paths = {key: getattr(scheme, key) for key in SCHEME_KEYS}\n\n def make_data_scheme_file(record_path: RecordPath) -> "File":\n normed_path = os.path.normpath(record_path)\n try:\n _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)\n except ValueError:\n message = (\n f"Unexpected file in {wheel_path}: {record_path!r}. 
.data directory"\n " contents should be named like: '<scheme key>/<path>'."\n )\n raise InstallationError(message)\n\n try:\n scheme_path = scheme_paths[scheme_key]\n except KeyError:\n valid_scheme_keys = ", ".join(sorted(scheme_paths))\n message = (\n f"Unknown scheme key used in {wheel_path}: {scheme_key} "\n f"(for file {record_path!r}). .data directory contents "\n f"should be in subdirectories named with a valid scheme "\n f"key ({valid_scheme_keys})"\n )\n raise InstallationError(message)\n\n dest_path = os.path.join(scheme_path, dest_subpath)\n assert_no_path_traversal(scheme_path, dest_path)\n return ZipBackedFile(record_path, dest_path, zip_file)\n\n return make_data_scheme_file\n\n def is_data_scheme_path(path: RecordPath) -> bool:\n return path.split("/", 1)[0].endswith(".data")\n\n paths = cast(List[RecordPath], wheel_zip.namelist())\n file_paths = filterfalse(is_dir_path, paths)\n root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path, file_paths)\n\n make_root_scheme_file = root_scheme_file_maker(wheel_zip, lib_dir)\n files: Iterator[File] = map(make_root_scheme_file, root_scheme_paths)\n\n def is_script_scheme_path(path: RecordPath) -> bool:\n parts = path.split("/", 2)\n return len(parts) > 2 and parts[0].endswith(".data") and parts[1] == "scripts"\n\n other_scheme_paths, script_scheme_paths = partition(\n is_script_scheme_path, data_scheme_paths\n )\n\n make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)\n other_scheme_files = map(make_data_scheme_file, other_scheme_paths)\n files = chain(files, other_scheme_files)\n\n # Get the defined entry points\n distribution = get_wheel_distribution(\n FilesystemWheel(wheel_path),\n canonicalize_name(name),\n )\n console, gui = get_entrypoints(distribution)\n\n def is_entrypoint_wrapper(file: "File") -> bool:\n # EP, EP.exe and EP-script.py are scripts generated for\n # entry point EP by setuptools\n path = file.dest_path\n name = os.path.basename(path)\n if 
name.lower().endswith(".exe"):\n matchname = name[:-4]\n elif name.lower().endswith("-script.py"):\n matchname = name[:-10]\n elif name.lower().endswith(".pya"):\n matchname = name[:-4]\n else:\n matchname = name\n # Ignore setuptools-generated scripts\n return matchname in console or matchname in gui\n\n script_scheme_files: Iterator[File] = map(\n make_data_scheme_file, script_scheme_paths\n )\n script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files)\n script_scheme_files = map(ScriptFile, script_scheme_files)\n files = chain(files, script_scheme_files)\n\n existing_parents = set()\n for file in files:\n # directory creation is lazy and after file filtering\n # to ensure we don't install empty dirs; empty dirs can't be\n # uninstalled.\n parent_dir = os.path.dirname(file.dest_path)\n if parent_dir not in existing_parents:\n ensure_dir(parent_dir)\n existing_parents.add(parent_dir)\n file.save()\n record_installed(file.src_record_path, file.dest_path, file.changed)\n\n def pyc_source_file_paths() -> Generator[str, None, None]:\n # We de-duplicate installation paths, since there can be overlap (e.g.\n # file in .data maps to same location as file in wheel root).\n # Sorting installation paths makes it easier to reproduce and debug\n # issues related to permissions on existing files.\n for installed_path in sorted(set(installed.values())):\n full_installed_path = os.path.join(lib_dir, installed_path)\n if not os.path.isfile(full_installed_path):\n continue\n if not full_installed_path.endswith(".py"):\n continue\n yield full_installed_path\n\n def pyc_output_path(path: str) -> str:\n """Return the path the pyc file would have been written to."""\n return importlib.util.cache_from_source(path)\n\n # Compile all of the pyc files for the installed files\n if pycompile:\n with contextlib.redirect_stdout(\n StreamWrapper.from_stream(sys.stdout)\n ) as stdout:\n with warnings.catch_warnings():\n warnings.filterwarnings("ignore")\n for path in 
pyc_source_file_paths():\n success = compileall.compile_file(path, force=True, quiet=True)\n if success:\n pyc_path = pyc_output_path(path)\n assert os.path.exists(pyc_path)\n pyc_record_path = cast(\n "RecordPath", pyc_path.replace(os.path.sep, "/")\n )\n record_installed(pyc_record_path, pyc_path)\n logger.debug(stdout.getvalue())\n\n maker = PipScriptMaker(None, scheme.scripts)\n\n # Ensure old scripts are overwritten.\n # See https://github.com/pypa/pip/issues/1800\n maker.clobber = True\n\n # Ensure we don't generate any variants for scripts because this is almost\n # never what somebody wants.\n # See https://bitbucket.org/pypa/distlib/issue/35/\n maker.variants = {""}\n\n # This is required because otherwise distlib creates scripts that are not\n # executable.\n # See https://bitbucket.org/pypa/distlib/issue/32/\n maker.set_mode = True\n\n # Generate the console and GUI entry points specified in the wheel\n scripts_to_generate = get_console_script_specs(console)\n\n gui_scripts_to_generate = list(starmap("{} = {}".format, gui.items()))\n\n generated_console_scripts = maker.make_multiple(scripts_to_generate)\n generated.extend(generated_console_scripts)\n\n generated.extend(maker.make_multiple(gui_scripts_to_generate, {"gui": True}))\n\n if warn_script_location:\n msg = message_about_scripts_not_on_PATH(generated_console_scripts)\n if msg is not None:\n logger.warning(msg)\n\n generated_file_mode = 0o666 & ~current_umask()\n\n @contextlib.contextmanager\n def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:\n with adjacent_tmp_file(path, **kwargs) as f:\n yield f\n os.chmod(f.name, generated_file_mode)\n replace(f.name, path)\n\n dest_info_dir = os.path.join(lib_dir, info_dir)\n\n # Record pip as the installer\n installer_path = os.path.join(dest_info_dir, "INSTALLER")\n with _generate_file(installer_path) as installer_file:\n installer_file.write(b"pip\n")\n generated.append(installer_path)\n\n # Record the PEP 610 direct URL 
reference\n if direct_url is not None:\n direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)\n with _generate_file(direct_url_path) as direct_url_file:\n direct_url_file.write(direct_url.to_json().encode("utf-8"))\n generated.append(direct_url_path)\n\n # Record the REQUESTED file\n if requested:\n requested_path = os.path.join(dest_info_dir, "REQUESTED")\n with open(requested_path, "wb"):\n pass\n generated.append(requested_path)\n\n record_text = distribution.read_text("RECORD")\n record_rows = list(csv.reader(record_text.splitlines()))\n\n rows = get_csv_rows_for_installed(\n record_rows,\n installed=installed,\n changed=changed,\n generated=generated,\n lib_dir=lib_dir,\n )\n\n # Record details of all files installed\n record_path = os.path.join(dest_info_dir, "RECORD")\n\n with _generate_file(record_path, **csv_io_kwargs("w")) as record_file:\n # Explicitly cast to typing.IO[str] as a workaround for the mypy error:\n # "writer" has incompatible type "BinaryIO"; expected "_Writer"\n writer = csv.writer(cast("IO[str]", record_file))\n writer.writerows(_normalized_outrows(rows))\n\n\n@contextlib.contextmanager\ndef req_error_context(req_description: str) -> Generator[None, None, None]:\n try:\n yield\n except InstallationError as e:\n message = f"For req: {req_description}. {e.args[0]}"\n raise InstallationError(message) from e\n\n\ndef install_wheel(\n name: str,\n wheel_path: str,\n scheme: Scheme,\n req_description: str,\n pycompile: bool = True,\n warn_script_location: bool = True,\n direct_url: Optional[DirectUrl] = None,\n requested: bool = False,\n) -> None:\n with ZipFile(wheel_path, allowZip64=True) as z:\n with req_error_context(req_description):\n _install_wheel(\n name=name,\n wheel_zip=z,\n wheel_path=wheel_path,\n scheme=scheme,\n pycompile=pycompile,\n warn_script_location=warn_script_location,\n direct_url=direct_url,\n requested=requested,\n )\n
.venv\Lib\site-packages\pip\_internal\operations\install\wheel.py
wheel.py
Python
27,553
0.95
0.176152
0.154341
react-lib
793
2023-10-04T01:57:26.188666
GPL-3.0
false
320c4bd98b5fb796a7c21011120286cf
"""For modules related to installing packages."""\n
.venv\Lib\site-packages\pip\_internal\operations\install\__init__.py
__init__.py
Python
50
0.5
0
0
vue-tools
517
2023-10-05T01:46:21.578996
BSD-3-Clause
false
7c653d902a0b52669fba5f3bf3532a8d
\n\n
.venv\Lib\site-packages\pip\_internal\operations\install\__pycache__\editable_legacy.cpython-313.pyc
editable_legacy.cpython-313.pyc
Other
1,794
0.8
0.033333
0
node-utils
423
2024-07-30T08:47:03.987846
Apache-2.0
false
d5a08c41c2d5738f1d3e056a1d99b3e4
\n\n
.venv\Lib\site-packages\pip\_internal\operations\install\__pycache__\wheel.cpython-313.pyc
wheel.cpython-313.pyc
Other
34,730
0.95
0.04321
0.016077
node-utils
139
2024-06-30T06:52:57.820323
MIT
false
5653f7a5b84b59612bce289f9bb2469d
\n\n
.venv\Lib\site-packages\pip\_internal\operations\install\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
265
0.7
0
0
react-lib
672
2023-11-10T01:56:02.562314
Apache-2.0
false
46a430784b3c74e8309521562dc48d63
\n\n
.venv\Lib\site-packages\pip\_internal\operations\__pycache__\check.cpython-313.pyc
check.cpython-313.pyc
Other
7,221
0.95
0.027397
0.014493
awesome-app
209
2024-02-21T17:47:03.562921
GPL-3.0
false
0afdbf730b00bf5080cc7ce57b5a44f0
\n\n
.venv\Lib\site-packages\pip\_internal\operations\__pycache__\freeze.cpython-313.pyc
freeze.cpython-313.pyc
Other
10,480
0.95
0.031915
0
react-lib
260
2025-06-23T13:15:49.475992
Apache-2.0
false
bab3b45b655f32fc9e1d183aabacf137
\n\n
.venv\Lib\site-packages\pip\_internal\operations\__pycache__\prepare.cpython-313.pyc
prepare.cpython-313.pyc
Other
26,717
0.95
0.045662
0
vue-tools
170
2024-01-22T18:00:30.281252
Apache-2.0
false
f84c543eb68197fc70ac582a57a09e51
\n\n
.venv\Lib\site-packages\pip\_internal\operations\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
199
0.7
0
0
react-lib
780
2025-04-06T08:37:25.553891
GPL-3.0
false
9b9249d0a834bcfd51633e94873e9172
"""Backing implementation for InstallRequirement's various constructors\n\nThe idea here is that these formed a major chunk of InstallRequirement's size\nso, moving them and support code dedicated to them outside of that class\nhelps creates for better understandability for the rest of the code.\n\nThese are meant to be used elsewhere within pip to create instances of\nInstallRequirement.\n"""\n\nimport copy\nimport logging\nimport os\nimport re\nfrom dataclasses import dataclass\nfrom typing import Collection, Dict, List, Optional, Set, Tuple, Union\n\nfrom pip._vendor.packaging.markers import Marker\nfrom pip._vendor.packaging.requirements import InvalidRequirement, Requirement\nfrom pip._vendor.packaging.specifiers import Specifier\n\nfrom pip._internal.exceptions import InstallationError\nfrom pip._internal.models.index import PyPI, TestPyPI\nfrom pip._internal.models.link import Link\nfrom pip._internal.models.wheel import Wheel\nfrom pip._internal.req.req_file import ParsedRequirement\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._internal.utils.filetypes import is_archive_file\nfrom pip._internal.utils.misc import is_installable_dir\nfrom pip._internal.utils.packaging import get_requirement\nfrom pip._internal.utils.urls import path_to_url\nfrom pip._internal.vcs import is_url, vcs\n\n__all__ = [\n "install_req_from_editable",\n "install_req_from_line",\n "parse_editable",\n]\n\nlogger = logging.getLogger(__name__)\noperators = Specifier._operators.keys()\n\n\ndef _strip_extras(path: str) -> Tuple[str, Optional[str]]:\n m = re.match(r"^(.+)(\[[^\]]+\])$", path)\n extras = None\n if m:\n path_no_extras = m.group(1)\n extras = m.group(2)\n else:\n path_no_extras = path\n\n return path_no_extras, extras\n\n\ndef convert_extras(extras: Optional[str]) -> Set[str]:\n if not extras:\n return set()\n return get_requirement("placeholder" + extras.lower()).extras\n\n\ndef _set_requirement_extras(req: Requirement, new_extras: Set[str]) -> 
Requirement:\n """\n Returns a new requirement based on the given one, with the supplied extras. If the\n given requirement already has extras those are replaced (or dropped if no new extras\n are given).\n """\n match: Optional[re.Match[str]] = re.fullmatch(\n # see https://peps.python.org/pep-0508/#complete-grammar\n r"([\w\t .-]+)(\[[^\]]*\])?(.*)",\n str(req),\n flags=re.ASCII,\n )\n # ireq.req is a valid requirement so the regex should always match\n assert (\n match is not None\n ), f"regex match on requirement {req} failed, this should never happen"\n pre: Optional[str] = match.group(1)\n post: Optional[str] = match.group(3)\n assert (\n pre is not None and post is not None\n ), f"regex group selection for requirement {req} failed, this should never happen"\n extras: str = "[{}]".format(",".join(sorted(new_extras)) if new_extras else "")\n return get_requirement(f"{pre}{extras}{post}")\n\n\ndef parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]:\n """Parses an editable requirement into:\n - a requirement name\n - an URL\n - extras\n - editable options\n Accepted requirements:\n svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir\n .[some_extra]\n """\n\n url = editable_req\n\n # If a file path is specified with extras, strip off the extras.\n url_no_extras, extras = _strip_extras(url)\n\n if os.path.isdir(url_no_extras):\n # Treating it as code that has already been checked out\n url_no_extras = path_to_url(url_no_extras)\n\n if url_no_extras.lower().startswith("file:"):\n package_name = Link(url_no_extras).egg_fragment\n if extras:\n return (\n package_name,\n url_no_extras,\n get_requirement("placeholder" + extras.lower()).extras,\n )\n else:\n return package_name, url_no_extras, set()\n\n for version_control in vcs:\n if url.lower().startswith(f"{version_control}:"):\n url = f"{version_control}+{url}"\n break\n\n link = Link(url)\n\n if not link.is_vcs:\n backends = ", ".join(vcs.all_schemes)\n raise 
InstallationError(\n f"{editable_req} is not a valid editable requirement. "\n f"It should either be a path to a local project or a VCS URL "\n f"(beginning with {backends})."\n )\n\n package_name = link.egg_fragment\n if not package_name:\n raise InstallationError(\n f"Could not detect requirement name for '{editable_req}', "\n "please specify one with #egg=your_package_name"\n )\n return package_name, url, set()\n\n\ndef check_first_requirement_in_file(filename: str) -> None:\n """Check if file is parsable as a requirements file.\n\n This is heavily based on ``pkg_resources.parse_requirements``, but\n simplified to just check the first meaningful line.\n\n :raises InvalidRequirement: If the first meaningful line cannot be parsed\n as an requirement.\n """\n with open(filename, encoding="utf-8", errors="ignore") as f:\n # Create a steppable iterator, so we can handle \-continuations.\n lines = (\n line\n for line in (line.strip() for line in f)\n if line and not line.startswith("#") # Skip blank lines/comments.\n )\n\n for line in lines:\n # Drop comments -- a hash without a space may be in a URL.\n if " #" in line:\n line = line[: line.find(" #")]\n # If there is a line continuation, drop it, and append the next line.\n if line.endswith("\\"):\n line = line[:-2].strip() + next(lines, "")\n get_requirement(line)\n return\n\n\ndef deduce_helpful_msg(req: str) -> str:\n """Returns helpful msg in case requirements file does not exist,\n or cannot be parsed.\n\n :params req: Requirements file path\n """\n if not os.path.exists(req):\n return f" File '{req}' does not exist."\n msg = " The path does exist. "\n # Try to parse and check if it is a requirements file.\n try:\n check_first_requirement_in_file(req)\n except InvalidRequirement:\n logger.debug("Cannot parse '%s' as requirements file", req)\n else:\n msg += (\n f"The argument you provided "\n f"({req}) appears to be a"\n f" requirements file. 
If that is the"\n f" case, use the '-r' flag to install"\n f" the packages specified within it."\n )\n return msg\n\n\n@dataclass(frozen=True)\nclass RequirementParts:\n requirement: Optional[Requirement]\n link: Optional[Link]\n markers: Optional[Marker]\n extras: Set[str]\n\n\ndef parse_req_from_editable(editable_req: str) -> RequirementParts:\n name, url, extras_override = parse_editable(editable_req)\n\n if name is not None:\n try:\n req: Optional[Requirement] = get_requirement(name)\n except InvalidRequirement as exc:\n raise InstallationError(f"Invalid requirement: {name!r}: {exc}")\n else:\n req = None\n\n link = Link(url)\n\n return RequirementParts(req, link, None, extras_override)\n\n\n# ---- The actual constructors follow ----\n\n\ndef install_req_from_editable(\n editable_req: str,\n comes_from: Optional[Union[InstallRequirement, str]] = None,\n *,\n use_pep517: Optional[bool] = None,\n isolated: bool = False,\n global_options: Optional[List[str]] = None,\n hash_options: Optional[Dict[str, List[str]]] = None,\n constraint: bool = False,\n user_supplied: bool = False,\n permit_editable_wheels: bool = False,\n config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,\n) -> InstallRequirement:\n parts = parse_req_from_editable(editable_req)\n\n return InstallRequirement(\n parts.requirement,\n comes_from=comes_from,\n user_supplied=user_supplied,\n editable=True,\n permit_editable_wheels=permit_editable_wheels,\n link=parts.link,\n constraint=constraint,\n use_pep517=use_pep517,\n isolated=isolated,\n global_options=global_options,\n hash_options=hash_options,\n config_settings=config_settings,\n extras=parts.extras,\n )\n\n\ndef _looks_like_path(name: str) -> bool:\n """Checks whether the string "looks like" a path on the filesystem.\n\n This does not check whether the target actually exists, only judge from the\n appearance.\n\n Returns true if any of the following conditions is true:\n * a path separator is found (either os.path.sep or 
os.path.altsep);\n * a dot is found (which represents the current directory).\n """\n if os.path.sep in name:\n return True\n if os.path.altsep is not None and os.path.altsep in name:\n return True\n if name.startswith("."):\n return True\n return False\n\n\ndef _get_url_from_path(path: str, name: str) -> Optional[str]:\n """\n First, it checks whether a provided path is an installable directory. If it\n is, returns the path.\n\n If false, check if the path is an archive file (such as a .whl).\n The function checks if the path is a file. If false, if the path has\n an @, it will treat it as a PEP 440 URL requirement and return the path.\n """\n if _looks_like_path(name) and os.path.isdir(path):\n if is_installable_dir(path):\n return path_to_url(path)\n # TODO: The is_installable_dir test here might not be necessary\n # now that it is done in load_pyproject_toml too.\n raise InstallationError(\n f"Directory {name!r} is not installable. Neither 'setup.py' "\n "nor 'pyproject.toml' found."\n )\n if not is_archive_file(path):\n return None\n if os.path.isfile(path):\n return path_to_url(path)\n urlreq_parts = name.split("@", 1)\n if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):\n # If the path contains '@' and the part before it does not look\n # like a path, try to treat it as a PEP 440 URL req instead.\n return None\n logger.warning(\n "Requirement %r looks like a filename, but the file does not exist",\n name,\n )\n return path_to_url(path)\n\n\ndef parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts:\n if is_url(name):\n marker_sep = "; "\n else:\n marker_sep = ";"\n if marker_sep in name:\n name, markers_as_string = name.split(marker_sep, 1)\n markers_as_string = markers_as_string.strip()\n if not markers_as_string:\n markers = None\n else:\n markers = Marker(markers_as_string)\n else:\n markers = None\n name = name.strip()\n req_as_string = None\n path = os.path.normpath(os.path.abspath(name))\n link = None\n 
extras_as_string = None\n\n if is_url(name):\n link = Link(name)\n else:\n p, extras_as_string = _strip_extras(path)\n url = _get_url_from_path(p, name)\n if url is not None:\n link = Link(url)\n\n # it's a local file, dir, or url\n if link:\n # Handle relative file URLs\n if link.scheme == "file" and re.search(r"\.\./", link.url):\n link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path))))\n # wheel file\n if link.is_wheel:\n wheel = Wheel(link.filename) # can raise InvalidWheelFilename\n req_as_string = f"{wheel.name}=={wheel.version}"\n else:\n # set the req to the egg fragment. when it's not there, this\n # will become an 'unnamed' requirement\n req_as_string = link.egg_fragment\n\n # a requirement specifier\n else:\n req_as_string = name\n\n extras = convert_extras(extras_as_string)\n\n def with_source(text: str) -> str:\n if not line_source:\n return text\n return f"{text} (from {line_source})"\n\n def _parse_req_string(req_as_string: str) -> Requirement:\n try:\n return get_requirement(req_as_string)\n except InvalidRequirement as exc:\n if os.path.sep in req_as_string:\n add_msg = "It looks like a path."\n add_msg += deduce_helpful_msg(req_as_string)\n elif "=" in req_as_string and not any(\n op in req_as_string for op in operators\n ):\n add_msg = "= is not a valid operator. 
Did you mean == ?"\n else:\n add_msg = ""\n msg = with_source(f"Invalid requirement: {req_as_string!r}: {exc}")\n if add_msg:\n msg += f"\nHint: {add_msg}"\n raise InstallationError(msg)\n\n if req_as_string is not None:\n req: Optional[Requirement] = _parse_req_string(req_as_string)\n else:\n req = None\n\n return RequirementParts(req, link, markers, extras)\n\n\ndef install_req_from_line(\n name: str,\n comes_from: Optional[Union[str, InstallRequirement]] = None,\n *,\n use_pep517: Optional[bool] = None,\n isolated: bool = False,\n global_options: Optional[List[str]] = None,\n hash_options: Optional[Dict[str, List[str]]] = None,\n constraint: bool = False,\n line_source: Optional[str] = None,\n user_supplied: bool = False,\n config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,\n) -> InstallRequirement:\n """Creates an InstallRequirement from a name, which might be a\n requirement, directory containing 'setup.py', filename, or URL.\n\n :param line_source: An optional string describing where the line is from,\n for logging purposes in case of an error.\n """\n parts = parse_req_from_line(name, line_source)\n\n return InstallRequirement(\n parts.requirement,\n comes_from,\n link=parts.link,\n markers=parts.markers,\n use_pep517=use_pep517,\n isolated=isolated,\n global_options=global_options,\n hash_options=hash_options,\n config_settings=config_settings,\n constraint=constraint,\n extras=parts.extras,\n user_supplied=user_supplied,\n )\n\n\ndef install_req_from_req_string(\n req_string: str,\n comes_from: Optional[InstallRequirement] = None,\n isolated: bool = False,\n use_pep517: Optional[bool] = None,\n user_supplied: bool = False,\n) -> InstallRequirement:\n try:\n req = get_requirement(req_string)\n except InvalidRequirement as exc:\n raise InstallationError(f"Invalid requirement: {req_string!r}: {exc}")\n\n domains_not_allowed = [\n PyPI.file_storage_domain,\n TestPyPI.file_storage_domain,\n ]\n if (\n req.url\n and comes_from\n and 
comes_from.link\n and comes_from.link.netloc in domains_not_allowed\n ):\n # Explicitly disallow pypi packages that depend on external urls\n raise InstallationError(\n "Packages installed from PyPI cannot depend on packages "\n "which are not also hosted on PyPI.\n"\n f"{comes_from.name} depends on {req} "\n )\n\n return InstallRequirement(\n req,\n comes_from,\n isolated=isolated,\n use_pep517=use_pep517,\n user_supplied=user_supplied,\n )\n\n\ndef install_req_from_parsed_requirement(\n parsed_req: ParsedRequirement,\n isolated: bool = False,\n use_pep517: Optional[bool] = None,\n user_supplied: bool = False,\n config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,\n) -> InstallRequirement:\n if parsed_req.is_editable:\n req = install_req_from_editable(\n parsed_req.requirement,\n comes_from=parsed_req.comes_from,\n use_pep517=use_pep517,\n constraint=parsed_req.constraint,\n isolated=isolated,\n user_supplied=user_supplied,\n config_settings=config_settings,\n )\n\n else:\n req = install_req_from_line(\n parsed_req.requirement,\n comes_from=parsed_req.comes_from,\n use_pep517=use_pep517,\n isolated=isolated,\n global_options=(\n parsed_req.options.get("global_options", [])\n if parsed_req.options\n else []\n ),\n hash_options=(\n parsed_req.options.get("hashes", {}) if parsed_req.options else {}\n ),\n constraint=parsed_req.constraint,\n line_source=parsed_req.line_source,\n user_supplied=user_supplied,\n config_settings=config_settings,\n )\n return req\n\n\ndef install_req_from_link_and_ireq(\n link: Link, ireq: InstallRequirement\n) -> InstallRequirement:\n return InstallRequirement(\n req=ireq.req,\n comes_from=ireq.comes_from,\n editable=ireq.editable,\n link=link,\n markers=ireq.markers,\n use_pep517=ireq.use_pep517,\n isolated=ireq.isolated,\n global_options=ireq.global_options,\n hash_options=ireq.hash_options,\n config_settings=ireq.config_settings,\n user_supplied=ireq.user_supplied,\n )\n\n\ndef install_req_drop_extras(ireq: 
InstallRequirement) -> InstallRequirement:\n """\n Creates a new InstallationRequirement using the given template but without\n any extras. Sets the original requirement as the new one's parent\n (comes_from).\n """\n return InstallRequirement(\n req=(\n _set_requirement_extras(ireq.req, set()) if ireq.req is not None else None\n ),\n comes_from=ireq,\n editable=ireq.editable,\n link=ireq.link,\n markers=ireq.markers,\n use_pep517=ireq.use_pep517,\n isolated=ireq.isolated,\n global_options=ireq.global_options,\n hash_options=ireq.hash_options,\n constraint=ireq.constraint,\n extras=[],\n config_settings=ireq.config_settings,\n user_supplied=ireq.user_supplied,\n permit_editable_wheels=ireq.permit_editable_wheels,\n )\n\n\ndef install_req_extend_extras(\n ireq: InstallRequirement,\n extras: Collection[str],\n) -> InstallRequirement:\n """\n Returns a copy of an installation requirement with some additional extras.\n Makes a shallow copy of the ireq object.\n """\n result = copy.copy(ireq)\n result.extras = {*ireq.extras, *extras}\n result.req = (\n _set_requirement_extras(ireq.req, result.extras)\n if ireq.req is not None\n else None\n )\n return result\n
.venv\Lib\site-packages\pip\_internal\req\constructors.py
constructors.py
Python
18,430
0.95
0.151786
0.049793
node-utils
796
2024-07-13T07:14:06.556113
GPL-3.0
false
6ba5e92d1a1604e5117ee9f578c3edc9
import sys
from typing import Any, Dict, Iterable, Iterator, List, Tuple

if sys.version_info >= (3, 11):
    import tomllib
else:
    from pip._vendor import tomli as tomllib

from pip._vendor.dependency_groups import DependencyGroupResolver

from pip._internal.exceptions import InstallationError


def parse_dependency_groups(groups: List[Tuple[str, str]]) -> List[str]:
    """
    Parse dependency groups data as provided via the CLI, in a `[path:]group` syntax.

    Raises InstallationError if anything goes wrong.
    """
    resolvers = _build_resolvers(path for (path, _) in groups)
    return list(_resolve_all_groups(resolvers, groups))


def _resolve_all_groups(
    resolvers: Dict[str, DependencyGroupResolver], groups: List[Tuple[str, str]]
) -> Iterator[str]:
    """
    Run all resolution, converting any error from `DependencyGroupResolver` into
    an InstallationError.
    """
    for path, groupname in groups:
        resolver = resolvers[path]
        try:
            yield from (str(req) for req in resolver.resolve(groupname))
        except (ValueError, TypeError, LookupError) as e:
            raise InstallationError(
                f"[dependency-groups] resolution failed for '{groupname}' "
                f"from '{path}': {e}"
            ) from e


def _build_resolvers(paths: Iterable[str]) -> Dict[str, DependencyGroupResolver]:
    """
    Build one DependencyGroupResolver per unique pyproject.toml path.

    Duplicate paths are only loaded once. Raises InstallationError when a file
    lacks a well-formed ``[dependency-groups]`` table.
    """
    # NOTE: was annotated `-> Dict[str, Any]`; the values are always
    # DependencyGroupResolver instances, which `_resolve_all_groups` relies on.
    resolvers: Dict[str, DependencyGroupResolver] = {}
    for path in paths:
        if path in resolvers:
            continue

        pyproject = _load_pyproject(path)
        if "dependency-groups" not in pyproject:
            raise InstallationError(
                f"[dependency-groups] table was missing from '{path}'. "
                "Cannot resolve '--group' option."
            )
        raw_dependency_groups = pyproject["dependency-groups"]
        if not isinstance(raw_dependency_groups, dict):
            raise InstallationError(
                f"[dependency-groups] table was malformed in {path}. "
                "Cannot resolve '--group' option."
            )

        resolvers[path] = DependencyGroupResolver(raw_dependency_groups)
    return resolvers


def _load_pyproject(path: str) -> Dict[str, Any]:
    """
    This helper loads a pyproject.toml as TOML.

    It raises an InstallationError if the operation fails.
    """
    try:
        with open(path, "rb") as fp:
            return tomllib.load(fp)
    except FileNotFoundError:
        raise InstallationError(f"{path} not found. Cannot resolve '--group' option.")
    except tomllib.TOMLDecodeError as e:
        raise InstallationError(f"Error parsing {path}: {e}") from e
    except OSError as e:
        raise InstallationError(f"Error reading {path}: {e}") from e
.venv\Lib\site-packages\pip\_internal\req\req_dependency_group.py
req_dependency_group.py
Python
2,677
0.85
0.21519
0
vue-tools
465
2024-11-30T20:42:28.128330
GPL-3.0
false
0be1105aa91d1f7ae9b5aa979dbff15a
"""
Requirements file parsing
"""

import codecs
import locale
import logging
import optparse
import os
import re
import shlex
import sys
import urllib.parse
from dataclasses import dataclass
from optparse import Values
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Generator,
    Iterable,
    List,
    NoReturn,
    Optional,
    Tuple,
)

from pip._internal.cli import cmdoptions
from pip._internal.exceptions import InstallationError, RequirementsFileParseError
from pip._internal.models.search_scope import SearchScope

if TYPE_CHECKING:
    from pip._internal.index.package_finder import PackageFinder
    from pip._internal.network.session import PipSession

__all__ = ["parse_requirements"]

ReqFileLines = Iterable[Tuple[int, str]]

LineParser = Callable[[str], Tuple[str, Values]]

SCHEME_RE = re.compile(r"^(http|https|file):", re.I)
COMMENT_RE = re.compile(r"(^|\s+)#.*$")

# Matches environment variable-style values in '${MY_VARIABLE_1}' with the
# variable name consisting of only uppercase letters, digits or the '_'
# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1,
# 2013 Edition.
ENV_VAR_RE = re.compile(r"(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})")

SUPPORTED_OPTIONS: List[Callable[..., optparse.Option]] = [
    cmdoptions.index_url,
    cmdoptions.extra_index_url,
    cmdoptions.no_index,
    cmdoptions.constraints,
    cmdoptions.requirements,
    cmdoptions.editable,
    cmdoptions.find_links,
    cmdoptions.no_binary,
    cmdoptions.only_binary,
    cmdoptions.prefer_binary,
    cmdoptions.require_hashes,
    cmdoptions.pre,
    cmdoptions.trusted_host,
    cmdoptions.use_new_feature,
]

# options to be passed to requirements
SUPPORTED_OPTIONS_REQ: List[Callable[..., optparse.Option]] = [
    cmdoptions.global_options,
    cmdoptions.hash,
    cmdoptions.config_settings,
]

SUPPORTED_OPTIONS_EDITABLE_REQ: List[Callable[..., optparse.Option]] = [
    cmdoptions.config_settings,
]


# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ]
SUPPORTED_OPTIONS_EDITABLE_REQ_DEST = [
    str(o().dest) for o in SUPPORTED_OPTIONS_EDITABLE_REQ
]

# order of BOMS is important: codecs.BOM_UTF16_LE is a prefix of codecs.BOM_UTF32_LE
# so data.startswith(BOM_UTF16_LE) would be true for UTF32_LE data
BOMS: List[Tuple[bytes, str]] = [
    (codecs.BOM_UTF8, "utf-8"),
    (codecs.BOM_UTF32, "utf-32"),
    (codecs.BOM_UTF32_BE, "utf-32-be"),
    (codecs.BOM_UTF32_LE, "utf-32-le"),
    (codecs.BOM_UTF16, "utf-16"),
    (codecs.BOM_UTF16_BE, "utf-16-be"),
    (codecs.BOM_UTF16_LE, "utf-16-le"),
]

PEP263_ENCODING_RE = re.compile(rb"coding[:=]\s*([-\w.]+)")
DEFAULT_ENCODING = "utf-8"

logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class ParsedRequirement:
    """A single requirement parsed from a requirements file line."""

    # TODO: replace this with slots=True when dropping Python 3.9 support.
    __slots__ = (
        "requirement",
        "is_editable",
        "comes_from",
        "constraint",
        "options",
        "line_source",
    )

    requirement: str
    is_editable: bool
    comes_from: str
    constraint: bool
    options: Optional[Dict[str, Any]]
    line_source: Optional[str]


@dataclass(frozen=True)
class ParsedLine:
    """One preprocessed line of a requirements file: args plus parsed options."""

    __slots__ = ("filename", "lineno", "args", "opts", "constraint")

    filename: str
    lineno: int
    args: str
    opts: Values
    constraint: bool

    @property
    def is_editable(self) -> bool:
        return bool(self.opts.editables)

    @property
    def requirement(self) -> Optional[str]:
        if self.args:
            return self.args
        elif self.is_editable:
            # We don't support multiple -e on one line
            return self.opts.editables[0]
        return None


def parse_requirements(
    filename: str,
    session: "PipSession",
    finder: Optional["PackageFinder"] = None,
    options: Optional[optparse.Values] = None,
    constraint: bool = False,
) -> Generator[ParsedRequirement, None, None]:
    """Parse a requirements file and yield ParsedRequirement instances.

    :param filename: Path or url of requirements file.
    :param session: PipSession instance.
    :param finder: Instance of pip.index.PackageFinder.
    :param options: cli options.
    :param constraint: If true, parsing a constraint file rather than
        requirements file.
    """
    line_parser = get_line_parser(finder)
    parser = RequirementsFileParser(session, line_parser)

    for parsed_line in parser.parse(filename, constraint):
        parsed_req = handle_line(
            parsed_line, options=options, finder=finder, session=session
        )
        if parsed_req is not None:
            yield parsed_req


def preprocess(content: str) -> ReqFileLines:
    """Split, filter, and join lines, and return a line iterator

    :param content: the content of the requirements file
    """
    lines_enum: ReqFileLines = enumerate(content.splitlines(), start=1)
    lines_enum = join_lines(lines_enum)
    lines_enum = ignore_comments(lines_enum)
    lines_enum = expand_env_variables(lines_enum)
    return lines_enum


def handle_requirement_line(
    line: ParsedLine,
    options: Optional[optparse.Values] = None,
) -> ParsedRequirement:
    """Convert a requirement-bearing ParsedLine into a ParsedRequirement."""
    # preserve for the nested code path
    line_comes_from = "{} {} (line {})".format(
        "-c" if line.constraint else "-r",
        line.filename,
        line.lineno,
    )

    assert line.requirement is not None

    # get the options that apply to requirements
    if line.is_editable:
        supported_dest = SUPPORTED_OPTIONS_EDITABLE_REQ_DEST
    else:
        supported_dest = SUPPORTED_OPTIONS_REQ_DEST
    req_options = {}
    for dest in supported_dest:
        if dest in line.opts.__dict__ and line.opts.__dict__[dest]:
            req_options[dest] = line.opts.__dict__[dest]

    line_source = f"line {line.lineno} of {line.filename}"
    return ParsedRequirement(
        requirement=line.requirement,
        is_editable=line.is_editable,
        comes_from=line_comes_from,
        constraint=line.constraint,
        options=req_options,
        line_source=line_source,
    )


def handle_option_line(
    opts: Values,
    filename: str,
    lineno: int,
    finder: Optional["PackageFinder"] = None,
    options: Optional[optparse.Values] = None,
    session: Optional["PipSession"] = None,
) -> None:
    """Apply a non-requirement options line to the CLI options/finder/session."""
    if opts.hashes:
        logger.warning(
            "%s line %s has --hash but no requirement, and will be ignored.",
            filename,
            lineno,
        )

    if options:
        # percolate options upward
        if opts.require_hashes:
            options.require_hashes = opts.require_hashes
        if opts.features_enabled:
            options.features_enabled.extend(
                f for f in opts.features_enabled if f not in options.features_enabled
            )

    # set finder options
    if finder:
        find_links = finder.find_links
        index_urls = finder.index_urls
        no_index = finder.search_scope.no_index
        if opts.no_index is True:
            no_index = True
            index_urls = []
        if opts.index_url and not no_index:
            index_urls = [opts.index_url]
        if opts.extra_index_urls and not no_index:
            index_urls.extend(opts.extra_index_urls)
        if opts.find_links:
            # FIXME: it would be nice to keep track of the source
            # of the find_links: support a find-links local path
            # relative to a requirements file.
            value = opts.find_links[0]
            req_dir = os.path.dirname(os.path.abspath(filename))
            relative_to_reqs_file = os.path.join(req_dir, value)
            if os.path.exists(relative_to_reqs_file):
                value = relative_to_reqs_file
            find_links.append(value)

        if session:
            # We need to update the auth urls in session
            session.update_index_urls(index_urls)

        search_scope = SearchScope(
            find_links=find_links,
            index_urls=index_urls,
            no_index=no_index,
        )
        finder.search_scope = search_scope

        if opts.pre:
            finder.set_allow_all_prereleases()

        if opts.prefer_binary:
            finder.set_prefer_binary()

    if session:
        for host in opts.trusted_hosts or []:
            # BUGFIX: the source message hardcoded "(unknown)" instead of
            # interpolating the requirements-file name; match the form used by
            # handle_requirement_line so provenance is reported correctly.
            source = f"line {lineno} of {filename}"
            session.add_trusted_host(host, source=source)


def handle_line(
    line: ParsedLine,
    options: Optional[optparse.Values] = None,
    finder: Optional["PackageFinder"] = None,
    session: Optional["PipSession"] = None,
) -> Optional[ParsedRequirement]:
    """Handle a single parsed requirements line; This can result in
    creating/yielding requirements, or updating the finder.

    :param line: The parsed line to be processed.
    :param options: CLI options.
    :param finder: The finder - updated by non-requirement lines.
    :param session: The session - updated by non-requirement lines.

    Returns a ParsedRequirement object if the line is a requirement line,
    otherwise returns None.

    For lines that contain requirements, the only options that have an effect
    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
    ignored.

    For lines that do not contain requirements, the only options that have an
    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
    be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
    affect the finder.
    """

    if line.requirement is not None:
        parsed_req = handle_requirement_line(line, options)
        return parsed_req
    else:
        handle_option_line(
            line.opts,
            line.filename,
            line.lineno,
            finder,
            options,
            session,
        )
        return None


class RequirementsFileParser:
    """Parses requirements files, recursing into nested -r/-c references."""

    def __init__(
        self,
        session: "PipSession",
        line_parser: LineParser,
    ) -> None:
        self._session = session
        self._line_parser = line_parser

    def parse(
        self, filename: str, constraint: bool
    ) -> Generator[ParsedLine, None, None]:
        """Parse a given file, yielding parsed lines."""
        yield from self._parse_and_recurse(
            filename, constraint, [{os.path.abspath(filename): None}]
        )

    def _parse_and_recurse(
        self,
        filename: str,
        constraint: bool,
        parsed_files_stack: List[Dict[str, Optional[str]]],
    ) -> Generator[ParsedLine, None, None]:
        for line in self._parse_file(filename, constraint):
            if line.requirement is None and (
                line.opts.requirements or line.opts.constraints
            ):
                # parse a nested requirements file
                if line.opts.requirements:
                    req_path = line.opts.requirements[0]
                    nested_constraint = False
                else:
                    req_path = line.opts.constraints[0]
                    nested_constraint = True

                # original file is over http
                if SCHEME_RE.search(filename):
                    # do a url join so relative paths work
                    req_path = urllib.parse.urljoin(filename, req_path)
                # original file and nested file are paths
                elif not SCHEME_RE.search(req_path):
                    # do a join so relative paths work
                    # and then abspath so that we can identify recursive references
                    req_path = os.path.abspath(
                        os.path.join(
                            os.path.dirname(filename),
                            req_path,
                        )
                    )
                parsed_files = parsed_files_stack[0]
                if req_path in parsed_files:
                    initial_file = parsed_files[req_path]
                    tail = (
                        f" and again in {initial_file}"
                        if initial_file is not None
                        else ""
                    )
                    # BUGFIX: message hardcoded "(unknown)" instead of naming
                    # the file that contains the recursive reference.
                    raise RequirementsFileParseError(
                        f"{req_path} recursively references itself in {filename}{tail}"
                    )
                # Keeping a track where was each file first included in
                new_parsed_files = parsed_files.copy()
                new_parsed_files[req_path] = filename
                yield from self._parse_and_recurse(
                    req_path, nested_constraint, [new_parsed_files, *parsed_files_stack]
                )
            else:
                yield line

    def _parse_file(
        self, filename: str, constraint: bool
    ) -> Generator[ParsedLine, None, None]:
        _, content = get_file_content(filename, self._session)

        lines_enum = preprocess(content)

        for line_number, line in lines_enum:
            try:
                args_str, opts = self._line_parser(line)
            except OptionParsingError as e:
                # add offending line
                msg = f"Invalid requirement: {line}\n{e.msg}"
                raise RequirementsFileParseError(msg)

            yield ParsedLine(
                filename,
                line_number,
                args_str,
                opts,
                constraint,
            )


def get_line_parser(finder: Optional["PackageFinder"]) -> LineParser:
    """Return a callable that splits one line into (args, parsed options)."""

    def parse_line(line: str) -> Tuple[str, Values]:
        # Build new parser for each line since it accumulates appendable
        # options.
        parser = build_parser()
        defaults = parser.get_default_values()
        defaults.index_url = None
        if finder:
            defaults.format_control = finder.format_control

        args_str, options_str = break_args_options(line)

        try:
            options = shlex.split(options_str)
        except ValueError as e:
            raise OptionParsingError(f"Could not split options: {options_str}") from e

        opts, _ = parser.parse_args(options, defaults)

        return args_str, opts

    return parse_line


def break_args_options(line: str) -> Tuple[str, str]:
    """Break up the line into an args and options string. We only want to shlex
    (and then optparse) the options, not the args. args can contain markers
    which are corrupted by shlex.
    """
    tokens = line.split(" ")
    args = []
    options = tokens[:]
    for token in tokens:
        # NOTE: startswith("-") already covers "--"; kept for readability.
        if token.startswith("-") or token.startswith("--"):
            break
        else:
            args.append(token)
            options.pop(0)
    return " ".join(args), " ".join(options)


class OptionParsingError(Exception):
    """Raised when an options string on a requirements line cannot be parsed."""

    def __init__(self, msg: str) -> None:
        self.msg = msg


def build_parser() -> optparse.OptionParser:
    """
    Return a parser for parsing requirement lines
    """
    parser = optparse.OptionParser(add_help_option=False)

    option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
    for option_factory in option_factories:
        option = option_factory()
        parser.add_option(option)

    # By default optparse sys.exits on parsing errors. We want to wrap
    # that in our own exception.
    def parser_exit(self: Any, msg: str) -> "NoReturn":
        raise OptionParsingError(msg)

    # NOTE: mypy disallows assigning to a method
    # https://github.com/python/mypy/issues/2427
    parser.exit = parser_exit  # type: ignore

    return parser


def join_lines(lines_enum: ReqFileLines) -> ReqFileLines:
    """Joins a line ending in '\\' with the previous line (except when following
    comments). The joined line takes on the index of the first line.
    """
    primary_line_number = None
    new_line: List[str] = []
    for line_number, line in lines_enum:
        if not line.endswith("\\") or COMMENT_RE.match(line):
            if COMMENT_RE.match(line):
                # this ensures comments are always matched later
                line = " " + line
            if new_line:
                new_line.append(line)
                assert primary_line_number is not None
                yield primary_line_number, "".join(new_line)
                new_line = []
            else:
                yield line_number, line
        else:
            if not new_line:
                primary_line_number = line_number
            new_line.append(line.strip("\\"))

    # last line contains \
    if new_line:
        assert primary_line_number is not None
        yield primary_line_number, "".join(new_line)

    # TODO: handle space after '\'.


def ignore_comments(lines_enum: ReqFileLines) -> ReqFileLines:
    """
    Strips comments and filter empty lines.
    """
    for line_number, line in lines_enum:
        line = COMMENT_RE.sub("", line)
        line = line.strip()
        if line:
            yield line_number, line


def expand_env_variables(lines_enum: ReqFileLines) -> ReqFileLines:
    """Replace all environment variables that can be retrieved via `os.getenv`.

    The only allowed format for environment variables defined in the
    requirement file is `${MY_VARIABLE_1}` to ensure two things:

    1. Strings that contain a `$` aren't accidentally (partially) expanded.
    2. Ensure consistency across platforms for requirement files.

    These points are the result of a discussion on the `github pull
    request #3514 <https://github.com/pypa/pip/pull/3514>`_.

    Valid characters in variable names follow the `POSIX standard
    <http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
    to uppercase letter, digits and the `_` (underscore).
    """
    for line_number, line in lines_enum:
        for env_var, var_name in ENV_VAR_RE.findall(line):
            value = os.getenv(var_name)
            if not value:
                continue

            line = line.replace(env_var, value)

        yield line_number, line


def get_file_content(url: str, session: "PipSession") -> Tuple[str, str]:
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL. Returns (location, content). Content is unicode.
    Respects # -*- coding: declarations on the retrieved files.

    :param url: File path or url.
    :param session: PipSession instance.
    """
    scheme = urllib.parse.urlsplit(url).scheme
    # Pip has special support for file:// URLs (LocalFSAdapter).
    if scheme in ["http", "https", "file"]:
        # Delay importing heavy network modules until absolutely necessary.
        from pip._internal.network.utils import raise_for_status

        resp = session.get(url)
        raise_for_status(resp)
        return resp.url, resp.text

    # Assume this is a bare path.
    try:
        with open(url, "rb") as f:
            raw_content = f.read()
    except OSError as exc:
        raise InstallationError(f"Could not open requirements file: {exc}")

    content = _decode_req_file(raw_content, url)

    return url, content


def _decode_req_file(data: bytes, url: str) -> str:
    """Decode raw requirements-file bytes: BOM first, then a PEP 263 coding
    comment in the first two lines, then UTF-8, then the locale encoding."""
    for bom, encoding in BOMS:
        if data.startswith(bom):
            return data[len(bom) :].decode(encoding)

    for line in data.split(b"\n")[:2]:
        if line[0:1] == b"#":
            result = PEP263_ENCODING_RE.search(line)
            if result is not None:
                encoding = result.groups()[0].decode("ascii")
                return data.decode(encoding)

    try:
        return data.decode(DEFAULT_ENCODING)
    except UnicodeDecodeError:
        locale_encoding = locale.getpreferredencoding(False) or sys.getdefaultencoding()
        # BUGFIX: was `logging.warning(...)`, which logs via the root logger and
        # bypasses pip's configured module logger; use `logger` like the rest of
        # this module.
        logger.warning(
            "unable to decode data from %s with default encoding %s, "
            "falling back to encoding from locale: %s. "
            "If this is intentional you should specify the encoding with a "
            "PEP-263 style comment, e.g. '# -*- coding: %s -*-'",
            url,
            DEFAULT_ENCODING,
            locale_encoding,
            locale_encoding,
        )
        return data.decode(locale_encoding)
.venv\Lib\site-packages\pip\_internal\req\req_file.py
req_file.py
Python
20,234
0.95
0.150883
0.073077
python-kit
353
2023-12-22T14:17:03.366259
MIT
false
24e040d93fa1a4c639c647bc910f4075
import functools\nimport logging\nimport os\nimport shutil\nimport sys\nimport uuid\nimport zipfile\nfrom optparse import Values\nfrom pathlib import Path\nfrom typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union\n\nfrom pip._vendor.packaging.markers import Marker\nfrom pip._vendor.packaging.requirements import Requirement\nfrom pip._vendor.packaging.specifiers import SpecifierSet\nfrom pip._vendor.packaging.utils import canonicalize_name\nfrom pip._vendor.packaging.version import Version\nfrom pip._vendor.packaging.version import parse as parse_version\nfrom pip._vendor.pyproject_hooks import BuildBackendHookCaller\n\nfrom pip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment\nfrom pip._internal.exceptions import InstallationError, PreviousBuildDirError\nfrom pip._internal.locations import get_scheme\nfrom pip._internal.metadata import (\n BaseDistribution,\n get_default_environment,\n get_directory_distribution,\n get_wheel_distribution,\n)\nfrom pip._internal.metadata.base import FilesystemWheel\nfrom pip._internal.models.direct_url import DirectUrl\nfrom pip._internal.models.link import Link\nfrom pip._internal.operations.build.metadata import generate_metadata\nfrom pip._internal.operations.build.metadata_editable import generate_editable_metadata\nfrom pip._internal.operations.build.metadata_legacy import (\n generate_metadata as generate_metadata_legacy,\n)\nfrom pip._internal.operations.install.editable_legacy import (\n install_editable as install_editable_legacy,\n)\nfrom pip._internal.operations.install.wheel import install_wheel\nfrom pip._internal.pyproject import load_pyproject_toml, make_pyproject_path\nfrom pip._internal.req.req_uninstall import UninstallPathSet\nfrom pip._internal.utils.deprecation import deprecated\nfrom pip._internal.utils.hashes import Hashes\nfrom pip._internal.utils.misc import (\n ConfiguredBuildBackendHookCaller,\n ask_path_exists,\n backup_dir,\n display_path,\n hide_url,\n 
is_installable_dir,\n redact_auth_from_requirement,\n redact_auth_from_url,\n)\nfrom pip._internal.utils.packaging import get_requirement\nfrom pip._internal.utils.subprocess import runner_with_spinner_message\nfrom pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds\nfrom pip._internal.utils.unpacking import unpack_file\nfrom pip._internal.utils.virtualenv import running_under_virtualenv\nfrom pip._internal.vcs import vcs\n\nlogger = logging.getLogger(__name__)\n\n\nclass InstallRequirement:\n """\n Represents something that may be installed later on, may have information\n about where to fetch the relevant requirement and also contains logic for\n installing the said requirement.\n """\n\n def __init__(\n self,\n req: Optional[Requirement],\n comes_from: Optional[Union[str, "InstallRequirement"]],\n editable: bool = False,\n link: Optional[Link] = None,\n markers: Optional[Marker] = None,\n use_pep517: Optional[bool] = None,\n isolated: bool = False,\n *,\n global_options: Optional[List[str]] = None,\n hash_options: Optional[Dict[str, List[str]]] = None,\n config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,\n constraint: bool = False,\n extras: Collection[str] = (),\n user_supplied: bool = False,\n permit_editable_wheels: bool = False,\n ) -> None:\n assert req is None or isinstance(req, Requirement), req\n self.req = req\n self.comes_from = comes_from\n self.constraint = constraint\n self.editable = editable\n self.permit_editable_wheels = permit_editable_wheels\n\n # source_dir is the local directory where the linked requirement is\n # located, or unpacked. In case unpacking is needed, creating and\n # populating source_dir is done by the RequirementPreparer. 
Note this\n # is not necessarily the directory where pyproject.toml or setup.py is\n # located - that one is obtained via unpacked_source_directory.\n self.source_dir: Optional[str] = None\n if self.editable:\n assert link\n if link.is_file:\n self.source_dir = os.path.normpath(os.path.abspath(link.file_path))\n\n # original_link is the direct URL that was provided by the user for the\n # requirement, either directly or via a constraints file.\n if link is None and req and req.url:\n # PEP 508 URL requirement\n link = Link(req.url)\n self.link = self.original_link = link\n\n # When this InstallRequirement is a wheel obtained from the cache of locally\n # built wheels, this is the source link corresponding to the cache entry, which\n # was used to download and build the cached wheel.\n self.cached_wheel_source_link: Optional[Link] = None\n\n # Information about the location of the artifact that was downloaded . This\n # property is guaranteed to be set in resolver results.\n self.download_info: Optional[DirectUrl] = None\n\n # Path to any downloaded or already-existing package.\n self.local_file_path: Optional[str] = None\n if self.link and self.link.is_file:\n self.local_file_path = self.link.file_path\n\n if extras:\n self.extras = extras\n elif req:\n self.extras = req.extras\n else:\n self.extras = set()\n if markers is None and req:\n markers = req.marker\n self.markers = markers\n\n # This holds the Distribution object if this requirement is already installed.\n self.satisfied_by: Optional[BaseDistribution] = None\n # Whether the installation process should try to uninstall an existing\n # distribution before installing this requirement.\n self.should_reinstall = False\n # Temporary build location\n self._temp_build_dir: Optional[TempDirectory] = None\n # Set to True after successful installation\n self.install_succeeded: Optional[bool] = None\n # Supplied options\n self.global_options = global_options if global_options else []\n self.hash_options = 
hash_options if hash_options else {}\n self.config_settings = config_settings\n # Set to True after successful preparation of this requirement\n self.prepared = False\n # User supplied requirement are explicitly requested for installation\n # by the user via CLI arguments or requirements files, as opposed to,\n # e.g. dependencies, extras or constraints.\n self.user_supplied = user_supplied\n\n self.isolated = isolated\n self.build_env: BuildEnvironment = NoOpBuildEnvironment()\n\n # For PEP 517, the directory where we request the project metadata\n # gets stored. We need this to pass to build_wheel, so the backend\n # can ensure that the wheel matches the metadata (see the PEP for\n # details).\n self.metadata_directory: Optional[str] = None\n\n # The static build requirements (from pyproject.toml)\n self.pyproject_requires: Optional[List[str]] = None\n\n # Build requirements that we will check are available\n self.requirements_to_check: List[str] = []\n\n # The PEP 517 backend we should use to build the project\n self.pep517_backend: Optional[BuildBackendHookCaller] = None\n\n # Are we using PEP 517 for this requirement?\n # After pyproject.toml has been loaded, the only valid values are True\n # and False. 
Before loading, None is valid (meaning "use the default").\n # Setting an explicit value before loading pyproject.toml is supported,\n # but after loading this flag should be treated as read only.\n self.use_pep517 = use_pep517\n\n # If config settings are provided, enforce PEP 517.\n if self.config_settings:\n if self.use_pep517 is False:\n logger.warning(\n "--no-use-pep517 ignored for %s "\n "because --config-settings are specified.",\n self,\n )\n self.use_pep517 = True\n\n # This requirement needs more preparation before it can be built\n self.needs_more_preparation = False\n\n # This requirement needs to be unpacked before it can be installed.\n self._archive_source: Optional[Path] = None\n\n def __str__(self) -> str:\n if self.req:\n s = redact_auth_from_requirement(self.req)\n if self.link:\n s += f" from {redact_auth_from_url(self.link.url)}"\n elif self.link:\n s = redact_auth_from_url(self.link.url)\n else:\n s = "<InstallRequirement>"\n if self.satisfied_by is not None:\n if self.satisfied_by.location is not None:\n location = display_path(self.satisfied_by.location)\n else:\n location = "<memory>"\n s += f" in {location}"\n if self.comes_from:\n if isinstance(self.comes_from, str):\n comes_from: Optional[str] = self.comes_from\n else:\n comes_from = self.comes_from.from_path()\n if comes_from:\n s += f" (from {comes_from})"\n return s\n\n def __repr__(self) -> str:\n return (\n f"<{self.__class__.__name__} object: "\n f"{str(self)} editable={self.editable!r}>"\n )\n\n def format_debug(self) -> str:\n """An un-tested helper for getting state, for debugging."""\n attributes = vars(self)\n names = sorted(attributes)\n\n state = (f"{attr}={attributes[attr]!r}" for attr in sorted(names))\n return "<{name} object: {{{state}}}>".format(\n name=self.__class__.__name__,\n state=", ".join(state),\n )\n\n # Things that are valid for all kinds of requirements?\n @property\n def name(self) -> Optional[str]:\n if self.req is None:\n return None\n return 
self.req.name\n\n @functools.cached_property\n def supports_pyproject_editable(self) -> bool:\n if not self.use_pep517:\n return False\n assert self.pep517_backend\n with self.build_env:\n runner = runner_with_spinner_message(\n "Checking if build backend supports build_editable"\n )\n with self.pep517_backend.subprocess_runner(runner):\n return "build_editable" in self.pep517_backend._supported_features()\n\n @property\n def specifier(self) -> SpecifierSet:\n assert self.req is not None\n return self.req.specifier\n\n @property\n def is_direct(self) -> bool:\n """Whether this requirement was specified as a direct URL."""\n return self.original_link is not None\n\n @property\n def is_pinned(self) -> bool:\n """Return whether I am pinned to an exact version.\n\n For example, some-package==1.2 is pinned; some-package>1.2 is not.\n """\n assert self.req is not None\n specifiers = self.req.specifier\n return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}\n\n def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool:\n if not extras_requested:\n # Provide an extra to safely evaluate the markers\n # without matching any extra\n extras_requested = ("",)\n if self.markers is not None:\n return any(\n self.markers.evaluate({"extra": extra}) for extra in extras_requested\n )\n else:\n return True\n\n @property\n def has_hash_options(self) -> bool:\n """Return whether any known-good hashes are specified as options.\n\n These activate --require-hashes mode; hashes specified as part of a\n URL do not.\n\n """\n return bool(self.hash_options)\n\n def hashes(self, trust_internet: bool = True) -> Hashes:\n """Return a hash-comparer that considers my option- and URL-based\n hashes to be known-good.\n\n Hashes in URLs--ones embedded in the requirements file, not ones\n downloaded from an index server--are almost peers with ones from\n flags. 
They satisfy --require-hashes (whether it was implicitly or\n explicitly activated) but do not activate it. md5 and sha224 are not\n allowed in flags, which should nudge people toward good algos. We\n always OR all hashes together, even ones from URLs.\n\n :param trust_internet: Whether to trust URL-based (#md5=...) hashes\n downloaded from the internet, as by populate_link()\n\n """\n good_hashes = self.hash_options.copy()\n if trust_internet:\n link = self.link\n elif self.is_direct and self.user_supplied:\n link = self.original_link\n else:\n link = None\n if link and link.hash:\n assert link.hash_name is not None\n good_hashes.setdefault(link.hash_name, []).append(link.hash)\n return Hashes(good_hashes)\n\n def from_path(self) -> Optional[str]:\n """Format a nice indicator to show where this "comes from" """\n if self.req is None:\n return None\n s = str(self.req)\n if self.comes_from:\n comes_from: Optional[str]\n if isinstance(self.comes_from, str):\n comes_from = self.comes_from\n else:\n comes_from = self.comes_from.from_path()\n if comes_from:\n s += "->" + comes_from\n return s\n\n def ensure_build_location(\n self, build_dir: str, autodelete: bool, parallel_builds: bool\n ) -> str:\n assert build_dir is not None\n if self._temp_build_dir is not None:\n assert self._temp_build_dir.path\n return self._temp_build_dir.path\n if self.req is None:\n # Some systems have /tmp as a symlink which confuses custom\n # builds (such as numpy). Thus, we ensure that the real path\n # is returned.\n self._temp_build_dir = TempDirectory(\n kind=tempdir_kinds.REQ_BUILD, globally_managed=True\n )\n\n return self._temp_build_dir.path\n\n # This is the only remaining place where we manually determine the path\n # for the temporary directory. 
It is only needed for editables where\n # it is the value of the --src option.\n\n # When parallel builds are enabled, add a UUID to the build directory\n # name so multiple builds do not interfere with each other.\n dir_name: str = canonicalize_name(self.req.name)\n if parallel_builds:\n dir_name = f"{dir_name}_{uuid.uuid4().hex}"\n\n # FIXME: Is there a better place to create the build_dir? (hg and bzr\n # need this)\n if not os.path.exists(build_dir):\n logger.debug("Creating directory %s", build_dir)\n os.makedirs(build_dir)\n actual_build_dir = os.path.join(build_dir, dir_name)\n # `None` indicates that we respect the globally-configured deletion\n # settings, which is what we actually want when auto-deleting.\n delete_arg = None if autodelete else False\n return TempDirectory(\n path=actual_build_dir,\n delete=delete_arg,\n kind=tempdir_kinds.REQ_BUILD,\n globally_managed=True,\n ).path\n\n def _set_requirement(self) -> None:\n """Set requirement after generating metadata."""\n assert self.req is None\n assert self.metadata is not None\n assert self.source_dir is not None\n\n # Construct a Requirement object from the generated metadata\n if isinstance(parse_version(self.metadata["Version"]), Version):\n op = "=="\n else:\n op = "==="\n\n self.req = get_requirement(\n "".join(\n [\n self.metadata["Name"],\n op,\n self.metadata["Version"],\n ]\n )\n )\n\n def warn_on_mismatching_name(self) -> None:\n assert self.req is not None\n metadata_name = canonicalize_name(self.metadata["Name"])\n if canonicalize_name(self.req.name) == metadata_name:\n # Everything is fine.\n return\n\n # If we're here, there's a mismatch. Log a warning about it.\n logger.warning(\n "Generating metadata for package %s "\n "produced metadata for project name %s. 
Fix your "\n "#egg=%s fragments.",\n self.name,\n metadata_name,\n self.name,\n )\n self.req = get_requirement(metadata_name)\n\n def check_if_exists(self, use_user_site: bool) -> None:\n """Find an installed distribution that satisfies or conflicts\n with this requirement, and set self.satisfied_by or\n self.should_reinstall appropriately.\n """\n if self.req is None:\n return\n existing_dist = get_default_environment().get_distribution(self.req.name)\n if not existing_dist:\n return\n\n version_compatible = self.req.specifier.contains(\n existing_dist.version,\n prereleases=True,\n )\n if not version_compatible:\n self.satisfied_by = None\n if use_user_site:\n if existing_dist.in_usersite:\n self.should_reinstall = True\n elif running_under_virtualenv() and existing_dist.in_site_packages:\n raise InstallationError(\n f"Will not install to the user site because it will "\n f"lack sys.path precedence to {existing_dist.raw_name} "\n f"in {existing_dist.location}"\n )\n else:\n self.should_reinstall = True\n else:\n if self.editable:\n self.should_reinstall = True\n # when installing editables, nothing pre-existing should ever\n # satisfy\n self.satisfied_by = None\n else:\n self.satisfied_by = existing_dist\n\n # Things valid for wheels\n @property\n def is_wheel(self) -> bool:\n if not self.link:\n return False\n return self.link.is_wheel\n\n @property\n def is_wheel_from_cache(self) -> bool:\n # When True, it means that this InstallRequirement is a local wheel file in the\n # cache of locally built wheels.\n return self.cached_wheel_source_link is not None\n\n # Things valid for sdists\n @property\n def unpacked_source_directory(self) -> str:\n assert self.source_dir, f"No source dir for {self}"\n return os.path.join(\n self.source_dir, self.link and self.link.subdirectory_fragment or ""\n )\n\n @property\n def setup_py_path(self) -> str:\n assert self.source_dir, f"No source dir for {self}"\n setup_py = os.path.join(self.unpacked_source_directory, "setup.py")\n\n 
return setup_py\n\n @property\n def setup_cfg_path(self) -> str:\n assert self.source_dir, f"No source dir for {self}"\n setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg")\n\n return setup_cfg\n\n @property\n def pyproject_toml_path(self) -> str:\n assert self.source_dir, f"No source dir for {self}"\n return make_pyproject_path(self.unpacked_source_directory)\n\n def load_pyproject_toml(self) -> None:\n """Load the pyproject.toml file.\n\n After calling this routine, all of the attributes related to PEP 517\n processing for this requirement have been set. In particular, the\n use_pep517 attribute can be used to determine whether we should\n follow the PEP 517 or legacy (setup.py) code path.\n """\n pyproject_toml_data = load_pyproject_toml(\n self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self)\n )\n\n if pyproject_toml_data is None:\n assert not self.config_settings\n self.use_pep517 = False\n return\n\n self.use_pep517 = True\n requires, backend, check, backend_path = pyproject_toml_data\n self.requirements_to_check = check\n self.pyproject_requires = requires\n self.pep517_backend = ConfiguredBuildBackendHookCaller(\n self,\n self.unpacked_source_directory,\n backend,\n backend_path=backend_path,\n )\n\n def isolated_editable_sanity_check(self) -> None:\n """Check that an editable requirement if valid for use with PEP 517/518.\n\n This verifies that an editable that has a pyproject.toml either supports PEP 660\n or as a setup.py or a setup.cfg\n """\n if (\n self.editable\n and self.use_pep517\n and not self.supports_pyproject_editable\n and not os.path.isfile(self.setup_py_path)\n and not os.path.isfile(self.setup_cfg_path)\n ):\n raise InstallationError(\n f"Project {self} has a 'pyproject.toml' and its build "\n f"backend is missing the 'build_editable' hook. Since it does not "\n f"have a 'setup.py' nor a 'setup.cfg', "\n f"it cannot be installed in editable mode. 
"\n f"Consider using a build backend that supports PEP 660."\n )\n\n def prepare_metadata(self) -> None:\n """Ensure that project metadata is available.\n\n Under PEP 517 and PEP 660, call the backend hook to prepare the metadata.\n Under legacy processing, call setup.py egg-info.\n """\n assert self.source_dir, f"No source dir for {self}"\n details = self.name or f"from {self.link}"\n\n if self.use_pep517:\n assert self.pep517_backend is not None\n if (\n self.editable\n and self.permit_editable_wheels\n and self.supports_pyproject_editable\n ):\n self.metadata_directory = generate_editable_metadata(\n build_env=self.build_env,\n backend=self.pep517_backend,\n details=details,\n )\n else:\n self.metadata_directory = generate_metadata(\n build_env=self.build_env,\n backend=self.pep517_backend,\n details=details,\n )\n else:\n self.metadata_directory = generate_metadata_legacy(\n build_env=self.build_env,\n setup_py_path=self.setup_py_path,\n source_dir=self.unpacked_source_directory,\n isolated=self.isolated,\n details=details,\n )\n\n # Act on the newly generated metadata, based on the name and version.\n if not self.name:\n self._set_requirement()\n else:\n self.warn_on_mismatching_name()\n\n self.assert_source_matches_version()\n\n @property\n def metadata(self) -> Any:\n if not hasattr(self, "_metadata"):\n self._metadata = self.get_dist().metadata\n\n return self._metadata\n\n def get_dist(self) -> BaseDistribution:\n if self.metadata_directory:\n return get_directory_distribution(self.metadata_directory)\n elif self.local_file_path and self.is_wheel:\n assert self.req is not None\n return get_wheel_distribution(\n FilesystemWheel(self.local_file_path),\n canonicalize_name(self.req.name),\n )\n raise AssertionError(\n f"InstallRequirement {self} has no metadata directory and no wheel: "\n f"can't make a distribution."\n )\n\n def assert_source_matches_version(self) -> None:\n assert self.source_dir, f"No source dir for {self}"\n version = 
self.metadata["version"]\n if self.req and self.req.specifier and version not in self.req.specifier:\n logger.warning(\n "Requested %s, but installing version %s",\n self,\n version,\n )\n else:\n logger.debug(\n "Source in %s has version %s, which satisfies requirement %s",\n display_path(self.source_dir),\n version,\n self,\n )\n\n # For both source distributions and editables\n def ensure_has_source_dir(\n self,\n parent_dir: str,\n autodelete: bool = False,\n parallel_builds: bool = False,\n ) -> None:\n """Ensure that a source_dir is set.\n\n This will create a temporary build dir if the name of the requirement\n isn't known yet.\n\n :param parent_dir: The ideal pip parent_dir for the source_dir.\n Generally src_dir for editables and build_dir for sdists.\n :return: self.source_dir\n """\n if self.source_dir is None:\n self.source_dir = self.ensure_build_location(\n parent_dir,\n autodelete=autodelete,\n parallel_builds=parallel_builds,\n )\n\n def needs_unpacked_archive(self, archive_source: Path) -> None:\n assert self._archive_source is None\n self._archive_source = archive_source\n\n def ensure_pristine_source_checkout(self) -> None:\n """Ensure the source directory has not yet been built in."""\n assert self.source_dir is not None\n if self._archive_source is not None:\n unpack_file(str(self._archive_source), self.source_dir)\n elif is_installable_dir(self.source_dir):\n # If a checkout exists, it's unwise to keep going.\n # version inconsistencies are logged later, but do not fail\n # the installation.\n raise PreviousBuildDirError(\n f"pip can't proceed with requirements '{self}' due to a "\n f"pre-existing build directory ({self.source_dir}). This is likely "\n "due to a previous installation that failed . pip is "\n "being responsible and not assuming it can delete this. 
"\n "Please delete it and try again."\n )\n\n # For editable installations\n def update_editable(self) -> None:\n if not self.link:\n logger.debug(\n "Cannot update repository at %s; repository location is unknown",\n self.source_dir,\n )\n return\n assert self.editable\n assert self.source_dir\n if self.link.scheme == "file":\n # Static paths don't get updated\n return\n vcs_backend = vcs.get_backend_for_scheme(self.link.scheme)\n # Editable requirements are validated in Requirement constructors.\n # So here, if it's neither a path nor a valid VCS URL, it's a bug.\n assert vcs_backend, f"Unsupported VCS URL {self.link.url}"\n hidden_url = hide_url(self.link.url)\n vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0)\n\n # Top-level Actions\n def uninstall(\n self, auto_confirm: bool = False, verbose: bool = False\n ) -> Optional[UninstallPathSet]:\n """\n Uninstall the distribution currently satisfying this requirement.\n\n Prompts before removing or modifying files unless\n ``auto_confirm`` is True.\n\n Refuses to delete or modify files outside of ``sys.prefix`` -\n thus uninstallation within a virtual environment can only\n modify that virtual environment, even if the virtualenv is\n linked to global site-packages.\n\n """\n assert self.req\n dist = get_default_environment().get_distribution(self.req.name)\n if not dist:\n logger.warning("Skipping %s as it is not installed.", self.name)\n return None\n logger.info("Found existing installation: %s", dist)\n\n uninstalled_pathset = UninstallPathSet.from_dist(dist)\n uninstalled_pathset.remove(auto_confirm, verbose)\n return uninstalled_pathset\n\n def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str:\n def _clean_zip_name(name: str, prefix: str) -> str:\n assert name.startswith(\n prefix + os.path.sep\n ), f"name {name!r} doesn't start with prefix {prefix!r}"\n name = name[len(prefix) + 1 :]\n name = name.replace(os.path.sep, "/")\n return name\n\n assert self.req is not None\n 
path = os.path.join(parentdir, path)\n name = _clean_zip_name(path, rootdir)\n return self.req.name + "/" + name\n\n def archive(self, build_dir: Optional[str]) -> None:\n """Saves archive to provided build_dir.\n\n Used for saving downloaded VCS requirements as part of `pip download`.\n """\n assert self.source_dir\n if build_dir is None:\n return\n\n create_archive = True\n archive_name = "{}-{}.zip".format(self.name, self.metadata["version"])\n archive_path = os.path.join(build_dir, archive_name)\n\n if os.path.exists(archive_path):\n response = ask_path_exists(\n f"The file {display_path(archive_path)} exists. (i)gnore, (w)ipe, "\n "(b)ackup, (a)bort ",\n ("i", "w", "b", "a"),\n )\n if response == "i":\n create_archive = False\n elif response == "w":\n logger.warning("Deleting %s", display_path(archive_path))\n os.remove(archive_path)\n elif response == "b":\n dest_file = backup_dir(archive_path)\n logger.warning(\n "Backing up %s to %s",\n display_path(archive_path),\n display_path(dest_file),\n )\n shutil.move(archive_path, dest_file)\n elif response == "a":\n sys.exit(-1)\n\n if not create_archive:\n return\n\n zip_output = zipfile.ZipFile(\n archive_path,\n "w",\n zipfile.ZIP_DEFLATED,\n allowZip64=True,\n )\n with zip_output:\n dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory))\n for dirpath, dirnames, filenames in os.walk(dir):\n for dirname in dirnames:\n dir_arcname = self._get_archive_name(\n dirname,\n parentdir=dirpath,\n rootdir=dir,\n )\n zipdir = zipfile.ZipInfo(dir_arcname + "/")\n zipdir.external_attr = 0x1ED << 16 # 0o755\n zip_output.writestr(zipdir, "")\n for filename in filenames:\n file_arcname = self._get_archive_name(\n filename,\n parentdir=dirpath,\n rootdir=dir,\n )\n filename = os.path.join(dirpath, filename)\n zip_output.write(filename, file_arcname)\n\n logger.info("Saved %s", display_path(archive_path))\n\n def install(\n self,\n global_options: Optional[Sequence[str]] = None,\n root: Optional[str] = None,\n 
home: Optional[str] = None,\n prefix: Optional[str] = None,\n warn_script_location: bool = True,\n use_user_site: bool = False,\n pycompile: bool = True,\n ) -> None:\n assert self.req is not None\n scheme = get_scheme(\n self.req.name,\n user=use_user_site,\n home=home,\n root=root,\n isolated=self.isolated,\n prefix=prefix,\n )\n\n if self.editable and not self.is_wheel:\n deprecated(\n reason=(\n f"Legacy editable install of {self} (setup.py develop) "\n "is deprecated."\n ),\n replacement=(\n "to add a pyproject.toml or enable --use-pep517, "\n "and use setuptools >= 64. "\n "If the resulting installation is not behaving as expected, "\n "try using --config-settings editable_mode=compat. "\n "Please consult the setuptools documentation for more information"\n ),\n gone_in="25.3",\n issue=11457,\n )\n if self.config_settings:\n logger.warning(\n "--config-settings ignored for legacy editable install of %s. "\n "Consider upgrading to a version of setuptools "\n "that supports PEP 660 (>= 64).",\n self,\n )\n install_editable_legacy(\n global_options=global_options if global_options is not None else [],\n prefix=prefix,\n home=home,\n use_user_site=use_user_site,\n name=self.req.name,\n setup_py_path=self.setup_py_path,\n isolated=self.isolated,\n build_env=self.build_env,\n unpacked_source_directory=self.unpacked_source_directory,\n )\n self.install_succeeded = True\n return\n\n assert self.is_wheel\n assert self.local_file_path\n\n install_wheel(\n self.req.name,\n self.local_file_path,\n scheme=scheme,\n req_description=str(self.req),\n pycompile=pycompile,\n warn_script_location=warn_script_location,\n direct_url=self.download_info if self.is_direct else None,\n requested=self.user_supplied,\n )\n self.install_succeeded = True\n\n\ndef check_invalid_constraint_type(req: InstallRequirement) -> str:\n # Check for unsupported forms\n problem = ""\n if not req.name:\n problem = "Unnamed requirements are not allowed as constraints"\n elif req.editable:\n problem = 
"Editable requirements are not allowed as constraints"\n elif req.extras:\n problem = "Constraints cannot have extras"\n\n if problem:\n deprecated(\n reason=(\n "Constraints are only allowed to take the form of a package "\n "name and a version specifier. Other forms were originally "\n "permitted as an accident of the implementation, but were "\n "undocumented. The new implementation of the resolver no "\n "longer supports these forms."\n ),\n replacement="replacing the constraint with a requirement",\n # No plan yet for when the new resolver becomes default\n gone_in=None,\n issue=8210,\n )\n\n return problem\n\n\ndef _has_option(options: Values, reqs: List[InstallRequirement], option: str) -> bool:\n if getattr(options, option, None):\n return True\n for req in reqs:\n if getattr(req, option, None):\n return True\n return False\n\n\ndef check_legacy_setup_py_options(\n options: Values,\n reqs: List[InstallRequirement],\n) -> None:\n has_build_options = _has_option(options, reqs, "build_options")\n has_global_options = _has_option(options, reqs, "global_options")\n if has_build_options or has_global_options:\n deprecated(\n reason="--build-option and --global-option are deprecated.",\n issue=11859,\n replacement="to use --config-settings",\n gone_in="25.3",\n )\n logger.warning(\n "Implying --no-binary=:all: due to the presence of "\n "--build-option / --global-option. "\n )\n options.format_control.disallow_binaries()\n
.venv\Lib\site-packages\pip\_internal\req\req_install.py
req_install.py
Python
35,788
0.95
0.165953
0.09201
python-kit
573
2024-03-15T06:42:05.586870
MIT
false
57d11d9cd43dc1aa7557cc6b136eae24
import logging\nfrom collections import OrderedDict\nfrom typing import Dict, List\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.req.req_install import InstallRequirement\n\nlogger = logging.getLogger(__name__)\n\n\nclass RequirementSet:\n def __init__(self, check_supported_wheels: bool = True) -> None:\n """Create a RequirementSet."""\n\n self.requirements: Dict[str, InstallRequirement] = OrderedDict()\n self.check_supported_wheels = check_supported_wheels\n\n self.unnamed_requirements: List[InstallRequirement] = []\n\n def __str__(self) -> str:\n requirements = sorted(\n (req for req in self.requirements.values() if not req.comes_from),\n key=lambda req: canonicalize_name(req.name or ""),\n )\n return " ".join(str(req.req) for req in requirements)\n\n def __repr__(self) -> str:\n requirements = sorted(\n self.requirements.values(),\n key=lambda req: canonicalize_name(req.name or ""),\n )\n\n format_string = "<{classname} object; {count} requirement(s): {reqs}>"\n return format_string.format(\n classname=self.__class__.__name__,\n count=len(requirements),\n reqs=", ".join(str(req.req) for req in requirements),\n )\n\n def add_unnamed_requirement(self, install_req: InstallRequirement) -> None:\n assert not install_req.name\n self.unnamed_requirements.append(install_req)\n\n def add_named_requirement(self, install_req: InstallRequirement) -> None:\n assert install_req.name\n\n project_name = canonicalize_name(install_req.name)\n self.requirements[project_name] = install_req\n\n def has_requirement(self, name: str) -> bool:\n project_name = canonicalize_name(name)\n\n return (\n project_name in self.requirements\n and not self.requirements[project_name].constraint\n )\n\n def get_requirement(self, name: str) -> InstallRequirement:\n project_name = canonicalize_name(name)\n\n if project_name in self.requirements:\n return self.requirements[project_name]\n\n raise KeyError(f"No project with the name {name!r}")\n\n @property\n def 
all_requirements(self) -> List[InstallRequirement]:\n return self.unnamed_requirements + list(self.requirements.values())\n\n @property\n def requirements_to_install(self) -> List[InstallRequirement]:\n """Return the list of requirements that need to be installed.\n\n TODO remove this property together with the legacy resolver, since the new\n resolver only returns requirements that need to be installed.\n """\n return [\n install_req\n for install_req in self.all_requirements\n if not install_req.constraint and not install_req.satisfied_by\n ]\n
.venv\Lib\site-packages\pip\_internal\req\req_set.py
req_set.py
Python
2,858
0.85
0.207317
0
react-lib
890
2025-03-22T03:44:12.244264
MIT
false
5e5ce95b24a278a3d7ce245c37ff960e
import functools\nimport os\nimport sys\nimport sysconfig\nfrom importlib.util import cache_from_source\nfrom typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple\n\nfrom pip._internal.exceptions import LegacyDistutilsInstall, UninstallMissingRecord\nfrom pip._internal.locations import get_bin_prefix, get_bin_user\nfrom pip._internal.metadata import BaseDistribution\nfrom pip._internal.utils.compat import WINDOWS\nfrom pip._internal.utils.egg_link import egg_link_path_from_location\nfrom pip._internal.utils.logging import getLogger, indent_log\nfrom pip._internal.utils.misc import ask, normalize_path, renames, rmtree\nfrom pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory\nfrom pip._internal.utils.virtualenv import running_under_virtualenv\n\nlogger = getLogger(__name__)\n\n\ndef _script_names(\n bin_dir: str, script_name: str, is_gui: bool\n) -> Generator[str, None, None]:\n """Create the fully qualified name of the files created by\n {console,gui}_scripts for the given ``dist``.\n Returns the list of file names\n """\n exe_name = os.path.join(bin_dir, script_name)\n yield exe_name\n if not WINDOWS:\n return\n yield f"{exe_name}.exe"\n yield f"{exe_name}.exe.manifest"\n if is_gui:\n yield f"{exe_name}-script.pyw"\n else:\n yield f"{exe_name}-script.py"\n\n\ndef _unique(\n fn: Callable[..., Generator[Any, None, None]],\n) -> Callable[..., Generator[Any, None, None]]:\n @functools.wraps(fn)\n def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]:\n seen: Set[Any] = set()\n for item in fn(*args, **kw):\n if item not in seen:\n seen.add(item)\n yield item\n\n return unique\n\n\n@_unique\ndef uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]:\n """\n Yield all the uninstallation paths for dist based on RECORD-without-.py[co]\n\n Yield paths to all the files in RECORD. 
For each .py file in RECORD, add\n the .pyc and .pyo in the same directory.\n\n UninstallPathSet.add() takes care of the __pycache__ .py[co].\n\n If RECORD is not found, raises an error,\n with possible information from the INSTALLER file.\n\n https://packaging.python.org/specifications/recording-installed-packages/\n """\n location = dist.location\n assert location is not None, "not installed"\n\n entries = dist.iter_declared_entries()\n if entries is None:\n raise UninstallMissingRecord(distribution=dist)\n\n for entry in entries:\n path = os.path.join(location, entry)\n yield path\n if path.endswith(".py"):\n dn, fn = os.path.split(path)\n base = fn[:-3]\n path = os.path.join(dn, base + ".pyc")\n yield path\n path = os.path.join(dn, base + ".pyo")\n yield path\n\n\ndef compact(paths: Iterable[str]) -> Set[str]:\n """Compact a path set to contain the minimal number of paths\n necessary to contain all paths in the set. If /a/path/ and\n /a/path/to/a/file.txt are both in the set, leave only the\n shorter path."""\n\n sep = os.path.sep\n short_paths: Set[str] = set()\n for path in sorted(paths, key=len):\n should_skip = any(\n path.startswith(shortpath.rstrip("*"))\n and path[len(shortpath.rstrip("*").rstrip(sep))] == sep\n for shortpath in short_paths\n )\n if not should_skip:\n short_paths.add(path)\n return short_paths\n\n\ndef compress_for_rename(paths: Iterable[str]) -> Set[str]:\n """Returns a set containing the paths that need to be renamed.\n\n This set may include directories when the original sequence of paths\n included every file on disk.\n """\n case_map = {os.path.normcase(p): p for p in paths}\n remaining = set(case_map)\n unchecked = sorted({os.path.split(p)[0] for p in case_map.values()}, key=len)\n wildcards: Set[str] = set()\n\n def norm_join(*a: str) -> str:\n return os.path.normcase(os.path.join(*a))\n\n for root in unchecked:\n if any(os.path.normcase(root).startswith(w) for w in wildcards):\n # This directory has already been handled.\n 
continue\n\n all_files: Set[str] = set()\n all_subdirs: Set[str] = set()\n for dirname, subdirs, files in os.walk(root):\n all_subdirs.update(norm_join(root, dirname, d) for d in subdirs)\n all_files.update(norm_join(root, dirname, f) for f in files)\n # If all the files we found are in our remaining set of files to\n # remove, then remove them from the latter set and add a wildcard\n # for the directory.\n if not (all_files - remaining):\n remaining.difference_update(all_files)\n wildcards.add(root + os.sep)\n\n return set(map(case_map.__getitem__, remaining)) | wildcards\n\n\ndef compress_for_output_listing(paths: Iterable[str]) -> Tuple[Set[str], Set[str]]:\n """Returns a tuple of 2 sets of which paths to display to user\n\n The first set contains paths that would be deleted. Files of a package\n are not added and the top-level directory of the package has a '*' added\n at the end - to signify that all it's contents are removed.\n\n The second set contains files that would have been skipped in the above\n folders.\n """\n\n will_remove = set(paths)\n will_skip = set()\n\n # Determine folders and files\n folders = set()\n files = set()\n for path in will_remove:\n if path.endswith(".pyc"):\n continue\n if path.endswith("__init__.py") or ".dist-info" in path:\n folders.add(os.path.dirname(path))\n files.add(path)\n\n _normcased_files = set(map(os.path.normcase, files))\n\n folders = compact(folders)\n\n # This walks the tree using os.walk to not miss extra folders\n # that might get added.\n for folder in folders:\n for dirpath, _, dirfiles in os.walk(folder):\n for fname in dirfiles:\n if fname.endswith(".pyc"):\n continue\n\n file_ = os.path.join(dirpath, fname)\n if (\n os.path.isfile(file_)\n and os.path.normcase(file_) not in _normcased_files\n ):\n # We are skipping this file. 
Add it to the set.\n will_skip.add(file_)\n\n will_remove = files | {os.path.join(folder, "*") for folder in folders}\n\n return will_remove, will_skip\n\n\nclass StashedUninstallPathSet:\n """A set of file rename operations to stash files while\n tentatively uninstalling them."""\n\n def __init__(self) -> None:\n # Mapping from source file root to [Adjacent]TempDirectory\n # for files under that directory.\n self._save_dirs: Dict[str, TempDirectory] = {}\n # (old path, new path) tuples for each move that may need\n # to be undone.\n self._moves: List[Tuple[str, str]] = []\n\n def _get_directory_stash(self, path: str) -> str:\n """Stashes a directory.\n\n Directories are stashed adjacent to their original location if\n possible, or else moved/copied into the user's temp dir."""\n\n try:\n save_dir: TempDirectory = AdjacentTempDirectory(path)\n except OSError:\n save_dir = TempDirectory(kind="uninstall")\n self._save_dirs[os.path.normcase(path)] = save_dir\n\n return save_dir.path\n\n def _get_file_stash(self, path: str) -> str:\n """Stashes a file.\n\n If no root has been provided, one will be created for the directory\n in the user's temp directory."""\n path = os.path.normcase(path)\n head, old_head = os.path.dirname(path), None\n save_dir = None\n\n while head != old_head:\n try:\n save_dir = self._save_dirs[head]\n break\n except KeyError:\n pass\n head, old_head = os.path.dirname(head), head\n else:\n # Did not find any suitable root\n head = os.path.dirname(path)\n save_dir = TempDirectory(kind="uninstall")\n self._save_dirs[head] = save_dir\n\n relpath = os.path.relpath(path, head)\n if relpath and relpath != os.path.curdir:\n return os.path.join(save_dir.path, relpath)\n return save_dir.path\n\n def stash(self, path: str) -> str:\n """Stashes the directory or file and returns its new location.\n Handle symlinks as files to avoid modifying the symlink targets.\n """\n path_is_dir = os.path.isdir(path) and not os.path.islink(path)\n if path_is_dir:\n new_path 
= self._get_directory_stash(path)\n else:\n new_path = self._get_file_stash(path)\n\n self._moves.append((path, new_path))\n if path_is_dir and os.path.isdir(new_path):\n # If we're moving a directory, we need to\n # remove the destination first or else it will be\n # moved to inside the existing directory.\n # We just created new_path ourselves, so it will\n # be removable.\n os.rmdir(new_path)\n renames(path, new_path)\n return new_path\n\n def commit(self) -> None:\n """Commits the uninstall by removing stashed files."""\n for save_dir in self._save_dirs.values():\n save_dir.cleanup()\n self._moves = []\n self._save_dirs = {}\n\n def rollback(self) -> None:\n """Undoes the uninstall by moving stashed files back."""\n for p in self._moves:\n logger.info("Moving to %s\n from %s", *p)\n\n for new_path, path in self._moves:\n try:\n logger.debug("Replacing %s from %s", new_path, path)\n if os.path.isfile(new_path) or os.path.islink(new_path):\n os.unlink(new_path)\n elif os.path.isdir(new_path):\n rmtree(new_path)\n renames(path, new_path)\n except OSError as ex:\n logger.error("Failed to restore %s", new_path)\n logger.debug("Exception: %s", ex)\n\n self.commit()\n\n @property\n def can_rollback(self) -> bool:\n return bool(self._moves)\n\n\nclass UninstallPathSet:\n """A set of file paths to be removed in the uninstallation of a\n requirement."""\n\n def __init__(self, dist: BaseDistribution) -> None:\n self._paths: Set[str] = set()\n self._refuse: Set[str] = set()\n self._pth: Dict[str, UninstallPthEntries] = {}\n self._dist = dist\n self._moved_paths = StashedUninstallPathSet()\n # Create local cache of normalize_path results. 
Creating an UninstallPathSet\n # can result in hundreds/thousands of redundant calls to normalize_path with\n # the same args, which hurts performance.\n self._normalize_path_cached = functools.lru_cache(normalize_path)\n\n def _permitted(self, path: str) -> bool:\n """\n Return True if the given path is one we are permitted to\n remove/modify, False otherwise.\n\n """\n # aka is_local, but caching normalized sys.prefix\n if not running_under_virtualenv():\n return True\n return path.startswith(self._normalize_path_cached(sys.prefix))\n\n def add(self, path: str) -> None:\n head, tail = os.path.split(path)\n\n # we normalize the head to resolve parent directory symlinks, but not\n # the tail, since we only want to uninstall symlinks, not their targets\n path = os.path.join(self._normalize_path_cached(head), os.path.normcase(tail))\n\n if not os.path.exists(path):\n return\n if self._permitted(path):\n self._paths.add(path)\n else:\n self._refuse.add(path)\n\n # __pycache__ files can show up after 'installed-files.txt' is created,\n # due to imports\n if os.path.splitext(path)[1] == ".py":\n self.add(cache_from_source(path))\n\n def add_pth(self, pth_file: str, entry: str) -> None:\n pth_file = self._normalize_path_cached(pth_file)\n if self._permitted(pth_file):\n if pth_file not in self._pth:\n self._pth[pth_file] = UninstallPthEntries(pth_file)\n self._pth[pth_file].add(entry)\n else:\n self._refuse.add(pth_file)\n\n def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None:\n """Remove paths in ``self._paths`` with confirmation (unless\n ``auto_confirm`` is True)."""\n\n if not self._paths:\n logger.info(\n "Can't uninstall '%s'. 
No files were found to uninstall.",\n self._dist.raw_name,\n )\n return\n\n dist_name_version = f"{self._dist.raw_name}-{self._dist.raw_version}"\n logger.info("Uninstalling %s:", dist_name_version)\n\n with indent_log():\n if auto_confirm or self._allowed_to_proceed(verbose):\n moved = self._moved_paths\n\n for_rename = compress_for_rename(self._paths)\n\n for path in sorted(compact(for_rename)):\n moved.stash(path)\n logger.verbose("Removing file or directory %s", path)\n\n for pth in self._pth.values():\n pth.remove()\n\n logger.info("Successfully uninstalled %s", dist_name_version)\n\n def _allowed_to_proceed(self, verbose: bool) -> bool:\n """Display which files would be deleted and prompt for confirmation"""\n\n def _display(msg: str, paths: Iterable[str]) -> None:\n if not paths:\n return\n\n logger.info(msg)\n with indent_log():\n for path in sorted(compact(paths)):\n logger.info(path)\n\n if not verbose:\n will_remove, will_skip = compress_for_output_listing(self._paths)\n else:\n # In verbose mode, display all the files that are going to be\n # deleted.\n will_remove = set(self._paths)\n will_skip = set()\n\n _display("Would remove:", will_remove)\n _display("Would not remove (might be manually added):", will_skip)\n _display("Would not remove (outside of prefix):", self._refuse)\n if verbose:\n _display("Will actually move:", compress_for_rename(self._paths))\n\n return ask("Proceed (Y/n)? 
", ("y", "n", "")) != "n"\n\n def rollback(self) -> None:\n """Rollback the changes previously made by remove()."""\n if not self._moved_paths.can_rollback:\n logger.error(\n "Can't roll back %s; was not uninstalled",\n self._dist.raw_name,\n )\n return\n logger.info("Rolling back uninstall of %s", self._dist.raw_name)\n self._moved_paths.rollback()\n for pth in self._pth.values():\n pth.rollback()\n\n def commit(self) -> None:\n """Remove temporary save dir: rollback will no longer be possible."""\n self._moved_paths.commit()\n\n @classmethod\n def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet":\n dist_location = dist.location\n info_location = dist.info_location\n if dist_location is None:\n logger.info(\n "Not uninstalling %s since it is not installed",\n dist.canonical_name,\n )\n return cls(dist)\n\n normalized_dist_location = normalize_path(dist_location)\n if not dist.local:\n logger.info(\n "Not uninstalling %s at %s, outside environment %s",\n dist.canonical_name,\n normalized_dist_location,\n sys.prefix,\n )\n return cls(dist)\n\n if normalized_dist_location in {\n p\n for p in {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")}\n if p\n }:\n logger.info(\n "Not uninstalling %s at %s, as it is in the standard library.",\n dist.canonical_name,\n normalized_dist_location,\n )\n return cls(dist)\n\n paths_to_remove = cls(dist)\n develop_egg_link = egg_link_path_from_location(dist.raw_name)\n\n # Distribution is installed with metadata in a "flat" .egg-info\n # directory. 
This means it is not a modern .dist-info installation, an\n # egg, or legacy editable.\n setuptools_flat_installation = (\n dist.installed_with_setuptools_egg_info\n and info_location is not None\n and os.path.exists(info_location)\n # If dist is editable and the location points to a ``.egg-info``,\n # we are in fact in the legacy editable case.\n and not info_location.endswith(f"{dist.setuptools_filename}.egg-info")\n )\n\n # Uninstall cases order do matter as in the case of 2 installs of the\n # same package, pip needs to uninstall the currently detected version\n if setuptools_flat_installation:\n if info_location is not None:\n paths_to_remove.add(info_location)\n installed_files = dist.iter_declared_entries()\n if installed_files is not None:\n for installed_file in installed_files:\n paths_to_remove.add(os.path.join(dist_location, installed_file))\n # FIXME: need a test for this elif block\n # occurs with --single-version-externally-managed/--record outside\n # of pip\n elif dist.is_file("top_level.txt"):\n try:\n namespace_packages = dist.read_text("namespace_packages.txt")\n except FileNotFoundError:\n namespaces = []\n else:\n namespaces = namespace_packages.splitlines(keepends=False)\n for top_level_pkg in [\n p\n for p in dist.read_text("top_level.txt").splitlines()\n if p and p not in namespaces\n ]:\n path = os.path.join(dist_location, top_level_pkg)\n paths_to_remove.add(path)\n paths_to_remove.add(f"{path}.py")\n paths_to_remove.add(f"{path}.pyc")\n paths_to_remove.add(f"{path}.pyo")\n\n elif dist.installed_by_distutils:\n raise LegacyDistutilsInstall(distribution=dist)\n\n elif dist.installed_as_egg:\n # package installed by easy_install\n # We cannot match on dist.egg_name because it can slightly vary\n # i.e. 
setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg\n # XXX We use normalized_dist_location because dist_location my contain\n # a trailing / if the distribution is a zipped egg\n # (which is not a directory).\n paths_to_remove.add(normalized_dist_location)\n easy_install_egg = os.path.split(normalized_dist_location)[1]\n easy_install_pth = os.path.join(\n os.path.dirname(normalized_dist_location),\n "easy-install.pth",\n )\n paths_to_remove.add_pth(easy_install_pth, "./" + easy_install_egg)\n\n elif dist.installed_with_dist_info:\n for path in uninstallation_paths(dist):\n paths_to_remove.add(path)\n\n elif develop_egg_link:\n # PEP 660 modern editable is handled in the ``.dist-info`` case\n # above, so this only covers the setuptools-style editable.\n with open(develop_egg_link) as fh:\n link_pointer = os.path.normcase(fh.readline().strip())\n normalized_link_pointer = paths_to_remove._normalize_path_cached(\n link_pointer\n )\n assert os.path.samefile(\n normalized_link_pointer, normalized_dist_location\n ), (\n f"Egg-link {develop_egg_link} (to {link_pointer}) does not match "\n f"installed location of {dist.raw_name} (at {dist_location})"\n )\n paths_to_remove.add(develop_egg_link)\n easy_install_pth = os.path.join(\n os.path.dirname(develop_egg_link), "easy-install.pth"\n )\n paths_to_remove.add_pth(easy_install_pth, dist_location)\n\n else:\n logger.debug(\n "Not sure how to uninstall: %s - Check: %s",\n dist,\n dist_location,\n )\n\n if dist.in_usersite:\n bin_dir = get_bin_user()\n else:\n bin_dir = get_bin_prefix()\n\n # find distutils scripts= scripts\n try:\n for script in dist.iter_distutils_script_names():\n paths_to_remove.add(os.path.join(bin_dir, script))\n if WINDOWS:\n paths_to_remove.add(os.path.join(bin_dir, f"{script}.bat"))\n except (FileNotFoundError, NotADirectoryError):\n pass\n\n # find console_scripts and gui_scripts\n def iter_scripts_to_remove(\n dist: BaseDistribution,\n bin_dir: str,\n ) -> Generator[str, None, None]:\n for 
entry_point in dist.iter_entry_points():\n if entry_point.group == "console_scripts":\n yield from _script_names(bin_dir, entry_point.name, False)\n elif entry_point.group == "gui_scripts":\n yield from _script_names(bin_dir, entry_point.name, True)\n\n for s in iter_scripts_to_remove(dist, bin_dir):\n paths_to_remove.add(s)\n\n return paths_to_remove\n\n\nclass UninstallPthEntries:\n def __init__(self, pth_file: str) -> None:\n self.file = pth_file\n self.entries: Set[str] = set()\n self._saved_lines: Optional[List[bytes]] = None\n\n def add(self, entry: str) -> None:\n entry = os.path.normcase(entry)\n # On Windows, os.path.normcase converts the entry to use\n # backslashes. This is correct for entries that describe absolute\n # paths outside of site-packages, but all the others use forward\n # slashes.\n # os.path.splitdrive is used instead of os.path.isabs because isabs\n # treats non-absolute paths with drive letter markings like c:foo\bar\n # as absolute paths. It also does not recognize UNC paths if they don't\n # have more than "\\sever\share". 
Valid examples: "\\server\share\" or\n # "\\server\share\folder".\n if WINDOWS and not os.path.splitdrive(entry)[0]:\n entry = entry.replace("\\", "/")\n self.entries.add(entry)\n\n def remove(self) -> None:\n logger.verbose("Removing pth entries from %s:", self.file)\n\n # If the file doesn't exist, log a warning and return\n if not os.path.isfile(self.file):\n logger.warning("Cannot remove entries from nonexistent file %s", self.file)\n return\n with open(self.file, "rb") as fh:\n # windows uses '\r\n' with py3k, but uses '\n' with py2.x\n lines = fh.readlines()\n self._saved_lines = lines\n if any(b"\r\n" in line for line in lines):\n endline = "\r\n"\n else:\n endline = "\n"\n # handle missing trailing newline\n if lines and not lines[-1].endswith(endline.encode("utf-8")):\n lines[-1] = lines[-1] + endline.encode("utf-8")\n for entry in self.entries:\n try:\n logger.verbose("Removing entry: %s", entry)\n lines.remove((entry + endline).encode("utf-8"))\n except ValueError:\n pass\n with open(self.file, "wb") as fh:\n fh.writelines(lines)\n\n def rollback(self) -> bool:\n if self._saved_lines is None:\n logger.error("Cannot roll back changes to %s, none were made", self.file)\n return False\n logger.debug("Rolling %s back to previous state", self.file)\n with open(self.file, "wb") as fh:\n fh.writelines(self._saved_lines)\n return True\n
.venv\Lib\site-packages\pip\_internal\req\req_uninstall.py
req_uninstall.py
Python
24,075
0.95
0.205975
0.11257
awesome-app
52
2023-08-16T22:53:12.802655
Apache-2.0
false
1f8450153a0a22233ad28a923d45e32f
import collections\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Generator, List, Optional, Sequence, Tuple\n\nfrom pip._internal.cli.progress_bars import get_install_progress_renderer\nfrom pip._internal.utils.logging import indent_log\n\nfrom .req_file import parse_requirements\nfrom .req_install import InstallRequirement\nfrom .req_set import RequirementSet\n\n__all__ = [\n "RequirementSet",\n "InstallRequirement",\n "parse_requirements",\n "install_given_reqs",\n]\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass(frozen=True)\nclass InstallationResult:\n name: str\n\n\ndef _validate_requirements(\n requirements: List[InstallRequirement],\n) -> Generator[Tuple[str, InstallRequirement], None, None]:\n for req in requirements:\n assert req.name, f"invalid to-be-installed requirement: {req}"\n yield req.name, req\n\n\ndef install_given_reqs(\n requirements: List[InstallRequirement],\n global_options: Sequence[str],\n root: Optional[str],\n home: Optional[str],\n prefix: Optional[str],\n warn_script_location: bool,\n use_user_site: bool,\n pycompile: bool,\n progress_bar: str,\n) -> List[InstallationResult]:\n """\n Install everything in the given list.\n\n (to be called after having downloaded and unpacked the packages)\n """\n to_install = collections.OrderedDict(_validate_requirements(requirements))\n\n if to_install:\n logger.info(\n "Installing collected packages: %s",\n ", ".join(to_install.keys()),\n )\n\n installed = []\n\n show_progress = logger.isEnabledFor(logging.INFO) and len(to_install) > 1\n\n items = iter(to_install.values())\n if show_progress:\n renderer = get_install_progress_renderer(\n bar_type=progress_bar, total=len(to_install)\n )\n items = renderer(items)\n\n with indent_log():\n for requirement in items:\n req_name = requirement.name\n assert req_name is not None\n if requirement.should_reinstall:\n logger.info("Attempting uninstall: %s", req_name)\n with indent_log():\n uninstalled_pathset = 
requirement.uninstall(auto_confirm=True)\n else:\n uninstalled_pathset = None\n\n try:\n requirement.install(\n global_options,\n root=root,\n home=home,\n prefix=prefix,\n warn_script_location=warn_script_location,\n use_user_site=use_user_site,\n pycompile=pycompile,\n )\n except Exception:\n # if install did not succeed, rollback previous uninstall\n if uninstalled_pathset and not requirement.install_succeeded:\n uninstalled_pathset.rollback()\n raise\n else:\n if uninstalled_pathset and requirement.install_succeeded:\n uninstalled_pathset.commit()\n\n installed.append(InstallationResult(req_name))\n\n return installed\n
.venv\Lib\site-packages\pip\_internal\req\__init__.py
__init__.py
Python
3,096
0.95
0.116505
0.011905
node-utils
659
2023-10-02T16:36:06.382911
GPL-3.0
false
82d5c4714b59a54632194472b5fd50ae
\n\n
.venv\Lib\site-packages\pip\_internal\req\__pycache__\constructors.cpython-313.pyc
constructors.cpython-313.pyc
Other
21,549
0.95
0.058577
0.017699
python-kit
959
2023-09-23T06:02:11.732218
MIT
false
037768c9823ea8db0218270aedab5a5a
\n\n
.venv\Lib\site-packages\pip\_internal\req\__pycache__\req_dependency_group.cpython-313.pyc
req_dependency_group.cpython-313.pyc
Other
4,105
0.8
0.071429
0
awesome-app
945
2024-08-30T23:42:21.420380
BSD-3-Clause
false
cef3aa8365fb9feb9fa33022f67855bd
\n\n
.venv\Lib\site-packages\pip\_internal\req\__pycache__\req_file.cpython-313.pyc
req_file.cpython-313.pyc
Other
24,617
0.95
0.015209
0.012346
node-utils
463
2024-01-01T02:22:40.264323
MIT
false
192694c8f70ff6dbf3e204f7d23faf8e
\n\n
.venv\Lib\site-packages\pip\_internal\req\__pycache__\req_install.cpython-313.pyc
req_install.cpython-313.pyc
Other
39,352
0.95
0.070234
0.007168
vue-tools
284
2025-06-01T20:37:52.387692
BSD-3-Clause
false
3f1798b75853f9bb31782c14e626a840
\n\n
.venv\Lib\site-packages\pip\_internal\req\__pycache__\req_set.cpython-313.pyc
req_set.cpython-313.pyc
Other
5,623
0.95
0
0.023256
react-lib
814
2023-07-31T15:36:46.274630
MIT
false
c342b9ae7a73d1d1b15f4ef01ecb1872
\n\n
.venv\Lib\site-packages\pip\_internal\req\__pycache__\req_uninstall.cpython-313.pyc
req_uninstall.cpython-313.pyc
Other
32,858
0.95
0.02439
0.018939
python-kit
779
2023-07-29T15:24:51.035307
BSD-3-Clause
false
7b15be251774e523a58fe03f1d1a57d3
\n\n
.venv\Lib\site-packages\pip\_internal\req\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
4,141
0.95
0
0
vue-tools
189
2025-03-04T07:39:29.706271
GPL-3.0
false
1f726c9b1f1d602f39894d40679affe7
from typing import Callable, List, Optional

from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_set import RequirementSet

# Factory callable used to build an InstallRequirement from a requirement
# string plus the (optional) requirement that caused it to be discovered.
InstallRequirementProvider = Callable[
    [str, Optional[InstallRequirement]], InstallRequirement
]


class BaseResolver:
    """Abstract interface implemented by pip's concrete resolvers."""

    def resolve(
        self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
    ) -> RequirementSet:
        """Resolve ``root_reqs`` into the full set of requirements to install."""
        raise NotImplementedError()

    def get_installation_order(
        self, req_set: RequirementSet
    ) -> List[InstallRequirement]:
        """Return ``req_set``'s requirements ordered for installation."""
        raise NotImplementedError()
.venv\Lib\site-packages\pip\_internal\resolution\base.py
base.py
Python
583
0.85
0.15
0
node-utils
418
2024-05-12T13:58:36.479832
BSD-3-Clause
false
bbfa436b355a45aa3393c1e1ac9033f2
"""Dependency Resolution\n\nThe dependency resolution in pip is performed as follows:\n\nfor top-level requirements:\n a. only one spec allowed per project, regardless of conflicts or not.\n otherwise a "double requirement" exception is raised\n b. they override sub-dependency requirements.\nfor sub-dependencies\n a. "first found, wins" (where the order is breadth first)\n"""\n\nimport logging\nimport sys\nfrom collections import defaultdict\nfrom itertools import chain\nfrom typing import DefaultDict, Iterable, List, Optional, Set, Tuple\n\nfrom pip._vendor.packaging import specifiers\nfrom pip._vendor.packaging.requirements import Requirement\n\nfrom pip._internal.cache import WheelCache\nfrom pip._internal.exceptions import (\n BestVersionAlreadyInstalled,\n DistributionNotFound,\n HashError,\n HashErrors,\n InstallationError,\n NoneMetadataError,\n UnsupportedPythonVersion,\n)\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.metadata import BaseDistribution\nfrom pip._internal.models.link import Link\nfrom pip._internal.models.wheel import Wheel\nfrom pip._internal.operations.prepare import RequirementPreparer\nfrom pip._internal.req.req_install import (\n InstallRequirement,\n check_invalid_constraint_type,\n)\nfrom pip._internal.req.req_set import RequirementSet\nfrom pip._internal.resolution.base import BaseResolver, InstallRequirementProvider\nfrom pip._internal.utils import compatibility_tags\nfrom pip._internal.utils.compatibility_tags import get_supported\nfrom pip._internal.utils.direct_url_helpers import direct_url_from_link\nfrom pip._internal.utils.logging import indent_log\nfrom pip._internal.utils.misc import normalize_version_info\nfrom pip._internal.utils.packaging import check_requires_python\n\nlogger = logging.getLogger(__name__)\n\nDiscoveredDependencies = DefaultDict[Optional[str], List[InstallRequirement]]\n\n\ndef _check_dist_requires_python(\n dist: BaseDistribution,\n version_info: Tuple[int, int, int],\n 
ignore_requires_python: bool = False,\n) -> None:\n """\n Check whether the given Python version is compatible with a distribution's\n "Requires-Python" value.\n\n :param version_info: A 3-tuple of ints representing the Python\n major-minor-micro version to check.\n :param ignore_requires_python: Whether to ignore the "Requires-Python"\n value if the given Python version isn't compatible.\n\n :raises UnsupportedPythonVersion: When the given Python version isn't\n compatible.\n """\n # This idiosyncratically converts the SpecifierSet to str and let\n # check_requires_python then parse it again into SpecifierSet. But this\n # is the legacy resolver so I'm just not going to bother refactoring.\n try:\n requires_python = str(dist.requires_python)\n except FileNotFoundError as e:\n raise NoneMetadataError(dist, str(e))\n try:\n is_compatible = check_requires_python(\n requires_python,\n version_info=version_info,\n )\n except specifiers.InvalidSpecifier as exc:\n logger.warning(\n "Package %r has an invalid Requires-Python: %s", dist.raw_name, exc\n )\n return\n\n if is_compatible:\n return\n\n version = ".".join(map(str, version_info))\n if ignore_requires_python:\n logger.debug(\n "Ignoring failed Requires-Python check for package %r: %s not in %r",\n dist.raw_name,\n version,\n requires_python,\n )\n return\n\n raise UnsupportedPythonVersion(\n f"Package {dist.raw_name!r} requires a different Python: "\n f"{version} not in {requires_python!r}"\n )\n\n\nclass Resolver(BaseResolver):\n """Resolves which packages need to be installed/uninstalled to perform \\n the requested operation without breaking the requirements of any package.\n """\n\n _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}\n\n def __init__(\n self,\n preparer: RequirementPreparer,\n finder: PackageFinder,\n wheel_cache: Optional[WheelCache],\n make_install_req: InstallRequirementProvider,\n use_user_site: bool,\n ignore_dependencies: bool,\n ignore_installed: bool,\n 
ignore_requires_python: bool,\n force_reinstall: bool,\n upgrade_strategy: str,\n py_version_info: Optional[Tuple[int, ...]] = None,\n ) -> None:\n super().__init__()\n assert upgrade_strategy in self._allowed_strategies\n\n if py_version_info is None:\n py_version_info = sys.version_info[:3]\n else:\n py_version_info = normalize_version_info(py_version_info)\n\n self._py_version_info = py_version_info\n\n self.preparer = preparer\n self.finder = finder\n self.wheel_cache = wheel_cache\n\n self.upgrade_strategy = upgrade_strategy\n self.force_reinstall = force_reinstall\n self.ignore_dependencies = ignore_dependencies\n self.ignore_installed = ignore_installed\n self.ignore_requires_python = ignore_requires_python\n self.use_user_site = use_user_site\n self._make_install_req = make_install_req\n\n self._discovered_dependencies: DiscoveredDependencies = defaultdict(list)\n\n def resolve(\n self, root_reqs: List[InstallRequirement], check_supported_wheels: bool\n ) -> RequirementSet:\n """Resolve what operations need to be done\n\n As a side-effect of this method, the packages (and their dependencies)\n are downloaded, unpacked and prepared for installation. This\n preparation is done by ``pip.operations.prepare``.\n\n Once PyPI has static dependency metadata available, it would be\n possible to move the preparation to become a step separated from\n dependency resolution.\n """\n requirement_set = RequirementSet(check_supported_wheels=check_supported_wheels)\n for req in root_reqs:\n if req.constraint:\n check_invalid_constraint_type(req)\n self._add_requirement_to_set(requirement_set, req)\n\n # Actually prepare the files, and collect any exceptions. 
Most hash\n # exceptions cannot be checked ahead of time, because\n # _populate_link() needs to be called before we can make decisions\n # based on link type.\n discovered_reqs: List[InstallRequirement] = []\n hash_errors = HashErrors()\n for req in chain(requirement_set.all_requirements, discovered_reqs):\n try:\n discovered_reqs.extend(self._resolve_one(requirement_set, req))\n except HashError as exc:\n exc.req = req\n hash_errors.append(exc)\n\n if hash_errors:\n raise hash_errors\n\n return requirement_set\n\n def _add_requirement_to_set(\n self,\n requirement_set: RequirementSet,\n install_req: InstallRequirement,\n parent_req_name: Optional[str] = None,\n extras_requested: Optional[Iterable[str]] = None,\n ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]:\n """Add install_req as a requirement to install.\n\n :param parent_req_name: The name of the requirement that needed this\n added. The name is used because when multiple unnamed requirements\n resolve to the same name, we could otherwise end up with dependency\n links that point outside the Requirements set. parent_req must\n already be added. Note that None implies that this is a user\n supplied requirement, vs an inferred one.\n :param extras_requested: an iterable of extras used to evaluate the\n environment markers.\n :return: Additional requirements to scan. 
That is either [] if\n the requirement is not applicable, or [install_req] if the\n requirement is applicable and has just been added.\n """\n # If the markers do not match, ignore this requirement.\n if not install_req.match_markers(extras_requested):\n logger.info(\n "Ignoring %s: markers '%s' don't match your environment",\n install_req.name,\n install_req.markers,\n )\n return [], None\n\n # If the wheel is not supported, raise an error.\n # Should check this after filtering out based on environment markers to\n # allow specifying different wheels based on the environment/OS, in a\n # single requirements file.\n if install_req.link and install_req.link.is_wheel:\n wheel = Wheel(install_req.link.filename)\n tags = compatibility_tags.get_supported()\n if requirement_set.check_supported_wheels and not wheel.supported(tags):\n raise InstallationError(\n f"{wheel.filename} is not a supported wheel on this platform."\n )\n\n # This next bit is really a sanity check.\n assert (\n not install_req.user_supplied or parent_req_name is None\n ), "a user supplied req shouldn't have a parent"\n\n # Unnamed requirements are scanned again and the requirement won't be\n # added as a dependency until after scanning.\n if not install_req.name:\n requirement_set.add_unnamed_requirement(install_req)\n return [install_req], None\n\n try:\n existing_req: Optional[InstallRequirement] = (\n requirement_set.get_requirement(install_req.name)\n )\n except KeyError:\n existing_req = None\n\n has_conflicting_requirement = (\n parent_req_name is None\n and existing_req\n and not existing_req.constraint\n and existing_req.extras == install_req.extras\n and existing_req.req\n and install_req.req\n and existing_req.req.specifier != install_req.req.specifier\n )\n if has_conflicting_requirement:\n raise InstallationError(\n f"Double requirement given: {install_req} "\n f"(already in {existing_req}, name={install_req.name!r})"\n )\n\n # When no existing requirement exists, add the requirement as 
a\n # dependency and it will be scanned again after.\n if not existing_req:\n requirement_set.add_named_requirement(install_req)\n # We'd want to rescan this requirement later\n return [install_req], install_req\n\n # Assume there's no need to scan, and that we've already\n # encountered this for scanning.\n if install_req.constraint or not existing_req.constraint:\n return [], existing_req\n\n does_not_satisfy_constraint = install_req.link and not (\n existing_req.link and install_req.link.path == existing_req.link.path\n )\n if does_not_satisfy_constraint:\n raise InstallationError(\n f"Could not satisfy constraints for '{install_req.name}': "\n "installation from path or url cannot be "\n "constrained to a version"\n )\n # If we're now installing a constraint, mark the existing\n # object for real installation.\n existing_req.constraint = False\n # If we're now installing a user supplied requirement,\n # mark the existing object as such.\n if install_req.user_supplied:\n existing_req.user_supplied = True\n existing_req.extras = tuple(\n sorted(set(existing_req.extras) | set(install_req.extras))\n )\n logger.debug(\n "Setting %s extras to: %s",\n existing_req,\n existing_req.extras,\n )\n # Return the existing requirement for addition to the parent and\n # scanning again.\n return [existing_req], existing_req\n\n def _is_upgrade_allowed(self, req: InstallRequirement) -> bool:\n if self.upgrade_strategy == "to-satisfy-only":\n return False\n elif self.upgrade_strategy == "eager":\n return True\n else:\n assert self.upgrade_strategy == "only-if-needed"\n return req.user_supplied or req.constraint\n\n def _set_req_to_reinstall(self, req: InstallRequirement) -> None:\n """\n Set a requirement to be installed.\n """\n # Don't uninstall the conflict if doing a user install and the\n # conflict is not a user install.\n assert req.satisfied_by is not None\n if not self.use_user_site or req.satisfied_by.in_usersite:\n req.should_reinstall = True\n req.satisfied_by = 
None\n\n def _check_skip_installed(\n self, req_to_install: InstallRequirement\n ) -> Optional[str]:\n """Check if req_to_install should be skipped.\n\n This will check if the req is installed, and whether we should upgrade\n or reinstall it, taking into account all the relevant user options.\n\n After calling this req_to_install will only have satisfied_by set to\n None if the req_to_install is to be upgraded/reinstalled etc. Any\n other value will be a dist recording the current thing installed that\n satisfies the requirement.\n\n Note that for vcs urls and the like we can't assess skipping in this\n routine - we simply identify that we need to pull the thing down,\n then later on it is pulled down and introspected to assess upgrade/\n reinstalls etc.\n\n :return: A text reason for why it was skipped, or None.\n """\n if self.ignore_installed:\n return None\n\n req_to_install.check_if_exists(self.use_user_site)\n if not req_to_install.satisfied_by:\n return None\n\n if self.force_reinstall:\n self._set_req_to_reinstall(req_to_install)\n return None\n\n if not self._is_upgrade_allowed(req_to_install):\n if self.upgrade_strategy == "only-if-needed":\n return "already satisfied, skipping upgrade"\n return "already satisfied"\n\n # Check for the possibility of an upgrade. For link-based\n # requirements we have to pull the tree down and inspect to assess\n # the version #, so it's handled way down.\n if not req_to_install.link:\n try:\n self.finder.find_requirement(req_to_install, upgrade=True)\n except BestVersionAlreadyInstalled:\n # Then the best version is installed.\n return "already up-to-date"\n except DistributionNotFound:\n # No distribution found, so we squash the error. 
It will\n # be raised later when we re-try later to do the install.\n # Why don't we just raise here?\n pass\n\n self._set_req_to_reinstall(req_to_install)\n return None\n\n def _find_requirement_link(self, req: InstallRequirement) -> Optional[Link]:\n upgrade = self._is_upgrade_allowed(req)\n best_candidate = self.finder.find_requirement(req, upgrade)\n if not best_candidate:\n return None\n\n # Log a warning per PEP 592 if necessary before returning.\n link = best_candidate.link\n if link.is_yanked:\n reason = link.yanked_reason or "<none given>"\n msg = (\n # Mark this as a unicode string to prevent\n # "UnicodeEncodeError: 'ascii' codec can't encode character"\n # in Python 2 when the reason contains non-ascii characters.\n "The candidate selected for download or install is a "\n f"yanked version: {best_candidate}\n"\n f"Reason for being yanked: {reason}"\n )\n logger.warning(msg)\n\n return link\n\n def _populate_link(self, req: InstallRequirement) -> None:\n """Ensure that if a link can be found for this, that it is found.\n\n Note that req.link may still be None - if the requirement is already\n installed and not needed to be upgraded based on the return value of\n _is_upgrade_allowed().\n\n If preparer.require_hashes is True, don't use the wheel cache, because\n cached wheels, always built locally, have different hashes than the\n files downloaded from the index server and thus throw false hash\n mismatches. 
Furthermore, cached wheels at present have undeterministic\n contents due to file modification times.\n """\n if req.link is None:\n req.link = self._find_requirement_link(req)\n\n if self.wheel_cache is None or self.preparer.require_hashes:\n return\n\n assert req.link is not None, "_find_requirement_link unexpectedly returned None"\n cache_entry = self.wheel_cache.get_cache_entry(\n link=req.link,\n package_name=req.name,\n supported_tags=get_supported(),\n )\n if cache_entry is not None:\n logger.debug("Using cached wheel link: %s", cache_entry.link)\n if req.link is req.original_link and cache_entry.persistent:\n req.cached_wheel_source_link = req.link\n if cache_entry.origin is not None:\n req.download_info = cache_entry.origin\n else:\n # Legacy cache entry that does not have origin.json.\n # download_info may miss the archive_info.hashes field.\n req.download_info = direct_url_from_link(\n req.link, link_is_in_wheel_cache=cache_entry.persistent\n )\n req.link = cache_entry.link\n\n def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution:\n """Takes a InstallRequirement and returns a single AbstractDist \\n representing a prepared variant of the same.\n """\n if req.editable:\n return self.preparer.prepare_editable_requirement(req)\n\n # satisfied_by is only evaluated by calling _check_skip_installed,\n # so it must be None here.\n assert req.satisfied_by is None\n skip_reason = self._check_skip_installed(req)\n\n if req.satisfied_by:\n return self.preparer.prepare_installed_requirement(req, skip_reason)\n\n # We eagerly populate the link, since that's our "legacy" behavior.\n self._populate_link(req)\n dist = self.preparer.prepare_linked_requirement(req)\n\n # NOTE\n # The following portion is for determining if a certain package is\n # going to be re-installed/upgraded or not and reporting to the user.\n # This should probably get cleaned up in a future refactor.\n\n # req.req is only avail after unpack for URL\n # pkgs repeat check_if_exists 
to uninstall-on-upgrade\n # (#14)\n if not self.ignore_installed:\n req.check_if_exists(self.use_user_site)\n\n if req.satisfied_by:\n should_modify = (\n self.upgrade_strategy != "to-satisfy-only"\n or self.force_reinstall\n or self.ignore_installed\n or req.link.scheme == "file"\n )\n if should_modify:\n self._set_req_to_reinstall(req)\n else:\n logger.info(\n "Requirement already satisfied (use --upgrade to upgrade): %s",\n req,\n )\n return dist\n\n def _resolve_one(\n self,\n requirement_set: RequirementSet,\n req_to_install: InstallRequirement,\n ) -> List[InstallRequirement]:\n """Prepare a single requirements file.\n\n :return: A list of additional InstallRequirements to also install.\n """\n # Tell user what we are doing for this requirement:\n # obtain (editable), skipping, processing (local url), collecting\n # (remote url or package name)\n if req_to_install.constraint or req_to_install.prepared:\n return []\n\n req_to_install.prepared = True\n\n # Parse and return dependencies\n dist = self._get_dist_for(req_to_install)\n # This will raise UnsupportedPythonVersion if the given Python\n # version isn't compatible with the distribution's Requires-Python.\n _check_dist_requires_python(\n dist,\n version_info=self._py_version_info,\n ignore_requires_python=self.ignore_requires_python,\n )\n\n more_reqs: List[InstallRequirement] = []\n\n def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None:\n # This idiosyncratically converts the Requirement to str and let\n # make_install_req then parse it again into Requirement. 
But this is\n # the legacy resolver so I'm just not going to bother refactoring.\n sub_install_req = self._make_install_req(str(subreq), req_to_install)\n parent_req_name = req_to_install.name\n to_scan_again, add_to_parent = self._add_requirement_to_set(\n requirement_set,\n sub_install_req,\n parent_req_name=parent_req_name,\n extras_requested=extras_requested,\n )\n if parent_req_name and add_to_parent:\n self._discovered_dependencies[parent_req_name].append(add_to_parent)\n more_reqs.extend(to_scan_again)\n\n with indent_log():\n # We add req_to_install before its dependencies, so that we\n # can refer to it when adding dependencies.\n assert req_to_install.name is not None\n if not requirement_set.has_requirement(req_to_install.name):\n # 'unnamed' requirements will get added here\n # 'unnamed' requirements can only come from being directly\n # provided by the user.\n assert req_to_install.user_supplied\n self._add_requirement_to_set(\n requirement_set, req_to_install, parent_req_name=None\n )\n\n if not self.ignore_dependencies:\n if req_to_install.extras:\n logger.debug(\n "Installing extra requirements: %r",\n ",".join(req_to_install.extras),\n )\n missing_requested = sorted(\n set(req_to_install.extras) - set(dist.iter_provided_extras())\n )\n for missing in missing_requested:\n logger.warning(\n "%s %s does not provide the extra '%s'",\n dist.raw_name,\n dist.version,\n missing,\n )\n\n available_requested = sorted(\n set(dist.iter_provided_extras()) & set(req_to_install.extras)\n )\n for subreq in dist.iter_dependencies(available_requested):\n add_req(subreq, extras_requested=available_requested)\n\n return more_reqs\n\n def get_installation_order(\n self, req_set: RequirementSet\n ) -> List[InstallRequirement]:\n """Create the installation order.\n\n The installation order is topological - requirements are installed\n before the requiring thing. 
We break cycles at an arbitrary point,\n and make no other guarantees.\n """\n # The current implementation, which we may change at any point\n # installs the user specified things in the order given, except when\n # dependencies must come earlier to achieve topological order.\n order = []\n ordered_reqs: Set[InstallRequirement] = set()\n\n def schedule(req: InstallRequirement) -> None:\n if req.satisfied_by or req in ordered_reqs:\n return\n if req.constraint:\n return\n ordered_reqs.add(req)\n for dep in self._discovered_dependencies[req.name]:\n schedule(dep)\n order.append(req)\n\n for install_req in req_set.requirements.values():\n schedule(install_req)\n return order\n
.venv\Lib\site-packages\pip\_internal\resolution\legacy\resolver.py
resolver.py
Python
24,068
0.95
0.167504
0.131528
react-lib
972
2025-02-21T23:26:03.935422
GPL-3.0
false
df4cbf04c748edaf4cfc90de0dd19cf1
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\legacy\__pycache__\resolver.cpython-313.pyc
resolver.cpython-313.pyc
Other
22,883
0.95
0.084071
0
node-utils
444
2024-01-01T16:25:02.483145
MIT
false
4130fb3a62b81152a0377d376c4866ae
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\legacy\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
206
0.7
0
0
python-kit
654
2025-04-25T02:17:16.913463
BSD-3-Clause
false
25dd6614b59972f86b9a5b783f3a48f2
"""Abstract interfaces for the resolvelib-based resolver.

``Requirement`` and ``Candidate`` define the protocol the resolver machinery
works against; ``Constraint`` aggregates the restrictions collected from
constraint files for a single project.
"""

from dataclasses import dataclass
from typing import FrozenSet, Iterable, Optional, Tuple

from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.utils import NormalizedName
from pip._vendor.packaging.version import Version

from pip._internal.models.link import Link, links_equivalent
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.hashes import Hashes

CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]]


def format_name(project: NormalizedName, extras: FrozenSet[NormalizedName]) -> str:
    """Render ``project`` plus its extras as ``name`` or ``name[a,b]``."""
    if not extras:
        return project
    return f"{project}[{','.join(sorted(extras))}]"


def _match_link(link: Link, candidate: "Candidate") -> bool:
    """Whether ``link`` points at the same artifact as ``candidate``'s source.

    Candidates without a source link never match.
    """
    if not candidate.source_link:
        return False
    return links_equivalent(link, candidate.source_link)


@dataclass(frozen=True)
class Constraint:
    """Restrictions placed on a single project by constraint files."""

    specifier: SpecifierSet
    hashes: Hashes
    links: FrozenSet[Link]

    @classmethod
    def empty(cls) -> "Constraint":
        """Return the identity value: a constraint restricting nothing."""
        return Constraint(SpecifierSet(), Hashes(), frozenset())

    @classmethod
    def from_ireq(cls, ireq: InstallRequirement) -> "Constraint":
        """Build a constraint from one parsed requirement line."""
        pinned_links = frozenset([ireq.link]) if ireq.link else frozenset()
        return Constraint(
            ireq.specifier, ireq.hashes(trust_internet=False), pinned_links
        )

    def __bool__(self) -> bool:
        # Truthy as soon as any dimension actually restricts something.
        return bool(self.specifier) or bool(self.hashes) or bool(self.links)

    def __and__(self, other: InstallRequirement) -> "Constraint":
        # Merge another requirement's specifier/hashes/link into this one.
        if not isinstance(other, InstallRequirement):
            return NotImplemented
        merged_links = self.links.union([other.link]) if other.link else self.links
        return Constraint(
            self.specifier & other.specifier,
            self.hashes & other.hashes(trust_internet=False),
            merged_links,
        )

    def is_satisfied_by(self, candidate: "Candidate") -> bool:
        # Every pinned URL constraint must match this candidate's source.
        for link in self.links:
            if not _match_link(link, candidate):
                return False
        # Always allow prereleases here: PackageFinder already implements the
        # prerelease logic and has filtered out prerelease candidates when the
        # user does not expect them.
        return self.specifier.contains(candidate.version, prereleases=True)


class Requirement:
    """A requirement as seen by the resolver; may carry extras."""

    @property
    def project_name(self) -> NormalizedName:
        """The "project name" of a requirement.

        This is different from ``name`` if this requirement contains extras,
        in which case ``name`` would contain the ``[...]`` part, while this
        refers to the name of the project.
        """
        raise NotImplementedError("Subclass should override")

    @property
    def name(self) -> str:
        """The name identifying this requirement in the resolver.

        This is different from ``project_name`` if this requirement contains
        extras, where ``project_name`` would not contain the ``[...]`` part.
        """
        raise NotImplementedError("Subclass should override")

    def is_satisfied_by(self, candidate: "Candidate") -> bool:
        # Conservative default: subclasses define their own satisfaction test.
        return False

    def get_candidate_lookup(self) -> CandidateLookup:
        raise NotImplementedError("Subclass should override")

    def format_for_error(self) -> str:
        raise NotImplementedError("Subclass should override")


class Candidate:
    """A concrete distribution the resolver may pick for a requirement."""

    @property
    def project_name(self) -> NormalizedName:
        """The "project name" of the candidate.

        This is different from ``name`` if this candidate contains extras,
        in which case ``name`` would contain the ``[...]`` part, while this
        refers to the name of the project.
        """
        raise NotImplementedError("Override in subclass")

    @property
    def name(self) -> str:
        """The name identifying this candidate in the resolver.

        This is different from ``project_name`` if this candidate contains
        extras, where ``project_name`` would not contain the ``[...]`` part.
        """
        raise NotImplementedError("Override in subclass")

    @property
    def version(self) -> Version:
        raise NotImplementedError("Override in subclass")

    @property
    def is_installed(self) -> bool:
        raise NotImplementedError("Override in subclass")

    @property
    def is_editable(self) -> bool:
        raise NotImplementedError("Override in subclass")

    @property
    def source_link(self) -> Optional[Link]:
        raise NotImplementedError("Override in subclass")

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        raise NotImplementedError("Override in subclass")

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        raise NotImplementedError("Override in subclass")

    def format_for_error(self) -> str:
        raise NotImplementedError("Subclass should override")
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\base.py
base.py
Python
5,023
0.95
0.280576
0.038095
awesome-app
717
2025-05-23T17:37:02.776736
Apache-2.0
false
f4f9f1a3831c1c292624efa8043542a9
import logging\nimport sys\nfrom typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast\n\nfrom pip._vendor.packaging.requirements import InvalidRequirement\nfrom pip._vendor.packaging.utils import NormalizedName, canonicalize_name\nfrom pip._vendor.packaging.version import Version\n\nfrom pip._internal.exceptions import (\n HashError,\n InstallationSubprocessError,\n InvalidInstalledPackage,\n MetadataInconsistent,\n MetadataInvalid,\n)\nfrom pip._internal.metadata import BaseDistribution\nfrom pip._internal.models.link import Link, links_equivalent\nfrom pip._internal.models.wheel import Wheel\nfrom pip._internal.req.constructors import (\n install_req_from_editable,\n install_req_from_line,\n)\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._internal.utils.direct_url_helpers import direct_url_from_link\nfrom pip._internal.utils.misc import normalize_version_info\n\nfrom .base import Candidate, Requirement, format_name\n\nif TYPE_CHECKING:\n from .factory import Factory\n\nlogger = logging.getLogger(__name__)\n\nBaseCandidate = Union[\n "AlreadyInstalledCandidate",\n "EditableCandidate",\n "LinkCandidate",\n]\n\n# Avoid conflicting with the PyPI package "Python".\nREQUIRES_PYTHON_IDENTIFIER = cast(NormalizedName, "<Python from Requires-Python>")\n\n\ndef as_base_candidate(candidate: Candidate) -> Optional[BaseCandidate]:\n """The runtime version of BaseCandidate."""\n base_candidate_classes = (\n AlreadyInstalledCandidate,\n EditableCandidate,\n LinkCandidate,\n )\n if isinstance(candidate, base_candidate_classes):\n return candidate\n return None\n\n\ndef make_install_req_from_link(\n link: Link, template: InstallRequirement\n) -> InstallRequirement:\n assert not template.editable, "template is editable"\n if template.req:\n line = str(template.req)\n else:\n line = link.url\n ireq = install_req_from_line(\n line,\n user_supplied=template.user_supplied,\n comes_from=template.comes_from,\n 
use_pep517=template.use_pep517,\n isolated=template.isolated,\n constraint=template.constraint,\n global_options=template.global_options,\n hash_options=template.hash_options,\n config_settings=template.config_settings,\n )\n ireq.original_link = template.original_link\n ireq.link = link\n ireq.extras = template.extras\n return ireq\n\n\ndef make_install_req_from_editable(\n link: Link, template: InstallRequirement\n) -> InstallRequirement:\n assert template.editable, "template not editable"\n ireq = install_req_from_editable(\n link.url,\n user_supplied=template.user_supplied,\n comes_from=template.comes_from,\n use_pep517=template.use_pep517,\n isolated=template.isolated,\n constraint=template.constraint,\n permit_editable_wheels=template.permit_editable_wheels,\n global_options=template.global_options,\n hash_options=template.hash_options,\n config_settings=template.config_settings,\n )\n ireq.extras = template.extras\n return ireq\n\n\ndef _make_install_req_from_dist(\n dist: BaseDistribution, template: InstallRequirement\n) -> InstallRequirement:\n if template.req:\n line = str(template.req)\n elif template.link:\n line = f"{dist.canonical_name} @ {template.link.url}"\n else:\n line = f"{dist.canonical_name}=={dist.version}"\n ireq = install_req_from_line(\n line,\n user_supplied=template.user_supplied,\n comes_from=template.comes_from,\n use_pep517=template.use_pep517,\n isolated=template.isolated,\n constraint=template.constraint,\n global_options=template.global_options,\n hash_options=template.hash_options,\n config_settings=template.config_settings,\n )\n ireq.satisfied_by = dist\n return ireq\n\n\nclass _InstallRequirementBackedCandidate(Candidate):\n """A candidate backed by an ``InstallRequirement``.\n\n This represents a package request with the target not being already\n in the environment, and needs to be fetched and installed. 
The backing\n ``InstallRequirement`` is responsible for most of the leg work; this\n class exposes appropriate information to the resolver.\n\n :param link: The link passed to the ``InstallRequirement``. The backing\n ``InstallRequirement`` will use this link to fetch the distribution.\n :param source_link: The link this candidate "originates" from. This is\n different from ``link`` when the link is found in the wheel cache.\n ``link`` would point to the wheel cache, while this points to the\n found remote link (e.g. from pypi.org).\n """\n\n dist: BaseDistribution\n is_installed = False\n\n def __init__(\n self,\n link: Link,\n source_link: Link,\n ireq: InstallRequirement,\n factory: "Factory",\n name: Optional[NormalizedName] = None,\n version: Optional[Version] = None,\n ) -> None:\n self._link = link\n self._source_link = source_link\n self._factory = factory\n self._ireq = ireq\n self._name = name\n self._version = version\n self.dist = self._prepare()\n self._hash: Optional[int] = None\n\n def __str__(self) -> str:\n return f"{self.name} {self.version}"\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}({str(self._link)!r})"\n\n def __hash__(self) -> int:\n if self._hash is not None:\n return self._hash\n\n self._hash = hash((self.__class__, self._link))\n return self._hash\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, self.__class__):\n return links_equivalent(self._link, other._link)\n return False\n\n @property\n def source_link(self) -> Optional[Link]:\n return self._source_link\n\n @property\n def project_name(self) -> NormalizedName:\n """The normalised name of the project the candidate refers to"""\n if self._name is None:\n self._name = self.dist.canonical_name\n return self._name\n\n @property\n def name(self) -> str:\n return self.project_name\n\n @property\n def version(self) -> Version:\n if self._version is None:\n self._version = self.dist.version\n return self._version\n\n def format_for_error(self) -> 
str:\n return (\n f"{self.name} {self.version} "\n f"(from {self._link.file_path if self._link.is_file else self._link})"\n )\n\n def _prepare_distribution(self) -> BaseDistribution:\n raise NotImplementedError("Override in subclass")\n\n def _check_metadata_consistency(self, dist: BaseDistribution) -> None:\n """Check for consistency of project name and version of dist."""\n if self._name is not None and self._name != dist.canonical_name:\n raise MetadataInconsistent(\n self._ireq,\n "name",\n self._name,\n dist.canonical_name,\n )\n if self._version is not None and self._version != dist.version:\n raise MetadataInconsistent(\n self._ireq,\n "version",\n str(self._version),\n str(dist.version),\n )\n # check dependencies are valid\n # TODO performance: this means we iterate the dependencies at least twice,\n # we may want to cache parsed Requires-Dist\n try:\n list(dist.iter_dependencies(list(dist.iter_provided_extras())))\n except InvalidRequirement as e:\n raise MetadataInvalid(self._ireq, str(e))\n\n def _prepare(self) -> BaseDistribution:\n try:\n dist = self._prepare_distribution()\n except HashError as e:\n # Provide HashError the underlying ireq that caused it. 
This\n # provides context for the resulting error message to show the\n # offending line to the user.\n e.req = self._ireq\n raise\n except InstallationSubprocessError as exc:\n # The output has been presented already, so don't duplicate it.\n exc.context = "See above for output."\n raise\n\n self._check_metadata_consistency(dist)\n return dist\n\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n # Emit the Requires-Python requirement first to fail fast on\n # unsupported candidates and avoid pointless downloads/preparation.\n yield self._factory.make_requires_python_requirement(self.dist.requires_python)\n requires = self.dist.iter_dependencies() if with_requires else ()\n for r in requires:\n yield from self._factory.make_requirements_from_spec(str(r), self._ireq)\n\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n return self._ireq\n\n\nclass LinkCandidate(_InstallRequirementBackedCandidate):\n is_editable = False\n\n def __init__(\n self,\n link: Link,\n template: InstallRequirement,\n factory: "Factory",\n name: Optional[NormalizedName] = None,\n version: Optional[Version] = None,\n ) -> None:\n source_link = link\n cache_entry = factory.get_wheel_cache_entry(source_link, name)\n if cache_entry is not None:\n logger.debug("Using cached wheel link: %s", cache_entry.link)\n link = cache_entry.link\n ireq = make_install_req_from_link(link, template)\n assert ireq.link == link\n if ireq.link.is_wheel and not ireq.link.is_file:\n wheel = Wheel(ireq.link.filename)\n wheel_name = canonicalize_name(wheel.name)\n assert name == wheel_name, f"{name!r} != {wheel_name!r} for wheel"\n # Version may not be present for PEP 508 direct URLs\n if version is not None:\n wheel_version = Version(wheel.version)\n assert (\n version == wheel_version\n ), f"{version!r} != {wheel_version!r} for wheel {name}"\n\n if cache_entry is not None:\n assert ireq.link.is_wheel\n assert ireq.link.is_file\n if cache_entry.persistent and 
template.link is template.original_link:\n ireq.cached_wheel_source_link = source_link\n if cache_entry.origin is not None:\n ireq.download_info = cache_entry.origin\n else:\n # Legacy cache entry that does not have origin.json.\n # download_info may miss the archive_info.hashes field.\n ireq.download_info = direct_url_from_link(\n source_link, link_is_in_wheel_cache=cache_entry.persistent\n )\n\n super().__init__(\n link=link,\n source_link=source_link,\n ireq=ireq,\n factory=factory,\n name=name,\n version=version,\n )\n\n def _prepare_distribution(self) -> BaseDistribution:\n preparer = self._factory.preparer\n return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True)\n\n\nclass EditableCandidate(_InstallRequirementBackedCandidate):\n is_editable = True\n\n def __init__(\n self,\n link: Link,\n template: InstallRequirement,\n factory: "Factory",\n name: Optional[NormalizedName] = None,\n version: Optional[Version] = None,\n ) -> None:\n super().__init__(\n link=link,\n source_link=link,\n ireq=make_install_req_from_editable(link, template),\n factory=factory,\n name=name,\n version=version,\n )\n\n def _prepare_distribution(self) -> BaseDistribution:\n return self._factory.preparer.prepare_editable_requirement(self._ireq)\n\n\nclass AlreadyInstalledCandidate(Candidate):\n is_installed = True\n source_link = None\n\n def __init__(\n self,\n dist: BaseDistribution,\n template: InstallRequirement,\n factory: "Factory",\n ) -> None:\n self.dist = dist\n self._ireq = _make_install_req_from_dist(dist, template)\n self._factory = factory\n self._version = None\n\n # This is just logging some messages, so we can do it eagerly.\n # The returned dist would be exactly the same as self.dist because we\n # set satisfied_by in _make_install_req_from_dist.\n # TODO: Supply reason based on force_reinstall and upgrade_strategy.\n skip_reason = "already satisfied"\n factory.preparer.prepare_installed_requirement(self._ireq, skip_reason)\n\n def __str__(self) -> 
str:\n return str(self.dist)\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}({self.dist!r})"\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, AlreadyInstalledCandidate):\n return NotImplemented\n return self.name == other.name and self.version == other.version\n\n def __hash__(self) -> int:\n return hash((self.name, self.version))\n\n @property\n def project_name(self) -> NormalizedName:\n return self.dist.canonical_name\n\n @property\n def name(self) -> str:\n return self.project_name\n\n @property\n def version(self) -> Version:\n if self._version is None:\n self._version = self.dist.version\n return self._version\n\n @property\n def is_editable(self) -> bool:\n return self.dist.editable\n\n def format_for_error(self) -> str:\n return f"{self.name} {self.version} (Installed)"\n\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n if not with_requires:\n return\n\n try:\n for r in self.dist.iter_dependencies():\n yield from self._factory.make_requirements_from_spec(str(r), self._ireq)\n except InvalidRequirement as exc:\n raise InvalidInstalledPackage(dist=self.dist, invalid_exc=exc) from None\n\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n return None\n\n\nclass ExtrasCandidate(Candidate):\n """A candidate that has 'extras', indicating additional dependencies.\n\n Requirements can be for a project with dependencies, something like\n foo[extra]. The extras don't affect the project/version being installed\n directly, but indicate that we need additional dependencies. We model that\n by having an artificial ExtrasCandidate that wraps the "base" candidate.\n\n The ExtrasCandidate differs from the base in the following ways:\n\n 1. It has a unique name, of the form foo[extra]. This causes the resolver\n to treat it as a separate node in the dependency graph.\n 2. 
When we're getting the candidate's dependencies,\n a) We specify that we want the extra dependencies as well.\n b) We add a dependency on the base candidate.\n See below for why this is needed.\n 3. We return None for the underlying InstallRequirement, as the base\n candidate will provide it, and we don't want to end up with duplicates.\n\n The dependency on the base candidate is needed so that the resolver can't\n decide that it should recommend foo[extra1] version 1.0 and foo[extra2]\n version 2.0. Having those candidates depend on foo=1.0 and foo=2.0\n respectively forces the resolver to recognise that this is a conflict.\n """\n\n def __init__(\n self,\n base: BaseCandidate,\n extras: FrozenSet[str],\n *,\n comes_from: Optional[InstallRequirement] = None,\n ) -> None:\n """\n :param comes_from: the InstallRequirement that led to this candidate if it\n differs from the base's InstallRequirement. This will often be the\n case in the sense that this candidate's requirement has the extras\n while the base's does not. 
Unlike the InstallRequirement backed\n candidates, this requirement is used solely for reporting purposes,\n it does not do any leg work.\n """\n self.base = base\n self.extras = frozenset(canonicalize_name(e) for e in extras)\n self._comes_from = comes_from if comes_from is not None else self.base._ireq\n\n def __str__(self) -> str:\n name, rest = str(self.base).split(" ", 1)\n return "{}[{}] {}".format(name, ",".join(self.extras), rest)\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}(base={self.base!r}, extras={self.extras!r})"\n\n def __hash__(self) -> int:\n return hash((self.base, self.extras))\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, self.__class__):\n return self.base == other.base and self.extras == other.extras\n return False\n\n @property\n def project_name(self) -> NormalizedName:\n return self.base.project_name\n\n @property\n def name(self) -> str:\n """The normalised name of the project the candidate refers to"""\n return format_name(self.base.project_name, self.extras)\n\n @property\n def version(self) -> Version:\n return self.base.version\n\n def format_for_error(self) -> str:\n return "{} [{}]".format(\n self.base.format_for_error(), ", ".join(sorted(self.extras))\n )\n\n @property\n def is_installed(self) -> bool:\n return self.base.is_installed\n\n @property\n def is_editable(self) -> bool:\n return self.base.is_editable\n\n @property\n def source_link(self) -> Optional[Link]:\n return self.base.source_link\n\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n factory = self.base._factory\n\n # Add a dependency on the exact base\n # (See note 2b in the class docstring)\n yield factory.make_requirement_from_candidate(self.base)\n if not with_requires:\n return\n\n # The user may have specified extras that the candidate doesn't\n # support. 
We ignore any unsupported extras here.\n valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras())\n invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras())\n for extra in sorted(invalid_extras):\n logger.warning(\n "%s %s does not provide the extra '%s'",\n self.base.name,\n self.version,\n extra,\n )\n\n for r in self.base.dist.iter_dependencies(valid_extras):\n yield from factory.make_requirements_from_spec(\n str(r),\n self._comes_from,\n valid_extras,\n )\n\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n # We don't return anything here, because we always\n # depend on the base candidate, and we'll get the\n # install requirement from that.\n return None\n\n\nclass RequiresPythonCandidate(Candidate):\n is_installed = False\n source_link = None\n\n def __init__(self, py_version_info: Optional[Tuple[int, ...]]) -> None:\n if py_version_info is not None:\n version_info = normalize_version_info(py_version_info)\n else:\n version_info = sys.version_info[:3]\n self._version = Version(".".join(str(c) for c in version_info))\n\n # We don't need to implement __eq__() and __ne__() since there is always\n # only one RequiresPythonCandidate in a resolution, i.e. the host Python.\n # The built-in object.__eq__() and object.__ne__() do exactly what we want.\n\n def __str__(self) -> str:\n return f"Python {self._version}"\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}({self._version!r})"\n\n @property\n def project_name(self) -> NormalizedName:\n return REQUIRES_PYTHON_IDENTIFIER\n\n @property\n def name(self) -> str:\n return REQUIRES_PYTHON_IDENTIFIER\n\n @property\n def version(self) -> Version:\n return self._version\n\n def format_for_error(self) -> str:\n return f"Python {self.version}"\n\n def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:\n return ()\n\n def get_install_requirement(self) -> Optional[InstallRequirement]:\n return None\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\candidates.py
candidates.py
Python
20,241
0.95
0.196891
0.058212
react-lib
338
2024-08-24T06:06:15.986449
MIT
false
4c16f2051dc93e25d1ffed4158368b7a
import contextlib\nimport functools\nimport logging\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Dict,\n FrozenSet,\n Iterable,\n Iterator,\n List,\n Mapping,\n NamedTuple,\n Optional,\n Protocol,\n Sequence,\n Set,\n Tuple,\n TypeVar,\n cast,\n)\n\nfrom pip._vendor.packaging.requirements import InvalidRequirement\nfrom pip._vendor.packaging.specifiers import SpecifierSet\nfrom pip._vendor.packaging.utils import NormalizedName, canonicalize_name\nfrom pip._vendor.packaging.version import InvalidVersion, Version\nfrom pip._vendor.resolvelib import ResolutionImpossible\n\nfrom pip._internal.cache import CacheEntry, WheelCache\nfrom pip._internal.exceptions import (\n DistributionNotFound,\n InstallationError,\n InvalidInstalledPackage,\n MetadataInconsistent,\n MetadataInvalid,\n UnsupportedPythonVersion,\n UnsupportedWheel,\n)\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.metadata import BaseDistribution, get_default_environment\nfrom pip._internal.models.link import Link\nfrom pip._internal.models.wheel import Wheel\nfrom pip._internal.operations.prepare import RequirementPreparer\nfrom pip._internal.req.constructors import (\n install_req_drop_extras,\n install_req_from_link_and_ireq,\n)\nfrom pip._internal.req.req_install import (\n InstallRequirement,\n check_invalid_constraint_type,\n)\nfrom pip._internal.resolution.base import InstallRequirementProvider\nfrom pip._internal.utils.compatibility_tags import get_supported\nfrom pip._internal.utils.hashes import Hashes\nfrom pip._internal.utils.packaging import get_requirement\nfrom pip._internal.utils.virtualenv import running_under_virtualenv\n\nfrom .base import Candidate, Constraint, Requirement\nfrom .candidates import (\n AlreadyInstalledCandidate,\n BaseCandidate,\n EditableCandidate,\n ExtrasCandidate,\n LinkCandidate,\n RequiresPythonCandidate,\n as_base_candidate,\n)\nfrom .found_candidates import FoundCandidates, IndexCandidateInfo\nfrom .requirements import 
(\n ExplicitRequirement,\n RequiresPythonRequirement,\n SpecifierRequirement,\n SpecifierWithoutExtrasRequirement,\n UnsatisfiableRequirement,\n)\n\nif TYPE_CHECKING:\n\n class ConflictCause(Protocol):\n requirement: RequiresPythonRequirement\n parent: Candidate\n\n\nlogger = logging.getLogger(__name__)\n\nC = TypeVar("C")\nCache = Dict[Link, C]\n\n\nclass CollectedRootRequirements(NamedTuple):\n requirements: List[Requirement]\n constraints: Dict[str, Constraint]\n user_requested: Dict[str, int]\n\n\nclass Factory:\n def __init__(\n self,\n finder: PackageFinder,\n preparer: RequirementPreparer,\n make_install_req: InstallRequirementProvider,\n wheel_cache: Optional[WheelCache],\n use_user_site: bool,\n force_reinstall: bool,\n ignore_installed: bool,\n ignore_requires_python: bool,\n py_version_info: Optional[Tuple[int, ...]] = None,\n ) -> None:\n self._finder = finder\n self.preparer = preparer\n self._wheel_cache = wheel_cache\n self._python_candidate = RequiresPythonCandidate(py_version_info)\n self._make_install_req_from_spec = make_install_req\n self._use_user_site = use_user_site\n self._force_reinstall = force_reinstall\n self._ignore_requires_python = ignore_requires_python\n\n self._build_failures: Cache[InstallationError] = {}\n self._link_candidate_cache: Cache[LinkCandidate] = {}\n self._editable_candidate_cache: Cache[EditableCandidate] = {}\n self._installed_candidate_cache: Dict[str, AlreadyInstalledCandidate] = {}\n self._extras_candidate_cache: Dict[\n Tuple[int, FrozenSet[NormalizedName]], ExtrasCandidate\n ] = {}\n self._supported_tags_cache = get_supported()\n\n if not ignore_installed:\n env = get_default_environment()\n self._installed_dists = {\n dist.canonical_name: dist\n for dist in env.iter_installed_distributions(local_only=False)\n }\n else:\n self._installed_dists = {}\n\n @property\n def force_reinstall(self) -> bool:\n return self._force_reinstall\n\n def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None:\n if not 
link.is_wheel:\n return\n wheel = Wheel(link.filename)\n if wheel.supported(self._finder.target_python.get_unsorted_tags()):\n return\n msg = f"{link.filename} is not a supported wheel on this platform."\n raise UnsupportedWheel(msg)\n\n def _make_extras_candidate(\n self,\n base: BaseCandidate,\n extras: FrozenSet[str],\n *,\n comes_from: Optional[InstallRequirement] = None,\n ) -> ExtrasCandidate:\n cache_key = (id(base), frozenset(canonicalize_name(e) for e in extras))\n try:\n candidate = self._extras_candidate_cache[cache_key]\n except KeyError:\n candidate = ExtrasCandidate(base, extras, comes_from=comes_from)\n self._extras_candidate_cache[cache_key] = candidate\n return candidate\n\n def _make_candidate_from_dist(\n self,\n dist: BaseDistribution,\n extras: FrozenSet[str],\n template: InstallRequirement,\n ) -> Candidate:\n try:\n base = self._installed_candidate_cache[dist.canonical_name]\n except KeyError:\n base = AlreadyInstalledCandidate(dist, template, factory=self)\n self._installed_candidate_cache[dist.canonical_name] = base\n if not extras:\n return base\n return self._make_extras_candidate(base, extras, comes_from=template)\n\n def _make_candidate_from_link(\n self,\n link: Link,\n extras: FrozenSet[str],\n template: InstallRequirement,\n name: Optional[NormalizedName],\n version: Optional[Version],\n ) -> Optional[Candidate]:\n base: Optional[BaseCandidate] = self._make_base_candidate_from_link(\n link, template, name, version\n )\n if not extras or base is None:\n return base\n return self._make_extras_candidate(base, extras, comes_from=template)\n\n def _make_base_candidate_from_link(\n self,\n link: Link,\n template: InstallRequirement,\n name: Optional[NormalizedName],\n version: Optional[Version],\n ) -> Optional[BaseCandidate]:\n # TODO: Check already installed candidate, and use it if the link and\n # editable flag match.\n\n if link in self._build_failures:\n # We already tried this candidate before, and it does not build.\n # Don't 
bother trying again.\n return None\n\n if template.editable:\n if link not in self._editable_candidate_cache:\n try:\n self._editable_candidate_cache[link] = EditableCandidate(\n link,\n template,\n factory=self,\n name=name,\n version=version,\n )\n except (MetadataInconsistent, MetadataInvalid) as e:\n logger.info(\n "Discarding [blue underline]%s[/]: [yellow]%s[reset]",\n link,\n e,\n extra={"markup": True},\n )\n self._build_failures[link] = e\n return None\n\n return self._editable_candidate_cache[link]\n else:\n if link not in self._link_candidate_cache:\n try:\n self._link_candidate_cache[link] = LinkCandidate(\n link,\n template,\n factory=self,\n name=name,\n version=version,\n )\n except MetadataInconsistent as e:\n logger.info(\n "Discarding [blue underline]%s[/]: [yellow]%s[reset]",\n link,\n e,\n extra={"markup": True},\n )\n self._build_failures[link] = e\n return None\n return self._link_candidate_cache[link]\n\n def _iter_found_candidates(\n self,\n ireqs: Sequence[InstallRequirement],\n specifier: SpecifierSet,\n hashes: Hashes,\n prefers_installed: bool,\n incompatible_ids: Set[int],\n ) -> Iterable[Candidate]:\n if not ireqs:\n return ()\n\n # The InstallRequirement implementation requires us to give it a\n # "template". 
Here we just choose the first requirement to represent\n # all of them.\n # Hopefully the Project model can correct this mismatch in the future.\n template = ireqs[0]\n assert template.req, "Candidates found on index must be PEP 508"\n name = canonicalize_name(template.req.name)\n\n extras: FrozenSet[str] = frozenset()\n for ireq in ireqs:\n assert ireq.req, "Candidates found on index must be PEP 508"\n specifier &= ireq.req.specifier\n hashes &= ireq.hashes(trust_internet=False)\n extras |= frozenset(ireq.extras)\n\n def _get_installed_candidate() -> Optional[Candidate]:\n """Get the candidate for the currently-installed version."""\n # If --force-reinstall is set, we want the version from the index\n # instead, so we "pretend" there is nothing installed.\n if self._force_reinstall:\n return None\n try:\n installed_dist = self._installed_dists[name]\n except KeyError:\n return None\n\n try:\n # Don't use the installed distribution if its version\n # does not fit the current dependency graph.\n if not specifier.contains(installed_dist.version, prereleases=True):\n return None\n except InvalidVersion as e:\n raise InvalidInstalledPackage(dist=installed_dist, invalid_exc=e)\n\n candidate = self._make_candidate_from_dist(\n dist=installed_dist,\n extras=extras,\n template=template,\n )\n # The candidate is a known incompatibility. 
Don't use it.\n if id(candidate) in incompatible_ids:\n return None\n return candidate\n\n def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]:\n result = self._finder.find_best_candidate(\n project_name=name,\n specifier=specifier,\n hashes=hashes,\n )\n icans = result.applicable_candidates\n\n # PEP 592: Yanked releases are ignored unless the specifier\n # explicitly pins a version (via '==' or '===') that can be\n # solely satisfied by a yanked release.\n all_yanked = all(ican.link.is_yanked for ican in icans)\n\n def is_pinned(specifier: SpecifierSet) -> bool:\n for sp in specifier:\n if sp.operator == "===":\n return True\n if sp.operator != "==":\n continue\n if sp.version.endswith(".*"):\n continue\n return True\n return False\n\n pinned = is_pinned(specifier)\n\n # PackageFinder returns earlier versions first, so we reverse.\n for ican in reversed(icans):\n if not (all_yanked and pinned) and ican.link.is_yanked:\n continue\n func = functools.partial(\n self._make_candidate_from_link,\n link=ican.link,\n extras=extras,\n template=template,\n name=name,\n version=ican.version,\n )\n yield ican.version, func\n\n return FoundCandidates(\n iter_index_candidate_infos,\n _get_installed_candidate(),\n prefers_installed,\n incompatible_ids,\n )\n\n def _iter_explicit_candidates_from_base(\n self,\n base_requirements: Iterable[Requirement],\n extras: FrozenSet[str],\n ) -> Iterator[Candidate]:\n """Produce explicit candidates from the base given an extra-ed package.\n\n :param base_requirements: Requirements known to the resolver. 
The\n requirements are guaranteed to not have extras.\n :param extras: The extras to inject into the explicit requirements'\n candidates.\n """\n for req in base_requirements:\n lookup_cand, _ = req.get_candidate_lookup()\n if lookup_cand is None: # Not explicit.\n continue\n # We've stripped extras from the identifier, and should always\n # get a BaseCandidate here, unless there's a bug elsewhere.\n base_cand = as_base_candidate(lookup_cand)\n assert base_cand is not None, "no extras here"\n yield self._make_extras_candidate(base_cand, extras)\n\n def _iter_candidates_from_constraints(\n self,\n identifier: str,\n constraint: Constraint,\n template: InstallRequirement,\n ) -> Iterator[Candidate]:\n """Produce explicit candidates from constraints.\n\n This creates "fake" InstallRequirement objects that are basically clones\n of what "should" be the template, but with original_link set to link.\n """\n for link in constraint.links:\n self._fail_if_link_is_unsupported_wheel(link)\n candidate = self._make_base_candidate_from_link(\n link,\n template=install_req_from_link_and_ireq(link, template),\n name=canonicalize_name(identifier),\n version=None,\n )\n if candidate:\n yield candidate\n\n def find_candidates(\n self,\n identifier: str,\n requirements: Mapping[str, Iterable[Requirement]],\n incompatibilities: Mapping[str, Iterator[Candidate]],\n constraint: Constraint,\n prefers_installed: bool,\n is_satisfied_by: Callable[[Requirement, Candidate], bool],\n ) -> Iterable[Candidate]:\n # Collect basic lookup information from the requirements.\n explicit_candidates: Set[Candidate] = set()\n ireqs: List[InstallRequirement] = []\n for req in requirements[identifier]:\n cand, ireq = req.get_candidate_lookup()\n if cand is not None:\n explicit_candidates.add(cand)\n if ireq is not None:\n ireqs.append(ireq)\n\n # If the current identifier contains extras, add requires and explicit\n # candidates from entries from extra-less identifier.\n with 
contextlib.suppress(InvalidRequirement):\n parsed_requirement = get_requirement(identifier)\n if parsed_requirement.name != identifier:\n explicit_candidates.update(\n self._iter_explicit_candidates_from_base(\n requirements.get(parsed_requirement.name, ()),\n frozenset(parsed_requirement.extras),\n ),\n )\n for req in requirements.get(parsed_requirement.name, []):\n _, ireq = req.get_candidate_lookup()\n if ireq is not None:\n ireqs.append(ireq)\n\n # Add explicit candidates from constraints. We only do this if there are\n # known ireqs, which represent requirements not already explicit. If\n # there are no ireqs, we're constraining already-explicit requirements,\n # which is handled later when we return the explicit candidates.\n if ireqs:\n try:\n explicit_candidates.update(\n self._iter_candidates_from_constraints(\n identifier,\n constraint,\n template=ireqs[0],\n ),\n )\n except UnsupportedWheel:\n # If we're constrained to install a wheel incompatible with the\n # target architecture, no candidates will ever be valid.\n return ()\n\n # Since we cache all the candidates, incompatibility identification\n # can be made quicker by comparing only the id() values.\n incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}\n\n # If none of the requirements want an explicit candidate, we can ask\n # the finder for candidates.\n if not explicit_candidates:\n return self._iter_found_candidates(\n ireqs,\n constraint.specifier,\n constraint.hashes,\n prefers_installed,\n incompat_ids,\n )\n\n return (\n c\n for c in explicit_candidates\n if id(c) not in incompat_ids\n and constraint.is_satisfied_by(c)\n and all(is_satisfied_by(req, c) for req in requirements[identifier])\n )\n\n def _make_requirements_from_install_req(\n self, ireq: InstallRequirement, requested_extras: Iterable[str]\n ) -> Iterator[Requirement]:\n """\n Returns requirement objects associated with the given InstallRequirement. 
In\n most cases this will be a single object but the following special cases exist:\n - the InstallRequirement has markers that do not apply -> result is empty\n - the InstallRequirement has both a constraint (or link) and extras\n -> result is split in two requirement objects: one with the constraint\n (or link) and one with the extra. This allows centralized constraint\n handling for the base, resulting in fewer candidate rejections.\n """\n if not ireq.match_markers(requested_extras):\n logger.info(\n "Ignoring %s: markers '%s' don't match your environment",\n ireq.name,\n ireq.markers,\n )\n elif not ireq.link:\n if ireq.extras and ireq.req is not None and ireq.req.specifier:\n yield SpecifierWithoutExtrasRequirement(ireq)\n yield SpecifierRequirement(ireq)\n else:\n self._fail_if_link_is_unsupported_wheel(ireq.link)\n # Always make the link candidate for the base requirement to make it\n # available to `find_candidates` for explicit candidate lookup for any\n # set of extras.\n # The extras are required separately via a second requirement.\n cand = self._make_base_candidate_from_link(\n ireq.link,\n template=install_req_drop_extras(ireq) if ireq.extras else ireq,\n name=canonicalize_name(ireq.name) if ireq.name else None,\n version=None,\n )\n if cand is None:\n # There's no way we can satisfy a URL requirement if the underlying\n # candidate fails to build. An unnamed URL must be user-supplied, so\n # we fail eagerly. 
If the URL is named, an unsatisfiable requirement\n # can make the resolver do the right thing, either backtrack (and\n # maybe find some other requirement that's buildable) or raise a\n # ResolutionImpossible eventually.\n if not ireq.name:\n raise self._build_failures[ireq.link]\n yield UnsatisfiableRequirement(canonicalize_name(ireq.name))\n else:\n # require the base from the link\n yield self.make_requirement_from_candidate(cand)\n if ireq.extras:\n # require the extras on top of the base candidate\n yield self.make_requirement_from_candidate(\n self._make_extras_candidate(cand, frozenset(ireq.extras))\n )\n\n def collect_root_requirements(\n self, root_ireqs: List[InstallRequirement]\n ) -> CollectedRootRequirements:\n collected = CollectedRootRequirements([], {}, {})\n for i, ireq in enumerate(root_ireqs):\n if ireq.constraint:\n # Ensure we only accept valid constraints\n problem = check_invalid_constraint_type(ireq)\n if problem:\n raise InstallationError(problem)\n if not ireq.match_markers():\n continue\n assert ireq.name, "Constraint must be named"\n name = canonicalize_name(ireq.name)\n if name in collected.constraints:\n collected.constraints[name] &= ireq\n else:\n collected.constraints[name] = Constraint.from_ireq(ireq)\n else:\n reqs = list(\n self._make_requirements_from_install_req(\n ireq,\n requested_extras=(),\n )\n )\n if not reqs:\n continue\n template = reqs[0]\n if ireq.user_supplied and template.name not in collected.user_requested:\n collected.user_requested[template.name] = i\n collected.requirements.extend(reqs)\n # Put requirements with extras at the end of the root requires. This does not\n # affect resolvelib's picking preference but it does affect its initial criteria\n # population: by putting extras at the end we enable the candidate finder to\n # present resolvelib with a smaller set of candidates to resolvelib, already\n # taking into account any non-transient constraints on the associated base. 
This\n # means resolvelib will have fewer candidates to visit and reject.\n # Python's list sort is stable, meaning relative order is kept for objects with\n # the same key.\n collected.requirements.sort(key=lambda r: r.name != r.project_name)\n return collected\n\n def make_requirement_from_candidate(\n self, candidate: Candidate\n ) -> ExplicitRequirement:\n return ExplicitRequirement(candidate)\n\n def make_requirements_from_spec(\n self,\n specifier: str,\n comes_from: Optional[InstallRequirement],\n requested_extras: Iterable[str] = (),\n ) -> Iterator[Requirement]:\n """\n Returns requirement objects associated with the given specifier. In most cases\n this will be a single object but the following special cases exist:\n - the specifier has markers that do not apply -> result is empty\n - the specifier has both a constraint and extras -> result is split\n in two requirement objects: one with the constraint and one with the\n extra. This allows centralized constraint handling for the base,\n resulting in fewer candidate rejections.\n """\n ireq = self._make_install_req_from_spec(specifier, comes_from)\n return self._make_requirements_from_install_req(ireq, requested_extras)\n\n def make_requires_python_requirement(\n self,\n specifier: SpecifierSet,\n ) -> Optional[Requirement]:\n if self._ignore_requires_python:\n return None\n # Don't bother creating a dependency for an empty Requires-Python.\n if not str(specifier):\n return None\n return RequiresPythonRequirement(specifier, self._python_candidate)\n\n def get_wheel_cache_entry(\n self, link: Link, name: Optional[str]\n ) -> Optional[CacheEntry]:\n """Look up the link in the wheel cache.\n\n If ``preparer.require_hashes`` is True, don't use the wheel cache,\n because cached wheels, always built locally, have different hashes\n than the files downloaded from the index server and thus throw false\n hash mismatches. 
Furthermore, cached wheels at present have\n nondeterministic contents due to file modification times.\n """\n if self._wheel_cache is None:\n return None\n return self._wheel_cache.get_cache_entry(\n link=link,\n package_name=name,\n supported_tags=self._supported_tags_cache,\n )\n\n def get_dist_to_uninstall(self, candidate: Candidate) -> Optional[BaseDistribution]:\n # TODO: Are there more cases this needs to return True? Editable?\n dist = self._installed_dists.get(candidate.project_name)\n if dist is None: # Not installed, no uninstallation required.\n return None\n\n # We're installing into global site. The current installation must\n # be uninstalled, no matter it's in global or user site, because the\n # user site installation has precedence over global.\n if not self._use_user_site:\n return dist\n\n # We're installing into user site. Remove the user site installation.\n if dist.in_usersite:\n return dist\n\n # We're installing into user site, but the installed incompatible\n # package is in global site. We can't uninstall that, and would let\n # the new user installation to "shadow" it. But shadowing won't work\n # in virtual environments, so we error out.\n if running_under_virtualenv() and dist.in_site_packages:\n message = (\n f"Will not install to the user site because it will lack "\n f"sys.path precedence to {dist.raw_name} in {dist.location}"\n )\n raise InstallationError(message)\n return None\n\n def _report_requires_python_error(\n self, causes: Sequence["ConflictCause"]\n ) -> UnsupportedPythonVersion:\n assert causes, "Requires-Python error reported with no cause"\n\n version = self._python_candidate.version\n\n if len(causes) == 1:\n specifier = str(causes[0].requirement.specifier)\n message = (\n f"Package {causes[0].parent.name!r} requires a different "\n f"Python: {version} not in {specifier!r}"\n )\n return UnsupportedPythonVersion(message)\n\n message = f"Packages require a different Python. 
{version} not in:"\n for cause in causes:\n package = cause.parent.format_for_error()\n specifier = str(cause.requirement.specifier)\n message += f"\n{specifier!r} (required by {package})"\n return UnsupportedPythonVersion(message)\n\n def _report_single_requirement_conflict(\n self, req: Requirement, parent: Optional[Candidate]\n ) -> DistributionNotFound:\n if parent is None:\n req_disp = str(req)\n else:\n req_disp = f"{req} (from {parent.name})"\n\n cands = self._finder.find_all_candidates(req.project_name)\n skipped_by_requires_python = self._finder.requires_python_skipped_reasons()\n\n versions_set: Set[Version] = set()\n yanked_versions_set: Set[Version] = set()\n for c in cands:\n is_yanked = c.link.is_yanked if c.link else False\n if is_yanked:\n yanked_versions_set.add(c.version)\n else:\n versions_set.add(c.version)\n\n versions = [str(v) for v in sorted(versions_set)]\n yanked_versions = [str(v) for v in sorted(yanked_versions_set)]\n\n if yanked_versions:\n # Saying "version X is yanked" isn't entirely accurate.\n # https://github.com/pypa/pip/issues/11745#issuecomment-1402805842\n logger.critical(\n "Ignored the following yanked versions: %s",\n ", ".join(yanked_versions) or "none",\n )\n if skipped_by_requires_python:\n logger.critical(\n "Ignored the following versions that require a different python "\n "version: %s",\n "; ".join(skipped_by_requires_python) or "none",\n )\n logger.critical(\n "Could not find a version that satisfies the requirement %s "\n "(from versions: %s)",\n req_disp,\n ", ".join(versions) or "none",\n )\n if str(req) == "requirements.txt":\n logger.info(\n "HINT: You are attempting to install a package literally "\n 'named "requirements.txt" (which cannot exist). 
Consider '\n "using the '-r' flag to install the packages listed in "\n "requirements.txt"\n )\n\n return DistributionNotFound(f"No matching distribution found for {req}")\n\n def get_installation_error(\n self,\n e: "ResolutionImpossible[Requirement, Candidate]",\n constraints: Dict[str, Constraint],\n ) -> InstallationError:\n assert e.causes, "Installation error reported with no cause"\n\n # If one of the things we can't solve is "we need Python X.Y",\n # that is what we report.\n requires_python_causes = [\n cause\n for cause in e.causes\n if isinstance(cause.requirement, RequiresPythonRequirement)\n and not cause.requirement.is_satisfied_by(self._python_candidate)\n ]\n if requires_python_causes:\n # The comprehension above makes sure all Requirement instances are\n # RequiresPythonRequirement, so let's cast for convenience.\n return self._report_requires_python_error(\n cast("Sequence[ConflictCause]", requires_python_causes),\n )\n\n # Otherwise, we have a set of causes which can't all be satisfied\n # at once.\n\n # The simplest case is when we have *one* cause that can't be\n # satisfied. 
We just report that case.\n if len(e.causes) == 1:\n req, parent = next(iter(e.causes))\n if req.name not in constraints:\n return self._report_single_requirement_conflict(req, parent)\n\n # OK, we now have a list of requirements that can't all be\n # satisfied at once.\n\n # A couple of formatting helpers\n def text_join(parts: List[str]) -> str:\n if len(parts) == 1:\n return parts[0]\n\n return ", ".join(parts[:-1]) + " and " + parts[-1]\n\n def describe_trigger(parent: Candidate) -> str:\n ireq = parent.get_install_requirement()\n if not ireq or not ireq.comes_from:\n return f"{parent.name}=={parent.version}"\n if isinstance(ireq.comes_from, InstallRequirement):\n return str(ireq.comes_from.name)\n return str(ireq.comes_from)\n\n triggers = set()\n for req, parent in e.causes:\n if parent is None:\n # This is a root requirement, so we can report it directly\n trigger = req.format_for_error()\n else:\n trigger = describe_trigger(parent)\n triggers.add(trigger)\n\n if triggers:\n info = text_join(sorted(triggers))\n else:\n info = "the requested packages"\n\n msg = (\n f"Cannot install {info} because these package versions "\n "have conflicting dependencies."\n )\n logger.critical(msg)\n msg = "\nThe conflict is caused by:"\n\n relevant_constraints = set()\n for req, parent in e.causes:\n if req.name in constraints:\n relevant_constraints.add(req.name)\n msg = msg + "\n "\n if parent:\n msg = msg + f"{parent.name} {parent.version} depends on "\n else:\n msg = msg + "The user requested "\n msg = msg + req.format_for_error()\n for key in relevant_constraints:\n spec = constraints[key].specifier\n msg += f"\n The user requested (constraint) {key}{spec}"\n\n msg = (\n msg\n + "\n\n"\n + "To fix this you could try to:\n"\n + "1. loosen the range of package versions you've specified\n"\n + "2. 
remove package versions to allow pip to attempt to solve "\n + "the dependency conflict\n"\n )\n\n logger.info(msg)\n\n return DistributionNotFound(\n "ResolutionImpossible: for help visit "\n "https://pip.pypa.io/en/latest/topics/dependency-resolution/"\n "#dealing-with-dependency-conflicts"\n )\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\factory.py
factory.py
Python
32,668
0.95
0.170109
0.105121
node-utils
988
2024-05-07T11:57:15.833645
MIT
false
473d266e37193aff9ee485aef250c20f
"""Utilities to lazily create and visit candidates found.\n\nCreating and visiting a candidate is a *very* costly operation. It involves\nfetching, extracting, potentially building modules from source, and verifying\ndistribution metadata. It is therefore crucial for performance to keep\neverything here lazy all the way down, so we only touch candidates that we\nabsolutely need, and not "download the world" when we only need one version of\nsomething.\n"""\n\nimport logging\nfrom collections.abc import Sequence\nfrom typing import Any, Callable, Iterator, Optional, Set, Tuple\n\nfrom pip._vendor.packaging.version import _BaseVersion\n\nfrom pip._internal.exceptions import MetadataInvalid\n\nfrom .base import Candidate\n\nlogger = logging.getLogger(__name__)\n\nIndexCandidateInfo = Tuple[_BaseVersion, Callable[[], Optional[Candidate]]]\n\n\ndef _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]:\n """Iterator for ``FoundCandidates``.\n\n This iterator is used when the package is not already installed. Candidates\n from index come later in their normal ordering.\n """\n versions_found: Set[_BaseVersion] = set()\n for version, func in infos:\n if version in versions_found:\n continue\n try:\n candidate = func()\n except MetadataInvalid as e:\n logger.warning(\n "Ignoring version %s of %s since it has invalid metadata:\n"\n "%s\n"\n "Please use pip<24.1 if you need to use this version.",\n version,\n e.ireq.name,\n e,\n )\n # Mark version as found to avoid trying other candidates with the same\n # version, since they most likely have invalid metadata as well.\n versions_found.add(version)\n else:\n if candidate is None:\n continue\n yield candidate\n versions_found.add(version)\n\n\ndef _iter_built_with_prepended(\n installed: Candidate, infos: Iterator[IndexCandidateInfo]\n) -> Iterator[Candidate]:\n """Iterator for ``FoundCandidates``.\n\n This iterator is used when the resolver prefers the already-installed\n candidate and NOT to upgrade. 
The installed candidate is therefore\n always yielded first, and candidates from index come later in their\n normal ordering, except skipped when the version is already installed.\n """\n yield installed\n versions_found: Set[_BaseVersion] = {installed.version}\n for version, func in infos:\n if version in versions_found:\n continue\n candidate = func()\n if candidate is None:\n continue\n yield candidate\n versions_found.add(version)\n\n\ndef _iter_built_with_inserted(\n installed: Candidate, infos: Iterator[IndexCandidateInfo]\n) -> Iterator[Candidate]:\n """Iterator for ``FoundCandidates``.\n\n This iterator is used when the resolver prefers to upgrade an\n already-installed package. Candidates from index are returned in their\n normal ordering, except replaced when the version is already installed.\n\n The implementation iterates through and yields other candidates, inserting\n the installed candidate exactly once before we start yielding older or\n equivalent candidates, or after all other candidates if they are all newer.\n """\n versions_found: Set[_BaseVersion] = set()\n for version, func in infos:\n if version in versions_found:\n continue\n # If the installed candidate is better, yield it first.\n if installed.version >= version:\n yield installed\n versions_found.add(installed.version)\n candidate = func()\n if candidate is None:\n continue\n yield candidate\n versions_found.add(version)\n\n # If the installed candidate is older than all other candidates.\n if installed.version not in versions_found:\n yield installed\n\n\nclass FoundCandidates(Sequence[Candidate]):\n """A lazy sequence to provide candidates to the resolver.\n\n The intended usage is to return this from `find_matches()` so the resolver\n can iterate through the sequence multiple times, but only access the index\n page when remote packages are actually needed. 
This improve performances\n when suitable candidates are already installed on disk.\n """\n\n def __init__(\n self,\n get_infos: Callable[[], Iterator[IndexCandidateInfo]],\n installed: Optional[Candidate],\n prefers_installed: bool,\n incompatible_ids: Set[int],\n ):\n self._get_infos = get_infos\n self._installed = installed\n self._prefers_installed = prefers_installed\n self._incompatible_ids = incompatible_ids\n self._bool: Optional[bool] = None\n\n def __getitem__(self, index: Any) -> Any:\n # Implemented to satisfy the ABC check. This is not needed by the\n # resolver, and should not be used by the provider either (for\n # performance reasons).\n raise NotImplementedError("don't do this")\n\n def __iter__(self) -> Iterator[Candidate]:\n infos = self._get_infos()\n if not self._installed:\n iterator = _iter_built(infos)\n elif self._prefers_installed:\n iterator = _iter_built_with_prepended(self._installed, infos)\n else:\n iterator = _iter_built_with_inserted(self._installed, infos)\n return (c for c in iterator if id(c) not in self._incompatible_ids)\n\n def __len__(self) -> int:\n # Implemented to satisfy the ABC check. This is not needed by the\n # resolver, and should not be used by the provider either (for\n # performance reasons).\n raise NotImplementedError("don't do this")\n\n def __bool__(self) -> bool:\n if self._bool is not None:\n return self._bool\n\n if self._prefers_installed and self._installed:\n self._bool = True\n return True\n\n self._bool = any(self)\n return self._bool\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\found_candidates.py
found_candidates.py
Python
6,000
0.95
0.207317
0.073529
awesome-app
782
2023-08-22T01:12:00.104792
MIT
false
cbcb2533bd8bbbc08152a1d353fcfb7b
import math\nfrom functools import lru_cache\nfrom typing import (\n TYPE_CHECKING,\n Dict,\n Iterable,\n Iterator,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n)\n\nfrom pip._vendor.resolvelib.providers import AbstractProvider\n\nfrom pip._internal.req.req_install import InstallRequirement\n\nfrom .base import Candidate, Constraint, Requirement\nfrom .candidates import REQUIRES_PYTHON_IDENTIFIER\nfrom .factory import Factory\nfrom .requirements import ExplicitRequirement\n\nif TYPE_CHECKING:\n from pip._vendor.resolvelib.providers import Preference\n from pip._vendor.resolvelib.resolvers import RequirementInformation\n\n PreferenceInformation = RequirementInformation[Requirement, Candidate]\n\n _ProviderBase = AbstractProvider[Requirement, Candidate, str]\nelse:\n _ProviderBase = AbstractProvider\n\n# Notes on the relationship between the provider, the factory, and the\n# candidate and requirement classes.\n#\n# The provider is a direct implementation of the resolvelib class. Its role\n# is to deliver the API that resolvelib expects.\n#\n# Rather than work with completely abstract "requirement" and "candidate"\n# concepts as resolvelib does, pip has concrete classes implementing these two\n# ideas. The API of Requirement and Candidate objects are defined in the base\n# classes, but essentially map fairly directly to the equivalent provider\n# methods. In particular, `find_matches` and `is_satisfied_by` are\n# requirement methods, and `get_dependencies` is a candidate method.\n#\n# The factory is the interface to pip's internal mechanisms. It is stateless,\n# and is created by the resolver and held as a property of the provider. 
It is\n# responsible for creating Requirement and Candidate objects, and provides\n# services to those objects (access to pip's finder and preparer).\n\n\nD = TypeVar("D")\nV = TypeVar("V")\n\n\ndef _get_with_identifier(\n mapping: Mapping[str, V],\n identifier: str,\n default: D,\n) -> Union[D, V]:\n """Get item from a package name lookup mapping with a resolver identifier.\n\n This extra logic is needed when the target mapping is keyed by package\n name, which cannot be directly looked up with an identifier (which may\n contain requested extras). Additional logic is added to also look up a value\n by "cleaning up" the extras from the identifier.\n """\n if identifier in mapping:\n return mapping[identifier]\n # HACK: Theoretically we should check whether this identifier is a valid\n # "NAME[EXTRAS]" format, and parse out the name part with packaging or\n # some regular expression. But since pip's resolver only spits out three\n # kinds of identifiers: normalized PEP 503 names, normalized names plus\n # extras, and Requires-Python, we can cheat a bit here.\n name, open_bracket, _ = identifier.partition("[")\n if open_bracket and name in mapping:\n return mapping[name]\n return default\n\n\nclass PipProvider(_ProviderBase):\n """Pip's provider implementation for resolvelib.\n\n :params constraints: A mapping of constraints specified by the user. 
Keys\n are canonicalized project names.\n :params ignore_dependencies: Whether the user specified ``--no-deps``.\n :params upgrade_strategy: The user-specified upgrade strategy.\n :params user_requested: A set of canonicalized package names that the user\n supplied for pip to install/upgrade.\n """\n\n def __init__(\n self,\n factory: Factory,\n constraints: Dict[str, Constraint],\n ignore_dependencies: bool,\n upgrade_strategy: str,\n user_requested: Dict[str, int],\n ) -> None:\n self._factory = factory\n self._constraints = constraints\n self._ignore_dependencies = ignore_dependencies\n self._upgrade_strategy = upgrade_strategy\n self._user_requested = user_requested\n\n def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str:\n return requirement_or_candidate.name\n\n def narrow_requirement_selection(\n self,\n identifiers: Iterable[str],\n resolutions: Mapping[str, Candidate],\n candidates: Mapping[str, Iterator[Candidate]],\n information: Mapping[str, Iterator["PreferenceInformation"]],\n backtrack_causes: Sequence["PreferenceInformation"],\n ) -> Iterable[str]:\n """Produce a subset of identifiers that should be considered before others.\n\n Currently pip narrows the following selection:\n * Requires-Python, if present is always returned by itself\n * Backtrack causes are considered next because they can be identified\n in linear time here, whereas because get_preference() is called\n for each identifier, it would be quadratic to check for them there.\n Further, the current backtrack causes likely need to be resolved\n before other requirements as a resolution can't be found while\n there is a conflict.\n """\n backtrack_identifiers = set()\n for info in backtrack_causes:\n backtrack_identifiers.add(info.requirement.name)\n if info.parent is not None:\n backtrack_identifiers.add(info.parent.name)\n\n current_backtrack_causes = []\n for identifier in identifiers:\n # Requires-Python has only one candidate and the check is 
basically\n # free, so we always do it first to avoid needless work if it fails.\n # This skips calling get_preference() for all other identifiers.\n if identifier == REQUIRES_PYTHON_IDENTIFIER:\n return [identifier]\n\n # Check if this identifier is a backtrack cause\n if identifier in backtrack_identifiers:\n current_backtrack_causes.append(identifier)\n continue\n\n if current_backtrack_causes:\n return current_backtrack_causes\n\n return identifiers\n\n def get_preference(\n self,\n identifier: str,\n resolutions: Mapping[str, Candidate],\n candidates: Mapping[str, Iterator[Candidate]],\n information: Mapping[str, Iterable["PreferenceInformation"]],\n backtrack_causes: Sequence["PreferenceInformation"],\n ) -> "Preference":\n """Produce a sort key for given requirement based on preference.\n\n The lower the return value is, the more preferred this group of\n arguments is.\n\n Currently pip considers the following in order:\n\n * Any requirement that is "direct", e.g., points to an explicit URL.\n * Any requirement that is "pinned", i.e., contains the operator ``===``\n or ``==`` without a wildcard.\n * Any requirement that imposes an upper version limit, i.e., contains the\n operator ``<``, ``<=``, ``~=``, or ``==`` with a wildcard. Because\n pip prioritizes the latest version, preferring explicit upper bounds\n can rule out infeasible candidates sooner. 
This does not imply that\n upper bounds are good practice; they can make dependency management\n and resolution harder.\n * Order user-specified requirements as they are specified, placing\n other requirements afterward.\n * Any "non-free" requirement, i.e., one that contains at least one\n operator, such as ``>=`` or ``!=``.\n * Alphabetical order for consistency (aids debuggability).\n """\n try:\n next(iter(information[identifier]))\n except StopIteration:\n # There is no information for this identifier, so there's no known\n # candidates.\n has_information = False\n else:\n has_information = True\n\n if not has_information:\n direct = False\n ireqs: Tuple[Optional[InstallRequirement], ...] = ()\n else:\n # Go through the information and for each requirement,\n # check if it's explicit (e.g., a direct link) and get the\n # InstallRequirement (the second element) from get_candidate_lookup()\n directs, ireqs = zip(\n *(\n (isinstance(r, ExplicitRequirement), r.get_candidate_lookup()[1])\n for r, _ in information[identifier]\n )\n )\n direct = any(directs)\n\n operators: list[tuple[str, str]] = [\n (specifier.operator, specifier.version)\n for specifier_set in (ireq.specifier for ireq in ireqs if ireq)\n for specifier in specifier_set\n ]\n\n pinned = any(((op[:2] == "==") and ("*" not in ver)) for op, ver in operators)\n upper_bounded = any(\n ((op in ("<", "<=", "~=")) or (op == "==" and "*" in ver))\n for op, ver in operators\n )\n unfree = bool(operators)\n requested_order = self._user_requested.get(identifier, math.inf)\n\n return (\n not direct,\n not pinned,\n not upper_bounded,\n requested_order,\n not unfree,\n identifier,\n )\n\n def find_matches(\n self,\n identifier: str,\n requirements: Mapping[str, Iterator[Requirement]],\n incompatibilities: Mapping[str, Iterator[Candidate]],\n ) -> Iterable[Candidate]:\n def _eligible_for_upgrade(identifier: str) -> bool:\n """Are upgrades allowed for this project?\n\n This checks the upgrade strategy, and whether 
the project was one\n that the user specified in the command line, in order to decide\n whether we should upgrade if there's a newer version available.\n\n (Note that we don't need access to the `--upgrade` flag, because\n an upgrade strategy of "to-satisfy-only" means that `--upgrade`\n was not specified).\n """\n if self._upgrade_strategy == "eager":\n return True\n elif self._upgrade_strategy == "only-if-needed":\n user_order = _get_with_identifier(\n self._user_requested,\n identifier,\n default=None,\n )\n return user_order is not None\n return False\n\n constraint = _get_with_identifier(\n self._constraints,\n identifier,\n default=Constraint.empty(),\n )\n return self._factory.find_candidates(\n identifier=identifier,\n requirements=requirements,\n constraint=constraint,\n prefers_installed=(not _eligible_for_upgrade(identifier)),\n incompatibilities=incompatibilities,\n is_satisfied_by=self.is_satisfied_by,\n )\n\n @staticmethod\n @lru_cache(maxsize=None)\n def is_satisfied_by(requirement: Requirement, candidate: Candidate) -> bool:\n return requirement.is_satisfied_by(candidate)\n\n def get_dependencies(self, candidate: Candidate) -> Iterable[Requirement]:\n with_requires = not self._ignore_dependencies\n # iter_dependencies() can perform nontrivial work so delay until needed.\n return (r for r in candidate.iter_dependencies(with_requires) if r is not None)\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\provider.py
provider.py
Python
11,190
0.95
0.177936
0.168033
react-lib
65
2025-02-13T15:22:39.199370
MIT
false
77963c4970faa170a4a2e9d27671a0db
from collections import defaultdict\nfrom logging import getLogger\nfrom typing import Any, DefaultDict, Optional\n\nfrom pip._vendor.resolvelib.reporters import BaseReporter\n\nfrom .base import Candidate, Requirement\n\nlogger = getLogger(__name__)\n\n\nclass PipReporter(BaseReporter[Requirement, Candidate, str]):\n def __init__(self) -> None:\n self.reject_count_by_package: DefaultDict[str, int] = defaultdict(int)\n\n self._messages_at_reject_count = {\n 1: (\n "pip is looking at multiple versions of {package_name} to "\n "determine which version is compatible with other "\n "requirements. This could take a while."\n ),\n 8: (\n "pip is still looking at multiple versions of {package_name} to "\n "determine which version is compatible with other "\n "requirements. This could take a while."\n ),\n 13: (\n "This is taking longer than usual. You might need to provide "\n "the dependency resolver with stricter constraints to reduce "\n "runtime. See https://pip.pypa.io/warnings/backtracking for "\n "guidance. 
If you want to abort this run, press Ctrl + C."\n ),\n }\n\n def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None:\n self.reject_count_by_package[candidate.name] += 1\n\n count = self.reject_count_by_package[candidate.name]\n if count not in self._messages_at_reject_count:\n return\n\n message = self._messages_at_reject_count[count]\n logger.info("INFO: %s", message.format(package_name=candidate.name))\n\n msg = "Will try a different candidate, due to conflict:"\n for req_info in criterion.information:\n req, parent = req_info.requirement, req_info.parent\n # Inspired by Factory.get_installation_error\n msg += "\n "\n if parent:\n msg += f"{parent.name} {parent.version} depends on "\n else:\n msg += "The user requested "\n msg += req.format_for_error()\n logger.debug(msg)\n\n\nclass PipDebuggingReporter(BaseReporter[Requirement, Candidate, str]):\n """A reporter that does an info log for every event it sees."""\n\n def starting(self) -> None:\n logger.info("Reporter.starting()")\n\n def starting_round(self, index: int) -> None:\n logger.info("Reporter.starting_round(%r)", index)\n\n def ending_round(self, index: int, state: Any) -> None:\n logger.info("Reporter.ending_round(%r, state)", index)\n logger.debug("Reporter.ending_round(%r, %r)", index, state)\n\n def ending(self, state: Any) -> None:\n logger.info("Reporter.ending(%r)", state)\n\n def adding_requirement(\n self, requirement: Requirement, parent: Optional[Candidate]\n ) -> None:\n logger.info("Reporter.adding_requirement(%r, %r)", requirement, parent)\n\n def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None:\n logger.info("Reporter.rejecting_candidate(%r, %r)", criterion, candidate)\n\n def pinning(self, candidate: Candidate) -> None:\n logger.info("Reporter.pinning(%r)", candidate)\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\reporter.py
reporter.py
Python
3,260
0.95
0.228916
0.015625
node-utils
100
2024-10-13T07:28:39.214597
MIT
false
54e2a4150bc692d6138e5c6c53f918e3
from typing import Any, Optional\n\nfrom pip._vendor.packaging.specifiers import SpecifierSet\nfrom pip._vendor.packaging.utils import NormalizedName, canonicalize_name\n\nfrom pip._internal.req.constructors import install_req_drop_extras\nfrom pip._internal.req.req_install import InstallRequirement\n\nfrom .base import Candidate, CandidateLookup, Requirement, format_name\n\n\nclass ExplicitRequirement(Requirement):\n def __init__(self, candidate: Candidate) -> None:\n self.candidate = candidate\n\n def __str__(self) -> str:\n return str(self.candidate)\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}({self.candidate!r})"\n\n def __hash__(self) -> int:\n return hash(self.candidate)\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, ExplicitRequirement):\n return False\n return self.candidate == other.candidate\n\n @property\n def project_name(self) -> NormalizedName:\n # No need to canonicalize - the candidate did this\n return self.candidate.project_name\n\n @property\n def name(self) -> str:\n # No need to canonicalize - the candidate did this\n return self.candidate.name\n\n def format_for_error(self) -> str:\n return self.candidate.format_for_error()\n\n def get_candidate_lookup(self) -> CandidateLookup:\n return self.candidate, None\n\n def is_satisfied_by(self, candidate: Candidate) -> bool:\n return candidate == self.candidate\n\n\nclass SpecifierRequirement(Requirement):\n def __init__(self, ireq: InstallRequirement) -> None:\n assert ireq.link is None, "This is a link, not a specifier"\n self._ireq = ireq\n self._equal_cache: Optional[str] = None\n self._hash: Optional[int] = None\n self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras)\n\n @property\n def _equal(self) -> str:\n if self._equal_cache is not None:\n return self._equal_cache\n\n self._equal_cache = str(self._ireq)\n return self._equal_cache\n\n def __str__(self) -> str:\n return str(self._ireq.req)\n\n def __repr__(self) -> str:\n return 
f"{self.__class__.__name__}({str(self._ireq.req)!r})"\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, SpecifierRequirement):\n return NotImplemented\n return self._equal == other._equal\n\n def __hash__(self) -> int:\n if self._hash is not None:\n return self._hash\n\n self._hash = hash(self._equal)\n return self._hash\n\n @property\n def project_name(self) -> NormalizedName:\n assert self._ireq.req, "Specifier-backed ireq is always PEP 508"\n return canonicalize_name(self._ireq.req.name)\n\n @property\n def name(self) -> str:\n return format_name(self.project_name, self._extras)\n\n def format_for_error(self) -> str:\n # Convert comma-separated specifiers into "A, B, ..., F and G"\n # This makes the specifier a bit more "human readable", without\n # risking a change in meaning. (Hopefully! Not all edge cases have\n # been checked)\n parts = [s.strip() for s in str(self).split(",")]\n if len(parts) == 0:\n return ""\n elif len(parts) == 1:\n return parts[0]\n\n return ", ".join(parts[:-1]) + " and " + parts[-1]\n\n def get_candidate_lookup(self) -> CandidateLookup:\n return None, self._ireq\n\n def is_satisfied_by(self, candidate: Candidate) -> bool:\n assert candidate.name == self.name, (\n f"Internal issue: Candidate is not for this requirement "\n f"{candidate.name} vs {self.name}"\n )\n # We can safely always allow prereleases here since PackageFinder\n # already implements the prerelease logic, and would have filtered out\n # prerelease candidates if the user does not expect them.\n assert self._ireq.req, "Specifier-backed ireq is always PEP 508"\n spec = self._ireq.req.specifier\n return spec.contains(candidate.version, prereleases=True)\n\n\nclass SpecifierWithoutExtrasRequirement(SpecifierRequirement):\n """\n Requirement backed by an install requirement on a base package.\n Trims extras from its install requirement if there are any.\n """\n\n def __init__(self, ireq: InstallRequirement) -> None:\n assert ireq.link is None, "This is 
a link, not a specifier"\n self._ireq = install_req_drop_extras(ireq)\n self._equal_cache: Optional[str] = None\n self._hash: Optional[int] = None\n self._extras = frozenset(canonicalize_name(e) for e in self._ireq.extras)\n\n @property\n def _equal(self) -> str:\n if self._equal_cache is not None:\n return self._equal_cache\n\n self._equal_cache = str(self._ireq)\n return self._equal_cache\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, SpecifierWithoutExtrasRequirement):\n return NotImplemented\n return self._equal == other._equal\n\n def __hash__(self) -> int:\n if self._hash is not None:\n return self._hash\n\n self._hash = hash(self._equal)\n return self._hash\n\n\nclass RequiresPythonRequirement(Requirement):\n """A requirement representing Requires-Python metadata."""\n\n def __init__(self, specifier: SpecifierSet, match: Candidate) -> None:\n self.specifier = specifier\n self._specifier_string = str(specifier) # for faster __eq__\n self._hash: Optional[int] = None\n self._candidate = match\n\n def __str__(self) -> str:\n return f"Python {self.specifier}"\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}({str(self.specifier)!r})"\n\n def __hash__(self) -> int:\n if self._hash is not None:\n return self._hash\n\n self._hash = hash((self._specifier_string, self._candidate))\n return self._hash\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, RequiresPythonRequirement):\n return False\n return (\n self._specifier_string == other._specifier_string\n and self._candidate == other._candidate\n )\n\n @property\n def project_name(self) -> NormalizedName:\n return self._candidate.project_name\n\n @property\n def name(self) -> str:\n return self._candidate.name\n\n def format_for_error(self) -> str:\n return str(self)\n\n def get_candidate_lookup(self) -> CandidateLookup:\n if self.specifier.contains(self._candidate.version, prereleases=True):\n return self._candidate, None\n return None, None\n\n def 
is_satisfied_by(self, candidate: Candidate) -> bool:\n assert candidate.name == self._candidate.name, "Not Python candidate"\n # We can safely always allow prereleases here since PackageFinder\n # already implements the prerelease logic, and would have filtered out\n # prerelease candidates if the user does not expect them.\n return self.specifier.contains(candidate.version, prereleases=True)\n\n\nclass UnsatisfiableRequirement(Requirement):\n """A requirement that cannot be satisfied."""\n\n def __init__(self, name: NormalizedName) -> None:\n self._name = name\n\n def __str__(self) -> str:\n return f"{self._name} (unavailable)"\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}({str(self._name)!r})"\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, UnsatisfiableRequirement):\n return NotImplemented\n return self._name == other._name\n\n def __hash__(self) -> int:\n return hash(self._name)\n\n @property\n def project_name(self) -> NormalizedName:\n return self._name\n\n @property\n def name(self) -> str:\n return self._name\n\n def format_for_error(self) -> str:\n return str(self)\n\n def get_candidate_lookup(self) -> CandidateLookup:\n return None, None\n\n def is_satisfied_by(self, candidate: Candidate) -> bool:\n return False\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\requirements.py
requirements.py
Python
8,065
0.95
0.285714
0.065574
node-utils
745
2023-10-07T23:53:51.851693
BSD-3-Clause
false
3ac646968193770054eef5ce0b299a4c
import contextlib\nimport functools\nimport logging\nimport os\nfrom typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast\n\nfrom pip._vendor.packaging.utils import canonicalize_name\nfrom pip._vendor.resolvelib import BaseReporter, ResolutionImpossible, ResolutionTooDeep\nfrom pip._vendor.resolvelib import Resolver as RLResolver\nfrom pip._vendor.resolvelib.structs import DirectedGraph\n\nfrom pip._internal.cache import WheelCache\nfrom pip._internal.exceptions import ResolutionTooDeepError\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.operations.prepare import RequirementPreparer\nfrom pip._internal.req.constructors import install_req_extend_extras\nfrom pip._internal.req.req_install import InstallRequirement\nfrom pip._internal.req.req_set import RequirementSet\nfrom pip._internal.resolution.base import BaseResolver, InstallRequirementProvider\nfrom pip._internal.resolution.resolvelib.provider import PipProvider\nfrom pip._internal.resolution.resolvelib.reporter import (\n PipDebuggingReporter,\n PipReporter,\n)\nfrom pip._internal.utils.packaging import get_requirement\n\nfrom .base import Candidate, Requirement\nfrom .factory import Factory\n\nif TYPE_CHECKING:\n from pip._vendor.resolvelib.resolvers import Result as RLResult\n\n Result = RLResult[Requirement, Candidate, str]\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Resolver(BaseResolver):\n _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}\n\n def __init__(\n self,\n preparer: RequirementPreparer,\n finder: PackageFinder,\n wheel_cache: Optional[WheelCache],\n make_install_req: InstallRequirementProvider,\n use_user_site: bool,\n ignore_dependencies: bool,\n ignore_installed: bool,\n ignore_requires_python: bool,\n force_reinstall: bool,\n upgrade_strategy: str,\n py_version_info: Optional[Tuple[int, ...]] = None,\n ):\n super().__init__()\n assert upgrade_strategy in self._allowed_strategies\n\n self.factory = Factory(\n 
finder=finder,\n preparer=preparer,\n make_install_req=make_install_req,\n wheel_cache=wheel_cache,\n use_user_site=use_user_site,\n force_reinstall=force_reinstall,\n ignore_installed=ignore_installed,\n ignore_requires_python=ignore_requires_python,\n py_version_info=py_version_info,\n )\n self.ignore_dependencies = ignore_dependencies\n self.upgrade_strategy = upgrade_strategy\n self._result: Optional[Result] = None\n\n def resolve(\n self, root_reqs: List[InstallRequirement], check_supported_wheels: bool\n ) -> RequirementSet:\n collected = self.factory.collect_root_requirements(root_reqs)\n provider = PipProvider(\n factory=self.factory,\n constraints=collected.constraints,\n ignore_dependencies=self.ignore_dependencies,\n upgrade_strategy=self.upgrade_strategy,\n user_requested=collected.user_requested,\n )\n if "PIP_RESOLVER_DEBUG" in os.environ:\n reporter: BaseReporter[Requirement, Candidate, str] = PipDebuggingReporter()\n else:\n reporter = PipReporter()\n resolver: RLResolver[Requirement, Candidate, str] = RLResolver(\n provider,\n reporter,\n )\n\n try:\n limit_how_complex_resolution_can_be = 200000\n result = self._result = resolver.resolve(\n collected.requirements, max_rounds=limit_how_complex_resolution_can_be\n )\n\n except ResolutionImpossible as e:\n error = self.factory.get_installation_error(\n cast("ResolutionImpossible[Requirement, Candidate]", e),\n collected.constraints,\n )\n raise error from e\n except ResolutionTooDeep:\n raise ResolutionTooDeepError from None\n\n req_set = RequirementSet(check_supported_wheels=check_supported_wheels)\n # process candidates with extras last to ensure their base equivalent is\n # already in the req_set if appropriate.\n # Python's sort is stable so using a binary key function keeps relative order\n # within both subsets.\n for candidate in sorted(\n result.mapping.values(), key=lambda c: c.name != c.project_name\n ):\n ireq = candidate.get_install_requirement()\n if ireq is None:\n if candidate.name != 
candidate.project_name:\n # extend existing req's extras\n with contextlib.suppress(KeyError):\n req = req_set.get_requirement(candidate.project_name)\n req_set.add_named_requirement(\n install_req_extend_extras(\n req, get_requirement(candidate.name).extras\n )\n )\n continue\n\n # Check if there is already an installation under the same name,\n # and set a flag for later stages to uninstall it, if needed.\n installed_dist = self.factory.get_dist_to_uninstall(candidate)\n if installed_dist is None:\n # There is no existing installation -- nothing to uninstall.\n ireq.should_reinstall = False\n elif self.factory.force_reinstall:\n # The --force-reinstall flag is set -- reinstall.\n ireq.should_reinstall = True\n elif installed_dist.version != candidate.version:\n # The installation is different in version -- reinstall.\n ireq.should_reinstall = True\n elif candidate.is_editable or installed_dist.editable:\n # The incoming distribution is editable, or different in\n # editable-ness to installation -- reinstall.\n ireq.should_reinstall = True\n elif candidate.source_link and candidate.source_link.is_file:\n # The incoming distribution is under file://\n if candidate.source_link.is_wheel:\n # is a local wheel -- do nothing.\n logger.info(\n "%s is already installed with the same version as the "\n "provided wheel. 
Use --force-reinstall to force an "\n "installation of the wheel.",\n ireq.name,\n )\n continue\n\n # is a local sdist or path -- reinstall\n ireq.should_reinstall = True\n else:\n continue\n\n link = candidate.source_link\n if link and link.is_yanked:\n # The reason can contain non-ASCII characters, Unicode\n # is required for Python 2.\n msg = (\n "The candidate selected for download or install is a "\n "yanked version: {name!r} candidate (version {version} "\n "at {link})\nReason for being yanked: {reason}"\n ).format(\n name=candidate.name,\n version=candidate.version,\n link=link,\n reason=link.yanked_reason or "<none given>",\n )\n logger.warning(msg)\n\n req_set.add_named_requirement(ireq)\n\n reqs = req_set.all_requirements\n self.factory.preparer.prepare_linked_requirements_more(reqs)\n for req in reqs:\n req.prepared = True\n req.needs_more_preparation = False\n return req_set\n\n def get_installation_order(\n self, req_set: RequirementSet\n ) -> List[InstallRequirement]:\n """Get order for installation of requirements in RequirementSet.\n\n The returned list contains a requirement before another that depends on\n it. This helps ensure that the environment is kept consistent as they\n get installed one-by-one.\n\n The current implementation creates a topological ordering of the\n dependency graph, giving more weight to packages with less\n or no dependencies, while breaking any cycles in the graph at\n arbitrary points. 
We make no guarantees about where the cycle\n would be broken, other than it *would* be broken.\n """\n assert self._result is not None, "must call resolve() first"\n\n if not req_set.requirements:\n # Nothing is left to install, so we do not need an order.\n return []\n\n graph = self._result.graph\n weights = get_topological_weights(graph, set(req_set.requirements.keys()))\n\n sorted_items = sorted(\n req_set.requirements.items(),\n key=functools.partial(_req_set_item_sorter, weights=weights),\n reverse=True,\n )\n return [ireq for _, ireq in sorted_items]\n\n\ndef get_topological_weights(\n graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str]\n) -> Dict[Optional[str], int]:\n """Assign weights to each node based on how "deep" they are.\n\n This implementation may change at any point in the future without prior\n notice.\n\n We first simplify the dependency graph by pruning any leaves and giving them\n the highest weight: a package without any dependencies should be installed\n first. This is done again and again in the same way, giving ever less weight\n to the newly found leaves. The loop stops when no leaves are left: all\n remaining packages have at least one dependency left in the graph.\n\n Then we continue with the remaining graph, by taking the length for the\n longest path to any node from root, ignoring any paths that contain a single\n node twice (i.e. cycles). This is done through a depth-first search through\n the graph, while keeping track of the path to the node.\n\n Cycles in the graph result would result in node being revisited while also\n being on its own path. In this case, take no action. This helps ensure we\n don't get stuck in a cycle.\n\n When assigning weight, the longer path (i.e. 
larger length) is preferred.\n\n We are only interested in the weights of packages that are in the\n requirement_keys.\n """\n path: Set[Optional[str]] = set()\n weights: Dict[Optional[str], int] = {}\n\n def visit(node: Optional[str]) -> None:\n if node in path:\n # We hit a cycle, so we'll break it here.\n return\n\n # Time to visit the children!\n path.add(node)\n for child in graph.iter_children(node):\n visit(child)\n path.remove(node)\n\n if node not in requirement_keys:\n return\n\n last_known_parent_count = weights.get(node, 0)\n weights[node] = max(last_known_parent_count, len(path))\n\n # Simplify the graph, pruning leaves that have no dependencies.\n # This is needed for large graphs (say over 200 packages) because the\n # `visit` function is exponentially slower then, taking minutes.\n # See https://github.com/pypa/pip/issues/10557\n # We will loop until we explicitly break the loop.\n while True:\n leaves = set()\n for key in graph:\n if key is None:\n continue\n for _child in graph.iter_children(key):\n # This means we have at least one child\n break\n else:\n # No child.\n leaves.add(key)\n if not leaves:\n # We are done simplifying.\n break\n # Calculate the weight for the leaves.\n weight = len(graph) - 1\n for leaf in leaves:\n if leaf not in requirement_keys:\n continue\n weights[leaf] = weight\n # Remove the leaves from the graph, making it simpler.\n for leaf in leaves:\n graph.remove(leaf)\n\n # Visit the remaining graph.\n # `None` is guaranteed to be the root node by resolvelib.\n visit(None)\n\n # Sanity check: all requirement keys should be in the weights,\n # and no other keys should be in the weights.\n difference = set(weights.keys()).difference(requirement_keys)\n assert not difference, difference\n\n return weights\n\n\ndef _req_set_item_sorter(\n item: Tuple[str, InstallRequirement],\n weights: Dict[Optional[str], int],\n) -> Tuple[int, str]:\n """Key function used to sort install requirements for installation.\n\n Based on the 
"weight" mapping calculated in ``get_installation_order()``.\n The canonical package name is returned as the second member as a tie-\n breaker to ensure the result is predictable, which is useful in tests.\n """\n name = canonicalize_name(item[0])\n return weights[name], name\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\resolver.py
resolver.py
Python
12,785
0.95
0.153125
0.123636
python-kit
225
2023-11-08T08:22:31.439097
Apache-2.0
false
2d37c49dc7badcf0461a95c93fc60cc8
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\base.cpython-313.pyc
base.cpython-313.pyc
Other
8,269
0.95
0.111111
0
react-lib
963
2024-07-02T14:03:17.471349
GPL-3.0
false
a812adb2dc8286550a7f659f3e04b3b5
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\candidates.cpython-313.pyc
candidates.cpython-313.pyc
Other
30,043
0.95
0.050388
0.008264
python-kit
932
2025-01-26T10:02:21.879136
GPL-3.0
false
fc89e2a124b2d79e3a337697b17690b4
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\factory.cpython-313.pyc
factory.cpython-313.pyc
Other
33,165
0.95
0.018519
0.003185
node-utils
428
2024-05-10T01:28:33.857737
MIT
false
51aebcce69622de9993c197485d7d20b
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\found_candidates.cpython-313.pyc
found_candidates.cpython-313.pyc
Other
6,840
0.8
0.081081
0
python-kit
904
2024-07-22T22:47:11.435197
GPL-3.0
false
7dc028d97c397e03f3005f20e122bcf9
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\provider.cpython-313.pyc
provider.cpython-313.pyc
Other
11,045
0.95
0.06875
0.06993
awesome-app
712
2024-11-10T20:38:57.135940
Apache-2.0
false
f40930aef060f9f470fa9ea8b2b5aea2
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\reporter.cpython-313.pyc
reporter.cpython-313.pyc
Other
5,232
0.95
0.147059
0
node-utils
485
2024-01-05T09:30:03.046286
Apache-2.0
false
6e8a32f6b7a02bca6bea9f0a94ffaf2a
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\requirements.cpython-313.pyc
requirements.cpython-313.pyc
Other
15,749
0.95
0.028169
0
awesome-app
229
2025-04-13T12:07:44.059744
Apache-2.0
false
6e91f93402498391de37e4e87e107bd3
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\resolver.cpython-313.pyc
resolver.cpython-313.pyc
Other
12,561
0.95
0.071429
0
react-lib
372
2024-12-09T06:40:42.488974
BSD-3-Clause
false
72c65bd67033096d0bb507dddd4e6a95
\n\n
.venv\Lib\site-packages\pip\_internal\resolution\resolvelib\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
210
0.7
0
0
awesome-app
736
2023-11-07T07:09:05.943824
GPL-3.0
false
ecbf37cab1144f176a24c64400e61d6a