content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
"""\ncertifi.py\n~~~~~~~~~~\n\nThis module returns the installation location of cacert.pem or its contents.\n"""\nimport sys\nimport atexit\n\ndef exit_cacert_ctx() -> None:\n _CACERT_CTX.__exit__(None, None, None) # type: ignore[union-attr]\n\n\nif sys.version_info >= (3, 11):\n\n from importlib.resources import as_file, files\n\n _CACERT_CTX = None\n _CACERT_PATH = None\n\n def where() -> str:\n # This is slightly terrible, but we want to delay extracting the file\n # in cases where we're inside of a zipimport situation until someone\n # actually calls where(), but we don't want to re-extract the file\n # on every call of where(), so we'll do it once then store it in a\n # global variable.\n global _CACERT_CTX\n global _CACERT_PATH\n if _CACERT_PATH is None:\n # This is slightly janky, the importlib.resources API wants you to\n # manage the cleanup of this file, so it doesn't actually return a\n # path, it returns a context manager that will give you the path\n # when you enter it and will do any cleanup when you leave it. 
In\n # the common case of not needing a temporary file, it will just\n # return the file system location and the __exit__() is a no-op.\n #\n # We also have to hold onto the actual context manager, because\n # it will do the cleanup whenever it gets garbage collected, so\n # we will also store that at the global level as well.\n _CACERT_CTX = as_file(files("pip._vendor.certifi").joinpath("cacert.pem"))\n _CACERT_PATH = str(_CACERT_CTX.__enter__())\n atexit.register(exit_cacert_ctx)\n\n return _CACERT_PATH\n\n def contents() -> str:\n return files("pip._vendor.certifi").joinpath("cacert.pem").read_text(encoding="ascii")\n\nelif sys.version_info >= (3, 7):\n\n from importlib.resources import path as get_path, read_text\n\n _CACERT_CTX = None\n _CACERT_PATH = None\n\n def where() -> str:\n # This is slightly terrible, but we want to delay extracting the\n # file in cases where we're inside of a zipimport situation until\n # someone actually calls where(), but we don't want to re-extract\n # the file on every call of where(), so we'll do it once then store\n # it in a global variable.\n global _CACERT_CTX\n global _CACERT_PATH\n if _CACERT_PATH is None:\n # This is slightly janky, the importlib.resources API wants you\n # to manage the cleanup of this file, so it doesn't actually\n # return a path, it returns a context manager that will give\n # you the path when you enter it and will do any cleanup when\n # you leave it. 
In the common case of not needing a temporary\n # file, it will just return the file system location and the\n # __exit__() is a no-op.\n #\n # We also have to hold onto the actual context manager, because\n # it will do the cleanup whenever it gets garbage collected, so\n # we will also store that at the global level as well.\n _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem")\n _CACERT_PATH = str(_CACERT_CTX.__enter__())\n atexit.register(exit_cacert_ctx)\n\n return _CACERT_PATH\n\n def contents() -> str:\n return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")\n\nelse:\n import os\n import types\n from typing import Union\n\n Package = Union[types.ModuleType, str]\n Resource = Union[str, "os.PathLike"]\n\n # This fallback will work for Python versions prior to 3.7 that lack the\n # importlib.resources module but relies on the existing `where` function\n # so won't address issues with environments like PyOxidizer that don't set\n # __file__ on modules.\n def read_text(\n package: Package,\n resource: Resource,\n encoding: str = 'utf-8',\n errors: str = 'strict'\n ) -> str:\n with open(where(), encoding=encoding) as data:\n return data.read()\n\n # If we don't have importlib.resources, then we will just do the old logic\n # of assuming we're on the filesystem and munge the path directly.\n def where() -> str:\n f = os.path.dirname(__file__)\n\n return os.path.join(f, "cacert.pem")\n\n def contents() -> str:\n return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")\n
.venv\Lib\site-packages\pip\_vendor\certifi\core.py
core.py
Python
4,486
0.95
0.114035
0.397849
node-utils
671
2024-07-23T20:54:59.033369
MIT
false
9550f6f96b63a426f3148fb1fa0e9367
from .core import contents, where\n\n__all__ = ["contents", "where"]\n__version__ = "2025.01.31"\n
.venv\Lib\site-packages\pip\_vendor\certifi\__init__.py
__init__.py
Python
94
0.65
0
0
vue-tools
308
2024-03-19T01:56:16.328409
BSD-3-Clause
false
fd23f5907134920e81f9a0b3cfb74b5b
import argparse\n\nfrom pip._vendor.certifi import contents, where\n\nparser = argparse.ArgumentParser()\nparser.add_argument("-c", "--contents", action="store_true")\nargs = parser.parse_args()\n\nif args.contents:\n print(contents())\nelse:\n print(where())\n
.venv\Lib\site-packages\pip\_vendor\certifi\__main__.py
__main__.py
Python
255
0.85
0.083333
0
awesome-app
359
2025-06-22T14:28:11.756617
BSD-3-Clause
false
49689cf432641c277156f1b5e119bb03
\n\n
.venv\Lib\site-packages\pip\_vendor\certifi\__pycache__\core.cpython-313.pyc
core.cpython-313.pyc
Other
3,230
0.95
0
0
awesome-app
30
2023-12-15T12:19:23.487634
MIT
false
27d04e7d4cdd5e91d5054b48a960ea3b
\n\n
.venv\Lib\site-packages\pip\_vendor\certifi\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
321
0.7
0
0
node-utils
116
2023-09-04T16:18:48.526192
BSD-3-Clause
false
6b478d54e910f8265613a124cec84b31
\n\n
.venv\Lib\site-packages\pip\_vendor\certifi\__pycache__\__main__.cpython-313.pyc
__main__.cpython-313.pyc
Other
650
0.7
0
0
python-kit
141
2024-10-10T04:39:54.045619
GPL-3.0
false
1a570e24e0bff6367bb18baafabc43eb
from __future__ import annotations\n\nimport dataclasses\nimport re\nfrom collections.abc import Mapping\n\nfrom pip._vendor.packaging.requirements import Requirement\n\n\ndef _normalize_name(name: str) -> str:\n return re.sub(r"[-_.]+", "-", name).lower()\n\n\ndef _normalize_group_names(\n dependency_groups: Mapping[str, str | Mapping[str, str]],\n) -> Mapping[str, str | Mapping[str, str]]:\n original_names: dict[str, list[str]] = {}\n normalized_groups = {}\n\n for group_name, value in dependency_groups.items():\n normed_group_name = _normalize_name(group_name)\n original_names.setdefault(normed_group_name, []).append(group_name)\n normalized_groups[normed_group_name] = value\n\n errors = []\n for normed_name, names in original_names.items():\n if len(names) > 1:\n errors.append(f"{normed_name} ({', '.join(names)})")\n if errors:\n raise ValueError(f"Duplicate dependency group names: {', '.join(errors)}")\n\n return normalized_groups\n\n\n@dataclasses.dataclass\nclass DependencyGroupInclude:\n include_group: str\n\n\nclass CyclicDependencyError(ValueError):\n """\n An error representing the detection of a cycle.\n """\n\n def __init__(self, requested_group: str, group: str, include_group: str) -> None:\n self.requested_group = requested_group\n self.group = group\n self.include_group = include_group\n\n if include_group == group:\n reason = f"{group} includes itself"\n else:\n reason = f"{include_group} -> {group}, {group} -> {include_group}"\n super().__init__(\n "Cyclic dependency group include while resolving "\n f"{requested_group}: {reason}"\n )\n\n\nclass DependencyGroupResolver:\n """\n A resolver for Dependency Group data.\n\n This class handles caching, name normalization, cycle detection, and other\n parsing requirements. 
There are only two public methods for exploring the data:\n ``lookup()`` and ``resolve()``.\n\n :param dependency_groups: A mapping, as provided via pyproject\n ``[dependency-groups]``.\n """\n\n def __init__(\n self,\n dependency_groups: Mapping[str, str | Mapping[str, str]],\n ) -> None:\n if not isinstance(dependency_groups, Mapping):\n raise TypeError("Dependency Groups table is not a mapping")\n self.dependency_groups = _normalize_group_names(dependency_groups)\n # a map of group names to parsed data\n self._parsed_groups: dict[\n str, tuple[Requirement | DependencyGroupInclude, ...]\n ] = {}\n # a map of group names to their ancestors, used for cycle detection\n self._include_graph_ancestors: dict[str, tuple[str, ...]] = {}\n # a cache of completed resolutions to Requirement lists\n self._resolve_cache: dict[str, tuple[Requirement, ...]] = {}\n\n def lookup(self, group: str) -> tuple[Requirement | DependencyGroupInclude, ...]:\n """\n Lookup a group name, returning the parsed dependency data for that group.\n This will not resolve includes.\n\n :param group: the name of the group to lookup\n\n :raises ValueError: if the data does not appear to be valid dependency group\n data\n :raises TypeError: if the data is not a string\n :raises LookupError: if group name is absent\n :raises packaging.requirements.InvalidRequirement: if a specifier is not valid\n """\n if not isinstance(group, str):\n raise TypeError("Dependency group name is not a str")\n group = _normalize_name(group)\n return self._parse_group(group)\n\n def resolve(self, group: str) -> tuple[Requirement, ...]:\n """\n Resolve a dependency group to a list of requirements.\n\n :param group: the name of the group to resolve\n\n :raises TypeError: if the inputs appear to be the wrong types\n :raises ValueError: if the data does not appear to be valid dependency group\n data\n :raises LookupError: if group name is absent\n :raises packaging.requirements.InvalidRequirement: if a specifier is not valid\n 
"""\n if not isinstance(group, str):\n raise TypeError("Dependency group name is not a str")\n group = _normalize_name(group)\n return self._resolve(group, group)\n\n def _parse_group(\n self, group: str\n ) -> tuple[Requirement | DependencyGroupInclude, ...]:\n # short circuit -- never do the work twice\n if group in self._parsed_groups:\n return self._parsed_groups[group]\n\n if group not in self.dependency_groups:\n raise LookupError(f"Dependency group '{group}' not found")\n\n raw_group = self.dependency_groups[group]\n if not isinstance(raw_group, list):\n raise TypeError(f"Dependency group '{group}' is not a list")\n\n elements: list[Requirement | DependencyGroupInclude] = []\n for item in raw_group:\n if isinstance(item, str):\n # packaging.requirements.Requirement parsing ensures that this is a\n # valid PEP 508 Dependency Specifier\n # raises InvalidRequirement on failure\n elements.append(Requirement(item))\n elif isinstance(item, dict):\n if tuple(item.keys()) != ("include-group",):\n raise ValueError(f"Invalid dependency group item: {item}")\n\n include_group = next(iter(item.values()))\n elements.append(DependencyGroupInclude(include_group=include_group))\n else:\n raise ValueError(f"Invalid dependency group item: {item}")\n\n self._parsed_groups[group] = tuple(elements)\n return self._parsed_groups[group]\n\n def _resolve(self, group: str, requested_group: str) -> tuple[Requirement, ...]:\n """\n This is a helper for cached resolution to strings.\n\n :param group: The name of the group to resolve.\n :param requested_group: The group which was used in the original, user-facing\n request.\n """\n if group in self._resolve_cache:\n return self._resolve_cache[group]\n\n parsed = self._parse_group(group)\n\n resolved_group = []\n for item in parsed:\n if isinstance(item, Requirement):\n resolved_group.append(item)\n elif isinstance(item, DependencyGroupInclude):\n include_group = _normalize_name(item.include_group)\n if include_group in 
self._include_graph_ancestors.get(group, ()):\n raise CyclicDependencyError(\n requested_group, group, item.include_group\n )\n self._include_graph_ancestors[include_group] = (\n *self._include_graph_ancestors.get(group, ()),\n group,\n )\n resolved_group.extend(self._resolve(include_group, requested_group))\n else: # unreachable\n raise NotImplementedError(\n f"Invalid dependency group item after parse: {item}"\n )\n\n self._resolve_cache[group] = tuple(resolved_group)\n return self._resolve_cache[group]\n\n\ndef resolve(\n dependency_groups: Mapping[str, str | Mapping[str, str]], /, *groups: str\n) -> tuple[str, ...]:\n """\n Resolve a dependency group to a tuple of requirements, as strings.\n\n :param dependency_groups: the parsed contents of the ``[dependency-groups]`` table\n from ``pyproject.toml``\n :param groups: the name of the group(s) to resolve\n\n :raises TypeError: if the inputs appear to be the wrong types\n :raises ValueError: if the data does not appear to be valid dependency group data\n :raises LookupError: if group name is absent\n :raises packaging.requirements.InvalidRequirement: if a specifier is not valid\n """\n resolver = DependencyGroupResolver(dependency_groups)\n return tuple(str(r) for group in groups for r in resolver.resolve(group))\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\_implementation.py
_implementation.py
Python
8,041
0.95
0.244019
0.047619
node-utils
582
2023-10-03T22:25:00.220902
BSD-3-Clause
false
89ac5fb3af7d507bb19006e5baac8c4b
from __future__ import annotations\n\nimport argparse\nimport sys\n\nfrom ._implementation import DependencyGroupResolver\nfrom ._toml_compat import tomllib\n\n\ndef main(*, argv: list[str] | None = None) -> None:\n if tomllib is None:\n print(\n "Usage error: dependency-groups CLI requires tomli or Python 3.11+",\n file=sys.stderr,\n )\n raise SystemExit(2)\n\n parser = argparse.ArgumentParser(\n description=(\n "Lint Dependency Groups for validity. "\n "This will eagerly load and check all of your Dependency Groups."\n )\n )\n parser.add_argument(\n "-f",\n "--pyproject-file",\n default="pyproject.toml",\n help="The pyproject.toml file. Defaults to trying in the current directory.",\n )\n args = parser.parse_args(argv if argv is not None else sys.argv[1:])\n\n with open(args.pyproject_file, "rb") as fp:\n pyproject = tomllib.load(fp)\n dependency_groups_raw = pyproject.get("dependency-groups", {})\n\n errors: list[str] = []\n try:\n resolver = DependencyGroupResolver(dependency_groups_raw)\n except (ValueError, TypeError) as e:\n errors.append(f"{type(e).__name__}: {e}")\n else:\n for groupname in resolver.dependency_groups:\n try:\n resolver.resolve(groupname)\n except (LookupError, ValueError, TypeError) as e:\n errors.append(f"{type(e).__name__}: {e}")\n\n if errors:\n print("errors encountered while examining dependency groups:")\n for msg in errors:\n print(f" {msg}")\n sys.exit(1)\n else:\n print("ok")\n sys.exit(0)\n\n\nif __name__ == "__main__":\n main()\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\_lint_dependency_groups.py
_lint_dependency_groups.py
Python
1,710
0.85
0.186441
0
vue-tools
760
2024-01-01T14:31:47.436148
GPL-3.0
false
80e5f0d2755dcc10f26ba8f178e3543b
from __future__ import annotations\n\nimport argparse\nimport subprocess\nimport sys\n\nfrom ._implementation import DependencyGroupResolver\nfrom ._toml_compat import tomllib\n\n\ndef _invoke_pip(deps: list[str]) -> None:\n subprocess.check_call([sys.executable, "-m", "pip", "install", *deps])\n\n\ndef main(*, argv: list[str] | None = None) -> None:\n if tomllib is None:\n print(\n "Usage error: dependency-groups CLI requires tomli or Python 3.11+",\n file=sys.stderr,\n )\n raise SystemExit(2)\n\n parser = argparse.ArgumentParser(description="Install Dependency Groups.")\n parser.add_argument(\n "DEPENDENCY_GROUP", nargs="+", help="The dependency groups to install."\n )\n parser.add_argument(\n "-f",\n "--pyproject-file",\n default="pyproject.toml",\n help="The pyproject.toml file. Defaults to trying in the current directory.",\n )\n args = parser.parse_args(argv if argv is not None else sys.argv[1:])\n\n with open(args.pyproject_file, "rb") as fp:\n pyproject = tomllib.load(fp)\n dependency_groups_raw = pyproject.get("dependency-groups", {})\n\n errors: list[str] = []\n resolved: list[str] = []\n try:\n resolver = DependencyGroupResolver(dependency_groups_raw)\n except (ValueError, TypeError) as e:\n errors.append(f"{type(e).__name__}: {e}")\n else:\n for groupname in args.DEPENDENCY_GROUP:\n try:\n resolved.extend(str(r) for r in resolver.resolve(groupname))\n except (LookupError, ValueError, TypeError) as e:\n errors.append(f"{type(e).__name__}: {e}")\n\n if errors:\n print("errors encountered while examining dependency groups:")\n for msg in errors:\n print(f" {msg}")\n sys.exit(1)\n\n _invoke_pip(resolved)\n\n\nif __name__ == "__main__":\n main()\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\_pip_wrapper.py
_pip_wrapper.py
Python
1,865
0.85
0.193548
0
react-lib
754
2024-02-16T21:12:59.579368
Apache-2.0
false
8e3cbe24b626723f5d69b105f443d6ff
try:\n import tomllib\nexcept ImportError:\n try:\n from pip._vendor import tomli as tomllib # type: ignore[no-redef, unused-ignore]\n except ModuleNotFoundError: # pragma: no cover\n tomllib = None # type: ignore[assignment, unused-ignore]\n\n__all__ = ("tomllib",)\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\_toml_compat.py
_toml_compat.py
Python
285
0.95
0.222222
0
awesome-app
600
2024-09-26T16:37:04.245797
BSD-3-Clause
false
56132e494728b95734674d02f407bb40
from ._implementation import (\n CyclicDependencyError,\n DependencyGroupInclude,\n DependencyGroupResolver,\n resolve,\n)\n\n__all__ = (\n "CyclicDependencyError",\n "DependencyGroupInclude",\n "DependencyGroupResolver",\n "resolve",\n)\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\__init__.py
__init__.py
Python
250
0.85
0
0
vue-tools
465
2023-09-10T21:30:23.117735
Apache-2.0
false
5110bba2763c807e85757105f90e748d
import argparse\nimport sys\n\nfrom ._implementation import resolve\nfrom ._toml_compat import tomllib\n\n\ndef main() -> None:\n if tomllib is None:\n print(\n "Usage error: dependency-groups CLI requires tomli or Python 3.11+",\n file=sys.stderr,\n )\n raise SystemExit(2)\n\n parser = argparse.ArgumentParser(\n description=(\n "A dependency-groups CLI. Prints out a resolved group, newline-delimited."\n )\n )\n parser.add_argument(\n "GROUP_NAME", nargs="*", help="The dependency group(s) to resolve."\n )\n parser.add_argument(\n "-f",\n "--pyproject-file",\n default="pyproject.toml",\n help="The pyproject.toml file. Defaults to trying in the current directory.",\n )\n parser.add_argument(\n "-o",\n "--output",\n help="An output file. Defaults to stdout.",\n )\n parser.add_argument(\n "-l",\n "--list",\n action="store_true",\n help="List the available dependency groups",\n )\n args = parser.parse_args()\n\n with open(args.pyproject_file, "rb") as fp:\n pyproject = tomllib.load(fp)\n\n dependency_groups_raw = pyproject.get("dependency-groups", {})\n\n if args.list:\n print(*dependency_groups_raw.keys())\n return\n if not args.GROUP_NAME:\n print("A GROUP_NAME is required", file=sys.stderr)\n raise SystemExit(3)\n\n content = "\n".join(resolve(dependency_groups_raw, *args.GROUP_NAME))\n\n if args.output is None or args.output == "-":\n print(content)\n else:\n with open(args.output, "w", encoding="utf-8") as fp:\n print(content, file=fp)\n\n\nif __name__ == "__main__":\n main()\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\__main__.py
__main__.py
Python
1,709
0.85
0.092308
0
react-lib
382
2023-12-18T10:25:53.674650
Apache-2.0
false
36aeb000750dd96f138230f558ff2b92
\n\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\__pycache__\_implementation.cpython-313.pyc
_implementation.cpython-313.pyc
Other
9,655
0.95
0.189474
0.011765
vue-tools
913
2025-03-03T22:20:14.036409
GPL-3.0
false
47aefe315dd3720d82466ced65575ad6
\n\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\__pycache__\_lint_dependency_groups.cpython-313.pyc
_lint_dependency_groups.cpython-313.pyc
Other
2,882
0.95
0.054054
0
awesome-app
834
2024-04-25T12:55:05.023682
BSD-3-Clause
false
40989dc2c78806fd269eafa6396df35b
\n\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\__pycache__\_pip_wrapper.cpython-313.pyc
_pip_wrapper.cpython-313.pyc
Other
3,455
0.95
0.018868
0
awesome-app
239
2023-12-07T06:08:11.991495
GPL-3.0
false
f46b50688635e7aeefa32b918f85763d
\n\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\__pycache__\_toml_compat.cpython-313.pyc
_toml_compat.cpython-313.pyc
Other
488
0.7
0
0
node-utils
520
2025-07-07T12:20:26.862143
GPL-3.0
false
d0ff8500594a16b36fc4e8480628cc04
\n\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
380
0.7
0
0
python-kit
333
2025-03-24T00:36:31.089037
GPL-3.0
false
1324b8de2dfb85f88ad29517c3dcdfaa
\n\n
.venv\Lib\site-packages\pip\_vendor\dependency_groups\__pycache__\__main__.cpython-313.pyc
__main__.cpython-313.pyc
Other
2,682
0.95
0
0
awesome-app
39
2025-04-14T07:14:15.966165
Apache-2.0
false
49c22de06d8d48727fe5a9d45369f4f0
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Vinay Sajip.\n# Licensed to the Python Software Foundation under a contributor agreement.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\nfrom __future__ import absolute_import\n\nimport os\nimport re\nimport shutil\nimport sys\n\ntry:\n import ssl\nexcept ImportError: # pragma: no cover\n ssl = None\n\nif sys.version_info[0] < 3: # pragma: no cover\n from StringIO import StringIO\n string_types = basestring,\n text_type = unicode\n from types import FileType as file_type\n import __builtin__ as builtins\n import ConfigParser as configparser\n from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit\n from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,\n pathname2url, ContentTooShortError, splittype)\n\n def quote(s):\n if isinstance(s, unicode):\n s = s.encode('utf-8')\n return _quote(s)\n\n import urllib2\n from urllib2 import (Request, urlopen, URLError, HTTPError,\n HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPHandler,\n HTTPRedirectHandler, build_opener)\n if ssl:\n from urllib2 import HTTPSHandler\n import httplib\n import xmlrpclib\n import Queue as queue\n from HTMLParser import HTMLParser\n import htmlentitydefs\n raw_input = raw_input\n from itertools import ifilter as filter\n from itertools import ifilterfalse as filterfalse\n\n # Leaving this around for now, in case it needs resurrecting in some way\n # _userprog = None\n # def splituser(host):\n # """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""\n # global _userprog\n # if _userprog is None:\n # import re\n # _userprog = re.compile('^(.*)@(.*)$')\n\n # match = _userprog.match(host)\n # if match: return match.group(1, 2)\n # return None, host\n\nelse: # pragma: no cover\n from io import StringIO\n string_types = str,\n text_type = str\n from io import TextIOWrapper as file_type\n import builtins\n import configparser\n from urllib.parse import (urlparse, urlunparse, urljoin, quote, 
unquote,\n urlsplit, urlunsplit, splittype)\n from urllib.request import (urlopen, urlretrieve, Request, url2pathname,\n pathname2url, HTTPBasicAuthHandler,\n HTTPPasswordMgr, HTTPHandler,\n HTTPRedirectHandler, build_opener)\n if ssl:\n from urllib.request import HTTPSHandler\n from urllib.error import HTTPError, URLError, ContentTooShortError\n import http.client as httplib\n import urllib.request as urllib2\n import xmlrpc.client as xmlrpclib\n import queue\n from html.parser import HTMLParser\n import html.entities as htmlentitydefs\n raw_input = input\n from itertools import filterfalse\n filter = filter\n\ntry:\n from ssl import match_hostname, CertificateError\nexcept ImportError: # pragma: no cover\n\n class CertificateError(ValueError):\n pass\n\n def _dnsname_match(dn, hostname, max_wildcards=1):\n """Matching according to RFC 6125, section 6.4.3\n\n http://tools.ietf.org/html/rfc6125#section-6.4.3\n """\n pats = []\n if not dn:\n return False\n\n parts = dn.split('.')\n leftmost, remainder = parts[0], parts[1:]\n\n wildcards = leftmost.count('*')\n if wildcards > max_wildcards:\n # Issue #17980: avoid denials of service by refusing more\n # than one wildcard per fragment. 
A survey of established\n # policy among SSL implementations showed it to be a\n # reasonable choice.\n raise CertificateError(\n "too many wildcards in certificate DNS name: " + repr(dn))\n\n # speed up common case w/o wildcards\n if not wildcards:\n return dn.lower() == hostname.lower()\n\n # RFC 6125, section 6.4.3, subitem 1.\n # The client SHOULD NOT attempt to match a presented identifier in which\n # the wildcard character comprises a label other than the left-most label.\n if leftmost == '*':\n # When '*' is a fragment by itself, it matches a non-empty dotless\n # fragment.\n pats.append('[^.]+')\n elif leftmost.startswith('xn--') or hostname.startswith('xn--'):\n # RFC 6125, section 6.4.3, subitem 3.\n # The client SHOULD NOT attempt to match a presented identifier\n # where the wildcard character is embedded within an A-label or\n # U-label of an internationalized domain name.\n pats.append(re.escape(leftmost))\n else:\n # Otherwise, '*' matches any dotless string, e.g. www*\n pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))\n\n # add the remaining fragments, ignore any wildcards\n for frag in remainder:\n pats.append(re.escape(frag))\n\n pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)\n return pat.match(hostname)\n\n def match_hostname(cert, hostname):\n """Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. 
On success, the function\n returns nothing.\n """\n if not cert:\n raise ValueError("empty or no certificate, match_hostname needs a "\n "SSL socket or SSL context with either "\n "CERT_OPTIONAL or CERT_REQUIRED")\n dnsnames = []\n san = cert.get('subjectAltName', ())\n for key, value in san:\n if key == 'DNS':\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n if not dnsnames:\n # The subject is only checked when there is no dNSName entry\n # in subjectAltName\n for sub in cert.get('subject', ()):\n for key, value in sub:\n # XXX according to RFC 2818, the most specific Common Name\n # must be used.\n if key == 'commonName':\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n if len(dnsnames) > 1:\n raise CertificateError("hostname %r "\n "doesn't match either of %s" %\n (hostname, ', '.join(map(repr, dnsnames))))\n elif len(dnsnames) == 1:\n raise CertificateError("hostname %r "\n "doesn't match %r" %\n (hostname, dnsnames[0]))\n else:\n raise CertificateError("no appropriate commonName or "\n "subjectAltName fields were found")\n\n\ntry:\n from types import SimpleNamespace as Container\nexcept ImportError: # pragma: no cover\n\n class Container(object):\n """\n A generic container for when multiple values need to be returned\n """\n\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n\n\ntry:\n from shutil import which\nexcept ImportError: # pragma: no cover\n # Implementation from Python 3.3\n def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n """Given a command, mode, and a PATH string, return the path which\n conforms to the given mode on the PATH, or None if there is no such\n file.\n\n `mode` defaults to os.F_OK | os.X_OK. 
`path` defaults to the result\n of os.environ.get("PATH"), or can be overridden with a custom search\n path.\n\n """\n\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get("PATH", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == "win32":\n # The current directory takes precedence on Windows.\n if os.curdir not in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get("PATHEXT", "").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given "python.exe".\n # If it does match, only test that one, otherwise we have to try\n # others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None\n\n\n# ZipFile is a context manager in 2.7, but not in 2.6\n\nfrom zipfile import ZipFile as BaseZipFile\n\nif hasattr(BaseZipFile, '__enter__'): # pragma: no cover\n ZipFile = BaseZipFile\nelse: 
# pragma: no cover\n from zipfile import ZipExtFile as BaseZipExtFile\n\n class ZipExtFile(BaseZipExtFile):\n\n def __init__(self, base):\n self.__dict__.update(base.__dict__)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc_info):\n self.close()\n # return None, so if an exception occurred, it will propagate\n\n class ZipFile(BaseZipFile):\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc_info):\n self.close()\n # return None, so if an exception occurred, it will propagate\n\n def open(self, *args, **kwargs):\n base = BaseZipFile.open(self, *args, **kwargs)\n return ZipExtFile(base)\n\n\ntry:\n from platform import python_implementation\nexcept ImportError: # pragma: no cover\n\n def python_implementation():\n """Return a string identifying the Python implementation."""\n if 'PyPy' in sys.version:\n return 'PyPy'\n if os.name == 'java':\n return 'Jython'\n if sys.version.startswith('IronPython'):\n return 'IronPython'\n return 'CPython'\n\n\nimport sysconfig\n\ntry:\n callable = callable\nexcept NameError: # pragma: no cover\n from collections.abc import Callable\n\n def callable(obj):\n return isinstance(obj, Callable)\n\n\ntry:\n fsencode = os.fsencode\n fsdecode = os.fsdecode\nexcept AttributeError: # pragma: no cover\n # Issue #99: on some systems (e.g. containerised),\n # sys.getfilesystemencoding() returns None, and we need a real value,\n # so fall back to utf-8. 
From the CPython 2.7 docs relating to Unix and\n # sys.getfilesystemencoding(): the return value is "the user’s preference\n # according to the result of nl_langinfo(CODESET), or None if the\n # nl_langinfo(CODESET) failed."\n _fsencoding = sys.getfilesystemencoding() or 'utf-8'\n if _fsencoding == 'mbcs':\n _fserrors = 'strict'\n else:\n _fserrors = 'surrogateescape'\n\n def fsencode(filename):\n if isinstance(filename, bytes):\n return filename\n elif isinstance(filename, text_type):\n return filename.encode(_fsencoding, _fserrors)\n else:\n raise TypeError("expect bytes or str, not %s" %\n type(filename).__name__)\n\n def fsdecode(filename):\n if isinstance(filename, text_type):\n return filename\n elif isinstance(filename, bytes):\n return filename.decode(_fsencoding, _fserrors)\n else:\n raise TypeError("expect bytes or str, not %s" %\n type(filename).__name__)\n\n\ntry:\n from tokenize import detect_encoding\nexcept ImportError: # pragma: no cover\n from codecs import BOM_UTF8, lookup\n\n cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")\n\n def _get_normal_name(orig_enc):\n """Imitates get_normal_name in tokenizer.c."""\n # Only care about the first 12 characters.\n enc = orig_enc[:12].lower().replace("_", "-")\n if enc == "utf-8" or enc.startswith("utf-8-"):\n return "utf-8"\n if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \\n enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):\n return "iso-8859-1"\n return orig_enc\n\n def detect_encoding(readline):\n """\n The detect_encoding() function is used to detect the encoding that should\n be used to decode a Python source file. It requires one argument, readline,\n in the same way as the tokenize() generator.\n\n It will call readline a maximum of twice, and return the encoding used\n (as a string) and a list of any lines (left as bytes) it has read in.\n\n It detects the encoding from the presence of a utf-8 bom or an encoding\n cookie as specified in pep-0263. 
If both a bom and a cookie are present,\n but disagree, a SyntaxError will be raised. If the encoding cookie is an\n invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,\n 'utf-8-sig' is returned.\n\n If no encoding is specified, then the default of 'utf-8' will be returned.\n """\n try:\n filename = readline.__self__.name\n except AttributeError:\n filename = None\n bom_found = False\n encoding = None\n default = 'utf-8'\n\n def read_or_stop():\n try:\n return readline()\n except StopIteration:\n return b''\n\n def find_cookie(line):\n try:\n # Decode as UTF-8. Either the line is an encoding declaration,\n # in which case it should be pure ASCII, or it must be UTF-8\n # per default encoding.\n line_string = line.decode('utf-8')\n except UnicodeDecodeError:\n msg = "invalid or missing encoding declaration"\n if filename is not None:\n msg = '{} for {!r}'.format(msg, filename)\n raise SyntaxError(msg)\n\n matches = cookie_re.findall(line_string)\n if not matches:\n return None\n encoding = _get_normal_name(matches[0])\n try:\n codec = lookup(encoding)\n except LookupError:\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = "unknown encoding: " + encoding\n else:\n msg = "unknown encoding for {!r}: {}".format(\n filename, encoding)\n raise SyntaxError(msg)\n\n if bom_found:\n if codec.name != 'utf-8':\n # This behaviour mimics the Python interpreter\n if filename is None:\n msg = 'encoding problem: utf-8'\n else:\n msg = 'encoding problem for {!r}: utf-8'.format(\n filename)\n raise SyntaxError(msg)\n encoding += '-sig'\n return encoding\n\n first = read_or_stop()\n if first.startswith(BOM_UTF8):\n bom_found = True\n first = first[3:]\n default = 'utf-8-sig'\n if not first:\n return default, []\n\n encoding = find_cookie(first)\n if encoding:\n return encoding, [first]\n\n second = read_or_stop()\n if not second:\n return default, [first]\n\n encoding = find_cookie(second)\n if encoding:\n return encoding, [first, 
second]\n\n return default, [first, second]\n\n\n# For converting & <-> &amp; etc.\ntry:\n from html import escape\nexcept ImportError:\n from cgi import escape\nif sys.version_info[:2] < (3, 4):\n unescape = HTMLParser().unescape\nelse:\n from html import unescape\n\ntry:\n from collections import ChainMap\nexcept ImportError: # pragma: no cover\n from collections import MutableMapping\n\n try:\n from reprlib import recursive_repr as _recursive_repr\n except ImportError:\n\n def _recursive_repr(fillvalue='...'):\n '''\n Decorator to make a repr function return fillvalue for a recursive\n call\n '''\n\n def decorating_function(user_function):\n repr_running = set()\n\n def wrapper(self):\n key = id(self), get_ident()\n if key in repr_running:\n return fillvalue\n repr_running.add(key)\n try:\n result = user_function(self)\n finally:\n repr_running.discard(key)\n return result\n\n # Can't use functools.wraps() here because of bootstrap issues\n wrapper.__module__ = getattr(user_function, '__module__')\n wrapper.__doc__ = getattr(user_function, '__doc__')\n wrapper.__name__ = getattr(user_function, '__name__')\n wrapper.__annotations__ = getattr(user_function,\n '__annotations__', {})\n return wrapper\n\n return decorating_function\n\n class ChainMap(MutableMapping):\n '''\n A ChainMap groups multiple dicts (or other mappings) together\n to create a single, updateable view.\n\n The underlying mappings are stored in a list. That list is public and can\n accessed or updated using the *maps* attribute. 
There is no other state.\n\n Lookups search the underlying mappings successively until a key is found.\n In contrast, writes, updates, and deletions only operate on the first\n mapping.\n '''\n\n def __init__(self, *maps):\n '''Initialize a ChainMap by setting *maps* to the given mappings.\n If no mappings are provided, a single empty dictionary is used.\n\n '''\n self.maps = list(maps) or [{}] # always at least one map\n\n def __missing__(self, key):\n raise KeyError(key)\n\n def __getitem__(self, key):\n for mapping in self.maps:\n try:\n return mapping[\n key] # can't use 'key in mapping' with defaultdict\n except KeyError:\n pass\n return self.__missing__(\n key) # support subclasses that define __missing__\n\n def get(self, key, default=None):\n return self[key] if key in self else default\n\n def __len__(self):\n return len(set().union(\n *self.maps)) # reuses stored hash values if possible\n\n def __iter__(self):\n return iter(set().union(*self.maps))\n\n def __contains__(self, key):\n return any(key in m for m in self.maps)\n\n def __bool__(self):\n return any(self.maps)\n\n @_recursive_repr()\n def __repr__(self):\n return '{0.__class__.__name__}({1})'.format(\n self, ', '.join(map(repr, self.maps)))\n\n @classmethod\n def fromkeys(cls, iterable, *args):\n 'Create a ChainMap with a single dict created from the iterable.'\n return cls(dict.fromkeys(iterable, *args))\n\n def copy(self):\n 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'\n return self.__class__(self.maps[0].copy(), *self.maps[1:])\n\n __copy__ = copy\n\n def new_child(self): # like Django's Context.push()\n 'New ChainMap with a new dict followed by all previous maps.'\n return self.__class__({}, *self.maps)\n\n @property\n def parents(self): # like Django's Context.pop()\n 'New ChainMap from maps[1:].'\n return self.__class__(*self.maps[1:])\n\n def __setitem__(self, key, value):\n self.maps[0][key] = value\n\n def __delitem__(self, key):\n try:\n del 
self.maps[0][key]\n except KeyError:\n raise KeyError(\n 'Key not found in the first mapping: {!r}'.format(key))\n\n def popitem(self):\n 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'\n try:\n return self.maps[0].popitem()\n except KeyError:\n raise KeyError('No keys found in the first mapping.')\n\n def pop(self, key, *args):\n 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'\n try:\n return self.maps[0].pop(key, *args)\n except KeyError:\n raise KeyError(\n 'Key not found in the first mapping: {!r}'.format(key))\n\n def clear(self):\n 'Clear maps[0], leaving maps[1:] intact.'\n self.maps[0].clear()\n\n\ntry:\n from importlib.util import cache_from_source # Python >= 3.4\nexcept ImportError: # pragma: no cover\n\n def cache_from_source(path, debug_override=None):\n assert path.endswith('.py')\n if debug_override is None:\n debug_override = __debug__\n if debug_override:\n suffix = 'c'\n else:\n suffix = 'o'\n return path + suffix\n\n\ntry:\n from collections import OrderedDict\nexcept ImportError: # pragma: no cover\n # {{{ http://code.activestate.com/recipes/576693/ (r9)\n # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.\n # Passes Python2.7's test suite and incorporates all the latest updates.\n try:\n from thread import get_ident as _get_ident\n except ImportError:\n from dummy_thread import get_ident as _get_ident\n\n try:\n from _abcoll import KeysView, ValuesView, ItemsView\n except ImportError:\n pass\n\n class OrderedDict(dict):\n 'Dictionary that remembers insertion order'\n\n # An inherited dict maps keys to values.\n # The inherited dict provides __getitem__, __len__, __contains__, and get.\n # The remaining methods are order-aware.\n # Big-O running times for all methods are the same as for regular dictionaries.\n\n # The internal self.__map dictionary maps keys to links in a doubly linked list.\n # The circular doubly linked list starts and 
ends with a sentinel element.\n # The sentinel element never gets deleted (this simplifies the algorithm).\n # Each link is stored as a list of length three: [PREV, NEXT, KEY].\n\n def __init__(self, *args, **kwds):\n '''Initialize an ordered dictionary. Signature is the same as for\n regular dictionaries, but keyword arguments are not recommended\n because their insertion order is arbitrary.\n\n '''\n if len(args) > 1:\n raise TypeError('expected at most 1 arguments, got %d' %\n len(args))\n try:\n self.__root\n except AttributeError:\n self.__root = root = [] # sentinel node\n root[:] = [root, root, None]\n self.__map = {}\n self.__update(*args, **kwds)\n\n def __setitem__(self, key, value, dict_setitem=dict.__setitem__):\n 'od.__setitem__(i, y) <==> od[i]=y'\n # Setting a new item creates a new link which goes at the end of the linked\n # list, and the inherited dictionary is updated with the new key/value pair.\n if key not in self:\n root = self.__root\n last = root[0]\n last[1] = root[0] = self.__map[key] = [last, root, key]\n dict_setitem(self, key, value)\n\n def __delitem__(self, key, dict_delitem=dict.__delitem__):\n 'od.__delitem__(y) <==> del od[y]'\n # Deleting an existing item uses self.__map to find the link which is\n # then removed by updating the links in the predecessor and successor nodes.\n dict_delitem(self, key)\n link_prev, link_next, key = self.__map.pop(key)\n link_prev[1] = link_next\n link_next[0] = link_prev\n\n def __iter__(self):\n 'od.__iter__() <==> iter(od)'\n root = self.__root\n curr = root[1]\n while curr is not root:\n yield curr[2]\n curr = curr[1]\n\n def __reversed__(self):\n 'od.__reversed__() <==> reversed(od)'\n root = self.__root\n curr = root[0]\n while curr is not root:\n yield curr[2]\n curr = curr[0]\n\n def clear(self):\n 'od.clear() -> None. 
Remove all items from od.'\n try:\n for node in self.__map.itervalues():\n del node[:]\n root = self.__root\n root[:] = [root, root, None]\n self.__map.clear()\n except AttributeError:\n pass\n dict.clear(self)\n\n def popitem(self, last=True):\n '''od.popitem() -> (k, v), return and remove a (key, value) pair.\n Pairs are returned in LIFO order if last is true or FIFO order if false.\n\n '''\n if not self:\n raise KeyError('dictionary is empty')\n root = self.__root\n if last:\n link = root[0]\n link_prev = link[0]\n link_prev[1] = root\n root[0] = link_prev\n else:\n link = root[1]\n link_next = link[1]\n root[1] = link_next\n link_next[0] = root\n key = link[2]\n del self.__map[key]\n value = dict.pop(self, key)\n return key, value\n\n # -- the following methods do not depend on the internal structure --\n\n def keys(self):\n 'od.keys() -> list of keys in od'\n return list(self)\n\n def values(self):\n 'od.values() -> list of values in od'\n return [self[key] for key in self]\n\n def items(self):\n 'od.items() -> list of (key, value) pairs in od'\n return [(key, self[key]) for key in self]\n\n def iterkeys(self):\n 'od.iterkeys() -> an iterator over the keys in od'\n return iter(self)\n\n def itervalues(self):\n 'od.itervalues -> an iterator over the values in od'\n for k in self:\n yield self[k]\n\n def iteritems(self):\n 'od.iteritems -> an iterator over the (key, value) items in od'\n for k in self:\n yield (k, self[k])\n\n def update(*args, **kwds):\n '''od.update(E, **F) -> None. 
Update od from dict/iterable E and F.\n\n If E is a dict instance, does: for k in E: od[k] = E[k]\n If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]\n Or if E is an iterable of items, does: for k, v in E: od[k] = v\n In either case, this is followed by: for k, v in F.items(): od[k] = v\n\n '''\n if len(args) > 2:\n raise TypeError('update() takes at most 2 positional '\n 'arguments (%d given)' % (len(args), ))\n elif not args:\n raise TypeError('update() takes at least 1 argument (0 given)')\n self = args[0]\n # Make progressively weaker assumptions about "other"\n other = ()\n if len(args) == 2:\n other = args[1]\n if isinstance(other, dict):\n for key in other:\n self[key] = other[key]\n elif hasattr(other, 'keys'):\n for key in other.keys():\n self[key] = other[key]\n else:\n for key, value in other:\n self[key] = value\n for key, value in kwds.items():\n self[key] = value\n\n __update = update # let subclasses override update without breaking __init__\n\n __marker = object()\n\n def pop(self, key, default=__marker):\n '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.\n\n '''\n if key in self:\n result = self[key]\n del self[key]\n return result\n if default is self.__marker:\n raise KeyError(key)\n return default\n\n def setdefault(self, key, default=None):\n 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'\n if key in self:\n return self[key]\n self[key] = default\n return default\n\n def __repr__(self, _repr_running=None):\n 'od.__repr__() <==> repr(od)'\n if not _repr_running:\n _repr_running = {}\n call_key = id(self), _get_ident()\n if call_key in _repr_running:\n return '...'\n _repr_running[call_key] = 1\n try:\n if not self:\n return '%s()' % (self.__class__.__name__, )\n return '%s(%r)' % (self.__class__.__name__, self.items())\n finally:\n del _repr_running[call_key]\n\n def __reduce__(self):\n 'Return state 
information for pickling'\n items = [[k, self[k]] for k in self]\n inst_dict = vars(self).copy()\n for k in vars(OrderedDict()):\n inst_dict.pop(k, None)\n if inst_dict:\n return (self.__class__, (items, ), inst_dict)\n return self.__class__, (items, )\n\n def copy(self):\n 'od.copy() -> a shallow copy of od'\n return self.__class__(self)\n\n @classmethod\n def fromkeys(cls, iterable, value=None):\n '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S\n and values equal to v (which defaults to None).\n\n '''\n d = cls()\n for key in iterable:\n d[key] = value\n return d\n\n def __eq__(self, other):\n '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive\n while comparison to a regular mapping is order-insensitive.\n\n '''\n if isinstance(other, OrderedDict):\n return len(self) == len(\n other) and self.items() == other.items()\n return dict.__eq__(self, other)\n\n def __ne__(self, other):\n return not self == other\n\n # -- the following methods are only used in Python 2.7 --\n\n def viewkeys(self):\n "od.viewkeys() -> a set-like object providing a view on od's keys"\n return KeysView(self)\n\n def viewvalues(self):\n "od.viewvalues() -> an object providing a view on od's values"\n return ValuesView(self)\n\n def viewitems(self):\n "od.viewitems() -> a set-like object providing a view on od's items"\n return ItemsView(self)\n\n\ntry:\n from logging.config import BaseConfigurator, valid_ident\nexcept ImportError: # pragma: no cover\n IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)\n\n def valid_ident(s):\n m = IDENTIFIER.match(s)\n if not m:\n raise ValueError('Not a valid Python identifier: %r' % s)\n return True\n\n # The ConvertingXXX classes are wrappers around standard Python containers,\n # and they serve to convert any suitable values in the container. 
The\n # conversion converts base dicts, lists and tuples to their wrapped\n # equivalents, whereas strings which match a conversion format are converted\n # appropriately.\n #\n # Each wrapper should have a configurator attribute holding the actual\n # configurator to use for conversion.\n\n class ConvertingDict(dict):\n """A converting dictionary wrapper."""\n\n def __getitem__(self, key):\n value = dict.__getitem__(self, key)\n result = self.configurator.convert(value)\n # If the converted value is different, save for next time\n if value is not result:\n self[key] = result\n if type(result) in (ConvertingDict, ConvertingList,\n ConvertingTuple):\n result.parent = self\n result.key = key\n return result\n\n def get(self, key, default=None):\n value = dict.get(self, key, default)\n result = self.configurator.convert(value)\n # If the converted value is different, save for next time\n if value is not result:\n self[key] = result\n if type(result) in (ConvertingDict, ConvertingList,\n ConvertingTuple):\n result.parent = self\n result.key = key\n return result\n\n def pop(self, key, default=None):\n value = dict.pop(self, key, default)\n result = self.configurator.convert(value)\n if value is not result:\n if type(result) in (ConvertingDict, ConvertingList,\n ConvertingTuple):\n result.parent = self\n result.key = key\n return result\n\n class ConvertingList(list):\n """A converting list wrapper."""\n\n def __getitem__(self, key):\n value = list.__getitem__(self, key)\n result = self.configurator.convert(value)\n # If the converted value is different, save for next time\n if value is not result:\n self[key] = result\n if type(result) in (ConvertingDict, ConvertingList,\n ConvertingTuple):\n result.parent = self\n result.key = key\n return result\n\n def pop(self, idx=-1):\n value = list.pop(self, idx)\n result = self.configurator.convert(value)\n if value is not result:\n if type(result) in (ConvertingDict, ConvertingList,\n ConvertingTuple):\n result.parent = self\n 
return result\n\n class ConvertingTuple(tuple):\n """A converting tuple wrapper."""\n\n def __getitem__(self, key):\n value = tuple.__getitem__(self, key)\n result = self.configurator.convert(value)\n if value is not result:\n if type(result) in (ConvertingDict, ConvertingList,\n ConvertingTuple):\n result.parent = self\n result.key = key\n return result\n\n class BaseConfigurator(object):\n """\n The configurator base class which defines some useful defaults.\n """\n\n CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')\n\n WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')\n DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')\n INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')\n DIGIT_PATTERN = re.compile(r'^\d+$')\n\n value_converters = {\n 'ext': 'ext_convert',\n 'cfg': 'cfg_convert',\n }\n\n # We might want to use a different one, e.g. importlib\n importer = staticmethod(__import__)\n\n def __init__(self, config):\n self.config = ConvertingDict(config)\n self.config.configurator = self\n\n def resolve(self, s):\n """\n Resolve strings to objects using standard import and attribute\n syntax.\n """\n name = s.split('.')\n used = name.pop(0)\n try:\n found = self.importer(used)\n for frag in name:\n used += '.' 
+ frag\n try:\n found = getattr(found, frag)\n except AttributeError:\n self.importer(used)\n found = getattr(found, frag)\n return found\n except ImportError:\n e, tb = sys.exc_info()[1:]\n v = ValueError('Cannot resolve %r: %s' % (s, e))\n v.__cause__, v.__traceback__ = e, tb\n raise v\n\n def ext_convert(self, value):\n """Default converter for the ext:// protocol."""\n return self.resolve(value)\n\n def cfg_convert(self, value):\n """Default converter for the cfg:// protocol."""\n rest = value\n m = self.WORD_PATTERN.match(rest)\n if m is None:\n raise ValueError("Unable to convert %r" % value)\n else:\n rest = rest[m.end():]\n d = self.config[m.groups()[0]]\n while rest:\n m = self.DOT_PATTERN.match(rest)\n if m:\n d = d[m.groups()[0]]\n else:\n m = self.INDEX_PATTERN.match(rest)\n if m:\n idx = m.groups()[0]\n if not self.DIGIT_PATTERN.match(idx):\n d = d[idx]\n else:\n try:\n n = int(\n idx\n ) # try as number first (most likely)\n d = d[n]\n except TypeError:\n d = d[idx]\n if m:\n rest = rest[m.end():]\n else:\n raise ValueError('Unable to convert '\n '%r at %r' % (value, rest))\n # rest should be empty\n return d\n\n def convert(self, value):\n """\n Convert values to an appropriate type. dicts, lists and tuples are\n replaced by their converting alternatives. 
Strings are checked to\n see if they have a conversion format and are converted if they do.\n """\n if not isinstance(value, ConvertingDict) and isinstance(\n value, dict):\n value = ConvertingDict(value)\n value.configurator = self\n elif not isinstance(value, ConvertingList) and isinstance(\n value, list):\n value = ConvertingList(value)\n value.configurator = self\n elif not isinstance(value, ConvertingTuple) and isinstance(value, tuple):\n value = ConvertingTuple(value)\n value.configurator = self\n elif isinstance(value, string_types):\n m = self.CONVERT_PATTERN.match(value)\n if m:\n d = m.groupdict()\n prefix = d['prefix']\n converter = self.value_converters.get(prefix, None)\n if converter:\n suffix = d['suffix']\n converter = getattr(self, converter)\n value = converter(suffix)\n return value\n\n def configure_custom(self, config):\n """Configure an object with a user-supplied factory."""\n c = config.pop('()')\n if not callable(c):\n c = self.resolve(c)\n props = config.pop('.', None)\n # Check for valid identifiers\n kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])\n result = c(**kwargs)\n if props:\n for name, value in props.items():\n setattr(result, name, value)\n return result\n\n def as_tuple(self, value):\n """Utility function which converts lists to tuples."""\n if isinstance(value, list):\n value = tuple(value)\n return value\n
.venv\Lib\site-packages\pip\_vendor\distlib\compat.py
compat.py
Python
41,467
0.95
0.253298
0.106139
vue-tools
706
2023-08-02T07:24:59.768245
MIT
false
603a53a9e2de9d912de1dedd4b856a13
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012-2023 The Python Software Foundation.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\n"""PEP 376 implementation."""\n\nfrom __future__ import unicode_literals\n\nimport base64\nimport codecs\nimport contextlib\nimport hashlib\nimport logging\nimport os\nimport posixpath\nimport sys\nimport zipimport\n\nfrom . import DistlibException, resources\nfrom .compat import StringIO\nfrom .version import get_scheme, UnsupportedVersionError\nfrom .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME)\nfrom .util import (parse_requirement, cached_property, parse_name_and_version, read_exports, write_exports, CSVReader,\n CSVWriter)\n\n__all__ = [\n 'Distribution', 'BaseInstalledDistribution', 'InstalledDistribution', 'EggInfoDistribution', 'DistributionPath'\n]\n\nlogger = logging.getLogger(__name__)\n\nEXPORTS_FILENAME = 'pydist-exports.json'\nCOMMANDS_FILENAME = 'pydist-commands.json'\n\nDIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', 'RESOURCES', EXPORTS_FILENAME, 'SHARED')\n\nDISTINFO_EXT = '.dist-info'\n\n\nclass _Cache(object):\n """\n A simple cache mapping names and .dist-info paths to distributions\n """\n\n def __init__(self):\n """\n Initialise an instance. 
There is normally one for each DistributionPath.\n """\n self.name = {}\n self.path = {}\n self.generated = False\n\n def clear(self):\n """\n Clear the cache, setting it to its initial state.\n """\n self.name.clear()\n self.path.clear()\n self.generated = False\n\n def add(self, dist):\n """\n Add a distribution to the cache.\n :param dist: The distribution to add.\n """\n if dist.path not in self.path:\n self.path[dist.path] = dist\n self.name.setdefault(dist.key, []).append(dist)\n\n\nclass DistributionPath(object):\n """\n Represents a set of distributions installed on a path (typically sys.path).\n """\n\n def __init__(self, path=None, include_egg=False):\n """\n Create an instance from a path, optionally including legacy (distutils/\n setuptools/distribute) distributions.\n :param path: The path to use, as a list of directories. If not specified,\n sys.path is used.\n :param include_egg: If True, this instance will look for and return legacy\n distributions as well as those based on PEP 376.\n """\n if path is None:\n path = sys.path\n self.path = path\n self._include_dist = True\n self._include_egg = include_egg\n\n self._cache = _Cache()\n self._cache_egg = _Cache()\n self._cache_enabled = True\n self._scheme = get_scheme('default')\n\n def _get_cache_enabled(self):\n return self._cache_enabled\n\n def _set_cache_enabled(self, value):\n self._cache_enabled = value\n\n cache_enabled = property(_get_cache_enabled, _set_cache_enabled)\n\n def clear_cache(self):\n """\n Clears the internal cache.\n """\n self._cache.clear()\n self._cache_egg.clear()\n\n def _yield_distributions(self):\n """\n Yield .dist-info and/or .egg(-info) distributions.\n """\n # We need to check if we've seen some resources already, because on\n # some Linux systems (e.g. 
some Debian/Ubuntu variants) there are\n # symlinks which alias other files in the environment.\n seen = set()\n for path in self.path:\n finder = resources.finder_for_path(path)\n if finder is None:\n continue\n r = finder.find('')\n if not r or not r.is_container:\n continue\n rset = sorted(r.resources)\n for entry in rset:\n r = finder.find(entry)\n if not r or r.path in seen:\n continue\n try:\n if self._include_dist and entry.endswith(DISTINFO_EXT):\n possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]\n for metadata_filename in possible_filenames:\n metadata_path = posixpath.join(entry, metadata_filename)\n pydist = finder.find(metadata_path)\n if pydist:\n break\n else:\n continue\n\n with contextlib.closing(pydist.as_stream()) as stream:\n metadata = Metadata(fileobj=stream, scheme='legacy')\n logger.debug('Found %s', r.path)\n seen.add(r.path)\n yield new_dist_class(r.path, metadata=metadata, env=self)\n elif self._include_egg and entry.endswith(('.egg-info', '.egg')):\n logger.debug('Found %s', r.path)\n seen.add(r.path)\n yield old_dist_class(r.path, self)\n except Exception as e:\n msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s'\n logger.warning(msg, r.path, e)\n import warnings\n warnings.warn(msg % (r.path, e), stacklevel=2)\n\n def _generate_cache(self):\n """\n Scan the path for distributions and populate the cache with\n those that are found.\n """\n gen_dist = not self._cache.generated\n gen_egg = self._include_egg and not self._cache_egg.generated\n if gen_dist or gen_egg:\n for dist in self._yield_distributions():\n if isinstance(dist, InstalledDistribution):\n self._cache.add(dist)\n else:\n self._cache_egg.add(dist)\n\n if gen_dist:\n self._cache.generated = True\n if gen_egg:\n self._cache_egg.generated = True\n\n @classmethod\n def distinfo_dirname(cls, name, version):\n """\n The *name* and *version* parameters are converted into their\n filename-escaped form, i.e. 
any ``'-'`` characters are replaced\n with ``'_'`` other than the one in ``'dist-info'`` and the one\n separating the name from the version number.\n\n :parameter name: is converted to a standard distribution name by replacing\n any runs of non- alphanumeric characters with a single\n ``'-'``.\n :type name: string\n :parameter version: is converted to a standard version string. Spaces\n become dots, and all other non-alphanumeric characters\n (except dots) become dashes, with runs of multiple\n dashes condensed to a single dash.\n :type version: string\n :returns: directory name\n :rtype: string"""\n name = name.replace('-', '_')\n return '-'.join([name, version]) + DISTINFO_EXT\n\n def get_distributions(self):\n """\n Provides an iterator that looks for distributions and returns\n :class:`InstalledDistribution` or\n :class:`EggInfoDistribution` instances for each one of them.\n\n :rtype: iterator of :class:`InstalledDistribution` and\n :class:`EggInfoDistribution` instances\n """\n if not self._cache_enabled:\n for dist in self._yield_distributions():\n yield dist\n else:\n self._generate_cache()\n\n for dist in self._cache.path.values():\n yield dist\n\n if self._include_egg:\n for dist in self._cache_egg.path.values():\n yield dist\n\n def get_distribution(self, name):\n """\n Looks for a named distribution on the path.\n\n This function only returns the first result found, as no more than one\n value is expected. 
If nothing is found, ``None`` is returned.\n\n :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`\n or ``None``\n """\n result = None\n name = name.lower()\n if not self._cache_enabled:\n for dist in self._yield_distributions():\n if dist.key == name:\n result = dist\n break\n else:\n self._generate_cache()\n\n if name in self._cache.name:\n result = self._cache.name[name][0]\n elif self._include_egg and name in self._cache_egg.name:\n result = self._cache_egg.name[name][0]\n return result\n\n def provides_distribution(self, name, version=None):\n """\n Iterates over all distributions to find which distributions provide *name*.\n If a *version* is provided, it will be used to filter the results.\n\n This function only returns the first result found, since no more than\n one values are expected. If the directory is not found, returns ``None``.\n\n :parameter version: a version specifier that indicates the version\n required, conforming to the format in ``PEP-345``\n\n :type name: string\n :type version: string\n """\n matcher = None\n if version is not None:\n try:\n matcher = self._scheme.matcher('%s (%s)' % (name, version))\n except ValueError:\n raise DistlibException('invalid name or version: %r, %r' % (name, version))\n\n for dist in self.get_distributions():\n # We hit a problem on Travis where enum34 was installed and doesn't\n # have a provides attribute ...\n if not hasattr(dist, 'provides'):\n logger.debug('No "provides": %s', dist)\n else:\n provided = dist.provides\n\n for p in provided:\n p_name, p_ver = parse_name_and_version(p)\n if matcher is None:\n if p_name == name:\n yield dist\n break\n else:\n if p_name == name and matcher.match(p_ver):\n yield dist\n break\n\n def get_file_path(self, name, relative_path):\n """\n Return the path to a resource file.\n """\n dist = self.get_distribution(name)\n if dist is None:\n raise LookupError('no distribution named %r found' % name)\n return dist.get_resource_path(relative_path)\n\n def 
get_exported_entries(self, category, name=None):\n """\n Return all of the exported entries in a particular category.\n\n :param category: The category to search for entries.\n :param name: If specified, only entries with that name are returned.\n """\n for dist in self.get_distributions():\n r = dist.exports\n if category in r:\n d = r[category]\n if name is not None:\n if name in d:\n yield d[name]\n else:\n for v in d.values():\n yield v\n\n\nclass Distribution(object):\n """\n A base class for distributions, whether installed or from indexes.\n Either way, it must have some metadata, so that's all that's needed\n for construction.\n """\n\n build_time_dependency = False\n """\n Set to True if it's known to be only a build-time dependency (i.e.\n not needed after installation).\n """\n\n requested = False\n """A boolean that indicates whether the ``REQUESTED`` metadata file is\n present (in other words, whether the package was installed by user\n request or it was installed as a dependency)."""\n\n def __init__(self, metadata):\n """\n Initialise an instance.\n :param metadata: The instance of :class:`Metadata` describing this\n distribution.\n """\n self.metadata = metadata\n self.name = metadata.name\n self.key = self.name.lower() # for case-insensitive comparisons\n self.version = metadata.version\n self.locator = None\n self.digest = None\n self.extras = None # additional features requested\n self.context = None # environment marker overrides\n self.download_urls = set()\n self.digests = {}\n\n @property\n def source_url(self):\n """\n The source archive download URL for this distribution.\n """\n return self.metadata.source_url\n\n download_url = source_url # Backward compatibility\n\n @property\n def name_and_version(self):\n """\n A utility property which displays the name and version in parentheses.\n """\n return '%s (%s)' % (self.name, self.version)\n\n @property\n def provides(self):\n """\n A set of distribution names and versions provided by this 
distribution.\n :return: A set of "name (version)" strings.\n """\n plist = self.metadata.provides\n s = '%s (%s)' % (self.name, self.version)\n if s not in plist:\n plist.append(s)\n return plist\n\n def _get_requirements(self, req_attr):\n md = self.metadata\n reqts = getattr(md, req_attr)\n logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr, reqts)\n return set(md.get_requirements(reqts, extras=self.extras, env=self.context))\n\n @property\n def run_requires(self):\n return self._get_requirements('run_requires')\n\n @property\n def meta_requires(self):\n return self._get_requirements('meta_requires')\n\n @property\n def build_requires(self):\n return self._get_requirements('build_requires')\n\n @property\n def test_requires(self):\n return self._get_requirements('test_requires')\n\n @property\n def dev_requires(self):\n return self._get_requirements('dev_requires')\n\n def matches_requirement(self, req):\n """\n Say if this instance matches (fulfills) a requirement.\n :param req: The requirement to match.\n :rtype req: str\n :return: True if it matches, else False.\n """\n # Requirement may contain extras - parse to lose those\n # from what's passed to the matcher\n r = parse_requirement(req)\n scheme = get_scheme(self.metadata.scheme)\n try:\n matcher = scheme.matcher(r.requirement)\n except UnsupportedVersionError:\n # XXX compat-mode if cannot read the version\n logger.warning('could not read version %r - using name only', req)\n name = req.split()[0]\n matcher = scheme.matcher(name)\n\n name = matcher.key # case-insensitive\n\n result = False\n for p in self.provides:\n p_name, p_ver = parse_name_and_version(p)\n if p_name != name:\n continue\n try:\n result = matcher.match(p_ver)\n break\n except UnsupportedVersionError:\n pass\n return result\n\n def __repr__(self):\n """\n Return a textual representation of this instance,\n """\n if self.source_url:\n suffix = ' [%s]' % self.source_url\n else:\n suffix = ''\n return 
'<Distribution %s (%s)%s>' % (self.name, self.version, suffix)\n\n def __eq__(self, other):\n """\n See if this distribution is the same as another.\n :param other: The distribution to compare with. To be equal to one\n another. distributions must have the same type, name,\n version and source_url.\n :return: True if it is the same, else False.\n """\n if type(other) is not type(self):\n result = False\n else:\n result = (self.name == other.name and self.version == other.version and self.source_url == other.source_url)\n return result\n\n def __hash__(self):\n """\n Compute hash in a way which matches the equality test.\n """\n return hash(self.name) + hash(self.version) + hash(self.source_url)\n\n\nclass BaseInstalledDistribution(Distribution):\n """\n This is the base class for installed distributions (whether PEP 376 or\n legacy).\n """\n\n hasher = None\n\n def __init__(self, metadata, path, env=None):\n """\n Initialise an instance.\n :param metadata: An instance of :class:`Metadata` which describes the\n distribution. This will normally have been initialised\n from a metadata file in the ``path``.\n :param path: The path of the ``.dist-info`` or ``.egg-info``\n directory for the distribution.\n :param env: This is normally the :class:`DistributionPath`\n instance where this distribution was found.\n """\n super(BaseInstalledDistribution, self).__init__(metadata)\n self.path = path\n self.dist_path = env\n\n def get_hash(self, data, hasher=None):\n """\n Get the hash of some data, using a particular hash algorithm, if\n specified.\n\n :param data: The data to be hashed.\n :type data: bytes\n :param hasher: The name of a hash implementation, supported by hashlib,\n or ``None``. Examples of valid values are ``'sha1'``,\n ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and\n ``'sha512'``. If no hasher is specified, the ``hasher``\n attribute of the :class:`InstalledDistribution` instance\n is used. 
If the hasher is determined to be ``None``, MD5\n is used as the hashing algorithm.\n :returns: The hash of the data. If a hasher was explicitly specified,\n the returned hash will be prefixed with the specified hasher\n followed by '='.\n :rtype: str\n """\n if hasher is None:\n hasher = self.hasher\n if hasher is None:\n hasher = hashlib.md5\n prefix = ''\n else:\n hasher = getattr(hashlib, hasher)\n prefix = '%s=' % self.hasher\n digest = hasher(data).digest()\n digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')\n return '%s%s' % (prefix, digest)\n\n\nclass InstalledDistribution(BaseInstalledDistribution):\n """\n Created with the *path* of the ``.dist-info`` directory provided to the\n constructor. It reads the metadata contained in ``pydist.json`` when it is\n instantiated., or uses a passed in Metadata instance (useful for when\n dry-run mode is being used).\n """\n\n hasher = 'sha256'\n\n def __init__(self, path, metadata=None, env=None):\n self.modules = []\n self.finder = finder = resources.finder_for_path(path)\n if finder is None:\n raise ValueError('finder unavailable for %s' % path)\n if env and env._cache_enabled and path in env._cache.path:\n metadata = env._cache.path[path].metadata\n elif metadata is None:\n r = finder.find(METADATA_FILENAME)\n # Temporary - for Wheel 0.23 support\n if r is None:\n r = finder.find(WHEEL_METADATA_FILENAME)\n # Temporary - for legacy support\n if r is None:\n r = finder.find(LEGACY_METADATA_FILENAME)\n if r is None:\n raise ValueError('no %s found in %s' % (METADATA_FILENAME, path))\n with contextlib.closing(r.as_stream()) as stream:\n metadata = Metadata(fileobj=stream, scheme='legacy')\n\n super(InstalledDistribution, self).__init__(metadata, path, env)\n\n if env and env._cache_enabled:\n env._cache.add(self)\n\n r = finder.find('REQUESTED')\n self.requested = r is not None\n p = os.path.join(path, 'top_level.txt')\n if os.path.exists(p):\n with open(p, 'rb') as f:\n data = 
f.read().decode('utf-8')\n self.modules = data.splitlines()\n\n def __repr__(self):\n return '<InstalledDistribution %r %s at %r>' % (self.name, self.version, self.path)\n\n def __str__(self):\n return "%s %s" % (self.name, self.version)\n\n def _get_records(self):\n """\n Get the list of installed files for the distribution\n :return: A list of tuples of path, hash and size. Note that hash and\n size might be ``None`` for some entries. The path is exactly\n as stored in the file (which is as in PEP 376).\n """\n results = []\n r = self.get_distinfo_resource('RECORD')\n with contextlib.closing(r.as_stream()) as stream:\n with CSVReader(stream=stream) as record_reader:\n # Base location is parent dir of .dist-info dir\n # base_location = os.path.dirname(self.path)\n # base_location = os.path.abspath(base_location)\n for row in record_reader:\n missing = [None for i in range(len(row), 3)]\n path, checksum, size = row + missing\n # if not os.path.isabs(path):\n # path = path.replace('/', os.sep)\n # path = os.path.join(base_location, path)\n results.append((path, checksum, size))\n return results\n\n @cached_property\n def exports(self):\n """\n Return the information exported by this distribution.\n :return: A dictionary of exports, mapping an export category to a dict\n of :class:`ExportEntry` instances describing the individual\n export entries, and keyed by name.\n """\n result = {}\n r = self.get_distinfo_resource(EXPORTS_FILENAME)\n if r:\n result = self.read_exports()\n return result\n\n def read_exports(self):\n """\n Read exports data from a file in .ini format.\n\n :return: A dictionary of exports, mapping an export category to a list\n of :class:`ExportEntry` instances describing the individual\n export entries.\n """\n result = {}\n r = self.get_distinfo_resource(EXPORTS_FILENAME)\n if r:\n with contextlib.closing(r.as_stream()) as stream:\n result = read_exports(stream)\n return result\n\n def write_exports(self, exports):\n """\n Write a dictionary of 
exports to a file in .ini format.\n :param exports: A dictionary of exports, mapping an export category to\n a list of :class:`ExportEntry` instances describing the\n individual export entries.\n """\n rf = self.get_distinfo_file(EXPORTS_FILENAME)\n with open(rf, 'w') as f:\n write_exports(exports, f)\n\n def get_resource_path(self, relative_path):\n """\n NOTE: This API may change in the future.\n\n Return the absolute path to a resource file with the given relative\n path.\n\n :param relative_path: The path, relative to .dist-info, of the resource\n of interest.\n :return: The absolute path where the resource is to be found.\n """\n r = self.get_distinfo_resource('RESOURCES')\n with contextlib.closing(r.as_stream()) as stream:\n with CSVReader(stream=stream) as resources_reader:\n for relative, destination in resources_reader:\n if relative == relative_path:\n return destination\n raise KeyError('no resource file with relative path %r '\n 'is installed' % relative_path)\n\n def list_installed_files(self):\n """\n Iterates over the ``RECORD`` entries and returns a tuple\n ``(path, hash, size)`` for each line.\n\n :returns: iterator of (path, hash, size)\n """\n for result in self._get_records():\n yield result\n\n def write_installed_files(self, paths, prefix, dry_run=False):\n """\n Writes the ``RECORD`` file, using the ``paths`` iterable passed in. 
Any\n existing ``RECORD`` file is silently overwritten.\n\n prefix is used to determine when to write absolute paths.\n """\n prefix = os.path.join(prefix, '')\n base = os.path.dirname(self.path)\n base_under_prefix = base.startswith(prefix)\n base = os.path.join(base, '')\n record_path = self.get_distinfo_file('RECORD')\n logger.info('creating %s', record_path)\n if dry_run:\n return None\n with CSVWriter(record_path) as writer:\n for path in paths:\n if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):\n # do not put size and hash, as in PEP-376\n hash_value = size = ''\n else:\n size = '%d' % os.path.getsize(path)\n with open(path, 'rb') as fp:\n hash_value = self.get_hash(fp.read())\n if path.startswith(base) or (base_under_prefix and path.startswith(prefix)):\n path = os.path.relpath(path, base)\n writer.writerow((path, hash_value, size))\n\n # add the RECORD file itself\n if record_path.startswith(base):\n record_path = os.path.relpath(record_path, base)\n writer.writerow((record_path, '', ''))\n return record_path\n\n def check_installed_files(self):\n """\n Checks that the hashes and sizes of the files in ``RECORD`` are\n matched by the files themselves. Returns a (possibly empty) list of\n mismatches. 
Each entry in the mismatch list will be a tuple consisting\n of the path, 'exists', 'size' or 'hash' according to what didn't match\n (existence is checked first, then size, then hash), the expected\n value and the actual value.\n """\n mismatches = []\n base = os.path.dirname(self.path)\n record_path = self.get_distinfo_file('RECORD')\n for path, hash_value, size in self.list_installed_files():\n if not os.path.isabs(path):\n path = os.path.join(base, path)\n if path == record_path:\n continue\n if not os.path.exists(path):\n mismatches.append((path, 'exists', True, False))\n elif os.path.isfile(path):\n actual_size = str(os.path.getsize(path))\n if size and actual_size != size:\n mismatches.append((path, 'size', size, actual_size))\n elif hash_value:\n if '=' in hash_value:\n hasher = hash_value.split('=', 1)[0]\n else:\n hasher = None\n\n with open(path, 'rb') as f:\n actual_hash = self.get_hash(f.read(), hasher)\n if actual_hash != hash_value:\n mismatches.append((path, 'hash', hash_value, actual_hash))\n return mismatches\n\n @cached_property\n def shared_locations(self):\n """\n A dictionary of shared locations whose keys are in the set 'prefix',\n 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.\n The corresponding value is the absolute path of that category for\n this distribution, and takes into account any paths selected by the\n user at installation time (e.g. via command-line arguments). 
In the\n case of the 'namespace' key, this would be a list of absolute paths\n for the roots of namespace packages in this distribution.\n\n The first time this property is accessed, the relevant information is\n read from the SHARED file in the .dist-info directory.\n """\n result = {}\n shared_path = os.path.join(self.path, 'SHARED')\n if os.path.isfile(shared_path):\n with codecs.open(shared_path, 'r', encoding='utf-8') as f:\n lines = f.read().splitlines()\n for line in lines:\n key, value = line.split('=', 1)\n if key == 'namespace':\n result.setdefault(key, []).append(value)\n else:\n result[key] = value\n return result\n\n def write_shared_locations(self, paths, dry_run=False):\n """\n Write shared location information to the SHARED file in .dist-info.\n :param paths: A dictionary as described in the documentation for\n :meth:`shared_locations`.\n :param dry_run: If True, the action is logged but no file is actually\n written.\n :return: The path of the file written to.\n """\n shared_path = os.path.join(self.path, 'SHARED')\n logger.info('creating %s', shared_path)\n if dry_run:\n return None\n lines = []\n for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):\n path = paths[key]\n if os.path.isdir(paths[key]):\n lines.append('%s=%s' % (key, path))\n for ns in paths.get('namespace', ()):\n lines.append('namespace=%s' % ns)\n\n with codecs.open(shared_path, 'w', encoding='utf-8') as f:\n f.write('\n'.join(lines))\n return shared_path\n\n def get_distinfo_resource(self, path):\n if path not in DIST_FILES:\n raise DistlibException('invalid path for a dist-info file: '\n '%r at %r' % (path, self.path))\n finder = resources.finder_for_path(self.path)\n if finder is None:\n raise DistlibException('Unable to get a finder for %s' % self.path)\n return finder.find(path)\n\n def get_distinfo_file(self, path):\n """\n Returns a path located under the ``.dist-info`` directory. 
Returns a\n string representing the path.\n\n :parameter path: a ``'/'``-separated path relative to the\n ``.dist-info`` directory or an absolute path;\n If *path* is an absolute path and doesn't start\n with the ``.dist-info`` directory path,\n a :class:`DistlibException` is raised\n :type path: str\n :rtype: str\n """\n # Check if it is an absolute path # XXX use relpath, add tests\n if path.find(os.sep) >= 0:\n # it's an absolute path?\n distinfo_dirname, path = path.split(os.sep)[-2:]\n if distinfo_dirname != self.path.split(os.sep)[-1]:\n raise DistlibException('dist-info file %r does not belong to the %r %s '\n 'distribution' % (path, self.name, self.version))\n\n # The file must be relative\n if path not in DIST_FILES:\n raise DistlibException('invalid path for a dist-info file: '\n '%r at %r' % (path, self.path))\n\n return os.path.join(self.path, path)\n\n def list_distinfo_files(self):\n """\n Iterates over the ``RECORD`` entries and returns paths for each line if\n the path is pointing to a file located in the ``.dist-info`` directory\n or one of its subdirectories.\n\n :returns: iterator of paths\n """\n base = os.path.dirname(self.path)\n for path, checksum, size in self._get_records():\n # XXX add separator or use real relpath algo\n if not os.path.isabs(path):\n path = os.path.join(base, path)\n if path.startswith(self.path):\n yield path\n\n def __eq__(self, other):\n return (isinstance(other, InstalledDistribution) and self.path == other.path)\n\n # See http://docs.python.org/reference/datamodel#object.__hash__\n __hash__ = object.__hash__\n\n\nclass EggInfoDistribution(BaseInstalledDistribution):\n """Created with the *path* of the ``.egg-info`` directory or file provided\n to the constructor. 
It reads the metadata contained in the file itself, or\n if the given path happens to be a directory, the metadata is read from the\n file ``PKG-INFO`` under that directory."""\n\n requested = True # as we have no way of knowing, assume it was\n shared_locations = {}\n\n def __init__(self, path, env=None):\n\n def set_name_and_version(s, n, v):\n s.name = n\n s.key = n.lower() # for case-insensitive comparisons\n s.version = v\n\n self.path = path\n self.dist_path = env\n if env and env._cache_enabled and path in env._cache_egg.path:\n metadata = env._cache_egg.path[path].metadata\n set_name_and_version(self, metadata.name, metadata.version)\n else:\n metadata = self._get_metadata(path)\n\n # Need to be set before caching\n set_name_and_version(self, metadata.name, metadata.version)\n\n if env and env._cache_enabled:\n env._cache_egg.add(self)\n super(EggInfoDistribution, self).__init__(metadata, path, env)\n\n def _get_metadata(self, path):\n requires = None\n\n def parse_requires_data(data):\n """Create a list of dependencies from a requires.txt file.\n\n *data*: the contents of a setuptools-produced requires.txt file.\n """\n reqs = []\n lines = data.splitlines()\n for line in lines:\n line = line.strip()\n # sectioned files have bare newlines (separating sections)\n if not line: # pragma: no cover\n continue\n if line.startswith('['): # pragma: no cover\n logger.warning('Unexpected line: quitting requirement scan: %r', line)\n break\n r = parse_requirement(line)\n if not r: # pragma: no cover\n logger.warning('Not recognised as a requirement: %r', line)\n continue\n if r.extras: # pragma: no cover\n logger.warning('extra requirements in requires.txt are '\n 'not supported')\n if not r.constraints:\n reqs.append(r.name)\n else:\n cons = ', '.join('%s%s' % c for c in r.constraints)\n reqs.append('%s (%s)' % (r.name, cons))\n return reqs\n\n def parse_requires_path(req_path):\n """Create a list of dependencies from a requires.txt file.\n\n *req_path*: the path to 
a setuptools-produced requires.txt file.\n """\n\n reqs = []\n try:\n with codecs.open(req_path, 'r', 'utf-8') as fp:\n reqs = parse_requires_data(fp.read())\n except IOError:\n pass\n return reqs\n\n tl_path = tl_data = None\n if path.endswith('.egg'):\n if os.path.isdir(path):\n p = os.path.join(path, 'EGG-INFO')\n meta_path = os.path.join(p, 'PKG-INFO')\n metadata = Metadata(path=meta_path, scheme='legacy')\n req_path = os.path.join(p, 'requires.txt')\n tl_path = os.path.join(p, 'top_level.txt')\n requires = parse_requires_path(req_path)\n else:\n # FIXME handle the case where zipfile is not available\n zipf = zipimport.zipimporter(path)\n fileobj = StringIO(zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))\n metadata = Metadata(fileobj=fileobj, scheme='legacy')\n try:\n data = zipf.get_data('EGG-INFO/requires.txt')\n tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')\n requires = parse_requires_data(data.decode('utf-8'))\n except IOError:\n requires = None\n elif path.endswith('.egg-info'):\n if os.path.isdir(path):\n req_path = os.path.join(path, 'requires.txt')\n requires = parse_requires_path(req_path)\n path = os.path.join(path, 'PKG-INFO')\n tl_path = os.path.join(path, 'top_level.txt')\n metadata = Metadata(path=path, scheme='legacy')\n else:\n raise DistlibException('path must end with .egg-info or .egg, '\n 'got %r' % path)\n\n if requires:\n metadata.add_requirements(requires)\n # look for top-level modules in top_level.txt, if present\n if tl_data is None:\n if tl_path is not None and os.path.exists(tl_path):\n with open(tl_path, 'rb') as f:\n tl_data = f.read().decode('utf-8')\n if not tl_data:\n tl_data = []\n else:\n tl_data = tl_data.splitlines()\n self.modules = tl_data\n return metadata\n\n def __repr__(self):\n return '<EggInfoDistribution %r %s at %r>' % (self.name, self.version, self.path)\n\n def __str__(self):\n return "%s %s" % (self.name, self.version)\n\n def check_installed_files(self):\n """\n Checks that the hashes and 
sizes of the files in ``RECORD`` are\n matched by the files themselves. Returns a (possibly empty) list of\n mismatches. Each entry in the mismatch list will be a tuple consisting\n of the path, 'exists', 'size' or 'hash' according to what didn't match\n (existence is checked first, then size, then hash), the expected\n value and the actual value.\n """\n mismatches = []\n record_path = os.path.join(self.path, 'installed-files.txt')\n if os.path.exists(record_path):\n for path, _, _ in self.list_installed_files():\n if path == record_path:\n continue\n if not os.path.exists(path):\n mismatches.append((path, 'exists', True, False))\n return mismatches\n\n def list_installed_files(self):\n """\n Iterates over the ``installed-files.txt`` entries and returns a tuple\n ``(path, hash, size)`` for each line.\n\n :returns: a list of (path, hash, size)\n """\n\n def _md5(path):\n f = open(path, 'rb')\n try:\n content = f.read()\n finally:\n f.close()\n return hashlib.md5(content).hexdigest()\n\n def _size(path):\n return os.stat(path).st_size\n\n record_path = os.path.join(self.path, 'installed-files.txt')\n result = []\n if os.path.exists(record_path):\n with codecs.open(record_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n p = os.path.normpath(os.path.join(self.path, line))\n # "./" is present as a marker between installed files\n # and installation metadata files\n if not os.path.exists(p):\n logger.warning('Non-existent file: %s', p)\n if p.endswith(('.pyc', '.pyo')):\n continue\n # otherwise fall through and fail\n if not os.path.isdir(p):\n result.append((p, _md5(p), _size(p)))\n result.append((record_path, None, None))\n return result\n\n def list_distinfo_files(self, absolute=False):\n """\n Iterates over the ``installed-files.txt`` entries and returns paths for\n each line if the path is pointing to a file located in the\n ``.egg-info`` directory or one of its subdirectories.\n\n :parameter absolute: If *absolute* is ``True``, each 
returned path is\n transformed into a local absolute path. Otherwise the\n raw value from ``installed-files.txt`` is returned.\n :type absolute: boolean\n :returns: iterator of paths\n """\n record_path = os.path.join(self.path, 'installed-files.txt')\n if os.path.exists(record_path):\n skip = True\n with codecs.open(record_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line == './':\n skip = False\n continue\n if not skip:\n p = os.path.normpath(os.path.join(self.path, line))\n if p.startswith(self.path):\n if absolute:\n yield p\n else:\n yield line\n\n def __eq__(self, other):\n return (isinstance(other, EggInfoDistribution) and self.path == other.path)\n\n # See http://docs.python.org/reference/datamodel#object.__hash__\n __hash__ = object.__hash__\n\n\nnew_dist_class = InstalledDistribution\nold_dist_class = EggInfoDistribution\n\n\nclass DependencyGraph(object):\n """\n Represents a dependency graph between distributions.\n\n The dependency relationships are stored in an ``adjacency_list`` that maps\n distributions to a list of ``(other, label)`` tuples where ``other``\n is a distribution and the edge is labeled with ``label`` (i.e. the version\n specifier, if such was provided). Also, for more efficient traversal, for\n every distribution ``x``, a list of predecessors is kept in\n ``reverse_list[x]``. An edge from distribution ``a`` to\n distribution ``b`` means that ``a`` depends on ``b``. 
If any missing\n dependencies are found, they are stored in ``missing``, which is a\n dictionary that maps distributions to a list of requirements that were not\n provided by any other distributions.\n """\n\n def __init__(self):\n self.adjacency_list = {}\n self.reverse_list = {}\n self.missing = {}\n\n def add_distribution(self, distribution):\n """Add the *distribution* to the graph.\n\n :type distribution: :class:`distutils2.database.InstalledDistribution`\n or :class:`distutils2.database.EggInfoDistribution`\n """\n self.adjacency_list[distribution] = []\n self.reverse_list[distribution] = []\n # self.missing[distribution] = []\n\n def add_edge(self, x, y, label=None):\n """Add an edge from distribution *x* to distribution *y* with the given\n *label*.\n\n :type x: :class:`distutils2.database.InstalledDistribution` or\n :class:`distutils2.database.EggInfoDistribution`\n :type y: :class:`distutils2.database.InstalledDistribution` or\n :class:`distutils2.database.EggInfoDistribution`\n :type label: ``str`` or ``None``\n """\n self.adjacency_list[x].append((y, label))\n # multiple edges are allowed, so be careful\n if x not in self.reverse_list[y]:\n self.reverse_list[y].append(x)\n\n def add_missing(self, distribution, requirement):\n """\n Add a missing *requirement* for the given *distribution*.\n\n :type distribution: :class:`distutils2.database.InstalledDistribution`\n or :class:`distutils2.database.EggInfoDistribution`\n :type requirement: ``str``\n """\n logger.debug('%s missing %r', distribution, requirement)\n self.missing.setdefault(distribution, []).append(requirement)\n\n def _repr_dist(self, dist):\n return '%s %s' % (dist.name, dist.version)\n\n def repr_node(self, dist, level=1):\n """Prints only a subgraph"""\n output = [self._repr_dist(dist)]\n for other, label in self.adjacency_list[dist]:\n dist = self._repr_dist(other)\n if label is not None:\n dist = '%s [%s]' % (dist, label)\n output.append(' ' * level + str(dist))\n suboutput = 
self.repr_node(other, level + 1)\n subs = suboutput.split('\n')\n output.extend(subs[1:])\n return '\n'.join(output)\n\n def to_dot(self, f, skip_disconnected=True):\n """Writes a DOT output for the graph to the provided file *f*.\n\n If *skip_disconnected* is set to ``True``, then all distributions\n that are not dependent on any other distribution are skipped.\n\n :type f: has to support ``file``-like operations\n :type skip_disconnected: ``bool``\n """\n disconnected = []\n\n f.write("digraph dependencies {\n")\n for dist, adjs in self.adjacency_list.items():\n if len(adjs) == 0 and not skip_disconnected:\n disconnected.append(dist)\n for other, label in adjs:\n if label is not None:\n f.write('"%s" -> "%s" [label="%s"]\n' % (dist.name, other.name, label))\n else:\n f.write('"%s" -> "%s"\n' % (dist.name, other.name))\n if not skip_disconnected and len(disconnected) > 0:\n f.write('subgraph disconnected {\n')\n f.write('label = "Disconnected"\n')\n f.write('bgcolor = red\n')\n\n for dist in disconnected:\n f.write('"%s"' % dist.name)\n f.write('\n')\n f.write('}\n')\n f.write('}\n')\n\n def topological_sort(self):\n """\n Perform a topological sort of the graph.\n :return: A tuple, the first element of which is a topologically sorted\n list of distributions, and the second element of which is a\n list of distributions that cannot be sorted because they have\n circular dependencies and so form a cycle.\n """\n result = []\n # Make a shallow copy of the adjacency list\n alist = {}\n for k, v in self.adjacency_list.items():\n alist[k] = v[:]\n while True:\n # See what we can remove in this run\n to_remove = []\n for k, v in list(alist.items())[:]:\n if not v:\n to_remove.append(k)\n del alist[k]\n if not to_remove:\n # What's left in alist (if anything) is a cycle.\n break\n # Remove from the adjacency list of others\n for k, v in alist.items():\n alist[k] = [(d, r) for d, r in v if d not in to_remove]\n logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, 
d.version) for d in to_remove])\n result.extend(to_remove)\n return result, list(alist.keys())\n\n def __repr__(self):\n """Representation of the graph"""\n output = []\n for dist, adjs in self.adjacency_list.items():\n output.append(self.repr_node(dist))\n return '\n'.join(output)\n\n\ndef make_graph(dists, scheme='default'):\n """Makes a dependency graph from the given distributions.\n\n :parameter dists: a list of distributions\n :type dists: list of :class:`distutils2.database.InstalledDistribution` and\n :class:`distutils2.database.EggInfoDistribution` instances\n :rtype: a :class:`DependencyGraph` instance\n """\n scheme = get_scheme(scheme)\n graph = DependencyGraph()\n provided = {} # maps names to lists of (version, dist) tuples\n\n # first, build the graph and find out what's provided\n for dist in dists:\n graph.add_distribution(dist)\n\n for p in dist.provides:\n name, version = parse_name_and_version(p)\n logger.debug('Add to provided: %s, %s, %s', name, version, dist)\n provided.setdefault(name, []).append((version, dist))\n\n # now make the edges\n for dist in dists:\n requires = (dist.run_requires | dist.meta_requires | dist.build_requires | dist.dev_requires)\n for req in requires:\n try:\n matcher = scheme.matcher(req)\n except UnsupportedVersionError:\n # XXX compat-mode if cannot read the version\n logger.warning('could not read version %r - using name only', req)\n name = req.split()[0]\n matcher = scheme.matcher(name)\n\n name = matcher.key # case-insensitive\n\n matched = False\n if name in provided:\n for version, provider in provided[name]:\n try:\n match = matcher.match(version)\n except UnsupportedVersionError:\n match = False\n\n if match:\n graph.add_edge(dist, provider, req)\n matched = True\n break\n if not matched:\n graph.add_missing(dist, req)\n return graph\n\n\ndef get_dependent_dists(dists, dist):\n """Recursively generate a list of distributions from *dists* that are\n dependent on *dist*.\n\n :param dists: a list of 
distributions\n :param dist: a distribution, member of *dists* for which we are interested\n """\n if dist not in dists:\n raise DistlibException('given distribution %r is not a member '\n 'of the list' % dist.name)\n graph = make_graph(dists)\n\n dep = [dist] # dependent distributions\n todo = graph.reverse_list[dist] # list of nodes we should inspect\n\n while todo:\n d = todo.pop()\n dep.append(d)\n for succ in graph.reverse_list[d]:\n if succ not in dep:\n todo.append(succ)\n\n dep.pop(0) # remove dist from dep, was there to prevent infinite loops\n return dep\n\n\ndef get_required_dists(dists, dist):\n """Recursively generate a list of distributions from *dists* that are\n required by *dist*.\n\n :param dists: a list of distributions\n :param dist: a distribution, member of *dists* for which we are interested\n in finding the dependencies.\n """\n if dist not in dists:\n raise DistlibException('given distribution %r is not a member '\n 'of the list' % dist.name)\n graph = make_graph(dists)\n\n req = set() # required distributions\n todo = graph.adjacency_list[dist] # list of nodes we should inspect\n seen = set(t[0] for t in todo) # already added to todo\n\n while todo:\n d = todo.pop()[0]\n req.add(d)\n pred_list = graph.adjacency_list[d]\n for pred in pred_list:\n d = pred[0]\n if d not in req and d not in seen:\n seen.add(d)\n todo.append(pred)\n return req\n\n\ndef make_dist(name, version, **kwargs):\n """\n A convenience method for making a dist given just a name and version.\n """\n summary = kwargs.pop('summary', 'Placeholder for summary')\n md = Metadata(**kwargs)\n md.name = name\n md.version = version\n md.summary = summary or 'Placeholder for summary'\n return Distribution(md)\n
.venv\Lib\site-packages\pip\_vendor\distlib\database.py
database.py
Python
51,160
0.75
0.246802
0.041739
python-kit
304
2023-11-12T15:22:25.031433
GPL-3.0
false
45b0c292d664d475164c4fdf97ab3790
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2023 Vinay Sajip.\n# Licensed to the Python Software Foundation under a contributor agreement.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\nimport hashlib\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport tempfile\ntry:\n from threading import Thread\nexcept ImportError: # pragma: no cover\n from dummy_threading import Thread\n\nfrom . import DistlibException\nfrom .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,\n urlparse, build_opener, string_types)\nfrom .util import zip_dir, ServerProxy\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_INDEX = 'https://pypi.org/pypi'\nDEFAULT_REALM = 'pypi'\n\n\nclass PackageIndex(object):\n """\n This class represents a package index compatible with PyPI, the Python\n Package Index.\n """\n\n boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'\n\n def __init__(self, url=None):\n """\n Initialise an instance.\n\n :param url: The URL of the index. If not specified, the URL for PyPI is\n used.\n """\n self.url = url or DEFAULT_INDEX\n self.read_configuration()\n scheme, netloc, path, params, query, frag = urlparse(self.url)\n if params or query or frag or scheme not in ('http', 'https'):\n raise DistlibException('invalid repository: %s' % self.url)\n self.password_handler = None\n self.ssl_verifier = None\n self.gpg = None\n self.gpg_home = None\n with open(os.devnull, 'w') as sink:\n # Use gpg by default rather than gpg2, as gpg2 insists on\n # prompting for passwords\n for s in ('gpg', 'gpg2'):\n try:\n rc = subprocess.check_call([s, '--version'], stdout=sink,\n stderr=sink)\n if rc == 0:\n self.gpg = s\n break\n except OSError:\n pass\n\n def _get_pypirc_command(self):\n """\n Get the distutils command for interacting with PyPI configurations.\n :return: the command.\n """\n from .util import _get_pypirc_command as cmd\n return cmd()\n\n def read_configuration(self):\n """\n Read the PyPI access configuration as supported by 
distutils. This populates\n ``username``, ``password``, ``realm`` and ``url`` attributes from the\n configuration.\n """\n from .util import _load_pypirc\n cfg = _load_pypirc(self)\n self.username = cfg.get('username')\n self.password = cfg.get('password')\n self.realm = cfg.get('realm', 'pypi')\n self.url = cfg.get('repository', self.url)\n\n def save_configuration(self):\n """\n Save the PyPI access configuration. You must have set ``username`` and\n ``password`` attributes before calling this method.\n """\n self.check_credentials()\n from .util import _store_pypirc\n _store_pypirc(self)\n\n def check_credentials(self):\n """\n Check that ``username`` and ``password`` have been set, and raise an\n exception if not.\n """\n if self.username is None or self.password is None:\n raise DistlibException('username and password must be set')\n pm = HTTPPasswordMgr()\n _, netloc, _, _, _, _ = urlparse(self.url)\n pm.add_password(self.realm, netloc, self.username, self.password)\n self.password_handler = HTTPBasicAuthHandler(pm)\n\n def register(self, metadata): # pragma: no cover\n """\n Register a distribution on PyPI, using the provided metadata.\n\n :param metadata: A :class:`Metadata` instance defining at least a name\n and version number for the distribution to be\n registered.\n :return: The HTTP response received from PyPI upon submission of the\n request.\n """\n self.check_credentials()\n metadata.validate()\n d = metadata.todict()\n d[':action'] = 'verify'\n request = self.encode_request(d.items(), [])\n self.send_request(request)\n d[':action'] = 'submit'\n request = self.encode_request(d.items(), [])\n return self.send_request(request)\n\n def _reader(self, name, stream, outbuf):\n """\n Thread runner for reading lines of from a subprocess into a buffer.\n\n :param name: The logical name of the stream (used for logging only).\n :param stream: The stream to read from. 
This will typically a pipe\n connected to the output stream of a subprocess.\n :param outbuf: The list to append the read lines to.\n """\n while True:\n s = stream.readline()\n if not s:\n break\n s = s.decode('utf-8').rstrip()\n outbuf.append(s)\n logger.debug('%s: %s' % (name, s))\n stream.close()\n\n def get_sign_command(self, filename, signer, sign_password, keystore=None): # pragma: no cover\n """\n Return a suitable command for signing a file.\n\n :param filename: The pathname to the file to be signed.\n :param signer: The identifier of the signer of the file.\n :param sign_password: The passphrase for the signer's\n private key used for signing.\n :param keystore: The path to a directory which contains the keys\n used in verification. If not specified, the\n instance's ``gpg_home`` attribute is used instead.\n :return: The signing command as a list suitable to be\n passed to :class:`subprocess.Popen`.\n """\n cmd = [self.gpg, '--status-fd', '2', '--no-tty']\n if keystore is None:\n keystore = self.gpg_home\n if keystore:\n cmd.extend(['--homedir', keystore])\n if sign_password is not None:\n cmd.extend(['--batch', '--passphrase-fd', '0'])\n td = tempfile.mkdtemp()\n sf = os.path.join(td, os.path.basename(filename) + '.asc')\n cmd.extend(['--detach-sign', '--armor', '--local-user',\n signer, '--output', sf, filename])\n logger.debug('invoking: %s', ' '.join(cmd))\n return cmd, sf\n\n def run_command(self, cmd, input_data=None):\n """\n Run a command in a child process , passing it any input data specified.\n\n :param cmd: The command to run.\n :param input_data: If specified, this must be a byte string containing\n data to be sent to the child process.\n :return: A tuple consisting of the subprocess' exit code, a list of\n lines read from the subprocess' ``stdout``, and a list of\n lines read from the subprocess' ``stderr``.\n """\n kwargs = {\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n if input_data is not None:\n kwargs['stdin'] = 
subprocess.PIPE\n stdout = []\n stderr = []\n p = subprocess.Popen(cmd, **kwargs)\n # We don't use communicate() here because we may need to\n # get clever with interacting with the command\n t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))\n t1.start()\n t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))\n t2.start()\n if input_data is not None:\n p.stdin.write(input_data)\n p.stdin.close()\n\n p.wait()\n t1.join()\n t2.join()\n return p.returncode, stdout, stderr\n\n def sign_file(self, filename, signer, sign_password, keystore=None): # pragma: no cover\n """\n Sign a file.\n\n :param filename: The pathname to the file to be signed.\n :param signer: The identifier of the signer of the file.\n :param sign_password: The passphrase for the signer's\n private key used for signing.\n :param keystore: The path to a directory which contains the keys\n used in signing. If not specified, the instance's\n ``gpg_home`` attribute is used instead.\n :return: The absolute pathname of the file where the signature is\n stored.\n """\n cmd, sig_file = self.get_sign_command(filename, signer, sign_password,\n keystore)\n rc, stdout, stderr = self.run_command(cmd,\n sign_password.encode('utf-8'))\n if rc != 0:\n raise DistlibException('sign command failed with error '\n 'code %s' % rc)\n return sig_file\n\n def upload_file(self, metadata, filename, signer=None, sign_password=None,\n filetype='sdist', pyversion='source', keystore=None):\n """\n Upload a release file to the index.\n\n :param metadata: A :class:`Metadata` instance defining at least a name\n and version number for the file to be uploaded.\n :param filename: The pathname of the file to be uploaded.\n :param signer: The identifier of the signer of the file.\n :param sign_password: The passphrase for the signer's\n private key used for signing.\n :param filetype: The type of the file being uploaded. 
This is the\n distutils command which produced that file, e.g.\n ``sdist`` or ``bdist_wheel``.\n :param pyversion: The version of Python which the release relates\n to. For code compatible with any Python, this would\n be ``source``, otherwise it would be e.g. ``3.2``.\n :param keystore: The path to a directory which contains the keys\n used in signing. If not specified, the instance's\n ``gpg_home`` attribute is used instead.\n :return: The HTTP response received from PyPI upon submission of the\n request.\n """\n self.check_credentials()\n if not os.path.exists(filename):\n raise DistlibException('not found: %s' % filename)\n metadata.validate()\n d = metadata.todict()\n sig_file = None\n if signer:\n if not self.gpg:\n logger.warning('no signing program available - not signed')\n else:\n sig_file = self.sign_file(filename, signer, sign_password,\n keystore)\n with open(filename, 'rb') as f:\n file_data = f.read()\n md5_digest = hashlib.md5(file_data).hexdigest()\n sha256_digest = hashlib.sha256(file_data).hexdigest()\n d.update({\n ':action': 'file_upload',\n 'protocol_version': '1',\n 'filetype': filetype,\n 'pyversion': pyversion,\n 'md5_digest': md5_digest,\n 'sha256_digest': sha256_digest,\n })\n files = [('content', os.path.basename(filename), file_data)]\n if sig_file:\n with open(sig_file, 'rb') as f:\n sig_data = f.read()\n files.append(('gpg_signature', os.path.basename(sig_file),\n sig_data))\n shutil.rmtree(os.path.dirname(sig_file))\n request = self.encode_request(d.items(), files)\n return self.send_request(request)\n\n def upload_documentation(self, metadata, doc_dir): # pragma: no cover\n """\n Upload documentation to the index.\n\n :param metadata: A :class:`Metadata` instance defining at least a name\n and version number for the documentation to be\n uploaded.\n :param doc_dir: The pathname of the directory which contains the\n documentation. 
This should be the directory that\n contains the ``index.html`` for the documentation.\n :return: The HTTP response received from PyPI upon submission of the\n request.\n """\n self.check_credentials()\n if not os.path.isdir(doc_dir):\n raise DistlibException('not a directory: %r' % doc_dir)\n fn = os.path.join(doc_dir, 'index.html')\n if not os.path.exists(fn):\n raise DistlibException('not found: %r' % fn)\n metadata.validate()\n name, version = metadata.name, metadata.version\n zip_data = zip_dir(doc_dir).getvalue()\n fields = [(':action', 'doc_upload'),\n ('name', name), ('version', version)]\n files = [('content', name, zip_data)]\n request = self.encode_request(fields, files)\n return self.send_request(request)\n\n def get_verify_command(self, signature_filename, data_filename,\n keystore=None):\n """\n Return a suitable command for verifying a file.\n\n :param signature_filename: The pathname to the file containing the\n signature.\n :param data_filename: The pathname to the file containing the\n signed data.\n :param keystore: The path to a directory which contains the keys\n used in verification. If not specified, the\n instance's ``gpg_home`` attribute is used instead.\n :return: The verifying command as a list suitable to be\n passed to :class:`subprocess.Popen`.\n """\n cmd = [self.gpg, '--status-fd', '2', '--no-tty']\n if keystore is None:\n keystore = self.gpg_home\n if keystore:\n cmd.extend(['--homedir', keystore])\n cmd.extend(['--verify', signature_filename, data_filename])\n logger.debug('invoking: %s', ' '.join(cmd))\n return cmd\n\n def verify_signature(self, signature_filename, data_filename,\n keystore=None):\n """\n Verify a signature for a file.\n\n :param signature_filename: The pathname to the file containing the\n signature.\n :param data_filename: The pathname to the file containing the\n signed data.\n :param keystore: The path to a directory which contains the keys\n used in verification. 
If not specified, the\n instance's ``gpg_home`` attribute is used instead.\n :return: True if the signature was verified, else False.\n """\n if not self.gpg:\n raise DistlibException('verification unavailable because gpg '\n 'unavailable')\n cmd = self.get_verify_command(signature_filename, data_filename,\n keystore)\n rc, stdout, stderr = self.run_command(cmd)\n if rc not in (0, 1):\n raise DistlibException('verify command failed with error code %s' % rc)\n return rc == 0\n\n def download_file(self, url, destfile, digest=None, reporthook=None):\n """\n This is a convenience method for downloading a file from an URL.\n Normally, this will be a file from the index, though currently\n no check is made for this (i.e. a file can be downloaded from\n anywhere).\n\n The method is just like the :func:`urlretrieve` function in the\n standard library, except that it allows digest computation to be\n done during download and checking that the downloaded data\n matched any expected value.\n\n :param url: The URL of the file to be downloaded (assumed to be\n available via an HTTP GET request).\n :param destfile: The pathname where the downloaded file is to be\n saved.\n :param digest: If specified, this must be a (hasher, value)\n tuple, where hasher is the algorithm used (e.g.\n ``'md5'``) and ``value`` is the expected value.\n :param reporthook: The same as for :func:`urlretrieve` in the\n standard library.\n """\n if digest is None:\n digester = None\n logger.debug('No digest specified')\n else:\n if isinstance(digest, (list, tuple)):\n hasher, digest = digest\n else:\n hasher = 'md5'\n digester = getattr(hashlib, hasher)()\n logger.debug('Digest specified: %s' % digest)\n # The following code is equivalent to urlretrieve.\n # We need to do it this way so that we can compute the\n # digest of the file as we go.\n with open(destfile, 'wb') as dfp:\n # addinfourl is not a context manager on 2.x\n # so we have to use try/finally\n sfp = self.send_request(Request(url))\n 
try:\n headers = sfp.info()\n blocksize = 8192\n size = -1\n read = 0\n blocknum = 0\n if "content-length" in headers:\n size = int(headers["Content-Length"])\n if reporthook:\n reporthook(blocknum, blocksize, size)\n while True:\n block = sfp.read(blocksize)\n if not block:\n break\n read += len(block)\n dfp.write(block)\n if digester:\n digester.update(block)\n blocknum += 1\n if reporthook:\n reporthook(blocknum, blocksize, size)\n finally:\n sfp.close()\n\n # check that we got the whole file, if we can\n if size >= 0 and read < size:\n raise DistlibException(\n 'retrieval incomplete: got only %d out of %d bytes'\n % (read, size))\n # if we have a digest, it must match.\n if digester:\n actual = digester.hexdigest()\n if digest != actual:\n raise DistlibException('%s digest mismatch for %s: expected '\n '%s, got %s' % (hasher, destfile,\n digest, actual))\n logger.debug('Digest verified: %s', digest)\n\n def send_request(self, req):\n """\n Send a standard library :class:`Request` to PyPI and return its\n response.\n\n :param req: The request to send.\n :return: The HTTP response from PyPI (a standard library HTTPResponse).\n """\n handlers = []\n if self.password_handler:\n handlers.append(self.password_handler)\n if self.ssl_verifier:\n handlers.append(self.ssl_verifier)\n opener = build_opener(*handlers)\n return opener.open(req)\n\n def encode_request(self, fields, files):\n """\n Encode fields and files for posting to an HTTP server.\n\n :param fields: The fields to send as a list of (fieldname, value)\n tuples.\n :param files: The files to send as a list of (fieldname, filename,\n file_bytes) tuple.\n """\n # Adapted from packaging, which in turn was adapted from\n # http://code.activestate.com/recipes/146306\n\n parts = []\n boundary = self.boundary\n for k, values in fields:\n if not isinstance(values, (list, tuple)):\n values = [values]\n\n for v in values:\n parts.extend((\n b'--' + boundary,\n ('Content-Disposition: form-data; name="%s"' %\n 
k).encode('utf-8'),\n b'',\n v.encode('utf-8')))\n for key, filename, value in files:\n parts.extend((\n b'--' + boundary,\n ('Content-Disposition: form-data; name="%s"; filename="%s"' %\n (key, filename)).encode('utf-8'),\n b'',\n value))\n\n parts.extend((b'--' + boundary + b'--', b''))\n\n body = b'\r\n'.join(parts)\n ct = b'multipart/form-data; boundary=' + boundary\n headers = {\n 'Content-type': ct,\n 'Content-length': str(len(body))\n }\n return Request(self.url, body, headers)\n\n def search(self, terms, operator=None): # pragma: no cover\n if isinstance(terms, string_types):\n terms = {'name': terms}\n rpc_proxy = ServerProxy(self.url, timeout=3.0)\n try:\n return rpc_proxy.search(terms, operator or 'and')\n finally:\n rpc_proxy('close')()\n
.venv\Lib\site-packages\pip\_vendor\distlib\index.py
index.py
Python
20,797
0.95
0.194882
0.040948
python-kit
906
2024-12-21T07:24:55.959738
Apache-2.0
false
f06ac4e48dd45cc33fc3a283c4335658
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012-2023 Vinay Sajip.\n# Licensed to the Python Software Foundation under a contributor agreement.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\n\nimport gzip\nfrom io import BytesIO\nimport json\nimport logging\nimport os\nimport posixpath\nimport re\ntry:\n import threading\nexcept ImportError: # pragma: no cover\n import dummy_threading as threading\nimport zlib\n\nfrom . import DistlibException\nfrom .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, queue, quote, unescape, build_opener,\n HTTPRedirectHandler as BaseRedirectHandler, text_type, Request, HTTPError, URLError)\nfrom .database import Distribution, DistributionPath, make_dist\nfrom .metadata import Metadata, MetadataInvalidError\nfrom .util import (cached_property, ensure_slash, split_filename, get_project_data, parse_requirement,\n parse_name_and_version, ServerProxy, normalize_name)\nfrom .version import get_scheme, UnsupportedVersionError\nfrom .wheel import Wheel, is_compatible\n\nlogger = logging.getLogger(__name__)\n\nHASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')\nCHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)\nHTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')\nDEFAULT_INDEX = 'https://pypi.org/pypi'\n\n\ndef get_all_distribution_names(url=None):\n """\n Return all distribution names known by an index.\n :param url: The URL of the index.\n :return: A list of all known distribution names.\n """\n if url is None:\n url = DEFAULT_INDEX\n client = ServerProxy(url, timeout=3.0)\n try:\n return client.list_packages()\n finally:\n client('close')()\n\n\nclass RedirectHandler(BaseRedirectHandler):\n """\n A class to work around a bug in some Python 3.2.x releases.\n """\n\n # There's a bug in the base version for some 3.2.x\n # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header\n # returns e.g. 
/abc, it bails because it says the scheme ''\n # is bogus, when actually it should use the request's\n # URL for the scheme. See Python issue #13696.\n def http_error_302(self, req, fp, code, msg, headers):\n # Some servers (incorrectly) return multiple Location headers\n # (so probably same goes for URI). Use first header.\n newurl = None\n for key in ('location', 'uri'):\n if key in headers:\n newurl = headers[key]\n break\n if newurl is None: # pragma: no cover\n return\n urlparts = urlparse(newurl)\n if urlparts.scheme == '':\n newurl = urljoin(req.get_full_url(), newurl)\n if hasattr(headers, 'replace_header'):\n headers.replace_header(key, newurl)\n else:\n headers[key] = newurl\n return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, headers)\n\n http_error_301 = http_error_303 = http_error_307 = http_error_302\n\n\nclass Locator(object):\n """\n A base class for locators - things that locate distributions.\n """\n source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')\n binary_extensions = ('.egg', '.exe', '.whl')\n excluded_extensions = ('.pdf', )\n\n # A list of tags indicating which wheels you want to match. The default\n # value of None matches against the tags compatible with the running\n # Python. If you want to match other values, set wheel_tags on a locator\n # instance to a list of tuples (pyver, abi, arch) which you want to match.\n wheel_tags = None\n\n downloadable_extensions = source_extensions + ('.whl', )\n\n def __init__(self, scheme='default'):\n """\n Initialise an instance.\n :param scheme: Because locators look for most recent versions, they\n need to know the version scheme to use. 
This specifies\n the current PEP-recommended scheme - use ``'legacy'``\n if you need to support existing distributions on PyPI.\n """\n self._cache = {}\n self.scheme = scheme\n # Because of bugs in some of the handlers on some of the platforms,\n # we use our own opener rather than just using urlopen.\n self.opener = build_opener(RedirectHandler())\n # If get_project() is called from locate(), the matcher instance\n # is set from the requirement passed to locate(). See issue #18 for\n # why this can be useful to know.\n self.matcher = None\n self.errors = queue.Queue()\n\n def get_errors(self):\n """\n Return any errors which have occurred.\n """\n result = []\n while not self.errors.empty(): # pragma: no cover\n try:\n e = self.errors.get(False)\n result.append(e)\n except self.errors.Empty:\n continue\n self.errors.task_done()\n return result\n\n def clear_errors(self):\n """\n Clear any errors which may have been logged.\n """\n # Just get the errors and throw them away\n self.get_errors()\n\n def clear_cache(self):\n self._cache.clear()\n\n def _get_scheme(self):\n return self._scheme\n\n def _set_scheme(self, value):\n self._scheme = value\n\n scheme = property(_get_scheme, _set_scheme)\n\n def _get_project(self, name):\n """\n For a given project, get a dictionary mapping available versions to Distribution\n instances.\n\n This should be implemented in subclasses.\n\n If called from a locate() request, self.matcher will be set to a\n matcher for the requirement to satisfy, otherwise it will be None.\n """\n raise NotImplementedError('Please implement in the subclass')\n\n def get_distribution_names(self):\n """\n Return all the distribution names known to this locator.\n """\n raise NotImplementedError('Please implement in the subclass')\n\n def get_project(self, name):\n """\n For a given project, get a dictionary mapping available versions to Distribution\n instances.\n\n This calls _get_project to do all the work, and just implements a caching layer on 
top.\n """\n if self._cache is None: # pragma: no cover\n result = self._get_project(name)\n elif name in self._cache:\n result = self._cache[name]\n else:\n self.clear_errors()\n result = self._get_project(name)\n self._cache[name] = result\n return result\n\n def score_url(self, url):\n """\n Give an url a score which can be used to choose preferred URLs\n for a given project release.\n """\n t = urlparse(url)\n basename = posixpath.basename(t.path)\n compatible = True\n is_wheel = basename.endswith('.whl')\n is_downloadable = basename.endswith(self.downloadable_extensions)\n if is_wheel:\n compatible = is_compatible(Wheel(basename), self.wheel_tags)\n return (t.scheme == 'https', 'pypi.org' in t.netloc, is_downloadable, is_wheel, compatible, basename)\n\n def prefer_url(self, url1, url2):\n """\n Choose one of two URLs where both are candidates for distribution\n archives for the same version of a distribution (for example,\n .tar.gz vs. zip).\n\n The current implementation favours https:// URLs over http://, archives\n from PyPI over those from other locations, wheel compatibility (if a\n wheel) and then the archive name.\n """\n result = url2\n if url1:\n s1 = self.score_url(url1)\n s2 = self.score_url(url2)\n if s1 > s2:\n result = url1\n if result != url2:\n logger.debug('Not replacing %r with %r', url1, url2)\n else:\n logger.debug('Replacing %r with %r', url1, url2)\n return result\n\n def split_filename(self, filename, project_name):\n """\n Attempt to split a filename in project name, version and Python version.\n """\n return split_filename(filename, project_name)\n\n def convert_url_to_download_info(self, url, project_name):\n """\n See if a URL is a candidate for a download URL for a project (the URL\n has typically been scraped from an HTML page).\n\n If it is, a dictionary is returned with keys "name", "version",\n "filename" and "url"; otherwise, None is returned.\n """\n\n def same_project(name1, name2):\n return normalize_name(name1) == 
normalize_name(name2)\n\n result = None\n scheme, netloc, path, params, query, frag = urlparse(url)\n if frag.lower().startswith('egg='): # pragma: no cover\n logger.debug('%s: version hint in fragment: %r', project_name, frag)\n m = HASHER_HASH.match(frag)\n if m:\n algo, digest = m.groups()\n else:\n algo, digest = None, None\n origpath = path\n if path and path[-1] == '/': # pragma: no cover\n path = path[:-1]\n if path.endswith('.whl'):\n try:\n wheel = Wheel(path)\n if not is_compatible(wheel, self.wheel_tags):\n logger.debug('Wheel not compatible: %s', path)\n else:\n if project_name is None:\n include = True\n else:\n include = same_project(wheel.name, project_name)\n if include:\n result = {\n 'name': wheel.name,\n 'version': wheel.version,\n 'filename': wheel.filename,\n 'url': urlunparse((scheme, netloc, origpath, params, query, '')),\n 'python-version': ', '.join(['.'.join(list(v[2:])) for v in wheel.pyver]),\n }\n except Exception: # pragma: no cover\n logger.warning('invalid path for wheel: %s', path)\n elif not path.endswith(self.downloadable_extensions): # pragma: no cover\n logger.debug('Not downloadable: %s', path)\n else: # downloadable extension\n path = filename = posixpath.basename(path)\n for ext in self.downloadable_extensions:\n if path.endswith(ext):\n path = path[:-len(ext)]\n t = self.split_filename(path, project_name)\n if not t: # pragma: no cover\n logger.debug('No match for project/version: %s', path)\n else:\n name, version, pyver = t\n if not project_name or same_project(project_name, name):\n result = {\n 'name': name,\n 'version': version,\n 'filename': filename,\n 'url': urlunparse((scheme, netloc, origpath, params, query, '')),\n }\n if pyver: # pragma: no cover\n result['python-version'] = pyver\n break\n if result and algo:\n result['%s_digest' % algo] = digest\n return result\n\n def _get_digest(self, info):\n """\n Get a digest from a dictionary by looking at a "digests" dictionary\n or keys of the form 'algo_digest'.\n\n 
Returns a 2-tuple (algo, digest) if found, else None. Currently\n looks only for SHA256, then MD5.\n """\n result = None\n if 'digests' in info:\n digests = info['digests']\n for algo in ('sha256', 'md5'):\n if algo in digests:\n result = (algo, digests[algo])\n break\n if not result:\n for algo in ('sha256', 'md5'):\n key = '%s_digest' % algo\n if key in info:\n result = (algo, info[key])\n break\n return result\n\n def _update_version_data(self, result, info):\n """\n Update a result dictionary (the final result from _get_project) with a\n dictionary for a specific version, which typically holds information\n gleaned from a filename or URL for an archive for the distribution.\n """\n name = info.pop('name')\n version = info.pop('version')\n if version in result:\n dist = result[version]\n md = dist.metadata\n else:\n dist = make_dist(name, version, scheme=self.scheme)\n md = dist.metadata\n dist.digest = digest = self._get_digest(info)\n url = info['url']\n result['digests'][url] = digest\n if md.source_url != info['url']:\n md.source_url = self.prefer_url(md.source_url, url)\n result['urls'].setdefault(version, set()).add(url)\n dist.locator = self\n result[version] = dist\n\n def locate(self, requirement, prereleases=False):\n """\n Find the most recent distribution which matches the given\n requirement.\n\n :param requirement: A requirement of the form 'foo (1.0)' or perhaps\n 'foo (>= 1.0, < 2.0, != 1.3)'\n :param prereleases: If ``True``, allow pre-release versions\n to be located. 
Otherwise, pre-release versions\n are not returned.\n :return: A :class:`Distribution` instance, or ``None`` if no such\n distribution could be located.\n """\n result = None\n r = parse_requirement(requirement)\n if r is None: # pragma: no cover\n raise DistlibException('Not a valid requirement: %r' % requirement)\n scheme = get_scheme(self.scheme)\n self.matcher = matcher = scheme.matcher(r.requirement)\n logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)\n versions = self.get_project(r.name)\n if len(versions) > 2: # urls and digests keys are present\n # sometimes, versions are invalid\n slist = []\n vcls = matcher.version_class\n for k in versions:\n if k in ('urls', 'digests'):\n continue\n try:\n if not matcher.match(k):\n pass # logger.debug('%s did not match %r', matcher, k)\n else:\n if prereleases or not vcls(k).is_prerelease:\n slist.append(k)\n except Exception: # pragma: no cover\n logger.warning('error matching %s with %r', matcher, k)\n pass # slist.append(k)\n if len(slist) > 1:\n slist = sorted(slist, key=scheme.key)\n if slist:\n logger.debug('sorted list: %s', slist)\n version = slist[-1]\n result = versions[version]\n if result:\n if r.extras:\n result.extras = r.extras\n result.download_urls = versions.get('urls', {}).get(version, set())\n d = {}\n sd = versions.get('digests', {})\n for url in result.download_urls:\n if url in sd: # pragma: no cover\n d[url] = sd[url]\n result.digests = d\n self.matcher = None\n return result\n\n\nclass PyPIRPCLocator(Locator):\n """\n This locator uses XML-RPC to locate distributions. 
It therefore\n cannot be used with simple mirrors (that only mirror file content).\n """\n\n def __init__(self, url, **kwargs):\n """\n Initialise an instance.\n\n :param url: The URL to use for XML-RPC.\n :param kwargs: Passed to the superclass constructor.\n """\n super(PyPIRPCLocator, self).__init__(**kwargs)\n self.base_url = url\n self.client = ServerProxy(url, timeout=3.0)\n\n def get_distribution_names(self):\n """\n Return all the distribution names known to this locator.\n """\n return set(self.client.list_packages())\n\n def _get_project(self, name):\n result = {'urls': {}, 'digests': {}}\n versions = self.client.package_releases(name, True)\n for v in versions:\n urls = self.client.release_urls(name, v)\n data = self.client.release_data(name, v)\n metadata = Metadata(scheme=self.scheme)\n metadata.name = data['name']\n metadata.version = data['version']\n metadata.license = data.get('license')\n metadata.keywords = data.get('keywords', [])\n metadata.summary = data.get('summary')\n dist = Distribution(metadata)\n if urls:\n info = urls[0]\n metadata.source_url = info['url']\n dist.digest = self._get_digest(info)\n dist.locator = self\n result[v] = dist\n for info in urls:\n url = info['url']\n digest = self._get_digest(info)\n result['urls'].setdefault(v, set()).add(url)\n result['digests'][url] = digest\n return result\n\n\nclass PyPIJSONLocator(Locator):\n """\n This locator uses PyPI's JSON interface. 
It's very limited in functionality\n and probably not worth using.\n """\n\n def __init__(self, url, **kwargs):\n super(PyPIJSONLocator, self).__init__(**kwargs)\n self.base_url = ensure_slash(url)\n\n def get_distribution_names(self):\n """\n Return all the distribution names known to this locator.\n """\n raise NotImplementedError('Not available from this locator')\n\n def _get_project(self, name):\n result = {'urls': {}, 'digests': {}}\n url = urljoin(self.base_url, '%s/json' % quote(name))\n try:\n resp = self.opener.open(url)\n data = resp.read().decode() # for now\n d = json.loads(data)\n md = Metadata(scheme=self.scheme)\n data = d['info']\n md.name = data['name']\n md.version = data['version']\n md.license = data.get('license')\n md.keywords = data.get('keywords', [])\n md.summary = data.get('summary')\n dist = Distribution(md)\n dist.locator = self\n # urls = d['urls']\n result[md.version] = dist\n for info in d['urls']:\n url = info['url']\n dist.download_urls.add(url)\n dist.digests[url] = self._get_digest(info)\n result['urls'].setdefault(md.version, set()).add(url)\n result['digests'][url] = self._get_digest(info)\n # Now get other releases\n for version, infos in d['releases'].items():\n if version == md.version:\n continue # already done\n omd = Metadata(scheme=self.scheme)\n omd.name = md.name\n omd.version = version\n odist = Distribution(omd)\n odist.locator = self\n result[version] = odist\n for info in infos:\n url = info['url']\n odist.download_urls.add(url)\n odist.digests[url] = self._get_digest(info)\n result['urls'].setdefault(version, set()).add(url)\n result['digests'][url] = self._get_digest(info)\n\n\n# for info in urls:\n# md.source_url = info['url']\n# dist.digest = self._get_digest(info)\n# dist.locator = self\n# for info in urls:\n# url = info['url']\n# result['urls'].setdefault(md.version, set()).add(url)\n# result['digests'][url] = self._get_digest(info)\n except Exception as e:\n self.errors.put(text_type(e))\n 
logger.exception('JSON fetch failed: %s', e)\n return result\n\n\nclass Page(object):\n """\n This class represents a scraped HTML page.\n """\n # The following slightly hairy-looking regex just looks for the contents of\n # an anchor link, which has an attribute "href" either immediately preceded\n # or immediately followed by a "rel" attribute. The attribute values can be\n # declared with double quotes, single quotes or no quotes - which leads to\n # the length of the expression.\n _href = re.compile(\n """\n(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?\nhref\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))\n(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?\n""", re.I | re.S | re.X)\n _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)\n\n def __init__(self, data, url):\n """\n Initialise an instance with the Unicode page contents and the URL they\n came from.\n """\n self.data = data\n self.base_url = self.url = url\n m = self._base.search(self.data)\n if m:\n self.base_url = m.group(1)\n\n _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)\n\n @cached_property\n def links(self):\n """\n Return the URLs of all the links on a page together with information\n about their "rel" attribute, for determining which ones to treat as\n downloads and which ones to queue for further scraping.\n """\n\n def clean(url):\n "Tidy up an URL."\n scheme, netloc, path, params, query, frag = urlparse(url)\n return urlunparse((scheme, netloc, quote(path), params, query, frag))\n\n result = set()\n for match in self._href.finditer(self.data):\n d = match.groupdict('')\n rel = (d['rel1'] or d['rel2'] or d['rel3'] or d['rel4'] or d['rel5'] or d['rel6'])\n url = d['url1'] or d['url2'] or d['url3']\n url = urljoin(self.base_url, url)\n url = unescape(url)\n url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)\n result.add((url, rel))\n # We sort the result, 
hoping to bring the most recent versions\n # to the front\n result = sorted(result, key=lambda t: t[0], reverse=True)\n return result\n\n\nclass SimpleScrapingLocator(Locator):\n """\n A locator which scrapes HTML pages to locate downloads for a distribution.\n This runs multiple threads to do the I/O; performance is at least as good\n as pip's PackageFinder, which works in an analogous fashion.\n """\n\n # These are used to deal with various Content-Encoding schemes.\n decoders = {\n 'deflate': zlib.decompress,\n 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),\n 'none': lambda b: b,\n }\n\n def __init__(self, url, timeout=None, num_workers=10, **kwargs):\n """\n Initialise an instance.\n :param url: The root URL to use for scraping.\n :param timeout: The timeout, in seconds, to be applied to requests.\n This defaults to ``None`` (no timeout specified).\n :param num_workers: The number of worker threads you want to do I/O,\n This defaults to 10.\n :param kwargs: Passed to the superclass.\n """\n super(SimpleScrapingLocator, self).__init__(**kwargs)\n self.base_url = ensure_slash(url)\n self.timeout = timeout\n self._page_cache = {}\n self._seen = set()\n self._to_fetch = queue.Queue()\n self._bad_hosts = set()\n self.skip_externals = False\n self.num_workers = num_workers\n self._lock = threading.RLock()\n # See issue #45: we need to be resilient when the locator is used\n # in a thread, e.g. with concurrent.futures. We can't use self._lock\n # as it is for coordinating our internal threads - the ones created\n # in _prepare_threads.\n self._gplock = threading.RLock()\n self.platform_check = False # See issue #112\n\n def _prepare_threads(self):\n """\n Threads are created only when get_project is called, and terminate\n before it returns. 
They are there primarily to parallelise I/O (i.e.\n fetching web pages).\n """\n self._threads = []\n for i in range(self.num_workers):\n t = threading.Thread(target=self._fetch)\n t.daemon = True\n t.start()\n self._threads.append(t)\n\n def _wait_threads(self):\n """\n Tell all the threads to terminate (by sending a sentinel value) and\n wait for them to do so.\n """\n # Note that you need two loops, since you can't say which\n # thread will get each sentinel\n for t in self._threads:\n self._to_fetch.put(None) # sentinel\n for t in self._threads:\n t.join()\n self._threads = []\n\n def _get_project(self, name):\n result = {'urls': {}, 'digests': {}}\n with self._gplock:\n self.result = result\n self.project_name = name\n url = urljoin(self.base_url, '%s/' % quote(name))\n self._seen.clear()\n self._page_cache.clear()\n self._prepare_threads()\n try:\n logger.debug('Queueing %s', url)\n self._to_fetch.put(url)\n self._to_fetch.join()\n finally:\n self._wait_threads()\n del self.result\n return result\n\n platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'\n r'win(32|_amd64)|macosx_?\d+)\b', re.I)\n\n def _is_platform_dependent(self, url):\n """\n Does an URL refer to a platform-specific download?\n """\n return self.platform_dependent.search(url)\n\n def _process_download(self, url):\n """\n See if an URL is a suitable download for a project.\n\n If it is, register information in the result dictionary (for\n _get_project) about the specific version it's for.\n\n Note that the return value isn't actually used other than as a boolean\n value.\n """\n if self.platform_check and self._is_platform_dependent(url):\n info = None\n else:\n info = self.convert_url_to_download_info(url, self.project_name)\n logger.debug('process_download: %s -> %s', url, info)\n if info:\n with self._lock: # needed because self.result is shared\n self._update_version_data(self.result, info)\n return info\n\n def _should_queue(self, link, referrer, rel):\n """\n Determine 
whether a link URL from a referring page and with a\n particular "rel" attribute should be queued for scraping.\n """\n scheme, netloc, path, _, _, _ = urlparse(link)\n if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions):\n result = False\n elif self.skip_externals and not link.startswith(self.base_url):\n result = False\n elif not referrer.startswith(self.base_url):\n result = False\n elif rel not in ('homepage', 'download'):\n result = False\n elif scheme not in ('http', 'https', 'ftp'):\n result = False\n elif self._is_platform_dependent(link):\n result = False\n else:\n host = netloc.split(':', 1)[0]\n if host.lower() == 'localhost':\n result = False\n else:\n result = True\n logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result)\n return result\n\n def _fetch(self):\n """\n Get a URL to fetch from the work queue, get the HTML page, examine its\n links for download candidates and candidates for further scraping.\n\n This is a handy method to run in a thread.\n """\n while True:\n url = self._to_fetch.get()\n try:\n if url:\n page = self.get_page(url)\n if page is None: # e.g. after an error\n continue\n for link, rel in page.links:\n if link not in self._seen:\n try:\n self._seen.add(link)\n if (not self._process_download(link) and self._should_queue(link, url, rel)):\n logger.debug('Queueing %s from %s', link, url)\n self._to_fetch.put(link)\n except MetadataInvalidError: # e.g. invalid versions\n pass\n except Exception as e: # pragma: no cover\n self.errors.put(text_type(e))\n finally:\n # always do this, to avoid hangs :-)\n self._to_fetch.task_done()\n if not url:\n # logger.debug('Sentinel seen, quitting.')\n break\n\n def get_page(self, url):\n """\n Get the HTML for an URL, possibly from an in-memory cache.\n\n XXX TODO Note: this cache is never actually cleared. 
It's assumed that\n the data won't get stale over the lifetime of a locator instance (not\n necessarily true for the default_locator).\n """\n # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api\n scheme, netloc, path, _, _, _ = urlparse(url)\n if scheme == 'file' and os.path.isdir(url2pathname(path)):\n url = urljoin(ensure_slash(url), 'index.html')\n\n if url in self._page_cache:\n result = self._page_cache[url]\n logger.debug('Returning %s from cache: %s', url, result)\n else:\n host = netloc.split(':', 1)[0]\n result = None\n if host in self._bad_hosts:\n logger.debug('Skipping %s due to bad host %s', url, host)\n else:\n req = Request(url, headers={'Accept-encoding': 'identity'})\n try:\n logger.debug('Fetching %s', url)\n resp = self.opener.open(req, timeout=self.timeout)\n logger.debug('Fetched %s', url)\n headers = resp.info()\n content_type = headers.get('Content-Type', '')\n if HTML_CONTENT_TYPE.match(content_type):\n final_url = resp.geturl()\n data = resp.read()\n encoding = headers.get('Content-Encoding')\n if encoding:\n decoder = self.decoders[encoding] # fail if not found\n data = decoder(data)\n encoding = 'utf-8'\n m = CHARSET.search(content_type)\n if m:\n encoding = m.group(1)\n try:\n data = data.decode(encoding)\n except UnicodeError: # pragma: no cover\n data = data.decode('latin-1') # fallback\n result = Page(data, final_url)\n self._page_cache[final_url] = result\n except HTTPError as e:\n if e.code != 404:\n logger.exception('Fetch failed: %s: %s', url, e)\n except URLError as e: # pragma: no cover\n logger.exception('Fetch failed: %s: %s', url, e)\n with self._lock:\n self._bad_hosts.add(host)\n except Exception as e: # pragma: no cover\n logger.exception('Fetch failed: %s: %s', url, e)\n finally:\n self._page_cache[url] = result # even if None (failure)\n return result\n\n _distname_re = re.compile('<a href=[^>]*>([^<]+)<')\n\n def get_distribution_names(self):\n """\n Return all the distribution names known to this 
locator.\n """\n result = set()\n page = self.get_page(self.base_url)\n if not page:\n raise DistlibException('Unable to get %s' % self.base_url)\n for match in self._distname_re.finditer(page.data):\n result.add(match.group(1))\n return result\n\n\nclass DirectoryLocator(Locator):\n """\n This class locates distributions in a directory tree.\n """\n\n def __init__(self, path, **kwargs):\n """\n Initialise an instance.\n :param path: The root of the directory tree to search.\n :param kwargs: Passed to the superclass constructor,\n except for:\n * recursive - if True (the default), subdirectories are\n recursed into. If False, only the top-level directory\n is searched,\n """\n self.recursive = kwargs.pop('recursive', True)\n super(DirectoryLocator, self).__init__(**kwargs)\n path = os.path.abspath(path)\n if not os.path.isdir(path): # pragma: no cover\n raise DistlibException('Not a directory: %r' % path)\n self.base_dir = path\n\n def should_include(self, filename, parent):\n """\n Should a filename be considered as a candidate for a distribution\n archive? 
As well as the filename, the directory which contains it\n is provided, though not used by the current implementation.\n """\n return filename.endswith(self.downloadable_extensions)\n\n def _get_project(self, name):\n result = {'urls': {}, 'digests': {}}\n for root, dirs, files in os.walk(self.base_dir):\n for fn in files:\n if self.should_include(fn, root):\n fn = os.path.join(root, fn)\n url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', ''))\n info = self.convert_url_to_download_info(url, name)\n if info:\n self._update_version_data(result, info)\n if not self.recursive:\n break\n return result\n\n def get_distribution_names(self):\n """\n Return all the distribution names known to this locator.\n """\n result = set()\n for root, dirs, files in os.walk(self.base_dir):\n for fn in files:\n if self.should_include(fn, root):\n fn = os.path.join(root, fn)\n url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', ''))\n info = self.convert_url_to_download_info(url, None)\n if info:\n result.add(info['name'])\n if not self.recursive:\n break\n return result\n\n\nclass JSONLocator(Locator):\n """\n This locator uses special extended metadata (not available on PyPI) and is\n the basis of performant dependency resolution in distlib. Other locators\n require archive downloads before dependencies can be determined! 
As you\n might imagine, that can be slow.\n """\n\n def get_distribution_names(self):\n """\n Return all the distribution names known to this locator.\n """\n raise NotImplementedError('Not available from this locator')\n\n def _get_project(self, name):\n result = {'urls': {}, 'digests': {}}\n data = get_project_data(name)\n if data:\n for info in data.get('files', []):\n if info['ptype'] != 'sdist' or info['pyversion'] != 'source':\n continue\n # We don't store summary in project metadata as it makes\n # the data bigger for no benefit during dependency\n # resolution\n dist = make_dist(data['name'],\n info['version'],\n summary=data.get('summary', 'Placeholder for summary'),\n scheme=self.scheme)\n md = dist.metadata\n md.source_url = info['url']\n # TODO SHA256 digest\n if 'digest' in info and info['digest']:\n dist.digest = ('md5', info['digest'])\n md.dependencies = info.get('requirements', {})\n dist.exports = info.get('exports', {})\n result[dist.version] = dist\n result['urls'].setdefault(dist.version, set()).add(info['url'])\n return result\n\n\nclass DistPathLocator(Locator):\n """\n This locator finds installed distributions in a path. 
It can be useful for\n adding to an :class:`AggregatingLocator`.\n """\n\n def __init__(self, distpath, **kwargs):\n """\n Initialise an instance.\n\n :param distpath: A :class:`DistributionPath` instance to search.\n """\n super(DistPathLocator, self).__init__(**kwargs)\n assert isinstance(distpath, DistributionPath)\n self.distpath = distpath\n\n def _get_project(self, name):\n dist = self.distpath.get_distribution(name)\n if dist is None:\n result = {'urls': {}, 'digests': {}}\n else:\n result = {\n dist.version: dist,\n 'urls': {\n dist.version: set([dist.source_url])\n },\n 'digests': {\n dist.version: set([None])\n }\n }\n return result\n\n\nclass AggregatingLocator(Locator):\n """\n This class allows you to chain and/or merge a list of locators.\n """\n\n def __init__(self, *locators, **kwargs):\n """\n Initialise an instance.\n\n :param locators: The list of locators to search.\n :param kwargs: Passed to the superclass constructor,\n except for:\n * merge - if False (the default), the first successful\n search from any of the locators is returned. 
If True,\n the results from all locators are merged (this can be\n slow).\n """\n self.merge = kwargs.pop('merge', False)\n self.locators = locators\n super(AggregatingLocator, self).__init__(**kwargs)\n\n def clear_cache(self):\n super(AggregatingLocator, self).clear_cache()\n for locator in self.locators:\n locator.clear_cache()\n\n def _set_scheme(self, value):\n self._scheme = value\n for locator in self.locators:\n locator.scheme = value\n\n scheme = property(Locator.scheme.fget, _set_scheme)\n\n def _get_project(self, name):\n result = {}\n for locator in self.locators:\n d = locator.get_project(name)\n if d:\n if self.merge:\n files = result.get('urls', {})\n digests = result.get('digests', {})\n # next line could overwrite result['urls'], result['digests']\n result.update(d)\n df = result.get('urls')\n if files and df:\n for k, v in files.items():\n if k in df:\n df[k] |= v\n else:\n df[k] = v\n dd = result.get('digests')\n if digests and dd:\n dd.update(digests)\n else:\n # See issue #18. If any dists are found and we're looking\n # for specific constraints, we only return something if\n # a match is found. For example, if a DirectoryLocator\n # returns just foo (1.0) while we're looking for\n # foo (>= 2.0), we'll pretend there was nothing there so\n # that subsequent locators can be queried. Otherwise we\n # would just return foo (1.0) which would then lead to a\n # failure to find foo (>= 2.0), because other locators\n # weren't searched. 
Note that this only matters when\n # merge=False.\n if self.matcher is None:\n found = True\n else:\n found = False\n for k in d:\n if self.matcher.match(k):\n found = True\n break\n if found:\n result = d\n break\n return result\n\n def get_distribution_names(self):\n """\n Return all the distribution names known to this locator.\n """\n result = set()\n for locator in self.locators:\n try:\n result |= locator.get_distribution_names()\n except NotImplementedError:\n pass\n return result\n\n\n# We use a legacy scheme simply because most of the dists on PyPI use legacy\n# versions which don't conform to PEP 440.\ndefault_locator = AggregatingLocator(\n # JSONLocator(), # don't use as PEP 426 is withdrawn\n SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0),\n scheme='legacy')\n\nlocate = default_locator.locate\n\n\nclass DependencyFinder(object):\n """\n Locate dependencies for distributions.\n """\n\n def __init__(self, locator=None):\n """\n Initialise an instance, using the specified locator\n to locate distributions.\n """\n self.locator = locator or default_locator\n self.scheme = get_scheme(self.locator.scheme)\n\n def add_distribution(self, dist):\n """\n Add a distribution to the finder. This will update internal information\n about who provides what.\n :param dist: The distribution to add.\n """\n logger.debug('adding distribution %s', dist)\n name = dist.key\n self.dists_by_name[name] = dist\n self.dists[(name, dist.version)] = dist\n for p in dist.provides:\n name, version = parse_name_and_version(p)\n logger.debug('Add to provided: %s, %s, %s', name, version, dist)\n self.provided.setdefault(name, set()).add((version, dist))\n\n def remove_distribution(self, dist):\n """\n Remove a distribution from the finder. 
This will update internal\n information about who provides what.\n :param dist: The distribution to remove.\n """\n logger.debug('removing distribution %s', dist)\n name = dist.key\n del self.dists_by_name[name]\n del self.dists[(name, dist.version)]\n for p in dist.provides:\n name, version = parse_name_and_version(p)\n logger.debug('Remove from provided: %s, %s, %s', name, version, dist)\n s = self.provided[name]\n s.remove((version, dist))\n if not s:\n del self.provided[name]\n\n def get_matcher(self, reqt):\n """\n Get a version matcher for a requirement.\n :param reqt: The requirement\n :type reqt: str\n :return: A version matcher (an instance of\n :class:`distlib.version.Matcher`).\n """\n try:\n matcher = self.scheme.matcher(reqt)\n except UnsupportedVersionError: # pragma: no cover\n # XXX compat-mode if cannot read the version\n name = reqt.split()[0]\n matcher = self.scheme.matcher(name)\n return matcher\n\n def find_providers(self, reqt):\n """\n Find the distributions which can fulfill a requirement.\n\n :param reqt: The requirement.\n :type reqt: str\n :return: A set of distribution which can fulfill the requirement.\n """\n matcher = self.get_matcher(reqt)\n name = matcher.key # case-insensitive\n result = set()\n provided = self.provided\n if name in provided:\n for version, provider in provided[name]:\n try:\n match = matcher.match(version)\n except UnsupportedVersionError:\n match = False\n\n if match:\n result.add(provider)\n break\n return result\n\n def try_to_replace(self, provider, other, problems):\n """\n Attempt to replace one provider with another. This is typically used\n when resolving dependencies from multiple sources, e.g. 
A requires\n (B >= 1.0) while C requires (B >= 1.1).\n\n For successful replacement, ``provider`` must meet all the requirements\n which ``other`` fulfills.\n\n :param provider: The provider we are trying to replace with.\n :param other: The provider we're trying to replace.\n :param problems: If False is returned, this will contain what\n problems prevented replacement. This is currently\n a tuple of the literal string 'cantreplace',\n ``provider``, ``other`` and the set of requirements\n that ``provider`` couldn't fulfill.\n :return: True if we can replace ``other`` with ``provider``, else\n False.\n """\n rlist = self.reqts[other]\n unmatched = set()\n for s in rlist:\n matcher = self.get_matcher(s)\n if not matcher.match(provider.version):\n unmatched.add(s)\n if unmatched:\n # can't replace other with provider\n problems.add(('cantreplace', provider, other, frozenset(unmatched)))\n result = False\n else:\n # can replace other with provider\n self.remove_distribution(other)\n del self.reqts[other]\n for s in rlist:\n self.reqts.setdefault(provider, set()).add(s)\n self.add_distribution(provider)\n result = True\n return result\n\n def find(self, requirement, meta_extras=None, prereleases=False):\n """\n Find a distribution and all distributions it depends on.\n\n :param requirement: The requirement specifying the distribution to\n find, or a Distribution instance.\n :param meta_extras: A list of meta extras such as :test:, :build: and\n so on.\n :param prereleases: If ``True``, allow pre-release versions to be\n returned - otherwise, don't return prereleases\n unless they're all that's available.\n\n Return a set of :class:`Distribution` instances and a set of\n problems.\n\n The distributions returned should be such that they have the\n :attr:`required` attribute set to ``True`` if they were\n from the ``requirement`` passed to ``find()``, and they have the\n :attr:`build_time_dependency` attribute set to ``True`` unless they\n are post-installation 
dependencies of the ``requirement``.\n\n The problems should be a tuple consisting of the string\n ``'unsatisfied'`` and the requirement which couldn't be satisfied\n by any distribution known to the locator.\n """\n\n self.provided = {}\n self.dists = {}\n self.dists_by_name = {}\n self.reqts = {}\n\n meta_extras = set(meta_extras or [])\n if ':*:' in meta_extras:\n meta_extras.remove(':*:')\n # :meta: and :run: are implicitly included\n meta_extras |= set([':test:', ':build:', ':dev:'])\n\n if isinstance(requirement, Distribution):\n dist = odist = requirement\n logger.debug('passed %s as requirement', odist)\n else:\n dist = odist = self.locator.locate(requirement, prereleases=prereleases)\n if dist is None:\n raise DistlibException('Unable to locate %r' % requirement)\n logger.debug('located %s', odist)\n dist.requested = True\n problems = set()\n todo = set([dist])\n install_dists = set([odist])\n while todo:\n dist = todo.pop()\n name = dist.key # case-insensitive\n if name not in self.dists_by_name:\n self.add_distribution(dist)\n else:\n # import pdb; pdb.set_trace()\n other = self.dists_by_name[name]\n if other != dist:\n self.try_to_replace(dist, other, problems)\n\n ireqts = dist.run_requires | dist.meta_requires\n sreqts = dist.build_requires\n ereqts = set()\n if meta_extras and dist in install_dists:\n for key in ('test', 'build', 'dev'):\n e = ':%s:' % key\n if e in meta_extras:\n ereqts |= getattr(dist, '%s_requires' % key)\n all_reqts = ireqts | sreqts | ereqts\n for r in all_reqts:\n providers = self.find_providers(r)\n if not providers:\n logger.debug('No providers found for %r', r)\n provider = self.locator.locate(r, prereleases=prereleases)\n # If no provider is found and we didn't consider\n # prereleases, consider them now.\n if provider is None and not prereleases:\n provider = self.locator.locate(r, prereleases=True)\n if provider is None:\n logger.debug('Cannot satisfy %r', r)\n problems.add(('unsatisfied', r))\n else:\n n, v = 
provider.key, provider.version\n if (n, v) not in self.dists:\n todo.add(provider)\n providers.add(provider)\n if r in ireqts and dist in install_dists:\n install_dists.add(provider)\n logger.debug('Adding %s to install_dists', provider.name_and_version)\n for p in providers:\n name = p.key\n if name not in self.dists_by_name:\n self.reqts.setdefault(p, set()).add(r)\n else:\n other = self.dists_by_name[name]\n if other != p:\n # see if other can be replaced by p\n self.try_to_replace(p, other, problems)\n\n dists = set(self.dists.values())\n for dist in dists:\n dist.build_time_dependency = dist not in install_dists\n if dist.build_time_dependency:\n logger.debug('%s is a build-time dependency only.', dist.name_and_version)\n logger.debug('find done for %s', odist)\n return dists, problems\n
.venv\Lib\site-packages\pip\_vendor\distlib\locators.py
locators.py
Python
51,026
0.75
0.230888
0.067753
react-lib
469
2023-09-10T00:41:29.723098
MIT
false
05ecd1931d751b701b5ea0ec5a53da6e
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012-2023 Python Software Foundation.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\n"""\nClass representing the list of files in a distribution.\n\nEquivalent to distutils.filelist, but fixes some problems.\n"""\nimport fnmatch\nimport logging\nimport os\nimport re\nimport sys\n\nfrom . import DistlibException\nfrom .compat import fsdecode\nfrom .util import convert_path\n\n\n__all__ = ['Manifest']\n\nlogger = logging.getLogger(__name__)\n\n# a \ followed by some spaces + EOL\n_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M)\n_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)\n\n#\n# Due to the different results returned by fnmatch.translate, we need\n# to do slightly different processing for Python 2.7 and 3.2 ... this needed\n# to be brought in for Python 3.6 onwards.\n#\n_PYTHON_VERSION = sys.version_info[:2]\n\n\nclass Manifest(object):\n """\n A list of files built by exploring the filesystem and filtered by applying various\n patterns to what we find there.\n """\n\n def __init__(self, base=None):\n """\n Initialise an instance.\n\n :param base: The base directory to explore under.\n """\n self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))\n self.prefix = self.base + os.sep\n self.allfiles = None\n self.files = set()\n\n #\n # Public API\n #\n\n def findall(self):\n """Find all files under the base and set ``allfiles`` to the absolute\n pathnames of files found.\n """\n from stat import S_ISREG, S_ISDIR, S_ISLNK\n\n self.allfiles = allfiles = []\n root = self.base\n stack = [root]\n pop = stack.pop\n push = stack.append\n\n while stack:\n root = pop()\n names = os.listdir(root)\n\n for name in names:\n fullname = os.path.join(root, name)\n\n # Avoid excess stat calls -- just one will do, thank you!\n stat = os.stat(fullname)\n mode = stat.st_mode\n if S_ISREG(mode):\n allfiles.append(fsdecode(fullname))\n elif S_ISDIR(mode) and not S_ISLNK(mode):\n push(fullname)\n\n def add(self, 
item):\n """\n Add a file to the manifest.\n\n :param item: The pathname to add. This can be relative to the base.\n """\n if not item.startswith(self.prefix):\n item = os.path.join(self.base, item)\n self.files.add(os.path.normpath(item))\n\n def add_many(self, items):\n """\n Add a list of files to the manifest.\n\n :param items: The pathnames to add. These can be relative to the base.\n """\n for item in items:\n self.add(item)\n\n def sorted(self, wantdirs=False):\n """\n Return sorted files in directory order\n """\n\n def add_dir(dirs, d):\n dirs.add(d)\n logger.debug('add_dir added %s', d)\n if d != self.base:\n parent, _ = os.path.split(d)\n assert parent not in ('', '/')\n add_dir(dirs, parent)\n\n result = set(self.files) # make a copy!\n if wantdirs:\n dirs = set()\n for f in result:\n add_dir(dirs, os.path.dirname(f))\n result |= dirs\n return [os.path.join(*path_tuple) for path_tuple in\n sorted(os.path.split(path) for path in result)]\n\n def clear(self):\n """Clear all collected files."""\n self.files = set()\n self.allfiles = []\n\n def process_directive(self, directive):\n """\n Process a directive which either adds some files from ``allfiles`` to\n ``files``, or removes some files from ``files``.\n\n :param directive: The directive to process. This should be in a format\n compatible with distutils ``MANIFEST.in`` files:\n\n http://docs.python.org/distutils/sourcedist.html#commands\n """\n # Parse the line: split it up, make sure the right number of words\n # is there, and return the relevant words. 'action' is always\n # defined: it's the first word of the line. 
Which of the other\n # three are defined depends on the action; it'll be either\n # patterns, (dir and patterns), or (dirpattern).\n action, patterns, thedir, dirpattern = self._parse_directive(directive)\n\n # OK, now we know that the action is valid and we have the\n # right number of words on the line for that action -- so we\n # can proceed with minimal error-checking.\n if action == 'include':\n for pattern in patterns:\n if not self._include_pattern(pattern, anchor=True):\n logger.warning('no files found matching %r', pattern)\n\n elif action == 'exclude':\n for pattern in patterns:\n self._exclude_pattern(pattern, anchor=True)\n\n elif action == 'global-include':\n for pattern in patterns:\n if not self._include_pattern(pattern, anchor=False):\n logger.warning('no files found matching %r '\n 'anywhere in distribution', pattern)\n\n elif action == 'global-exclude':\n for pattern in patterns:\n self._exclude_pattern(pattern, anchor=False)\n\n elif action == 'recursive-include':\n for pattern in patterns:\n if not self._include_pattern(pattern, prefix=thedir):\n logger.warning('no files found matching %r '\n 'under directory %r', pattern, thedir)\n\n elif action == 'recursive-exclude':\n for pattern in patterns:\n self._exclude_pattern(pattern, prefix=thedir)\n\n elif action == 'graft':\n if not self._include_pattern(None, prefix=dirpattern):\n logger.warning('no directories found matching %r',\n dirpattern)\n\n elif action == 'prune':\n if not self._exclude_pattern(None, prefix=dirpattern):\n logger.warning('no previously-included directories found '\n 'matching %r', dirpattern)\n else: # pragma: no cover\n # This should never happen, as it should be caught in\n # _parse_template_line\n raise DistlibException(\n 'invalid action %r' % action)\n\n #\n # Private API\n #\n\n def _parse_directive(self, directive):\n """\n Validate a directive.\n :param directive: The directive to validate.\n :return: A tuple of action, patterns, thedir, dir_patterns\n """\n words = 
directive.split()\n if len(words) == 1 and words[0] not in ('include', 'exclude',\n 'global-include',\n 'global-exclude',\n 'recursive-include',\n 'recursive-exclude',\n 'graft', 'prune'):\n # no action given, let's use the default 'include'\n words.insert(0, 'include')\n\n action = words[0]\n patterns = thedir = dir_pattern = None\n\n if action in ('include', 'exclude',\n 'global-include', 'global-exclude'):\n if len(words) < 2:\n raise DistlibException(\n '%r expects <pattern1> <pattern2> ...' % action)\n\n patterns = [convert_path(word) for word in words[1:]]\n\n elif action in ('recursive-include', 'recursive-exclude'):\n if len(words) < 3:\n raise DistlibException(\n '%r expects <dir> <pattern1> <pattern2> ...' % action)\n\n thedir = convert_path(words[1])\n patterns = [convert_path(word) for word in words[2:]]\n\n elif action in ('graft', 'prune'):\n if len(words) != 2:\n raise DistlibException(\n '%r expects a single <dir_pattern>' % action)\n\n dir_pattern = convert_path(words[1])\n\n else:\n raise DistlibException('unknown action %r' % action)\n\n return action, patterns, thedir, dir_pattern\n\n def _include_pattern(self, pattern, anchor=True, prefix=None,\n is_regex=False):\n """Select strings (presumably filenames) from 'self.files' that\n match 'pattern', a Unix-style wildcard (glob) pattern.\n\n Patterns are not quite the same as implemented by the 'fnmatch'\n module: '*' and '?' match non-special characters, where "special"\n is platform-dependent: slash on Unix; colon, slash, and backslash on\n DOS/Windows; and colon on Mac OS.\n\n If 'anchor' is true (the default), then the pattern match is more\n stringent: "*.py" will match "foo.py" but not "foo/bar.py". If\n 'anchor' is false, both of these will match.\n\n If 'prefix' is supplied, then only filenames starting with 'prefix'\n (itself a pattern) and ending with 'pattern', with anything in between\n them, will match. 
'anchor' is ignored in this case.\n\n If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and\n 'pattern' is assumed to be either a string containing a regex or a\n regex object -- no translation is done, the regex is just compiled\n and used as-is.\n\n Selected strings will be added to self.files.\n\n Return True if files are found.\n """\n # XXX docstring lying about what the special chars are?\n found = False\n pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)\n\n # delayed loading of allfiles list\n if self.allfiles is None:\n self.findall()\n\n for name in self.allfiles:\n if pattern_re.search(name):\n self.files.add(name)\n found = True\n return found\n\n def _exclude_pattern(self, pattern, anchor=True, prefix=None,\n is_regex=False):\n """Remove strings (presumably filenames) from 'files' that match\n 'pattern'.\n\n Other parameters are the same as for 'include_pattern()', above.\n The list 'self.files' is modified in place. Return True if files are\n found.\n\n This API is public to allow e.g. exclusion of SCM subdirs, e.g. when\n packaging source distributions\n """\n found = False\n pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)\n for f in list(self.files):\n if pattern_re.search(f):\n self.files.remove(f)\n found = True\n return found\n\n def _translate_pattern(self, pattern, anchor=True, prefix=None,\n is_regex=False):\n """Translate a shell-like wildcard pattern to a compiled regular\n expression.\n\n Return the compiled regex. 
If 'is_regex' true,\n then 'pattern' is directly compiled to a regex (if it's a string)\n or just returned as-is (assumes it's a regex object).\n """\n if is_regex:\n if isinstance(pattern, str):\n return re.compile(pattern)\n else:\n return pattern\n\n if _PYTHON_VERSION > (3, 2):\n # ditch start and end characters\n start, _, end = self._glob_to_re('_').partition('_')\n\n if pattern:\n pattern_re = self._glob_to_re(pattern)\n if _PYTHON_VERSION > (3, 2):\n assert pattern_re.startswith(start) and pattern_re.endswith(end)\n else:\n pattern_re = ''\n\n base = re.escape(os.path.join(self.base, ''))\n if prefix is not None:\n # ditch end of pattern character\n if _PYTHON_VERSION <= (3, 2):\n empty_pattern = self._glob_to_re('')\n prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]\n else:\n prefix_re = self._glob_to_re(prefix)\n assert prefix_re.startswith(start) and prefix_re.endswith(end)\n prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]\n sep = os.sep\n if os.sep == '\\':\n sep = r'\\'\n if _PYTHON_VERSION <= (3, 2):\n pattern_re = '^' + base + sep.join((prefix_re,\n '.*' + pattern_re))\n else:\n pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]\n pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,\n pattern_re, end)\n else: # no prefix -- respect anchor flag\n if anchor:\n if _PYTHON_VERSION <= (3, 2):\n pattern_re = '^' + base + pattern_re\n else:\n pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])\n\n return re.compile(pattern_re)\n\n def _glob_to_re(self, pattern):\n """Translate a shell-like glob pattern to a regular expression.\n\n Return a string containing the regex. Differs from\n 'fnmatch.translate()' in that '*' does not match "special characters"\n (which are platform-specific).\n """\n pattern_re = fnmatch.translate(pattern)\n\n # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which\n # IMHO is wrong -- '?' 
and '*' aren't supposed to match slash in Unix,\n # and by extension they shouldn't match such "special characters" under\n # any OS. So change all non-escaped dots in the RE to match any\n # character except the special characters (currently: just os.sep).\n sep = os.sep\n if os.sep == '\\':\n # we're using a regex to manipulate a regex, so we need\n # to escape the backslash twice\n sep = r'\\\\'\n escaped = r'\1[^%s]' % sep\n pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)\n return pattern_re\n
.venv\Lib\site-packages\pip\_vendor\distlib\manifest.py
manifest.py
Python
14,168
0.95
0.174479
0.126582
vue-tools
569
2024-11-22T10:34:32.463087
BSD-3-Clause
false
640a16c56f14f6a23b43fd27e330ef6a
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012-2023 Vinay Sajip.\n# Licensed to the Python Software Foundation under a contributor agreement.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\n"""\nParser for the environment markers micro-language defined in PEP 508.\n"""\n\n# Note: In PEP 345, the micro-language was Python compatible, so the ast\n# module could be used to parse it. However, PEP 508 introduced operators such\n# as ~= and === which aren't in Python, necessitating a different approach.\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom .compat import string_types\nfrom .util import in_venv, parse_marker\nfrom .version import LegacyVersion as LV\n\n__all__ = ['interpret']\n\n_VERSION_PATTERN = re.compile(r'((\d+(\.\d+)*\w*)|\'(\d+(\.\d+)*\w*)\'|\"(\d+(\.\d+)*\w*)\")')\n_VERSION_MARKERS = {'python_version', 'python_full_version'}\n\n\ndef _is_version_marker(s):\n return isinstance(s, string_types) and s in _VERSION_MARKERS\n\n\ndef _is_literal(o):\n if not isinstance(o, string_types) or not o:\n return False\n return o[0] in '\'"'\n\n\ndef _get_versions(s):\n return {LV(m.groups()[0]) for m in _VERSION_PATTERN.finditer(s)}\n\n\nclass Evaluator(object):\n """\n This class is used to evaluate marker expressions.\n """\n\n operations = {\n '==': lambda x, y: x == y,\n '===': lambda x, y: x == y,\n '~=': lambda x, y: x == y or x > y,\n '!=': lambda x, y: x != y,\n '<': lambda x, y: x < y,\n '<=': lambda x, y: x == y or x < y,\n '>': lambda x, y: x > y,\n '>=': lambda x, y: x == y or x > y,\n 'and': lambda x, y: x and y,\n 'or': lambda x, y: x or y,\n 'in': lambda x, y: x in y,\n 'not in': lambda x, y: x not in y,\n }\n\n def evaluate(self, expr, context):\n """\n Evaluate a marker expression returned by the :func:`parse_requirement`\n function in the specified context.\n """\n if isinstance(expr, string_types):\n if expr[0] in '\'"':\n result = expr[1:-1]\n else:\n if expr not in context:\n raise SyntaxError('unknown variable: %s' % expr)\n result = 
context[expr]\n else:\n assert isinstance(expr, dict)\n op = expr['op']\n if op not in self.operations:\n raise NotImplementedError('op not implemented: %s' % op)\n elhs = expr['lhs']\n erhs = expr['rhs']\n if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):\n raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))\n\n lhs = self.evaluate(elhs, context)\n rhs = self.evaluate(erhs, context)\n if ((_is_version_marker(elhs) or _is_version_marker(erhs)) and\n op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):\n lhs = LV(lhs)\n rhs = LV(rhs)\n elif _is_version_marker(elhs) and op in ('in', 'not in'):\n lhs = LV(lhs)\n rhs = _get_versions(rhs)\n result = self.operations[op](lhs, rhs)\n return result\n\n\n_DIGITS = re.compile(r'\d+\.\d+')\n\n\ndef default_context():\n\n def format_full_version(info):\n version = '%s.%s.%s' % (info.major, info.minor, info.micro)\n kind = info.releaselevel\n if kind != 'final':\n version += kind[0] + str(info.serial)\n return version\n\n if hasattr(sys, 'implementation'):\n implementation_version = format_full_version(sys.implementation.version)\n implementation_name = sys.implementation.name\n else:\n implementation_version = '0'\n implementation_name = ''\n\n ppv = platform.python_version()\n m = _DIGITS.match(ppv)\n pv = m.group(0)\n result = {\n 'implementation_name': implementation_name,\n 'implementation_version': implementation_version,\n 'os_name': os.name,\n 'platform_machine': platform.machine(),\n 'platform_python_implementation': platform.python_implementation(),\n 'platform_release': platform.release(),\n 'platform_system': platform.system(),\n 'platform_version': platform.version(),\n 'platform_in_venv': str(in_venv()),\n 'python_full_version': ppv,\n 'python_version': pv,\n 'sys_platform': sys.platform,\n }\n return result\n\n\nDEFAULT_CONTEXT = default_context()\ndel default_context\n\nevaluator = Evaluator()\n\n\ndef interpret(marker, execution_context=None):\n """\n Interpret a marker and return a 
result depending on environment.\n\n :param marker: The marker to interpret.\n :type marker: str\n :param execution_context: The context used for name lookup.\n :type execution_context: mapping\n """\n try:\n expr, rest = parse_marker(marker)\n except Exception as e:\n raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e))\n if rest and rest[0] != '#':\n raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest))\n context = dict(DEFAULT_CONTEXT)\n if execution_context:\n context.update(execution_context)\n return evaluator.evaluate(expr, context)\n
.venv\Lib\site-packages\pip\_vendor\distlib\markers.py
markers.py
Python
5,164
0.95
0.154321
0.067669
vue-tools
62
2025-06-13T10:23:08.318678
Apache-2.0
false
ce4634500dce01103de205608249e409
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012 The Python Software Foundation.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\n"""Implementation of the Metadata for Python packages PEPs.\n\nSupports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and 2.2).\n"""\nfrom __future__ import unicode_literals\n\nimport codecs\nfrom email import message_from_file\nimport json\nimport logging\nimport re\n\nfrom . import DistlibException, __version__\nfrom .compat import StringIO, string_types, text_type\nfrom .markers import interpret\nfrom .util import extract_by_key, get_extras\nfrom .version import get_scheme, PEP440_VERSION_RE\n\nlogger = logging.getLogger(__name__)\n\n\nclass MetadataMissingError(DistlibException):\n """A required metadata is missing"""\n\n\nclass MetadataConflictError(DistlibException):\n """Attempt to read or write metadata fields that are conflictual."""\n\n\nclass MetadataUnrecognizedVersionError(DistlibException):\n """Unknown metadata version number."""\n\n\nclass MetadataInvalidError(DistlibException):\n """A metadata value is invalid"""\n\n\n# public API of this module\n__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']\n\n# Encoding used for the PKG-INFO files\nPKG_INFO_ENCODING = 'utf-8'\n\n# preferred version. 
Hopefully will be changed\n# to 1.2 once PEP 345 is supported everywhere\nPKG_INFO_PREFERRED_VERSION = '1.1'\n\n_LINE_PREFIX_1_2 = re.compile('\n \\|')\n_LINE_PREFIX_PRE_1_2 = re.compile('\n ')\n_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Summary', 'Description', 'Keywords', 'Home-page',\n 'Author', 'Author-email', 'License')\n\n_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description',\n 'Keywords', 'Home-page', 'Author', 'Author-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes',\n 'Provides', 'Requires')\n\n_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', 'Download-URL')\n\n_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description',\n 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License',\n 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist',\n 'Requires-Python', 'Requires-External')\n\n_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Obsoletes-Dist', 'Requires-External',\n 'Maintainer', 'Maintainer-email', 'Project-URL')\n\n_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description',\n 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License',\n 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist',\n 'Requires-Python', 'Requires-External', 'Private-Version', 'Obsoleted-By', 'Setup-Requires-Dist',\n 'Extension', 'Provides-Extra')\n\n_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension')\n\n# See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in\n# the metadata. 
Include them in the tuple literal below to allow them\n# (for now).\n# Ditto for Obsoletes - see issue #140.\n_566_FIELDS = _426_FIELDS + ('Description-Content-Type', 'Requires', 'Provides', 'Obsoletes')\n\n_566_MARKERS = ('Description-Content-Type', )\n\n_643_MARKERS = ('Dynamic', 'License-File')\n\n_643_FIELDS = _566_FIELDS + _643_MARKERS\n\n_ALL_FIELDS = set()\n_ALL_FIELDS.update(_241_FIELDS)\n_ALL_FIELDS.update(_314_FIELDS)\n_ALL_FIELDS.update(_345_FIELDS)\n_ALL_FIELDS.update(_426_FIELDS)\n_ALL_FIELDS.update(_566_FIELDS)\n_ALL_FIELDS.update(_643_FIELDS)\n\nEXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')\n\n\ndef _version2fieldlist(version):\n if version == '1.0':\n return _241_FIELDS\n elif version == '1.1':\n return _314_FIELDS\n elif version == '1.2':\n return _345_FIELDS\n elif version in ('1.3', '2.1'):\n # avoid adding field names if already there\n return _345_FIELDS + tuple(f for f in _566_FIELDS if f not in _345_FIELDS)\n elif version == '2.0':\n raise ValueError('Metadata 2.0 is withdrawn and not supported')\n # return _426_FIELDS\n elif version == '2.2':\n return _643_FIELDS\n raise MetadataUnrecognizedVersionError(version)\n\n\ndef _best_version(fields):\n """Detect the best version depending on the fields used."""\n\n def _has_marker(keys, markers):\n return any(marker in keys for marker in markers)\n\n keys = [key for key, value in fields.items() if value not in ([], 'UNKNOWN', None)]\n possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.1', '2.2'] # 2.0 removed\n\n # first let's try to see if a field is not part of one of the version\n for key in keys:\n if key not in _241_FIELDS and '1.0' in possible_versions:\n possible_versions.remove('1.0')\n logger.debug('Removed 1.0 due to %s', key)\n if key not in _314_FIELDS and '1.1' in possible_versions:\n possible_versions.remove('1.1')\n logger.debug('Removed 1.1 due to %s', key)\n if key not in _345_FIELDS and '1.2' in possible_versions:\n possible_versions.remove('1.2')\n 
logger.debug('Removed 1.2 due to %s', key)\n if key not in _566_FIELDS and '1.3' in possible_versions:\n possible_versions.remove('1.3')\n logger.debug('Removed 1.3 due to %s', key)\n if key not in _566_FIELDS and '2.1' in possible_versions:\n if key != 'Description': # In 2.1, description allowed after headers\n possible_versions.remove('2.1')\n logger.debug('Removed 2.1 due to %s', key)\n if key not in _643_FIELDS and '2.2' in possible_versions:\n possible_versions.remove('2.2')\n logger.debug('Removed 2.2 due to %s', key)\n # if key not in _426_FIELDS and '2.0' in possible_versions:\n # possible_versions.remove('2.0')\n # logger.debug('Removed 2.0 due to %s', key)\n\n # possible_version contains qualified versions\n if len(possible_versions) == 1:\n return possible_versions[0] # found !\n elif len(possible_versions) == 0:\n logger.debug('Out of options - unknown metadata set: %s', fields)\n raise MetadataConflictError('Unknown metadata set')\n\n # let's see if one unique marker is found\n is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)\n is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)\n is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS)\n # is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)\n is_2_2 = '2.2' in possible_versions and _has_marker(keys, _643_MARKERS)\n if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_2) > 1:\n raise MetadataConflictError('You used incompatible 1.1/1.2/2.1/2.2 fields')\n\n # we have the choice, 1.0, or 1.2, 2.1 or 2.2\n # - 1.0 has a broken Summary field but works with all tools\n # - 1.1 is to avoid\n # - 1.2 fixes Summary but has little adoption\n # - 2.1 adds more features\n # - 2.2 is the latest\n if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_2:\n # we couldn't find any specific marker\n if PKG_INFO_PREFERRED_VERSION in possible_versions:\n return PKG_INFO_PREFERRED_VERSION\n if is_1_1:\n return '1.1'\n if is_1_2:\n return 
'1.2'\n if is_2_1:\n return '2.1'\n # if is_2_2:\n # return '2.2'\n\n return '2.2'\n\n\n# This follows the rules about transforming keys as described in\n# https://www.python.org/dev/peps/pep-0566/#id17\n_ATTR2FIELD = {name.lower().replace("-", "_"): name for name in _ALL_FIELDS}\n_FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()}\n\n_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')\n_VERSIONS_FIELDS = ('Requires-Python', )\n_VERSION_FIELDS = ('Version', )\n_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', 'Requires', 'Provides', 'Obsoletes-Dist', 'Provides-Dist',\n 'Requires-Dist', 'Requires-External', 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',\n 'Provides-Extra', 'Extension', 'License-File')\n_LISTTUPLEFIELDS = ('Project-URL', )\n\n_ELEMENTSFIELD = ('Keywords', )\n\n_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')\n\n_MISSING = object()\n\n_FILESAFE = re.compile('[^A-Za-z0-9.]+')\n\n\ndef _get_name_and_version(name, version, for_filename=False):\n """Return the distribution name with version.\n\n If for_filename is true, return a filename-escaped form."""\n if for_filename:\n # For both name and version any runs of non-alphanumeric or '.'\n # characters are replaced with a single '-'. Additionally any\n # spaces in the version string become '.'\n name = _FILESAFE.sub('-', name)\n version = _FILESAFE.sub('-', version.replace(' ', '.'))\n return '%s-%s' % (name, version)\n\n\nclass LegacyMetadata(object):\n """The legacy metadata of a release.\n\n Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). 
You can\n instantiate the class with one of these arguments (or none):\n - *path*, the path to a metadata file\n - *fileobj* give a file-like object with metadata as content\n - *mapping* is a dict-like object\n - *scheme* is a version scheme name\n """\n\n # TODO document the mapping API and UNKNOWN default key\n\n def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'):\n if [path, fileobj, mapping].count(None) < 2:\n raise TypeError('path, fileobj and mapping are exclusive')\n self._fields = {}\n self.requires_files = []\n self._dependencies = None\n self.scheme = scheme\n if path is not None:\n self.read(path)\n elif fileobj is not None:\n self.read_file(fileobj)\n elif mapping is not None:\n self.update(mapping)\n self.set_metadata_version()\n\n def set_metadata_version(self):\n self._fields['Metadata-Version'] = _best_version(self._fields)\n\n def _write_field(self, fileobj, name, value):\n fileobj.write('%s: %s\n' % (name, value))\n\n def __getitem__(self, name):\n return self.get(name)\n\n def __setitem__(self, name, value):\n return self.set(name, value)\n\n def __delitem__(self, name):\n field_name = self._convert_name(name)\n try:\n del self._fields[field_name]\n except KeyError:\n raise KeyError(name)\n\n def __contains__(self, name):\n return (name in self._fields or self._convert_name(name) in self._fields)\n\n def _convert_name(self, name):\n if name in _ALL_FIELDS:\n return name\n name = name.replace('-', '_').lower()\n return _ATTR2FIELD.get(name, name)\n\n def _default_value(self, name):\n if name in _LISTFIELDS or name in _ELEMENTSFIELD:\n return []\n return 'UNKNOWN'\n\n def _remove_line_prefix(self, value):\n if self.metadata_version in ('1.0', '1.1'):\n return _LINE_PREFIX_PRE_1_2.sub('\n', value)\n else:\n return _LINE_PREFIX_1_2.sub('\n', value)\n\n def __getattr__(self, name):\n if name in _ATTR2FIELD:\n return self[name]\n raise AttributeError(name)\n\n #\n # Public API\n #\n\n def get_fullname(self, filesafe=False):\n 
"""\n Return the distribution name with version.\n\n If filesafe is true, return a filename-escaped form.\n """\n return _get_name_and_version(self['Name'], self['Version'], filesafe)\n\n def is_field(self, name):\n """return True if name is a valid metadata key"""\n name = self._convert_name(name)\n return name in _ALL_FIELDS\n\n def is_multi_field(self, name):\n name = self._convert_name(name)\n return name in _LISTFIELDS\n\n def read(self, filepath):\n """Read the metadata values from a file path."""\n fp = codecs.open(filepath, 'r', encoding='utf-8')\n try:\n self.read_file(fp)\n finally:\n fp.close()\n\n def read_file(self, fileob):\n """Read the metadata values from a file object."""\n msg = message_from_file(fileob)\n self._fields['Metadata-Version'] = msg['metadata-version']\n\n # When reading, get all the fields we can\n for field in _ALL_FIELDS:\n if field not in msg:\n continue\n if field in _LISTFIELDS:\n # we can have multiple lines\n values = msg.get_all(field)\n if field in _LISTTUPLEFIELDS and values is not None:\n values = [tuple(value.split(',')) for value in values]\n self.set(field, values)\n else:\n # single line\n value = msg[field]\n if value is not None and value != 'UNKNOWN':\n self.set(field, value)\n\n # PEP 566 specifies that the body be used for the description, if\n # available\n body = msg.get_payload()\n self["Description"] = body if body else self["Description"]\n # logger.debug('Attempting to set metadata for %s', self)\n # self.set_metadata_version()\n\n def write(self, filepath, skip_unknown=False):\n """Write the metadata fields to filepath."""\n fp = codecs.open(filepath, 'w', encoding='utf-8')\n try:\n self.write_file(fp, skip_unknown)\n finally:\n fp.close()\n\n def write_file(self, fileobject, skip_unknown=False):\n """Write the PKG-INFO format data to a file object."""\n self.set_metadata_version()\n\n for field in _version2fieldlist(self['Metadata-Version']):\n values = self.get(field)\n if skip_unknown and values in 
('UNKNOWN', [], ['UNKNOWN']):\n continue\n if field in _ELEMENTSFIELD:\n self._write_field(fileobject, field, ','.join(values))\n continue\n if field not in _LISTFIELDS:\n if field == 'Description':\n if self.metadata_version in ('1.0', '1.1'):\n values = values.replace('\n', '\n ')\n else:\n values = values.replace('\n', '\n |')\n values = [values]\n\n if field in _LISTTUPLEFIELDS:\n values = [','.join(value) for value in values]\n\n for value in values:\n self._write_field(fileobject, field, value)\n\n def update(self, other=None, **kwargs):\n """Set metadata values from the given iterable `other` and kwargs.\n\n Behavior is like `dict.update`: If `other` has a ``keys`` method,\n they are looped over and ``self[key]`` is assigned ``other[key]``.\n Else, ``other`` is an iterable of ``(key, value)`` iterables.\n\n Keys that don't match a metadata field or that have an empty value are\n dropped.\n """\n\n def _set(key, value):\n if key in _ATTR2FIELD and value:\n self.set(self._convert_name(key), value)\n\n if not other:\n # other is None or empty container\n pass\n elif hasattr(other, 'keys'):\n for k in other.keys():\n _set(k, other[k])\n else:\n for k, v in other:\n _set(k, v)\n\n if kwargs:\n for k, v in kwargs.items():\n _set(k, v)\n\n def set(self, name, value):\n """Control then set a metadata field."""\n name = self._convert_name(name)\n\n if ((name in _ELEMENTSFIELD or name == 'Platform') and not isinstance(value, (list, tuple))):\n if isinstance(value, string_types):\n value = [v.strip() for v in value.split(',')]\n else:\n value = []\n elif (name in _LISTFIELDS and not isinstance(value, (list, tuple))):\n if isinstance(value, string_types):\n value = [value]\n else:\n value = []\n\n if logger.isEnabledFor(logging.WARNING):\n project_name = self['Name']\n\n scheme = get_scheme(self.scheme)\n if name in _PREDICATE_FIELDS and value is not None:\n for v in value:\n # check that the values are valid\n if not scheme.is_valid_matcher(v.split(';')[0]):\n 
logger.warning("'%s': '%s' is not valid (field '%s')", project_name, v, name)\n # FIXME this rejects UNKNOWN, is that right?\n elif name in _VERSIONS_FIELDS and value is not None:\n if not scheme.is_valid_constraint_list(value):\n logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name)\n elif name in _VERSION_FIELDS and value is not None:\n if not scheme.is_valid_version(value):\n logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name)\n\n if name in _UNICODEFIELDS:\n if name == 'Description':\n value = self._remove_line_prefix(value)\n\n self._fields[name] = value\n\n def get(self, name, default=_MISSING):\n """Get a metadata field."""\n name = self._convert_name(name)\n if name not in self._fields:\n if default is _MISSING:\n default = self._default_value(name)\n return default\n if name in _UNICODEFIELDS:\n value = self._fields[name]\n return value\n elif name in _LISTFIELDS:\n value = self._fields[name]\n if value is None:\n return []\n res = []\n for val in value:\n if name not in _LISTTUPLEFIELDS:\n res.append(val)\n else:\n # That's for Project-URL\n res.append((val[0], val[1]))\n return res\n\n elif name in _ELEMENTSFIELD:\n value = self._fields[name]\n if isinstance(value, string_types):\n return value.split(',')\n return self._fields[name]\n\n def check(self, strict=False):\n """Check if the metadata is compliant. 
If strict is True then raise if\n no Name or Version are provided"""\n self.set_metadata_version()\n\n # XXX should check the versions (if the file was loaded)\n missing, warnings = [], []\n\n for attr in ('Name', 'Version'): # required by PEP 345\n if attr not in self:\n missing.append(attr)\n\n if strict and missing != []:\n msg = 'missing required metadata: %s' % ', '.join(missing)\n raise MetadataMissingError(msg)\n\n for attr in ('Home-page', 'Author'):\n if attr not in self:\n missing.append(attr)\n\n # checking metadata 1.2 (XXX needs to check 1.1, 1.0)\n if self['Metadata-Version'] != '1.2':\n return missing, warnings\n\n scheme = get_scheme(self.scheme)\n\n def are_valid_constraints(value):\n for v in value:\n if not scheme.is_valid_matcher(v.split(';')[0]):\n return False\n return True\n\n for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),\n (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS,\n scheme.is_valid_version)):\n for field in fields:\n value = self.get(field, None)\n if value is not None and not controller(value):\n warnings.append("Wrong value for '%s': %s" % (field, value))\n\n return missing, warnings\n\n def todict(self, skip_missing=False):\n """Return fields as a dict.\n\n Field names will be converted to use the underscore-lowercase style\n instead of hyphen-mixed case (i.e. 
home_page instead of Home-page).\n This is as per https://www.python.org/dev/peps/pep-0566/#id17.\n """\n self.set_metadata_version()\n\n fields = _version2fieldlist(self['Metadata-Version'])\n\n data = {}\n\n for field_name in fields:\n if not skip_missing or field_name in self._fields:\n key = _FIELD2ATTR[field_name]\n if key != 'project_url':\n data[key] = self[field_name]\n else:\n data[key] = [','.join(u) for u in self[field_name]]\n\n return data\n\n def add_requirements(self, requirements):\n if self['Metadata-Version'] == '1.1':\n # we can't have 1.1 metadata *and* Setuptools requires\n for field in ('Obsoletes', 'Requires', 'Provides'):\n if field in self:\n del self[field]\n self['Requires-Dist'] += requirements\n\n # Mapping API\n # TODO could add iter* variants\n\n def keys(self):\n return list(_version2fieldlist(self['Metadata-Version']))\n\n def __iter__(self):\n for key in self.keys():\n yield key\n\n def values(self):\n return [self[key] for key in self.keys()]\n\n def items(self):\n return [(key, self[key]) for key in self.keys()]\n\n def __repr__(self):\n return '<%s %s %s>' % (self.__class__.__name__, self.name, self.version)\n\n\nMETADATA_FILENAME = 'pydist.json'\nWHEEL_METADATA_FILENAME = 'metadata.json'\nLEGACY_METADATA_FILENAME = 'METADATA'\n\n\nclass Metadata(object):\n """\n The metadata of a release. This implementation uses 2.1\n metadata where possible. 
If not possible, it wraps a LegacyMetadata\n instance which handles the key-value metadata format.\n """\n\n METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')\n\n NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)\n\n FIELDNAME_MATCHER = re.compile('^[A-Z]([0-9A-Z-]*[0-9A-Z])?$', re.I)\n\n VERSION_MATCHER = PEP440_VERSION_RE\n\n SUMMARY_MATCHER = re.compile('.{1,2047}')\n\n METADATA_VERSION = '2.0'\n\n GENERATOR = 'distlib (%s)' % __version__\n\n MANDATORY_KEYS = {\n 'name': (),\n 'version': (),\n 'summary': ('legacy', ),\n }\n\n INDEX_KEYS = ('name version license summary description author '\n 'author_email keywords platform home_page classifiers '\n 'download_url')\n\n DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '\n 'dev_requires provides meta_requires obsoleted_by '\n 'supports_environments')\n\n SYNTAX_VALIDATORS = {\n 'metadata_version': (METADATA_VERSION_MATCHER, ()),\n 'name': (NAME_MATCHER, ('legacy', )),\n 'version': (VERSION_MATCHER, ('legacy', )),\n 'summary': (SUMMARY_MATCHER, ('legacy', )),\n 'dynamic': (FIELDNAME_MATCHER, ('legacy', )),\n }\n\n __slots__ = ('_legacy', '_data', 'scheme')\n\n def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'):\n if [path, fileobj, mapping].count(None) < 2:\n raise TypeError('path, fileobj and mapping are exclusive')\n self._legacy = None\n self._data = None\n self.scheme = scheme\n # import pdb; pdb.set_trace()\n if mapping is not None:\n try:\n self._validate_mapping(mapping, scheme)\n self._data = mapping\n except MetadataUnrecognizedVersionError:\n self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)\n self.validate()\n else:\n data = None\n if path:\n with open(path, 'rb') as f:\n data = f.read()\n elif fileobj:\n data = fileobj.read()\n if data is None:\n # Initialised with no args - to be added\n self._data = {\n 'metadata_version': self.METADATA_VERSION,\n 'generator': self.GENERATOR,\n }\n else:\n if not isinstance(data, 
text_type):\n data = data.decode('utf-8')\n try:\n self._data = json.loads(data)\n self._validate_mapping(self._data, scheme)\n except ValueError:\n # Note: MetadataUnrecognizedVersionError does not\n # inherit from ValueError (it's a DistlibException,\n # which should not inherit from ValueError).\n # The ValueError comes from the json.load - if that\n # succeeds and we get a validation error, we want\n # that to propagate\n self._legacy = LegacyMetadata(fileobj=StringIO(data), scheme=scheme)\n self.validate()\n\n common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))\n\n none_list = (None, list)\n none_dict = (None, dict)\n\n mapped_keys = {\n 'run_requires': ('Requires-Dist', list),\n 'build_requires': ('Setup-Requires-Dist', list),\n 'dev_requires': none_list,\n 'test_requires': none_list,\n 'meta_requires': none_list,\n 'extras': ('Provides-Extra', list),\n 'modules': none_list,\n 'namespaces': none_list,\n 'exports': none_dict,\n 'commands': none_dict,\n 'classifiers': ('Classifier', list),\n 'source_url': ('Download-URL', None),\n 'metadata_version': ('Metadata-Version', None),\n }\n\n del none_list, none_dict\n\n def __getattribute__(self, key):\n common = object.__getattribute__(self, 'common_keys')\n mapped = object.__getattribute__(self, 'mapped_keys')\n if key in mapped:\n lk, maker = mapped[key]\n if self._legacy:\n if lk is None:\n result = None if maker is None else maker()\n else:\n result = self._legacy.get(lk)\n else:\n value = None if maker is None else maker()\n if key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'):\n result = self._data.get(key, value)\n else:\n # special cases for PEP 459\n sentinel = object()\n result = sentinel\n d = self._data.get('extensions')\n if d:\n if key == 'commands':\n result = d.get('python.commands', value)\n elif key == 'classifiers':\n d = d.get('python.details')\n if d:\n result = d.get(key, value)\n else:\n d = d.get('python.exports')\n if not d:\n d = 
self._data.get('python.exports')\n if d:\n result = d.get(key, value)\n if result is sentinel:\n result = value\n elif key not in common:\n result = object.__getattribute__(self, key)\n elif self._legacy:\n result = self._legacy.get(key)\n else:\n result = self._data.get(key)\n return result\n\n def _validate_value(self, key, value, scheme=None):\n if key in self.SYNTAX_VALIDATORS:\n pattern, exclusions = self.SYNTAX_VALIDATORS[key]\n if (scheme or self.scheme) not in exclusions:\n m = pattern.match(value)\n if not m:\n raise MetadataInvalidError("'%s' is an invalid value for "\n "the '%s' property" % (value, key))\n\n def __setattr__(self, key, value):\n self._validate_value(key, value)\n common = object.__getattribute__(self, 'common_keys')\n mapped = object.__getattribute__(self, 'mapped_keys')\n if key in mapped:\n lk, _ = mapped[key]\n if self._legacy:\n if lk is None:\n raise NotImplementedError\n self._legacy[lk] = value\n elif key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'):\n self._data[key] = value\n else:\n # special cases for PEP 459\n d = self._data.setdefault('extensions', {})\n if key == 'commands':\n d['python.commands'] = value\n elif key == 'classifiers':\n d = d.setdefault('python.details', {})\n d[key] = value\n else:\n d = d.setdefault('python.exports', {})\n d[key] = value\n elif key not in common:\n object.__setattr__(self, key, value)\n else:\n if key == 'keywords':\n if isinstance(value, string_types):\n value = value.strip()\n if value:\n value = value.split()\n else:\n value = []\n if self._legacy:\n self._legacy[key] = value\n else:\n self._data[key] = value\n\n @property\n def name_and_version(self):\n return _get_name_and_version(self.name, self.version, True)\n\n @property\n def provides(self):\n if self._legacy:\n result = self._legacy['Provides-Dist']\n else:\n result = self._data.setdefault('provides', [])\n s = '%s (%s)' % (self.name, self.version)\n if s not in result:\n result.append(s)\n return 
result\n\n @provides.setter\n def provides(self, value):\n if self._legacy:\n self._legacy['Provides-Dist'] = value\n else:\n self._data['provides'] = value\n\n def get_requirements(self, reqts, extras=None, env=None):\n """\n Base method to get dependencies, given a set of extras\n to satisfy and an optional environment context.\n :param reqts: A list of sometimes-wanted dependencies,\n perhaps dependent on extras and environment.\n :param extras: A list of optional components being requested.\n :param env: An optional environment for marker evaluation.\n """\n if self._legacy:\n result = reqts\n else:\n result = []\n extras = get_extras(extras or [], self.extras)\n for d in reqts:\n if 'extra' not in d and 'environment' not in d:\n # unconditional\n include = True\n else:\n if 'extra' not in d:\n # Not extra-dependent - only environment-dependent\n include = True\n else:\n include = d.get('extra') in extras\n if include:\n # Not excluded because of extras, check environment\n marker = d.get('environment')\n if marker:\n include = interpret(marker, env)\n if include:\n result.extend(d['requires'])\n for key in ('build', 'dev', 'test'):\n e = ':%s:' % key\n if e in extras:\n extras.remove(e)\n # A recursive call, but it should terminate since 'test'\n # has been removed from the extras\n reqts = self._data.get('%s_requires' % key, [])\n result.extend(self.get_requirements(reqts, extras=extras, env=env))\n return result\n\n @property\n def dictionary(self):\n if self._legacy:\n return self._from_legacy()\n return self._data\n\n @property\n def dependencies(self):\n if self._legacy:\n raise NotImplementedError\n else:\n return extract_by_key(self._data, self.DEPENDENCY_KEYS)\n\n @dependencies.setter\n def dependencies(self, value):\n if self._legacy:\n raise NotImplementedError\n else:\n self._data.update(value)\n\n def _validate_mapping(self, mapping, scheme):\n if mapping.get('metadata_version') != self.METADATA_VERSION:\n raise MetadataUnrecognizedVersionError()\n 
missing = []\n for key, exclusions in self.MANDATORY_KEYS.items():\n if key not in mapping:\n if scheme not in exclusions:\n missing.append(key)\n if missing:\n msg = 'Missing metadata items: %s' % ', '.join(missing)\n raise MetadataMissingError(msg)\n for k, v in mapping.items():\n self._validate_value(k, v, scheme)\n\n def validate(self):\n if self._legacy:\n missing, warnings = self._legacy.check(True)\n if missing or warnings:\n logger.warning('Metadata: missing: %s, warnings: %s', missing, warnings)\n else:\n self._validate_mapping(self._data, self.scheme)\n\n def todict(self):\n if self._legacy:\n return self._legacy.todict(True)\n else:\n result = extract_by_key(self._data, self.INDEX_KEYS)\n return result\n\n def _from_legacy(self):\n assert self._legacy and not self._data\n result = {\n 'metadata_version': self.METADATA_VERSION,\n 'generator': self.GENERATOR,\n }\n lmd = self._legacy.todict(True) # skip missing ones\n for k in ('name', 'version', 'license', 'summary', 'description', 'classifier'):\n if k in lmd:\n if k == 'classifier':\n nk = 'classifiers'\n else:\n nk = k\n result[nk] = lmd[k]\n kw = lmd.get('Keywords', [])\n if kw == ['']:\n kw = []\n result['keywords'] = kw\n keys = (('requires_dist', 'run_requires'), ('setup_requires_dist', 'build_requires'))\n for ok, nk in keys:\n if ok in lmd and lmd[ok]:\n result[nk] = [{'requires': lmd[ok]}]\n result['provides'] = self.provides\n # author = {}\n # maintainer = {}\n return result\n\n LEGACY_MAPPING = {\n 'name': 'Name',\n 'version': 'Version',\n ('extensions', 'python.details', 'license'): 'License',\n 'summary': 'Summary',\n 'description': 'Description',\n ('extensions', 'python.project', 'project_urls', 'Home'): 'Home-page',\n ('extensions', 'python.project', 'contacts', 0, 'name'): 'Author',\n ('extensions', 'python.project', 'contacts', 0, 'email'): 'Author-email',\n 'source_url': 'Download-URL',\n ('extensions', 'python.details', 'classifiers'): 'Classifier',\n }\n\n def _to_legacy(self):\n\n 
def process_entries(entries):\n reqts = set()\n for e in entries:\n extra = e.get('extra')\n env = e.get('environment')\n rlist = e['requires']\n for r in rlist:\n if not env and not extra:\n reqts.add(r)\n else:\n marker = ''\n if extra:\n marker = 'extra == "%s"' % extra\n if env:\n if marker:\n marker = '(%s) and %s' % (env, marker)\n else:\n marker = env\n reqts.add(';'.join((r, marker)))\n return reqts\n\n assert self._data and not self._legacy\n result = LegacyMetadata()\n nmd = self._data\n # import pdb; pdb.set_trace()\n for nk, ok in self.LEGACY_MAPPING.items():\n if not isinstance(nk, tuple):\n if nk in nmd:\n result[ok] = nmd[nk]\n else:\n d = nmd\n found = True\n for k in nk:\n try:\n d = d[k]\n except (KeyError, IndexError):\n found = False\n break\n if found:\n result[ok] = d\n r1 = process_entries(self.run_requires + self.meta_requires)\n r2 = process_entries(self.build_requires + self.dev_requires)\n if self.extras:\n result['Provides-Extra'] = sorted(self.extras)\n result['Requires-Dist'] = sorted(r1)\n result['Setup-Requires-Dist'] = sorted(r2)\n # TODO: any other fields wanted\n return result\n\n def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):\n if [path, fileobj].count(None) != 1:\n raise ValueError('Exactly one of path and fileobj is needed')\n self.validate()\n if legacy:\n if self._legacy:\n legacy_md = self._legacy\n else:\n legacy_md = self._to_legacy()\n if path:\n legacy_md.write(path, skip_unknown=skip_unknown)\n else:\n legacy_md.write_file(fileobj, skip_unknown=skip_unknown)\n else:\n if self._legacy:\n d = self._from_legacy()\n else:\n d = self._data\n if fileobj:\n json.dump(d, fileobj, ensure_ascii=True, indent=2, sort_keys=True)\n else:\n with codecs.open(path, 'w', 'utf-8') as f:\n json.dump(d, f, ensure_ascii=True, indent=2, sort_keys=True)\n\n def add_requirements(self, requirements):\n if self._legacy:\n self._legacy.add_requirements(requirements)\n else:\n run_requires = 
self._data.setdefault('run_requires', [])\n always = None\n for entry in run_requires:\n if 'environment' not in entry and 'extra' not in entry:\n always = entry\n break\n if always is None:\n always = {'requires': requirements}\n run_requires.insert(0, always)\n else:\n rset = set(always['requires']) | set(requirements)\n always['requires'] = sorted(rset)\n\n def __repr__(self):\n name = self.name or '(no name)'\n version = self.version or 'no version'\n return '<%s %s %s (%s)>' % (self.__class__.__name__, self.metadata_version, name, version)\n
.venv\Lib\site-packages\pip\_vendor\distlib\metadata.py
metadata.py
Python
38,724
0.95
0.256062
0.085911
python-kit
973
2023-12-07T19:58:20.196422
MIT
false
ece60750b1ff238bfc8321839eef38f8
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Vinay Sajip.\n# Licensed to the Python Software Foundation under a contributor agreement.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\nfrom __future__ import unicode_literals\n\nimport bisect\nimport io\nimport logging\nimport os\nimport pkgutil\nimport sys\nimport types\nimport zipimport\n\nfrom . import DistlibException\nfrom .util import cached_property, get_cache_base, Cache\n\nlogger = logging.getLogger(__name__)\n\n\ncache = None # created when needed\n\n\nclass ResourceCache(Cache):\n def __init__(self, base=None):\n if base is None:\n # Use native string to avoid issues on 2.x: see Python #20140.\n base = os.path.join(get_cache_base(), str('resource-cache'))\n super(ResourceCache, self).__init__(base)\n\n def is_stale(self, resource, path):\n """\n Is the cache stale for the given resource?\n\n :param resource: The :class:`Resource` being cached.\n :param path: The path of the resource in the cache.\n :return: True if the cache is stale.\n """\n # Cache invalidation is a hard problem :-)\n return True\n\n def get(self, resource):\n """\n Get a resource into the cache,\n\n :param resource: A :class:`Resource` instance.\n :return: The pathname of the resource in the cache.\n """\n prefix, path = resource.finder.get_cache_info(resource)\n if prefix is None:\n result = path\n else:\n result = os.path.join(self.base, self.prefix_to_dir(prefix), path)\n dirname = os.path.dirname(result)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n if not os.path.exists(result):\n stale = True\n else:\n stale = self.is_stale(resource, path)\n if stale:\n # write the bytes of the resource to the cache location\n with open(result, 'wb') as f:\n f.write(resource.bytes)\n return result\n\n\nclass ResourceBase(object):\n def __init__(self, finder, name):\n self.finder = finder\n self.name = name\n\n\nclass Resource(ResourceBase):\n """\n A class representing an in-package resource, such as a data file. 
This is\n not normally instantiated by user code, but rather by a\n :class:`ResourceFinder` which manages the resource.\n """\n is_container = False # Backwards compatibility\n\n def as_stream(self):\n """\n Get the resource as a stream.\n\n This is not a property to make it obvious that it returns a new stream\n each time.\n """\n return self.finder.get_stream(self)\n\n @cached_property\n def file_path(self):\n global cache\n if cache is None:\n cache = ResourceCache()\n return cache.get(self)\n\n @cached_property\n def bytes(self):\n return self.finder.get_bytes(self)\n\n @cached_property\n def size(self):\n return self.finder.get_size(self)\n\n\nclass ResourceContainer(ResourceBase):\n is_container = True # Backwards compatibility\n\n @cached_property\n def resources(self):\n return self.finder.get_resources(self)\n\n\nclass ResourceFinder(object):\n """\n Resource finder for file system resources.\n """\n\n if sys.platform.startswith('java'):\n skipped_extensions = ('.pyc', '.pyo', '.class')\n else:\n skipped_extensions = ('.pyc', '.pyo')\n\n def __init__(self, module):\n self.module = module\n self.loader = getattr(module, '__loader__', None)\n self.base = os.path.dirname(getattr(module, '__file__', ''))\n\n def _adjust_path(self, path):\n return os.path.realpath(path)\n\n def _make_path(self, resource_name):\n # Issue #50: need to preserve type of path on Python 2.x\n # like os.path._get_sep\n if isinstance(resource_name, bytes): # should only happen on 2.x\n sep = b'/'\n else:\n sep = '/'\n parts = resource_name.split(sep)\n parts.insert(0, self.base)\n result = os.path.join(*parts)\n return self._adjust_path(result)\n\n def _find(self, path):\n return os.path.exists(path)\n\n def get_cache_info(self, resource):\n return None, resource.path\n\n def find(self, resource_name):\n path = self._make_path(resource_name)\n if not self._find(path):\n result = None\n else:\n if self._is_directory(path):\n result = ResourceContainer(self, resource_name)\n else:\n 
result = Resource(self, resource_name)\n result.path = path\n return result\n\n def get_stream(self, resource):\n return open(resource.path, 'rb')\n\n def get_bytes(self, resource):\n with open(resource.path, 'rb') as f:\n return f.read()\n\n def get_size(self, resource):\n return os.path.getsize(resource.path)\n\n def get_resources(self, resource):\n def allowed(f):\n return (f != '__pycache__' and not\n f.endswith(self.skipped_extensions))\n return set([f for f in os.listdir(resource.path) if allowed(f)])\n\n def is_container(self, resource):\n return self._is_directory(resource.path)\n\n _is_directory = staticmethod(os.path.isdir)\n\n def iterator(self, resource_name):\n resource = self.find(resource_name)\n if resource is not None:\n todo = [resource]\n while todo:\n resource = todo.pop(0)\n yield resource\n if resource.is_container:\n rname = resource.name\n for name in resource.resources:\n if not rname:\n new_name = name\n else:\n new_name = '/'.join([rname, name])\n child = self.find(new_name)\n if child.is_container:\n todo.append(child)\n else:\n yield child\n\n\nclass ZipResourceFinder(ResourceFinder):\n """\n Resource finder for resources in .zip files.\n """\n def __init__(self, module):\n super(ZipResourceFinder, self).__init__(module)\n archive = self.loader.archive\n self.prefix_len = 1 + len(archive)\n # PyPy doesn't have a _files attr on zipimporter, and you can't set one\n if hasattr(self.loader, '_files'):\n self._files = self.loader._files\n else:\n self._files = zipimport._zip_directory_cache[archive]\n self.index = sorted(self._files)\n\n def _adjust_path(self, path):\n return path\n\n def _find(self, path):\n path = path[self.prefix_len:]\n if path in self._files:\n result = True\n else:\n if path and path[-1] != os.sep:\n path = path + os.sep\n i = bisect.bisect(self.index, path)\n try:\n result = self.index[i].startswith(path)\n except IndexError:\n result = False\n if not result:\n logger.debug('_find failed: %r %r', path, 
self.loader.prefix)\n else:\n logger.debug('_find worked: %r %r', path, self.loader.prefix)\n return result\n\n def get_cache_info(self, resource):\n prefix = self.loader.archive\n path = resource.path[1 + len(prefix):]\n return prefix, path\n\n def get_bytes(self, resource):\n return self.loader.get_data(resource.path)\n\n def get_stream(self, resource):\n return io.BytesIO(self.get_bytes(resource))\n\n def get_size(self, resource):\n path = resource.path[self.prefix_len:]\n return self._files[path][3]\n\n def get_resources(self, resource):\n path = resource.path[self.prefix_len:]\n if path and path[-1] != os.sep:\n path += os.sep\n plen = len(path)\n result = set()\n i = bisect.bisect(self.index, path)\n while i < len(self.index):\n if not self.index[i].startswith(path):\n break\n s = self.index[i][plen:]\n result.add(s.split(os.sep, 1)[0]) # only immediate children\n i += 1\n return result\n\n def _is_directory(self, path):\n path = path[self.prefix_len:]\n if path and path[-1] != os.sep:\n path += os.sep\n i = bisect.bisect(self.index, path)\n try:\n result = self.index[i].startswith(path)\n except IndexError:\n result = False\n return result\n\n\n_finder_registry = {\n type(None): ResourceFinder,\n zipimport.zipimporter: ZipResourceFinder\n}\n\ntry:\n # In Python 3.6, _frozen_importlib -> _frozen_importlib_external\n try:\n import _frozen_importlib_external as _fi\n except ImportError:\n import _frozen_importlib as _fi\n _finder_registry[_fi.SourceFileLoader] = ResourceFinder\n _finder_registry[_fi.FileFinder] = ResourceFinder\n # See issue #146\n _finder_registry[_fi.SourcelessFileLoader] = ResourceFinder\n del _fi\nexcept (ImportError, AttributeError):\n pass\n\n\ndef register_finder(loader, finder_maker):\n _finder_registry[type(loader)] = finder_maker\n\n\n_finder_cache = {}\n\n\ndef finder(package):\n """\n Return a resource finder for a package.\n :param package: The name of the package.\n :return: A :class:`ResourceFinder` instance for the package.\n 
"""\n if package in _finder_cache:\n result = _finder_cache[package]\n else:\n if package not in sys.modules:\n __import__(package)\n module = sys.modules[package]\n path = getattr(module, '__path__', None)\n if path is None:\n raise DistlibException('You cannot get a finder for a module, '\n 'only for a package')\n loader = getattr(module, '__loader__', None)\n finder_maker = _finder_registry.get(type(loader))\n if finder_maker is None:\n raise DistlibException('Unable to locate finder for %r' % package)\n result = finder_maker(module)\n _finder_cache[package] = result\n return result\n\n\n_dummy_module = types.ModuleType(str('__dummy__'))\n\n\ndef finder_for_path(path):\n """\n Return a resource finder for a path, which should represent a container.\n\n :param path: The path.\n :return: A :class:`ResourceFinder` instance for the path.\n """\n result = None\n # calls any path hooks, gets importer into cache\n pkgutil.get_importer(path)\n loader = sys.path_importer_cache.get(path)\n finder = _finder_registry.get(type(loader))\n if finder:\n module = _dummy_module\n module.__file__ = os.path.join(path, '')\n module.__loader__ = loader\n result = finder(module)\n return result\n
.venv\Lib\site-packages\pip\_vendor\distlib\resources.py
resources.py
Python
10,820
0.95
0.259777
0.050847
python-kit
471
2024-03-27T17:57:41.832144
GPL-3.0
false
669a65482a124662963f972e6d36c6b4
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2023 Vinay Sajip.\n# Licensed to the Python Software Foundation under a contributor agreement.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\nfrom io import BytesIO\nimport logging\nimport os\nimport re\nimport struct\nimport sys\nimport time\nfrom zipfile import ZipInfo\n\nfrom .compat import sysconfig, detect_encoding, ZipFile\nfrom .resources import finder\nfrom .util import (FileOperator, get_export_entry, convert_path, get_executable, get_platform, in_venv)\n\nlogger = logging.getLogger(__name__)\n\n_DEFAULT_MANIFEST = '''\n<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">\n <assemblyIdentity version="1.0.0.0"\n processorArchitecture="X86"\n name="%s"\n type="win32"/>\n\n <!-- Identify the application security requirements. -->\n <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">\n <security>\n <requestedPrivileges>\n <requestedExecutionLevel level="asInvoker" uiAccess="false"/>\n </requestedPrivileges>\n </security>\n </trustInfo>\n</assembly>'''.strip()\n\n# check if Python is called on the first line with this expression\nFIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')\nSCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-\nimport re\nimport sys\nfrom %(module)s import %(import_name)s\nif __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])\n sys.exit(%(func)s())\n'''\n\n# Pre-fetch the contents of all executable wrapper stubs.\n# This is to address https://github.com/pypa/pip/issues/12666.\n# When updating pip, we rename the old pip in place before installing the\n# new version. If we try to fetch a wrapper *after* that rename, the finder\n# machinery will be confused as the package is no longer available at the\n# location where it was imported from. 
So we load everything into memory in\n# advance.\n\nif os.name == 'nt' or (os.name == 'java' and os._name == 'nt'):\n # Issue 31: don't hardcode an absolute package name, but\n # determine it relative to the current package\n DISTLIB_PACKAGE = __name__.rsplit('.', 1)[0]\n\n WRAPPERS = {\n r.name: r.bytes\n for r in finder(DISTLIB_PACKAGE).iterator("")\n if r.name.endswith(".exe")\n }\n\n\ndef enquote_executable(executable):\n if ' ' in executable:\n # make sure we quote only the executable in case of env\n # for example /usr/bin/env "/dir with spaces/bin/jython"\n # instead of "/usr/bin/env /dir with spaces/bin/jython"\n # otherwise whole\n if executable.startswith('/usr/bin/env '):\n env, _executable = executable.split(' ', 1)\n if ' ' in _executable and not _executable.startswith('"'):\n executable = '%s "%s"' % (env, _executable)\n else:\n if not executable.startswith('"'):\n executable = '"%s"' % executable\n return executable\n\n\n# Keep the old name around (for now), as there is at least one project using it!\n_enquote_executable = enquote_executable\n\n\nclass ScriptMaker(object):\n """\n A class to copy or create scripts from source scripts or callable\n specifications.\n """\n script_template = SCRIPT_TEMPLATE\n\n executable = None # for shebangs\n\n def __init__(self, source_dir, target_dir, add_launchers=True, dry_run=False, fileop=None):\n self.source_dir = source_dir\n self.target_dir = target_dir\n self.add_launchers = add_launchers\n self.force = False\n self.clobber = False\n # It only makes sense to set mode bits on POSIX.\n self.set_mode = (os.name == 'posix') or (os.name == 'java' and os._name == 'posix')\n self.variants = set(('', 'X.Y'))\n self._fileop = fileop or FileOperator(dry_run)\n\n self._is_nt = os.name == 'nt' or (os.name == 'java' and os._name == 'nt')\n self.version_info = sys.version_info\n\n def _get_alternate_executable(self, executable, options):\n if options.get('gui', False) and self._is_nt: # pragma: no cover\n dn, fn = 
os.path.split(executable)\n fn = fn.replace('python', 'pythonw')\n executable = os.path.join(dn, fn)\n return executable\n\n if sys.platform.startswith('java'): # pragma: no cover\n\n def _is_shell(self, executable):\n """\n Determine if the specified executable is a script\n (contains a #! line)\n """\n try:\n with open(executable) as fp:\n return fp.read(2) == '#!'\n except (OSError, IOError):\n logger.warning('Failed to open %s', executable)\n return False\n\n def _fix_jython_executable(self, executable):\n if self._is_shell(executable):\n # Workaround for Jython is not needed on Linux systems.\n import java\n\n if java.lang.System.getProperty('os.name') == 'Linux':\n return executable\n elif executable.lower().endswith('jython.exe'):\n # Use wrapper exe for Jython on Windows\n return executable\n return '/usr/bin/env %s' % executable\n\n def _build_shebang(self, executable, post_interp):\n """\n Build a shebang line. In the simple case (on Windows, or a shebang line\n which is not too long or contains spaces) use a simple formulation for\n the shebang. Otherwise, use /bin/sh as the executable, with a contrived\n shebang which allows the script to run either under Python or sh, using\n suitable quoting. Thanks to Harald Nordgren for his input.\n\n See also: http://www.in-ulm.de/~mascheck/various/shebang/#length\n https://hg.mozilla.org/mozilla-central/file/tip/mach\n """\n if os.name != 'posix':\n simple_shebang = True\n elif getattr(sys, "cross_compiling", False):\n # In a cross-compiling environment, the shebang will likely be a\n # script; this *must* be invoked with the "safe" version of the\n # shebang, or else using os.exec() to run the entry script will\n # fail, raising "OSError 8 [Errno 8] Exec format error".\n simple_shebang = False\n else:\n # Add 3 for '#!' 
prefix and newline suffix.\n shebang_length = len(executable) + len(post_interp) + 3\n if sys.platform == 'darwin':\n max_shebang_length = 512\n else:\n max_shebang_length = 127\n simple_shebang = ((b' ' not in executable) and (shebang_length <= max_shebang_length))\n\n if simple_shebang:\n result = b'#!' + executable + post_interp + b'\n'\n else:\n result = b'#!/bin/sh\n'\n result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'\n result += b"' '''\n"\n return result\n\n def _get_shebang(self, encoding, post_interp=b'', options=None):\n enquote = True\n if self.executable:\n executable = self.executable\n enquote = False # assume this will be taken care of\n elif not sysconfig.is_python_build():\n executable = get_executable()\n elif in_venv(): # pragma: no cover\n executable = os.path.join(sysconfig.get_path('scripts'), 'python%s' % sysconfig.get_config_var('EXE'))\n else: # pragma: no cover\n if os.name == 'nt':\n # for Python builds from source on Windows, no Python executables with\n # a version suffix are created, so we use python.exe\n executable = os.path.join(sysconfig.get_config_var('BINDIR'),\n 'python%s' % (sysconfig.get_config_var('EXE')))\n else:\n executable = os.path.join(\n sysconfig.get_config_var('BINDIR'),\n 'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE')))\n if options:\n executable = self._get_alternate_executable(executable, options)\n\n if sys.platform.startswith('java'): # pragma: no cover\n executable = self._fix_jython_executable(executable)\n\n # Normalise case for Windows - COMMENTED OUT\n # executable = os.path.normcase(executable)\n # N.B. The normalising operation above has been commented out: See\n # issue #124. Although paths in Windows are generally case-insensitive,\n # they aren't always. For example, a path containing a ẞ (which is a\n # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a\n # LATIN SMALL LETTER SHARP S' - U+00DF). 
The two are not considered by\n # Windows as equivalent in path names.\n\n # If the user didn't specify an executable, it may be necessary to\n # cater for executable paths with spaces (not uncommon on Windows)\n if enquote:\n executable = enquote_executable(executable)\n # Issue #51: don't use fsencode, since we later try to\n # check that the shebang is decodable using utf-8.\n executable = executable.encode('utf-8')\n # in case of IronPython, play safe and enable frames support\n if (sys.platform == 'cli' and '-X:Frames' not in post_interp and\n '-X:FullFrames' not in post_interp): # pragma: no cover\n post_interp += b' -X:Frames'\n shebang = self._build_shebang(executable, post_interp)\n # Python parser starts to read a script using UTF-8 until\n # it gets a #coding:xxx cookie. The shebang has to be the\n # first line of a file, the #coding:xxx cookie cannot be\n # written before. So the shebang has to be decodable from\n # UTF-8.\n try:\n shebang.decode('utf-8')\n except UnicodeDecodeError: # pragma: no cover\n raise ValueError('The shebang (%r) is not decodable from utf-8' % shebang)\n # If the script is encoded to a custom encoding (use a\n # #coding:xxx cookie), the shebang has to be decodable from\n # the script encoding too.\n if encoding != 'utf-8':\n try:\n shebang.decode(encoding)\n except UnicodeDecodeError: # pragma: no cover\n raise ValueError('The shebang (%r) is not decodable '\n 'from the script encoding (%r)' % (shebang, encoding))\n return shebang\n\n def _get_script_text(self, entry):\n return self.script_template % dict(\n module=entry.prefix, import_name=entry.suffix.split('.')[0], func=entry.suffix)\n\n manifest = _DEFAULT_MANIFEST\n\n def get_manifest(self, exename):\n base = os.path.basename(exename)\n return self.manifest % base\n\n def _write_script(self, names, shebang, script_bytes, filenames, ext):\n use_launcher = self.add_launchers and self._is_nt\n if not use_launcher:\n script_bytes = shebang + script_bytes\n else: # pragma: no 
cover\n if ext == 'py':\n launcher = self._get_launcher('t')\n else:\n launcher = self._get_launcher('w')\n stream = BytesIO()\n with ZipFile(stream, 'w') as zf:\n source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')\n if source_date_epoch:\n date_time = time.gmtime(int(source_date_epoch))[:6]\n zinfo = ZipInfo(filename='__main__.py', date_time=date_time)\n zf.writestr(zinfo, script_bytes)\n else:\n zf.writestr('__main__.py', script_bytes)\n zip_data = stream.getvalue()\n script_bytes = launcher + shebang + zip_data\n for name in names:\n outname = os.path.join(self.target_dir, name)\n if use_launcher: # pragma: no cover\n n, e = os.path.splitext(outname)\n if e.startswith('.py'):\n outname = n\n outname = '%s.exe' % outname\n try:\n self._fileop.write_binary_file(outname, script_bytes)\n except Exception:\n # Failed writing an executable - it might be in use.\n logger.warning('Failed to write executable - trying to '\n 'use .deleteme logic')\n dfname = '%s.deleteme' % outname\n if os.path.exists(dfname):\n os.remove(dfname) # Not allowed to fail here\n os.rename(outname, dfname) # nor here\n self._fileop.write_binary_file(outname, script_bytes)\n logger.debug('Able to replace executable using '\n '.deleteme logic')\n try:\n os.remove(dfname)\n except Exception:\n pass # still in use - ignore error\n else:\n if self._is_nt and not outname.endswith('.' 
+ ext): # pragma: no cover\n outname = '%s.%s' % (outname, ext)\n if os.path.exists(outname) and not self.clobber:\n logger.warning('Skipping existing file %s', outname)\n continue\n self._fileop.write_binary_file(outname, script_bytes)\n if self.set_mode:\n self._fileop.set_executable_mode([outname])\n filenames.append(outname)\n\n variant_separator = '-'\n\n def get_script_filenames(self, name):\n result = set()\n if '' in self.variants:\n result.add(name)\n if 'X' in self.variants:\n result.add('%s%s' % (name, self.version_info[0]))\n if 'X.Y' in self.variants:\n result.add('%s%s%s.%s' % (name, self.variant_separator, self.version_info[0], self.version_info[1]))\n return result\n\n def _make_script(self, entry, filenames, options=None):\n post_interp = b''\n if options:\n args = options.get('interpreter_args', [])\n if args:\n args = ' %s' % ' '.join(args)\n post_interp = args.encode('utf-8')\n shebang = self._get_shebang('utf-8', post_interp, options=options)\n script = self._get_script_text(entry).encode('utf-8')\n scriptnames = self.get_script_filenames(entry.name)\n if options and options.get('gui', False):\n ext = 'pyw'\n else:\n ext = 'py'\n self._write_script(scriptnames, shebang, script, filenames, ext)\n\n def _copy_script(self, script, filenames):\n adjust = False\n script = os.path.join(self.source_dir, convert_path(script))\n outname = os.path.join(self.target_dir, os.path.basename(script))\n if not self.force and not self._fileop.newer(script, outname):\n logger.debug('not copying %s (up-to-date)', script)\n return\n\n # Always open the file, but ignore failures in dry-run mode --\n # that way, we'll get accurate feedback if we can read the\n # script.\n try:\n f = open(script, 'rb')\n except IOError: # pragma: no cover\n if not self.dry_run:\n raise\n f = None\n else:\n first_line = f.readline()\n if not first_line: # pragma: no cover\n logger.warning('%s is an empty file (skipping)', script)\n return\n\n match = 
FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))\n if match:\n adjust = True\n post_interp = match.group(1) or b''\n\n if not adjust:\n if f:\n f.close()\n self._fileop.copy_file(script, outname)\n if self.set_mode:\n self._fileop.set_executable_mode([outname])\n filenames.append(outname)\n else:\n logger.info('copying and adjusting %s -> %s', script, self.target_dir)\n if not self._fileop.dry_run:\n encoding, lines = detect_encoding(f.readline)\n f.seek(0)\n shebang = self._get_shebang(encoding, post_interp)\n if b'pythonw' in first_line: # pragma: no cover\n ext = 'pyw'\n else:\n ext = 'py'\n n = os.path.basename(outname)\n self._write_script([n], shebang, f.read(), filenames, ext)\n if f:\n f.close()\n\n @property\n def dry_run(self):\n return self._fileop.dry_run\n\n @dry_run.setter\n def dry_run(self, value):\n self._fileop.dry_run = value\n\n if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover\n # Executable launcher support.\n # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/\n\n def _get_launcher(self, kind):\n if struct.calcsize('P') == 8: # 64-bit\n bits = '64'\n else:\n bits = '32'\n platform_suffix = '-arm' if get_platform() == 'win-arm64' else ''\n name = '%s%s%s.exe' % (kind, bits, platform_suffix)\n if name not in WRAPPERS:\n msg = ('Unable to find resource %s in package %s' %\n (name, DISTLIB_PACKAGE))\n raise ValueError(msg)\n return WRAPPERS[name]\n\n # Public API follows\n\n def make(self, specification, options=None):\n """\n Make a script.\n\n :param specification: The specification, which is either a valid export\n entry specification (to make a script from a\n callable) or a filename (to make a script by\n copying from a source location).\n :param options: A dictionary of options controlling script generation.\n :return: A list of all absolute pathnames written to.\n """\n filenames = []\n entry = get_export_entry(specification)\n if entry is None:\n self._copy_script(specification, 
filenames)\n else:\n self._make_script(entry, filenames, options=options)\n return filenames\n\n def make_multiple(self, specifications, options=None):\n """\n Take a list of specifications and make scripts from them,\n :param specifications: A list of specifications.\n :return: A list of all absolute pathnames written to,\n """\n filenames = []\n for specification in specifications:\n filenames.extend(self.make(specification, options))\n return filenames\n
.venv\Lib\site-packages\pip\_vendor\distlib\scripts.py
scripts.py
Python
18,608
0.95
0.214765
0.14787
vue-tools
507
2024-05-18T07:44:30.513670
MIT
false
54e9d6f9f6571fdd935269a54cc80700
MZ
.venv\Lib\site-packages\pip\_vendor\distlib\t32.exe
t32.exe
Other
97,792
0.6
0.006593
0.004474
vue-tools
781
2024-12-09T00:30:29.944917
BSD-3-Clause
false
17640a7257fabaea92bc981eecdb5192
MZ
.venv\Lib\site-packages\pip\_vendor\distlib\t64.exe
t64.exe
Other
108,032
0.6
0.00431
0.002903
vue-tools
873
2023-08-15T13:00:08.866439
MIT
false
054cab5a946b7f20caa127bccfe66a06
#\n# Copyright (C) 2012-2023 The Python Software Foundation.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\nimport codecs\nfrom collections import deque\nimport contextlib\nimport csv\nfrom glob import iglob as std_iglob\nimport io\nimport json\nimport logging\nimport os\nimport py_compile\nimport re\nimport socket\ntry:\n import ssl\nexcept ImportError: # pragma: no cover\n ssl = None\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport textwrap\n\ntry:\n import threading\nexcept ImportError: # pragma: no cover\n import dummy_threading as threading\nimport time\n\nfrom . import DistlibException\nfrom .compat import (string_types, text_type, shutil, raw_input, StringIO, cache_from_source, urlopen, urljoin, httplib,\n xmlrpclib, HTTPHandler, BaseConfigurator, valid_ident, Container, configparser, URLError, ZipFile,\n fsdecode, unquote, urlparse)\n\nlogger = logging.getLogger(__name__)\n\n#\n# Requirement parsing code as per PEP 508\n#\n\nIDENTIFIER = re.compile(r'^([\w\.-]+)\s*')\nVERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')\nCOMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')\nMARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')\nOR = re.compile(r'^or\b\s*')\nAND = re.compile(r'^and\b\s*')\nNON_SPACE = re.compile(r'(\S+)\s*')\nSTRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')\n\n\ndef parse_marker(marker_string):\n """\n Parse a marker string and return a dictionary containing a marker expression.\n\n The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in\n the expression grammar, or strings. 
A string contained in quotes is to be\n interpreted as a literal string, and a string not contained in quotes is a\n variable (such as os_name).\n """\n\n def marker_var(remaining):\n # either identifier, or literal string\n m = IDENTIFIER.match(remaining)\n if m:\n result = m.groups()[0]\n remaining = remaining[m.end():]\n elif not remaining:\n raise SyntaxError('unexpected end of input')\n else:\n q = remaining[0]\n if q not in '\'"':\n raise SyntaxError('invalid expression: %s' % remaining)\n oq = '\'"'.replace(q, '')\n remaining = remaining[1:]\n parts = [q]\n while remaining:\n # either a string chunk, or oq, or q to terminate\n if remaining[0] == q:\n break\n elif remaining[0] == oq:\n parts.append(oq)\n remaining = remaining[1:]\n else:\n m = STRING_CHUNK.match(remaining)\n if not m:\n raise SyntaxError('error in string literal: %s' % remaining)\n parts.append(m.groups()[0])\n remaining = remaining[m.end():]\n else:\n s = ''.join(parts)\n raise SyntaxError('unterminated string: %s' % s)\n parts.append(q)\n result = ''.join(parts)\n remaining = remaining[1:].lstrip() # skip past closing quote\n return result, remaining\n\n def marker_expr(remaining):\n if remaining and remaining[0] == '(':\n result, remaining = marker(remaining[1:].lstrip())\n if remaining[0] != ')':\n raise SyntaxError('unterminated parenthesis: %s' % remaining)\n remaining = remaining[1:].lstrip()\n else:\n lhs, remaining = marker_var(remaining)\n while remaining:\n m = MARKER_OP.match(remaining)\n if not m:\n break\n op = m.groups()[0]\n remaining = remaining[m.end():]\n rhs, remaining = marker_var(remaining)\n lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}\n result = lhs\n return result, remaining\n\n def marker_and(remaining):\n lhs, remaining = marker_expr(remaining)\n while remaining:\n m = AND.match(remaining)\n if not m:\n break\n remaining = remaining[m.end():]\n rhs, remaining = marker_expr(remaining)\n lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}\n return lhs, remaining\n\n def 
def parse_requirement(req):
    """
    Parse a requirement passed in as a string. Return a Container
    whose attributes contain the various parts of the requirement.

    :param req: A PEP 508-style requirement string, e.g.
                ``'name[extra1,extra2] (>=1.0,<2.0); marker'`` or
                ``'name @ URL'``.  Comment lines (starting ``#``) and
                blank strings yield ``None``.
    :return: A Container with attributes ``name``, ``extras``,
             ``constraints``, ``marker``, ``url`` and ``requirement``,
             or ``None`` for blank/comment input.
    :raises SyntaxError: if the requirement is malformed.
    """
    remaining = req.strip()
    if not remaining or remaining.startswith('#'):
        return None
    m = IDENTIFIER.match(remaining)
    if not m:
        raise SyntaxError('name expected: %s' % remaining)
    distname = m.groups()[0]
    remaining = remaining[m.end():]
    extras = mark_expr = versions = uri = None
    # Optional '[extra1, extra2, ...]' immediately after the name.
    if remaining and remaining[0] == '[':
        i = remaining.find(']', 1)
        if i < 0:
            raise SyntaxError('unterminated extra: %s' % remaining)
        s = remaining[1:i]
        remaining = remaining[i + 1:].lstrip()
        extras = []
        while s:
            m = IDENTIFIER.match(s)
            if not m:
                raise SyntaxError('malformed extra: %s' % s)
            extras.append(m.groups()[0])
            s = s[m.end():]
            if not s:
                break
            if s[0] != ',':
                raise SyntaxError('comma expected in extras: %s' % s)
            s = s[1:].lstrip()
        if not extras:
            extras = None
    if remaining:
        if remaining[0] == '@':
            # it's a URI
            remaining = remaining[1:].lstrip()
            m = NON_SPACE.match(remaining)
            if not m:
                raise SyntaxError('invalid URI: %s' % remaining)
            uri = m.groups()[0]
            t = urlparse(uri)
            # there are issues with Python and URL parsing, so this test
            # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
            # always parse invalid URLs correctly - it should raise
            # exceptions for malformed URLs
            if not (t.scheme and t.netloc):
                raise SyntaxError('Invalid URL: %s' % uri)
            remaining = remaining[m.end():].lstrip()
        else:

            def get_versions(ver_remaining):
                """
                Return a list of operator, version tuples if any are
                specified, else None.
                """
                m = COMPARE_OP.match(ver_remaining)
                versions = None
                if m:
                    versions = []
                    while True:
                        op = m.groups()[0]
                        ver_remaining = ver_remaining[m.end():]
                        m = VERSION_IDENTIFIER.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid version: %s' % ver_remaining)
                        v = m.groups()[0]
                        versions.append((op, v))
                        ver_remaining = ver_remaining[m.end():]
                        if not ver_remaining or ver_remaining[0] != ',':
                            break
                        ver_remaining = ver_remaining[1:].lstrip()
                        # Some packages have a trailing comma which would break things
                        # See issue #148
                        if not ver_remaining:
                            break
                        m = COMPARE_OP.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid constraint: %s' % ver_remaining)
                    if not versions:
                        versions = None
                return versions, ver_remaining

            if remaining[0] != '(':
                versions, remaining = get_versions(remaining)
            else:
                i = remaining.find(')', 1)
                if i < 0:
                    raise SyntaxError('unterminated parenthesis: %s' % remaining)
                s = remaining[1:i]
                remaining = remaining[i + 1:].lstrip()
                # As a special diversion from PEP 508, allow a version number
                # a.b.c in parentheses as a synonym for ~= a.b.c (because this
                # is allowed in earlier PEPs)
                if COMPARE_OP.match(s):
                    versions, _ = get_versions(s)
                else:
                    m = VERSION_IDENTIFIER.match(s)
                    if not m:
                        raise SyntaxError('invalid constraint: %s' % s)
                    v = m.groups()[0]
                    s = s[m.end():].lstrip()
                    if s:
                        raise SyntaxError('invalid constraint: %s' % s)
                    versions = [('~=', v)]

        # Optional environment marker after ';'.
        if remaining:
            if remaining[0] != ';':
                raise SyntaxError('invalid requirement: %s' % remaining)
            remaining = remaining[1:].lstrip()
            mark_expr, remaining = parse_marker(remaining)

    if remaining and remaining[0] != '#':
        raise SyntaxError('unexpected trailing data: %s' % remaining)

    if not versions:
        rs = distname
    else:
        rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
    return Container(name=distname, extras=extras, constraints=versions, marker=mark_expr, url=uri, requirement=rs)
def extract_by_key(d, keys):
    """Return a new dict containing only *keys* taken from *d*.

    *keys* may be an iterable of keys or a single whitespace-separated
    string; keys absent from *d* are silently skipped.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return {key: d[key] for key in keys if key in d}
class cached_property(object):
    """Non-data descriptor that computes an attribute once per instance.

    On first access the wrapped function is called and its result is
    stored on the instance under the same name, so subsequent accesses
    bypass the descriptor entirely.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        result = self.func(obj)
        # Cache on the instance; object.__setattr__ sidesteps any
        # overridden __setattr__ on obj's class.
        object.__setattr__(obj, self.func.__name__, result)
        return result
Raises\n ValueError on non-Unix-ish systems if 'pathname' either starts or\n ends with a slash.\n """\n if os.sep == '/':\n return pathname\n if not pathname:\n return pathname\n if pathname[0] == '/':\n raise ValueError("path '%s' cannot be absolute" % pathname)\n if pathname[-1] == '/':\n raise ValueError("path '%s' cannot end with '/'" % pathname)\n\n paths = pathname.split('/')\n while os.curdir in paths:\n paths.remove(os.curdir)\n if not paths:\n return os.curdir\n return os.path.join(*paths)\n\n\nclass FileOperator(object):\n\n def __init__(self, dry_run=False):\n self.dry_run = dry_run\n self.ensured = set()\n self._init_record()\n\n def _init_record(self):\n self.record = False\n self.files_written = set()\n self.dirs_created = set()\n\n def record_as_written(self, path):\n if self.record:\n self.files_written.add(path)\n\n def newer(self, source, target):\n """Tell if the target is newer than the source.\n\n Returns true if 'source' exists and is more recently modified than\n 'target', or if 'source' exists and 'target' doesn't.\n\n Returns false if both exist and 'target' is the same age or younger\n than 'source'. 
Raise PackagingFileError if 'source' does not exist.\n\n Note that this test is not very accurate: files created in the same\n second will have the same "age".\n """\n if not os.path.exists(source):\n raise DistlibException("file '%r' does not exist" % os.path.abspath(source))\n if not os.path.exists(target):\n return True\n\n return os.stat(source).st_mtime > os.stat(target).st_mtime\n\n def copy_file(self, infile, outfile, check=True):\n """Copy a file respecting dry-run and force flags.\n """\n self.ensure_dir(os.path.dirname(outfile))\n logger.info('Copying %s to %s', infile, outfile)\n if not self.dry_run:\n msg = None\n if check:\n if os.path.islink(outfile):\n msg = '%s is a symlink' % outfile\n elif os.path.exists(outfile) and not os.path.isfile(outfile):\n msg = '%s is a non-regular file' % outfile\n if msg:\n raise ValueError(msg + ' which would be overwritten')\n shutil.copyfile(infile, outfile)\n self.record_as_written(outfile)\n\n def copy_stream(self, instream, outfile, encoding=None):\n assert not os.path.isdir(outfile)\n self.ensure_dir(os.path.dirname(outfile))\n logger.info('Copying stream %s to %s', instream, outfile)\n if not self.dry_run:\n if encoding is None:\n outstream = open(outfile, 'wb')\n else:\n outstream = codecs.open(outfile, 'w', encoding=encoding)\n try:\n shutil.copyfileobj(instream, outstream)\n finally:\n outstream.close()\n self.record_as_written(outfile)\n\n def write_binary_file(self, path, data):\n self.ensure_dir(os.path.dirname(path))\n if not self.dry_run:\n if os.path.exists(path):\n os.remove(path)\n with open(path, 'wb') as f:\n f.write(data)\n self.record_as_written(path)\n\n def write_text_file(self, path, data, encoding):\n self.write_binary_file(path, data.encode(encoding))\n\n def set_mode(self, bits, mask, files):\n if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):\n # Set the executable bits (owner, group, and world) on\n # all the files specified.\n for f in files:\n if self.dry_run:\n 
logger.info("changing mode of %s", f)\n else:\n mode = (os.stat(f).st_mode | bits) & mask\n logger.info("changing mode of %s to %o", f, mode)\n os.chmod(f, mode)\n\n set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)\n\n def ensure_dir(self, path):\n path = os.path.abspath(path)\n if path not in self.ensured and not os.path.exists(path):\n self.ensured.add(path)\n d, f = os.path.split(path)\n self.ensure_dir(d)\n logger.info('Creating %s' % path)\n if not self.dry_run:\n os.mkdir(path)\n if self.record:\n self.dirs_created.add(path)\n\n def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):\n dpath = cache_from_source(path, not optimize)\n logger.info('Byte-compiling %s to %s', path, dpath)\n if not self.dry_run:\n if force or self.newer(path, dpath):\n if not prefix:\n diagpath = None\n else:\n assert path.startswith(prefix)\n diagpath = path[len(prefix):]\n compile_kwargs = {}\n if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):\n if not isinstance(hashed_invalidation, py_compile.PycInvalidationMode):\n hashed_invalidation = py_compile.PycInvalidationMode.CHECKED_HASH\n compile_kwargs['invalidation_mode'] = hashed_invalidation\n py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error\n self.record_as_written(dpath)\n return dpath\n\n def ensure_removed(self, path):\n if os.path.exists(path):\n if os.path.isdir(path) and not os.path.islink(path):\n logger.debug('Removing directory tree at %s', path)\n if not self.dry_run:\n shutil.rmtree(path)\n if self.record:\n if path in self.dirs_created:\n self.dirs_created.remove(path)\n else:\n if os.path.islink(path):\n s = 'link'\n else:\n s = 'file'\n logger.debug('Removing %s %s', s, path)\n if not self.dry_run:\n os.remove(path)\n if self.record:\n if path in self.files_written:\n self.files_written.remove(path)\n\n def is_writable(self, path):\n result = False\n while not result:\n if os.path.exists(path):\n result = 
def resolve(module_name, dotted_path):
    """Resolve *dotted_path* relative to the module *module_name*.

    The module is taken from ``sys.modules`` when already imported,
    otherwise it is imported.  When *dotted_path* is ``None`` the
    module object itself is returned; otherwise each dot-separated
    component is looked up with ``getattr`` in turn.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
    if dotted_path is None:
        return mod
    obj = mod
    for attr in dotted_path.split('.'):
        obj = getattr(obj, attr)
    return obj
def get_export_entry(specification):
    """Parse *specification* ('name = prefix:suffix [flags]') into an
    :class:`ExportEntry`.

    Returns ``None`` when the string does not look like an export entry
    at all; raises DistlibException when it looks like one but is
    malformed (stray brackets, multiple colons).
    """
    m = ENTRY_RE.search(specification)
    if not m:
        # A stray bracket means a malformed entry, not a non-entry.
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
        return None
    groups = m.groupdict()
    path = groups['callable']
    colons = path.count(':')
    if colons == 0:
        prefix, suffix = path, None
    elif colons == 1:
        prefix, suffix = path.split(':')
    else:
        raise DistlibException("Invalid specification "
                               "'%s'" % specification)
    flags = groups['flags']
    if flags is None:
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
        flags = []
    else:
        flags = [f.strip() for f in flags.split(',')]
    return ExportEntry(groups['name'], prefix, suffix, flags)
def parse_credentials(netloc):
    """Split a ``user:password@host`` network location.

    Returns a ``(username, password, host)`` triple; username and
    password are ``None`` when absent and %-unquoted when non-empty.
    """
    username = None
    password = None
    if '@' in netloc:
        creds, netloc = netloc.rsplit('@', 1)
        username, sep, pwd = creds.partition(':')
        if sep:
            password = pwd
    if username:
        username = unquote(username)
    if password:
        password = unquote(password)
    return username, password, netloc
def get_extras(requested, available):
    """Resolve a requested set of extras against the declared ones.

    ``'*'`` expands to every available extra and ``'-name'`` removes a
    previously included one (a literal ``'-'`` is kept as-is).  Extras
    not present in *available* are still honoured, but logged as
    undeclared.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.remove('*')
        result |= available
    for req in requested:
        if req == '-':
            result.add(req)
        elif req.startswith('-'):
            unwanted = req[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            result.discard(unwanted)
        else:
            if req not in available:
                logger.warning('undeclared extra: %s' % req)
            result.add(req)
    return result
def get_project_data(name):
    """Fetch extended project metadata for *name* from the external
    red-dove.com JSON mirror; returns an empty dict on any failure
    (see ``_get_external_data``).
    """
    # Projects are sharded by upper-cased first letter, e.g.
    # 'F/Flask/project.json'.
    url = '%s/%s/project.json' % (name[0].upper(), name)
    url = urljoin(_external_data_base_url, url)
    result = _get_external_data(url)
    return result
the wheel module.\n """\n\n def __init__(self, base):\n """\n Initialise an instance.\n\n :param base: The base directory where the cache should be located.\n """\n # we use 'isdir' instead of 'exists', because we want to\n # fail if there's a file with that name\n if not os.path.isdir(base): # pragma: no cover\n os.makedirs(base)\n if (os.stat(base).st_mode & 0o77) != 0:\n logger.warning('Directory \'%s\' is not private', base)\n self.base = os.path.abspath(os.path.normpath(base))\n\n def prefix_to_dir(self, prefix, use_abspath=True):\n """\n Converts a resource prefix to a directory name in the cache.\n """\n return path_to_cache_dir(prefix, use_abspath=use_abspath)\n\n def clear(self):\n """\n Clear the cache.\n """\n not_removed = []\n for fn in os.listdir(self.base):\n fn = os.path.join(self.base, fn)\n try:\n if os.path.islink(fn) or os.path.isfile(fn):\n os.remove(fn)\n elif os.path.isdir(fn):\n shutil.rmtree(fn)\n except Exception:\n not_removed.append(fn)\n return not_removed\n\n\nclass EventMixin(object):\n """\n A very simple publish/subscribe system.\n """\n\n def __init__(self):\n self._subscribers = {}\n\n def add(self, event, subscriber, append=True):\n """\n Add a subscriber for an event.\n\n :param event: The name of an event.\n :param subscriber: The subscriber to be added (and called when the\n event is published).\n :param append: Whether to append or prepend the subscriber to an\n existing subscriber list for the event.\n """\n subs = self._subscribers\n if event not in subs:\n subs[event] = deque([subscriber])\n else:\n sq = subs[event]\n if append:\n sq.append(subscriber)\n else:\n sq.appendleft(subscriber)\n\n def remove(self, event, subscriber):\n """\n Remove a subscriber for an event.\n\n :param event: The name of an event.\n :param subscriber: The subscriber to be removed.\n """\n subs = self._subscribers\n if event not in subs:\n raise ValueError('No subscribers: %r' % event)\n subs[event].remove(subscriber)\n\n def 
    def publish(self, event, *args, **kwargs):
        """
        Publish an event and return a list of values returned by its
        subscribers.

        Subscribers are called in the order yielded by
        ``get_subscribers``.  Publication is best-effort: a subscriber
        that raises contributes ``None`` to the result list (the
        exception is logged, not propagated).

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        result = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                # A failing subscriber must not prevent the others
                # from running.
                logger.exception('Exception during event publication')
                value = None
            result.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result)
        return result
%r' % (succ, pred))\n\n def is_step(self, step):\n return (step in self._preds or step in self._succs or step in self._nodes)\n\n def get_steps(self, final):\n if not self.is_step(final):\n raise ValueError('Unknown: %r' % final)\n result = []\n todo = []\n seen = set()\n todo.append(final)\n while todo:\n step = todo.pop(0)\n if step in seen:\n # if a step was already seen,\n # move it to the end (so it will appear earlier\n # when reversed on return) ... but not for the\n # final step, as that would be confusing for\n # users\n if step != final:\n result.remove(step)\n result.append(step)\n else:\n seen.add(step)\n result.append(step)\n preds = self._preds.get(step, ())\n todo.extend(preds)\n return reversed(result)\n\n @property\n def strong_connections(self):\n # http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm\n index_counter = [0]\n stack = []\n lowlinks = {}\n index = {}\n result = []\n\n graph = self._succs\n\n def strongconnect(node):\n # set the depth index for this node to the smallest unused index\n index[node] = index_counter[0]\n lowlinks[node] = index_counter[0]\n index_counter[0] += 1\n stack.append(node)\n\n # Consider successors\n try:\n successors = graph[node]\n except Exception:\n successors = []\n for successor in successors:\n if successor not in lowlinks:\n # Successor has not yet been visited\n strongconnect(successor)\n lowlinks[node] = min(lowlinks[node], lowlinks[successor])\n elif successor in stack:\n # the successor is in the stack and hence in the current\n # strongly connected component (SCC)\n lowlinks[node] = min(lowlinks[node], index[successor])\n\n # If `node` is a root node, pop the stack and generate an SCC\n if lowlinks[node] == index[node]:\n connected_component = []\n\n while True:\n successor = stack.pop()\n connected_component.append(successor)\n if successor == node:\n break\n component = tuple(connected_component)\n # storing the result\n result.append(component)\n\n for node in graph:\n 
if node not in lowlinks:\n strongconnect(node)\n\n return result\n\n @property\n def dot(self):\n result = ['digraph G {']\n for succ in self._preds:\n preds = self._preds[succ]\n for pred in preds:\n result.append(' %s -> %s;' % (pred, succ))\n for node in self._nodes:\n result.append(' %s;' % node)\n result.append('}')\n return '\n'.join(result)\n\n\n#\n# Unarchiving functionality for zip, tar, tgz, tbz, whl\n#\n\nARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz', '.whl')\n\n\ndef unarchive(archive_filename, dest_dir, format=None, check=True):\n\n def check_path(path):\n if not isinstance(path, text_type):\n path = path.decode('utf-8')\n p = os.path.abspath(os.path.join(dest_dir, path))\n if not p.startswith(dest_dir) or p[plen] != os.sep:\n raise ValueError('path outside destination: %r' % p)\n\n dest_dir = os.path.abspath(dest_dir)\n plen = len(dest_dir)\n archive = None\n if format is None:\n if archive_filename.endswith(('.zip', '.whl')):\n format = 'zip'\n elif archive_filename.endswith(('.tar.gz', '.tgz')):\n format = 'tgz'\n mode = 'r:gz'\n elif archive_filename.endswith(('.tar.bz2', '.tbz')):\n format = 'tbz'\n mode = 'r:bz2'\n elif archive_filename.endswith('.tar'):\n format = 'tar'\n mode = 'r'\n else: # pragma: no cover\n raise ValueError('Unknown format for %r' % archive_filename)\n try:\n if format == 'zip':\n archive = ZipFile(archive_filename, 'r')\n if check:\n names = archive.namelist()\n for name in names:\n check_path(name)\n else:\n archive = tarfile.open(archive_filename, mode)\n if check:\n names = archive.getnames()\n for name in names:\n check_path(name)\n if format != 'zip' and sys.version_info[0] < 3:\n # See Python issue 17153. 
def zip_dir(directory):
    """Zip the tree rooted at *directory* into an in-memory buffer.

    Returns a ``BytesIO`` holding the archive; entry names are the
    paths relative to *directory*.
    """
    buf = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(buf, "w") as archive:
        for root, _dirs, files in os.walk(directory):
            arc_root = root[prefix_len:]
            for filename in files:
                archive.write(os.path.join(root, filename),
                              os.path.join(arc_root, filename))
    return buf
return self\n\n def stop(self):\n if self.max is not None:\n self.update(self.max)\n self.done = True\n\n @property\n def maximum(self):\n return self.unknown if self.max is None else self.max\n\n @property\n def percentage(self):\n if self.done:\n result = '100 %'\n elif self.max is None:\n result = ' ?? %'\n else:\n v = 100.0 * (self.cur - self.min) / (self.max - self.min)\n result = '%3d %%' % v\n return result\n\n def format_duration(self, duration):\n if (duration <= 0) and self.max is None or self.cur == self.min:\n result = '??:??:??'\n # elif duration < 1:\n # result = '--:--:--'\n else:\n result = time.strftime('%H:%M:%S', time.gmtime(duration))\n return result\n\n @property\n def ETA(self):\n if self.done:\n prefix = 'Done'\n t = self.elapsed\n # import pdb; pdb.set_trace()\n else:\n prefix = 'ETA '\n if self.max is None:\n t = -1\n elif self.elapsed == 0 or (self.cur == self.min):\n t = 0\n else:\n # import pdb; pdb.set_trace()\n t = float(self.max - self.min)\n t /= self.cur - self.min\n t = (t - 1) * self.elapsed\n return '%s: %s' % (prefix, self.format_duration(t))\n\n @property\n def speed(self):\n if self.elapsed == 0:\n result = 0.0\n else:\n result = (self.cur - self.min) / self.elapsed\n for unit in UNITS:\n if result < 1000:\n break\n result /= 1000.0\n return '%d %sB/s' % (result, unit)\n\n\n#\n# Glob functionality\n#\n\nRICH_GLOB = re.compile(r'\{([^}]*)\}')\n_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')\n_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')\n\n\ndef iglob(path_glob):\n """Extended globbing function that supports ** and {opt1,opt2,opt3}."""\n if _CHECK_RECURSIVE_GLOB.search(path_glob):\n msg = """invalid glob %r: recursive glob "**" must be used alone"""\n raise ValueError(msg % path_glob)\n if _CHECK_MISMATCH_SET.search(path_glob):\n msg = """invalid glob %r: mismatching set marker '{' or '}'"""\n raise ValueError(msg % path_glob)\n return _iglob(path_glob)\n\n\ndef _iglob(path_glob):\n rich_path_glob = 
RICH_GLOB.split(path_glob, 1)\n if len(rich_path_glob) > 1:\n assert len(rich_path_glob) == 3, rich_path_glob\n prefix, set, suffix = rich_path_glob\n for item in set.split(','):\n for path in _iglob(''.join((prefix, item, suffix))):\n yield path\n else:\n if '**' not in path_glob:\n for item in std_iglob(path_glob):\n yield item\n else:\n prefix, radical = path_glob.split('**', 1)\n if prefix == '':\n prefix = '.'\n if radical == '':\n radical = '*'\n else:\n # we support both\n radical = radical.lstrip('/')\n radical = radical.lstrip('\\')\n for path, dir, files in os.walk(prefix):\n path = os.path.normpath(path)\n for fn in _iglob(os.path.join(path, radical)):\n yield fn\n\n\nif ssl:\n from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, CertificateError)\n\n #\n # HTTPSConnection which verifies certificates/matches domains\n #\n\n class HTTPSConnection(httplib.HTTPSConnection):\n ca_certs = None # set this to the path to the certs file (.pem)\n check_domain = True # only used if ca_certs is not None\n\n # noinspection PyPropertyAccess\n def connect(self):\n sock = socket.create_connection((self.host, self.port), self.timeout)\n if getattr(self, '_tunnel_host', False):\n self.sock = sock\n self._tunnel()\n\n context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n if hasattr(ssl, 'OP_NO_SSLv2'):\n context.options |= ssl.OP_NO_SSLv2\n if getattr(self, 'cert_file', None):\n context.load_cert_chain(self.cert_file, self.key_file)\n kwargs = {}\n if self.ca_certs:\n context.verify_mode = ssl.CERT_REQUIRED\n context.load_verify_locations(cafile=self.ca_certs)\n if getattr(ssl, 'HAS_SNI', False):\n kwargs['server_hostname'] = self.host\n\n self.sock = context.wrap_socket(sock, **kwargs)\n if self.ca_certs and self.check_domain:\n try:\n match_hostname(self.sock.getpeercert(), self.host)\n logger.debug('Host verified: %s', self.host)\n except CertificateError: # pragma: no cover\n self.sock.shutdown(socket.SHUT_RDWR)\n self.sock.close()\n raise\n\n class 
HTTPSHandler(BaseHTTPSHandler):\n\n def __init__(self, ca_certs, check_domain=True):\n BaseHTTPSHandler.__init__(self)\n self.ca_certs = ca_certs\n self.check_domain = check_domain\n\n def _conn_maker(self, *args, **kwargs):\n """\n This is called to create a connection instance. Normally you'd\n pass a connection class to do_open, but it doesn't actually check for\n a class, and just expects a callable. As long as we behave just as a\n constructor would have, we should be OK. If it ever changes so that\n we *must* pass a class, we'll create an UnsafeHTTPSConnection class\n which just sets check_domain to False in the class definition, and\n choose which one to pass to do_open.\n """\n result = HTTPSConnection(*args, **kwargs)\n if self.ca_certs:\n result.ca_certs = self.ca_certs\n result.check_domain = self.check_domain\n return result\n\n def https_open(self, req):\n try:\n return self.do_open(self._conn_maker, req)\n except URLError as e:\n if 'certificate verify failed' in str(e.reason):\n raise CertificateError('Unable to verify server certificate '\n 'for %s' % req.host)\n else:\n raise\n\n #\n # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-\n # Middle proxy using HTTP listens on port 443, or an index mistakenly serves\n # HTML containing a http://xyz link when it should be https://xyz),\n # you can use the following handler class, which does not allow HTTP traffic.\n #\n # It works by inheriting from HTTPHandler - so build_opener won't add a\n # handler for HTTP itself.\n #\n class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):\n\n def http_open(self, req):\n raise URLError('Unexpected HTTP request on what should be a secure '\n 'connection: %s' % req)\n\n\n#\n# XML-RPC with timeouts\n#\nclass Transport(xmlrpclib.Transport):\n\n def __init__(self, timeout, use_datetime=0):\n self.timeout = timeout\n xmlrpclib.Transport.__init__(self, use_datetime)\n\n def make_connection(self, host):\n h, eh, x509 = self.get_host_info(host)\n if 
not self._connection or host != self._connection[0]:\n self._extra_headers = eh\n self._connection = host, httplib.HTTPConnection(h)\n return self._connection[1]\n\n\nif ssl:\n\n class SafeTransport(xmlrpclib.SafeTransport):\n\n def __init__(self, timeout, use_datetime=0):\n self.timeout = timeout\n xmlrpclib.SafeTransport.__init__(self, use_datetime)\n\n def make_connection(self, host):\n h, eh, kwargs = self.get_host_info(host)\n if not kwargs:\n kwargs = {}\n kwargs['timeout'] = self.timeout\n if not self._connection or host != self._connection[0]:\n self._extra_headers = eh\n self._connection = host, httplib.HTTPSConnection(h, None, **kwargs)\n return self._connection[1]\n\n\nclass ServerProxy(xmlrpclib.ServerProxy):\n\n def __init__(self, uri, **kwargs):\n self.timeout = timeout = kwargs.pop('timeout', None)\n # The above classes only come into play if a timeout\n # is specified\n if timeout is not None:\n # scheme = splittype(uri) # deprecated as of Python 3.8\n scheme = urlparse(uri)[0]\n use_datetime = kwargs.get('use_datetime', 0)\n if scheme == 'https':\n tcls = SafeTransport\n else:\n tcls = Transport\n kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)\n self.transport = t\n xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)\n\n\n#\n# CSV functionality. This is provided because on 2.x, the csv module can't\n# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.\n#\n\n\ndef _csv_open(fn, mode, **kwargs):\n if sys.version_info[0] < 3:\n mode += 'b'\n else:\n kwargs['newline'] = ''\n # Python 3 determines encoding from locale. 
Force 'utf-8'\n # file encoding to match other forced utf-8 encoding\n kwargs['encoding'] = 'utf-8'\n return open(fn, mode, **kwargs)\n\n\nclass CSVBase(object):\n defaults = {\n 'delimiter': str(','), # The strs are used because we need native\n 'quotechar': str('"'), # str in the csv API (2.x won't take\n 'lineterminator': str('\n') # Unicode)\n }\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc_info):\n self.stream.close()\n\n\nclass CSVReader(CSVBase):\n\n def __init__(self, **kwargs):\n if 'stream' in kwargs:\n stream = kwargs['stream']\n if sys.version_info[0] >= 3:\n # needs to be a text stream\n stream = codecs.getreader('utf-8')(stream)\n self.stream = stream\n else:\n self.stream = _csv_open(kwargs['path'], 'r')\n self.reader = csv.reader(self.stream, **self.defaults)\n\n def __iter__(self):\n return self\n\n def next(self):\n result = next(self.reader)\n if sys.version_info[0] < 3:\n for i, item in enumerate(result):\n if not isinstance(item, text_type):\n result[i] = item.decode('utf-8')\n return result\n\n __next__ = next\n\n\nclass CSVWriter(CSVBase):\n\n def __init__(self, fn, **kwargs):\n self.stream = _csv_open(fn, 'w')\n self.writer = csv.writer(self.stream, **self.defaults)\n\n def writerow(self, row):\n if sys.version_info[0] < 3:\n r = []\n for item in row:\n if isinstance(item, text_type):\n item = item.encode('utf-8')\n r.append(item)\n row = r\n self.writer.writerow(row)\n\n\n#\n# Configurator functionality\n#\n\n\nclass Configurator(BaseConfigurator):\n\n value_converters = dict(BaseConfigurator.value_converters)\n value_converters['inc'] = 'inc_convert'\n\n def __init__(self, config, base=None):\n super(Configurator, self).__init__(config)\n self.base = base or os.getcwd()\n\n def configure_custom(self, config):\n\n def convert(o):\n if isinstance(o, (list, tuple)):\n result = type(o)([convert(i) for i in o])\n elif isinstance(o, dict):\n if '()' in o:\n result = self.configure_custom(o)\n else:\n result = {}\n for k in 
o:\n result[k] = convert(o[k])\n else:\n result = self.convert(o)\n return result\n\n c = config.pop('()')\n if not callable(c):\n c = self.resolve(c)\n props = config.pop('.', None)\n # Check for valid identifiers\n args = config.pop('[]', ())\n if args:\n args = tuple([convert(o) for o in args])\n items = [(k, convert(config[k])) for k in config if valid_ident(k)]\n kwargs = dict(items)\n result = c(*args, **kwargs)\n if props:\n for n, v in props.items():\n setattr(result, n, convert(v))\n return result\n\n def __getitem__(self, key):\n result = self.config[key]\n if isinstance(result, dict) and '()' in result:\n self.config[key] = result = self.configure_custom(result)\n return result\n\n def inc_convert(self, value):\n """Default converter for the inc:// protocol."""\n if not os.path.isabs(value):\n value = os.path.join(self.base, value)\n with codecs.open(value, 'r', encoding='utf-8') as f:\n result = json.load(f)\n return result\n\n\nclass SubprocessMixin(object):\n """\n Mixin for running subprocesses and capturing their output\n """\n\n def __init__(self, verbose=False, progress=None):\n self.verbose = verbose\n self.progress = progress\n\n def reader(self, stream, context):\n """\n Read lines from a subprocess' output stream and either pass to a progress\n callable (if specified) or write progress information to sys.stderr.\n """\n progress = self.progress\n verbose = self.verbose\n while True:\n s = stream.readline()\n if not s:\n break\n if progress is not None:\n progress(s, context)\n else:\n if not verbose:\n sys.stderr.write('.')\n else:\n sys.stderr.write(s.decode('utf-8'))\n sys.stderr.flush()\n stream.close()\n\n def run_command(self, cmd, **kwargs):\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)\n t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))\n t1.start()\n t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))\n t2.start()\n p.wait()\n t1.join()\n t2.join()\n if 
self.progress is not None:\n self.progress('done.', 'main')\n elif self.verbose:\n sys.stderr.write('done.\n')\n return p\n\n\ndef normalize_name(name):\n """Normalize a python package name a la PEP 503"""\n # https://www.python.org/dev/peps/pep-0503/#normalized-names\n return re.sub('[-_.]+', '-', name).lower()\n\n\n# def _get_pypirc_command():\n# """\n# Get the distutils command for interacting with PyPI configurations.\n# :return: the command.\n# """\n# from distutils.core import Distribution\n# from distutils.config import PyPIRCCommand\n# d = Distribution()\n# return PyPIRCCommand(d)\n\n\nclass PyPIRCFile(object):\n\n DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'\n DEFAULT_REALM = 'pypi'\n\n def __init__(self, fn=None, url=None):\n if fn is None:\n fn = os.path.join(os.path.expanduser('~'), '.pypirc')\n self.filename = fn\n self.url = url\n\n def read(self):\n result = {}\n\n if os.path.exists(self.filename):\n repository = self.url or self.DEFAULT_REPOSITORY\n\n config = configparser.RawConfigParser()\n config.read(self.filename)\n sections = config.sections()\n if 'distutils' in sections:\n # let's get the list of servers\n index_servers = config.get('distutils', 'index-servers')\n _servers = [server.strip() for server in index_servers.split('\n') if server.strip() != '']\n if _servers == []:\n # nothing set, let's try to get the default pypi\n if 'pypi' in sections:\n _servers = ['pypi']\n else:\n for server in _servers:\n result = {'server': server}\n result['username'] = config.get(server, 'username')\n\n # optional params\n for key, default in (('repository', self.DEFAULT_REPOSITORY), ('realm', self.DEFAULT_REALM),\n ('password', None)):\n if config.has_option(server, key):\n result[key] = config.get(server, key)\n else:\n result[key] = default\n\n # work around people having "repository" for the "pypi"\n # section of their config set to the HTTP (rather than\n # HTTPS) URL\n if (server == 'pypi' and repository in (self.DEFAULT_REPOSITORY, 
'pypi')):\n result['repository'] = self.DEFAULT_REPOSITORY\n elif (result['server'] != repository and result['repository'] != repository):\n result = {}\n elif 'server-login' in sections:\n # old format\n server = 'server-login'\n if config.has_option(server, 'repository'):\n repository = config.get(server, 'repository')\n else:\n repository = self.DEFAULT_REPOSITORY\n result = {\n 'username': config.get(server, 'username'),\n 'password': config.get(server, 'password'),\n 'repository': repository,\n 'server': server,\n 'realm': self.DEFAULT_REALM\n }\n return result\n\n def update(self, username, password):\n # import pdb; pdb.set_trace()\n config = configparser.RawConfigParser()\n fn = self.filename\n config.read(fn)\n if not config.has_section('pypi'):\n config.add_section('pypi')\n config.set('pypi', 'username', username)\n config.set('pypi', 'password', password)\n with open(fn, 'w') as f:\n config.write(f)\n\n\ndef _load_pypirc(index):\n """\n Read the PyPI access configuration as supported by distutils.\n """\n return PyPIRCFile(url=index.url).read()\n\n\ndef _store_pypirc(index):\n PyPIRCFile().update(index.username, index.password)\n\n\n#\n# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor\n# tweaks\n#\n\n\ndef get_host_platform():\n """Return a string that identifies the current platform. This is used mainly to\n distinguish platform-specific build directories and platform-specific built\n distributions. Typically includes the OS name and version and the\n architecture (as supplied by 'os.uname()'), although the exact information\n included depends on the OS; eg. 
on Linux, the kernel version isn't\n particularly important.\n\n Examples of returned values:\n linux-i586\n linux-alpha (?)\n solaris-2.6-sun4u\n\n Windows will return one of:\n win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)\n win32 (all others - specifically, sys.platform is returned)\n\n For other non-POSIX platforms, currently just returns 'sys.platform'.\n\n """\n if os.name == 'nt':\n if 'amd64' in sys.version.lower():\n return 'win-amd64'\n if '(arm)' in sys.version.lower():\n return 'win-arm32'\n if '(arm64)' in sys.version.lower():\n return 'win-arm64'\n return sys.platform\n\n # Set for cross builds explicitly\n if "_PYTHON_HOST_PLATFORM" in os.environ:\n return os.environ["_PYTHON_HOST_PLATFORM"]\n\n if os.name != 'posix' or not hasattr(os, 'uname'):\n # XXX what about the architecture? NT is Intel or Alpha,\n # Mac OS is M68k or PPC, etc.\n return sys.platform\n\n # Try to distinguish various flavours of Unix\n\n (osname, host, release, version, machine) = os.uname()\n\n # Convert the OS name to lowercase, remove '/' characters, and translate\n # spaces (for "Power Macintosh")\n osname = osname.lower().replace('/', '')\n machine = machine.replace(' ', '_').replace('/', '-')\n\n if osname[:5] == 'linux':\n # At least on Linux/Intel, 'machine' is the processor --\n # i386, etc.\n # XXX what about Alpha, SPARC, etc?\n return "%s-%s" % (osname, machine)\n\n elif osname[:5] == 'sunos':\n if release[0] >= '5': # SunOS 5 == Solaris 2\n osname = 'solaris'\n release = '%d.%s' % (int(release[0]) - 3, release[2:])\n # We can't use 'platform.architecture()[0]' because a\n # bootstrap problem. 
We use a dict to get an error\n # if some suspicious happens.\n bitness = {2147483647: '32bit', 9223372036854775807: '64bit'}\n machine += '.%s' % bitness[sys.maxsize]\n # fall through to standard osname-release-machine representation\n elif osname[:3] == 'aix':\n from _aix_support import aix_platform\n return aix_platform()\n elif osname[:6] == 'cygwin':\n osname = 'cygwin'\n rel_re = re.compile(r'[\d.]+', re.ASCII)\n m = rel_re.match(release)\n if m:\n release = m.group()\n elif osname[:6] == 'darwin':\n import _osx_support\n try:\n from distutils import sysconfig\n except ImportError:\n import sysconfig\n osname, release, machine = _osx_support.get_platform_osx(sysconfig.get_config_vars(), osname, release, machine)\n\n return '%s-%s-%s' % (osname, release, machine)\n\n\n_TARGET_TO_PLAT = {\n 'x86': 'win32',\n 'x64': 'win-amd64',\n 'arm': 'win-arm32',\n}\n\n\ndef get_platform():\n if os.name != 'nt':\n return get_host_platform()\n cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH')\n if cross_compilation_target not in _TARGET_TO_PLAT:\n return get_host_platform()\n return _TARGET_TO_PLAT[cross_compilation_target]\n
.venv\Lib\site-packages\pip\_vendor\distlib\util.py
util.py
Python
66,682
0.75
0.259577
0.098817
vue-tools
211
2025-06-14T21:03:42.479213
GPL-3.0
false
3f732b3a89303bbd73855eaa78cc532f
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012-2023 The Python Software Foundation.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\n"""\nImplementation of a flexible versioning scheme providing support for PEP-440,\nsetuptools-compatible and semantic versioning.\n"""\n\nimport logging\nimport re\n\nfrom .compat import string_types\nfrom .util import parse_requirement\n\n__all__ = ['NormalizedVersion', 'NormalizedMatcher',\n 'LegacyVersion', 'LegacyMatcher',\n 'SemanticVersion', 'SemanticMatcher',\n 'UnsupportedVersionError', 'get_scheme']\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnsupportedVersionError(ValueError):\n """This is an unsupported version."""\n pass\n\n\nclass Version(object):\n def __init__(self, s):\n self._string = s = s.strip()\n self._parts = parts = self.parse(s)\n assert isinstance(parts, tuple)\n assert len(parts) > 0\n\n def parse(self, s):\n raise NotImplementedError('please implement in a subclass')\n\n def _check_compatible(self, other):\n if type(self) != type(other):\n raise TypeError('cannot compare %r and %r' % (self, other))\n\n def __eq__(self, other):\n self._check_compatible(other)\n return self._parts == other._parts\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __lt__(self, other):\n self._check_compatible(other)\n return self._parts < other._parts\n\n def __gt__(self, other):\n return not (self.__lt__(other) or self.__eq__(other))\n\n def __le__(self, other):\n return self.__lt__(other) or self.__eq__(other)\n\n def __ge__(self, other):\n return self.__gt__(other) or self.__eq__(other)\n\n # See http://docs.python.org/reference/datamodel#object.__hash__\n def __hash__(self):\n return hash(self._parts)\n\n def __repr__(self):\n return "%s('%s')" % (self.__class__.__name__, self._string)\n\n def __str__(self):\n return self._string\n\n @property\n def is_prerelease(self):\n raise NotImplementedError('Please implement in subclasses.')\n\n\nclass Matcher(object):\n version_class = None\n\n # value is 
either a callable or the name of a method\n _operators = {\n '<': lambda v, c, p: v < c,\n '>': lambda v, c, p: v > c,\n '<=': lambda v, c, p: v == c or v < c,\n '>=': lambda v, c, p: v == c or v > c,\n '==': lambda v, c, p: v == c,\n '===': lambda v, c, p: v == c,\n # by default, compatible => >=.\n '~=': lambda v, c, p: v == c or v > c,\n '!=': lambda v, c, p: v != c,\n }\n\n # this is a method only to support alternative implementations\n # via overriding\n def parse_requirement(self, s):\n return parse_requirement(s)\n\n def __init__(self, s):\n if self.version_class is None:\n raise ValueError('Please specify a version class')\n self._string = s = s.strip()\n r = self.parse_requirement(s)\n if not r:\n raise ValueError('Not valid: %r' % s)\n self.name = r.name\n self.key = self.name.lower() # for case-insensitive comparisons\n clist = []\n if r.constraints:\n # import pdb; pdb.set_trace()\n for op, s in r.constraints:\n if s.endswith('.*'):\n if op not in ('==', '!='):\n raise ValueError('\'.*\' not allowed for '\n '%r constraints' % op)\n # Could be a partial version (e.g. 
for '2.*') which\n # won't parse as a version, so keep it as a string\n vn, prefix = s[:-2], True\n # Just to check that vn is a valid version\n self.version_class(vn)\n else:\n # Should parse as a version, so we can create an\n # instance for the comparison\n vn, prefix = self.version_class(s), False\n clist.append((op, vn, prefix))\n self._parts = tuple(clist)\n\n def match(self, version):\n """\n Check if the provided version matches the constraints.\n\n :param version: The version to match against this instance.\n :type version: String or :class:`Version` instance.\n """\n if isinstance(version, string_types):\n version = self.version_class(version)\n for operator, constraint, prefix in self._parts:\n f = self._operators.get(operator)\n if isinstance(f, string_types):\n f = getattr(self, f)\n if not f:\n msg = ('%r not implemented '\n 'for %s' % (operator, self.__class__.__name__))\n raise NotImplementedError(msg)\n if not f(version, constraint, prefix):\n return False\n return True\n\n @property\n def exact_version(self):\n result = None\n if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):\n result = self._parts[0][1]\n return result\n\n def _check_compatible(self, other):\n if type(self) != type(other) or self.name != other.name:\n raise TypeError('cannot compare %s and %s' % (self, other))\n\n def __eq__(self, other):\n self._check_compatible(other)\n return self.key == other.key and self._parts == other._parts\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n # See http://docs.python.org/reference/datamodel#object.__hash__\n def __hash__(self):\n return hash(self.key) + hash(self._parts)\n\n def __repr__(self):\n return "%s(%r)" % (self.__class__.__name__, self._string)\n\n def __str__(self):\n return self._string\n\n\nPEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|alpha|b|beta|c|rc|pre|preview)(\d+)?)?'\n r'(\.(post|r|rev)(\d+)?)?([._-]?(dev)(\d+)?)?'\n r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$', re.I)\n\n\ndef 
_pep_440_key(s):\n s = s.strip()\n m = PEP440_VERSION_RE.match(s)\n if not m:\n raise UnsupportedVersionError('Not a valid version: %s' % s)\n groups = m.groups()\n nums = tuple(int(v) for v in groups[1].split('.'))\n while len(nums) > 1 and nums[-1] == 0:\n nums = nums[:-1]\n\n if not groups[0]:\n epoch = 0\n else:\n epoch = int(groups[0][:-1])\n pre = groups[4:6]\n post = groups[7:9]\n dev = groups[10:12]\n local = groups[13]\n if pre == (None, None):\n pre = ()\n else:\n if pre[1] is None:\n pre = pre[0], 0\n else:\n pre = pre[0], int(pre[1])\n if post == (None, None):\n post = ()\n else:\n if post[1] is None:\n post = post[0], 0\n else:\n post = post[0], int(post[1])\n if dev == (None, None):\n dev = ()\n else:\n if dev[1] is None:\n dev = dev[0], 0\n else:\n dev = dev[0], int(dev[1])\n if local is None:\n local = ()\n else:\n parts = []\n for part in local.split('.'):\n # to ensure that numeric compares as > lexicographic, avoid\n # comparing them directly, but encode a tuple which ensures\n # correct sorting\n if part.isdigit():\n part = (1, int(part))\n else:\n part = (0, part)\n parts.append(part)\n local = tuple(parts)\n if not pre:\n # either before pre-release, or final release and after\n if not post and dev:\n # before pre-release\n pre = ('a', -1) # to sort before a0\n else:\n pre = ('z',) # to sort after all pre-releases\n # now look at the state of post and dev.\n if not post:\n post = ('_',) # sort before 'a'\n if not dev:\n dev = ('final',)\n\n return epoch, nums, pre, post, dev, local\n\n\n_normalized_key = _pep_440_key\n\n\nclass NormalizedVersion(Version):\n """A rational version.\n\n Good:\n 1.2 # equivalent to "1.2.0"\n 1.2.0\n 1.2a1\n 1.2.3a2\n 1.2.3b1\n 1.2.3c1\n 1.2.3.4\n TODO: fill this out\n\n Bad:\n 1 # minimum two numbers\n 1.2a # release level must have a release serial\n 1.2.3b\n """\n def parse(self, s):\n result = _normalized_key(s)\n # _normalized_key loses trailing zeroes in the release\n # clause, since that's needed to ensure 
that X.Y == X.Y.0 == X.Y.0.0\n # However, PEP 440 prefix matching needs it: for example,\n # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).\n m = PEP440_VERSION_RE.match(s) # must succeed\n groups = m.groups()\n self._release_clause = tuple(int(v) for v in groups[1].split('.'))\n return result\n\n PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])\n\n @property\n def is_prerelease(self):\n return any(t[0] in self.PREREL_TAGS for t in self._parts if t)\n\n\ndef _match_prefix(x, y):\n x = str(x)\n y = str(y)\n if x == y:\n return True\n if not x.startswith(y):\n return False\n n = len(y)\n return x[n] == '.'\n\n\nclass NormalizedMatcher(Matcher):\n version_class = NormalizedVersion\n\n # value is either a callable or the name of a method\n _operators = {\n '~=': '_match_compatible',\n '<': '_match_lt',\n '>': '_match_gt',\n '<=': '_match_le',\n '>=': '_match_ge',\n '==': '_match_eq',\n '===': '_match_arbitrary',\n '!=': '_match_ne',\n }\n\n def _adjust_local(self, version, constraint, prefix):\n if prefix:\n strip_local = '+' not in constraint and version._parts[-1]\n else:\n # both constraint and version are\n # NormalizedVersion instances.\n # If constraint does not have a local component,\n # ensure the version doesn't, either.\n strip_local = not constraint._parts[-1] and version._parts[-1]\n if strip_local:\n s = version._string.split('+', 1)[0]\n version = self.version_class(s)\n return version, constraint\n\n def _match_lt(self, version, constraint, prefix):\n version, constraint = self._adjust_local(version, constraint, prefix)\n if version >= constraint:\n return False\n release_clause = constraint._release_clause\n pfx = '.'.join([str(i) for i in release_clause])\n return not _match_prefix(version, pfx)\n\n def _match_gt(self, version, constraint, prefix):\n version, constraint = self._adjust_local(version, constraint, prefix)\n if version <= constraint:\n return False\n release_clause = constraint._release_clause\n pfx = '.'.join([str(i) for i in 
release_clause])\n return not _match_prefix(version, pfx)\n\n def _match_le(self, version, constraint, prefix):\n version, constraint = self._adjust_local(version, constraint, prefix)\n return version <= constraint\n\n def _match_ge(self, version, constraint, prefix):\n version, constraint = self._adjust_local(version, constraint, prefix)\n return version >= constraint\n\n def _match_eq(self, version, constraint, prefix):\n version, constraint = self._adjust_local(version, constraint, prefix)\n if not prefix:\n result = (version == constraint)\n else:\n result = _match_prefix(version, constraint)\n return result\n\n def _match_arbitrary(self, version, constraint, prefix):\n return str(version) == str(constraint)\n\n def _match_ne(self, version, constraint, prefix):\n version, constraint = self._adjust_local(version, constraint, prefix)\n if not prefix:\n result = (version != constraint)\n else:\n result = not _match_prefix(version, constraint)\n return result\n\n def _match_compatible(self, version, constraint, prefix):\n version, constraint = self._adjust_local(version, constraint, prefix)\n if version == constraint:\n return True\n if version < constraint:\n return False\n# if not prefix:\n# return True\n release_clause = constraint._release_clause\n if len(release_clause) > 1:\n release_clause = release_clause[:-1]\n pfx = '.'.join([str(i) for i in release_clause])\n return _match_prefix(version, pfx)\n\n\n_REPLACEMENTS = (\n (re.compile('[.+-]$'), ''), # remove trailing puncts\n (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start\n (re.compile('^[.-]'), ''), # remove leading puncts\n (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses\n (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)\n (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion)\n (re.compile('[.]{2,}'), '.'), # multiple runs of '.'\n (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha\n (re.compile(r'\b(pre-alpha|prealpha)\b'),\n 'pre.alpha'), # 
standardise\n (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses\n)\n\n_SUFFIX_REPLACEMENTS = (\n (re.compile('^[:~._+-]+'), ''), # remove leading puncts\n (re.compile('[,*")([\\]]'), ''), # remove unwanted chars\n (re.compile('[~:+_ -]'), '.'), # replace illegal chars\n (re.compile('[.]{2,}'), '.'), # multiple runs of '.'\n (re.compile(r'\.$'), ''), # trailing '.'\n)\n\n_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')\n\n\ndef _suggest_semantic_version(s):\n """\n Try to suggest a semantic form for a version for which\n _suggest_normalized_version couldn't come up with anything.\n """\n result = s.strip().lower()\n for pat, repl in _REPLACEMENTS:\n result = pat.sub(repl, result)\n if not result:\n result = '0.0.0'\n\n # Now look for numeric prefix, and separate it out from\n # the rest.\n # import pdb; pdb.set_trace()\n m = _NUMERIC_PREFIX.match(result)\n if not m:\n prefix = '0.0.0'\n suffix = result\n else:\n prefix = m.groups()[0].split('.')\n prefix = [int(i) for i in prefix]\n while len(prefix) < 3:\n prefix.append(0)\n if len(prefix) == 3:\n suffix = result[m.end():]\n else:\n suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]\n prefix = prefix[:3]\n prefix = '.'.join([str(i) for i in prefix])\n suffix = suffix.strip()\n if suffix:\n # import pdb; pdb.set_trace()\n # massage the suffix.\n for pat, repl in _SUFFIX_REPLACEMENTS:\n suffix = pat.sub(repl, suffix)\n\n if not suffix:\n result = prefix\n else:\n sep = '-' if 'dev' in suffix else '+'\n result = prefix + sep + suffix\n if not is_semver(result):\n result = None\n return result\n\n\ndef _suggest_normalized_version(s):\n """Suggest a normalized version close to the given version string.\n\n If you have a version string that isn't rational (i.e. 
NormalizedVersion\n doesn't like it) then you might be able to get an equivalent (or close)\n rational version from this function.\n\n This does a number of simple normalizations to the given string, based\n on observation of versions currently in use on PyPI. Given a dump of\n those version during PyCon 2009, 4287 of them:\n - 2312 (53.93%) match NormalizedVersion without change\n with the automatic suggestion\n - 3474 (81.04%) match when using this suggestion method\n\n @param s {str} An irrational version string.\n @returns A rational version string, or None, if couldn't determine one.\n """\n try:\n _normalized_key(s)\n return s # already rational\n except UnsupportedVersionError:\n pass\n\n rs = s.lower()\n\n # part of this could use maketrans\n for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),\n ('beta', 'b'), ('rc', 'c'), ('-final', ''),\n ('-pre', 'c'),\n ('-release', ''), ('.release', ''), ('-stable', ''),\n ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),\n ('final', '')):\n rs = rs.replace(orig, repl)\n\n # if something ends with dev or pre, we add a 0\n rs = re.sub(r"pre$", r"pre0", rs)\n rs = re.sub(r"dev$", r"dev0", rs)\n\n # if we have something like "b-2" or "a.2" at the end of the\n # version, that is probably beta, alpha, etc\n # let's remove the dash or dot\n rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)\n\n # 1.0-dev-r371 -> 1.0.dev371\n # 0.1-dev-r79 -> 0.1.dev79\n rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)\n\n # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1\n rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)\n\n # Clean: v0.3, v1.0\n if rs.startswith('v'):\n rs = rs[1:]\n\n # Clean leading '0's on numbers.\n # TODO: unintended side-effect on, e.g., "2003.05.09"\n # PyPI stats: 77 (~2%) better\n rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)\n\n # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". 
Setuptools infers\n # zero.\n # PyPI stats: 245 (7.56%) better\n rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)\n\n # the 'dev-rNNN' tag is a dev tag\n rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)\n\n # clean the - when used as a pre delimiter\n rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)\n\n # a terminal "dev" or "devel" can be changed into ".dev0"\n rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)\n\n # a terminal "dev" can be changed into ".dev0"\n rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)\n\n # a terminal "final" or "stable" can be removed\n rs = re.sub(r"(final|stable)$", "", rs)\n\n # The 'r' and the '-' tags are post release tags\n # 0.4a1.r10 -> 0.4a1.post10\n # 0.9.33-17222 -> 0.9.33.post17222\n # 0.9.33-r17222 -> 0.9.33.post17222\n rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)\n\n # Clean 'r' instead of 'dev' usage:\n # 0.9.33+r17222 -> 0.9.33.dev17222\n # 1.0dev123 -> 1.0.dev123\n # 1.0.git123 -> 1.0.dev123\n # 1.0.bzr123 -> 1.0.dev123\n # 0.1a0dev.123 -> 0.1a0.dev123\n # PyPI stats: ~150 (~4%) better\n rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)\n\n # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:\n # 0.2.pre1 -> 0.2c1\n # 0.2-c1 -> 0.2c1\n # 1.0preview123 -> 1.0c123\n # PyPI stats: ~21 (0.62%) better\n rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)\n\n # Tcl/Tk uses "px" for their post release markers\n rs = re.sub(r"p(\d+)$", r".post\1", rs)\n\n try:\n _normalized_key(rs)\n except UnsupportedVersionError:\n rs = None\n return rs\n\n#\n# Legacy version processing (distribute-compatible)\n#\n\n\n_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)\n_VERSION_REPLACE = {\n 'pre': 'c',\n 'preview': 'c',\n '-': 'final-',\n 'rc': 'c',\n 'dev': '@',\n '': None,\n '.': None,\n}\n\n\ndef _legacy_key(s):\n def get_parts(s):\n result = []\n for p in _VERSION_PART.split(s.lower()):\n p = _VERSION_REPLACE.get(p, p)\n if p:\n if '0' <= p[:1] <= '9':\n p = p.zfill(8)\n else:\n p = '*' + p\n 
result.append(p)\n result.append('*final')\n return result\n\n result = []\n for p in get_parts(s):\n if p.startswith('*'):\n if p < '*final':\n while result and result[-1] == '*final-':\n result.pop()\n while result and result[-1] == '00000000':\n result.pop()\n result.append(p)\n return tuple(result)\n\n\nclass LegacyVersion(Version):\n def parse(self, s):\n return _legacy_key(s)\n\n @property\n def is_prerelease(self):\n result = False\n for x in self._parts:\n if (isinstance(x, string_types) and x.startswith('*') and x < '*final'):\n result = True\n break\n return result\n\n\nclass LegacyMatcher(Matcher):\n version_class = LegacyVersion\n\n _operators = dict(Matcher._operators)\n _operators['~='] = '_match_compatible'\n\n numeric_re = re.compile(r'^(\d+(\.\d+)*)')\n\n def _match_compatible(self, version, constraint, prefix):\n if version < constraint:\n return False\n m = self.numeric_re.match(str(constraint))\n if not m:\n logger.warning('Cannot compute compatible match for version %s '\n ' and constraint %s', version, constraint)\n return True\n s = m.groups()[0]\n if '.' 
in s:\n s = s.rsplit('.', 1)[0]\n return _match_prefix(version, s)\n\n#\n# Semantic versioning\n#\n\n\n_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'\n r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'\n r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)\n\n\ndef is_semver(s):\n return _SEMVER_RE.match(s)\n\n\ndef _semantic_key(s):\n def make_tuple(s, absent):\n if s is None:\n result = (absent,)\n else:\n parts = s[1:].split('.')\n # We can't compare ints and strings on Python 3, so fudge it\n # by zero-filling numeric values so simulate a numeric comparison\n result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])\n return result\n\n m = is_semver(s)\n if not m:\n raise UnsupportedVersionError(s)\n groups = m.groups()\n major, minor, patch = [int(i) for i in groups[:3]]\n # choose the '|' and '*' so that versions sort correctly\n pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')\n return (major, minor, patch), pre, build\n\n\nclass SemanticVersion(Version):\n def parse(self, s):\n return _semantic_key(s)\n\n @property\n def is_prerelease(self):\n return self._parts[1][0] != '|'\n\n\nclass SemanticMatcher(Matcher):\n version_class = SemanticVersion\n\n\nclass VersionScheme(object):\n def __init__(self, key, matcher, suggester=None):\n self.key = key\n self.matcher = matcher\n self.suggester = suggester\n\n def is_valid_version(self, s):\n try:\n self.matcher.version_class(s)\n result = True\n except UnsupportedVersionError:\n result = False\n return result\n\n def is_valid_matcher(self, s):\n try:\n self.matcher(s)\n result = True\n except UnsupportedVersionError:\n result = False\n return result\n\n def is_valid_constraint_list(self, s):\n """\n Used for processing some metadata fields\n """\n # See issue #140. 
Be tolerant of a single trailing comma.\n if s.endswith(','):\n s = s[:-1]\n return self.is_valid_matcher('dummy_name (%s)' % s)\n\n def suggest(self, s):\n if self.suggester is None:\n result = None\n else:\n result = self.suggester(s)\n return result\n\n\n_SCHEMES = {\n 'normalized': VersionScheme(_normalized_key, NormalizedMatcher,\n _suggest_normalized_version),\n 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),\n 'semantic': VersionScheme(_semantic_key, SemanticMatcher,\n _suggest_semantic_version),\n}\n\n_SCHEMES['default'] = _SCHEMES['normalized']\n\n\ndef get_scheme(name):\n if name not in _SCHEMES:\n raise ValueError('unknown scheme name: %r' % name)\n return _SCHEMES[name]\n
.venv\Lib\site-packages\pip\_vendor\distlib\version.py
version.py
Python
23,727
0.95
0.230667
0.139159
react-lib
822
2024-05-27T09:42:26.488744
Apache-2.0
false
de186d521247ccd3d66e90ead8f7a3b2
MZ
.venv\Lib\site-packages\pip\_vendor\distlib\w32.exe
w32.exe
Other
91,648
0.6
0.007246
0.002457
node-utils
105
2025-05-10T21:02:31.799093
Apache-2.0
false
4fc0cc28323076e8ce502631c316254a
MZ
.venv\Lib\site-packages\pip\_vendor\distlib\w64.exe
w64.exe
Other
101,888
0.6
0.004518
0.003049
vue-tools
808
2023-10-23T07:23:50.106241
GPL-3.0
false
25e8b53283a9ed1a4b9273a0d3b0c9dc
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2023 Vinay Sajip.\n# Licensed to the Python Software Foundation under a contributor agreement.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\nfrom __future__ import unicode_literals\n\nimport base64\nimport codecs\nimport datetime\nfrom email import message_from_file\nimport hashlib\nimport json\nimport logging\nimport os\nimport posixpath\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport zipfile\n\nfrom . import __version__, DistlibException\nfrom .compat import sysconfig, ZipFile, fsdecode, text_type, filter\nfrom .database import InstalledDistribution\nfrom .metadata import Metadata, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME\nfrom .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, cached_property, get_cache_base,\n read_exports, tempdir, get_platform)\nfrom .version import NormalizedVersion, UnsupportedVersionError\n\nlogger = logging.getLogger(__name__)\n\ncache = None # created when needed\n\nif hasattr(sys, 'pypy_version_info'): # pragma: no cover\n IMP_PREFIX = 'pp'\nelif sys.platform.startswith('java'): # pragma: no cover\n IMP_PREFIX = 'jy'\nelif sys.platform == 'cli': # pragma: no cover\n IMP_PREFIX = 'ip'\nelse:\n IMP_PREFIX = 'cp'\n\nVER_SUFFIX = sysconfig.get_config_var('py_version_nodot')\nif not VER_SUFFIX: # pragma: no cover\n VER_SUFFIX = '%s%s' % sys.version_info[:2]\nPYVER = 'py' + VER_SUFFIX\nIMPVER = IMP_PREFIX + VER_SUFFIX\n\nARCH = get_platform().replace('-', '_').replace('.', '_')\n\nABI = sysconfig.get_config_var('SOABI')\nif ABI and ABI.startswith('cpython-'):\n ABI = ABI.replace('cpython-', 'cp').split('-')[0]\nelse:\n\n def _derive_abi():\n parts = ['cp', VER_SUFFIX]\n if sysconfig.get_config_var('Py_DEBUG'):\n parts.append('d')\n if IMP_PREFIX == 'cp':\n vi = sys.version_info[:2]\n if vi < (3, 8):\n wpm = sysconfig.get_config_var('WITH_PYMALLOC')\n if wpm is None:\n wpm = True\n if wpm:\n parts.append('m')\n if vi < (3, 3):\n us = 
sysconfig.get_config_var('Py_UNICODE_SIZE')\n if us == 4 or (us is None and sys.maxunicode == 0x10FFFF):\n parts.append('u')\n return ''.join(parts)\n\n ABI = _derive_abi()\n del _derive_abi\n\nFILENAME_RE = re.compile(\n r'''\n(?P<nm>[^-]+)\n-(?P<vn>\d+[^-]*)\n(-(?P<bn>\d+[^-]*))?\n-(?P<py>\w+\d+(\.\w+\d+)*)\n-(?P<bi>\w+)\n-(?P<ar>\w+(\.\w+)*)\n\.whl$\n''', re.IGNORECASE | re.VERBOSE)\n\nNAME_VERSION_RE = re.compile(r'''\n(?P<nm>[^-]+)\n-(?P<vn>\d+[^-]*)\n(-(?P<bn>\d+[^-]*))?$\n''', re.IGNORECASE | re.VERBOSE)\n\nSHEBANG_RE = re.compile(br'\s*#![^\r\n]*')\nSHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')\nSHEBANG_PYTHON = b'#!python'\nSHEBANG_PYTHONW = b'#!pythonw'\n\nif os.sep == '/':\n to_posix = lambda o: o\nelse:\n to_posix = lambda o: o.replace(os.sep, '/')\n\nif sys.version_info[0] < 3:\n import imp\nelse:\n imp = None\n import importlib.machinery\n import importlib.util\n\n\ndef _get_suffixes():\n if imp:\n return [s[0] for s in imp.get_suffixes()]\n else:\n return importlib.machinery.EXTENSION_SUFFIXES\n\n\ndef _load_dynamic(name, path):\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n if imp:\n return imp.load_dynamic(name, path)\n else:\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n sys.modules[name] = module\n spec.loader.exec_module(module)\n return module\n\n\nclass Mounter(object):\n\n def __init__(self):\n self.impure_wheels = {}\n self.libs = {}\n\n def add(self, pathname, extensions):\n self.impure_wheels[pathname] = extensions\n self.libs.update(extensions)\n\n def remove(self, pathname):\n extensions = self.impure_wheels.pop(pathname)\n for k, v in extensions:\n if k in self.libs:\n del self.libs[k]\n\n def find_module(self, fullname, path=None):\n if fullname in self.libs:\n result = self\n else:\n result = None\n return result\n\n def load_module(self, fullname):\n if fullname in sys.modules:\n result = 
sys.modules[fullname]\n else:\n if fullname not in self.libs:\n raise ImportError('unable to find extension for %s' % fullname)\n result = _load_dynamic(fullname, self.libs[fullname])\n result.__loader__ = self\n parts = fullname.rsplit('.', 1)\n if len(parts) > 1:\n result.__package__ = parts[0]\n return result\n\n\n_hook = Mounter()\n\n\nclass Wheel(object):\n """\n Class to build and install from Wheel files (PEP 427).\n """\n\n wheel_version = (1, 1)\n hash_kind = 'sha256'\n\n def __init__(self, filename=None, sign=False, verify=False):\n """\n Initialise an instance using a (valid) filename.\n """\n self.sign = sign\n self.should_verify = verify\n self.buildver = ''\n self.pyver = [PYVER]\n self.abi = ['none']\n self.arch = ['any']\n self.dirname = os.getcwd()\n if filename is None:\n self.name = 'dummy'\n self.version = '0.1'\n self._filename = self.filename\n else:\n m = NAME_VERSION_RE.match(filename)\n if m:\n info = m.groupdict('')\n self.name = info['nm']\n # Reinstate the local version separator\n self.version = info['vn'].replace('_', '-')\n self.buildver = info['bn']\n self._filename = self.filename\n else:\n dirname, filename = os.path.split(filename)\n m = FILENAME_RE.match(filename)\n if not m:\n raise DistlibException('Invalid name or '\n 'filename: %r' % filename)\n if dirname:\n self.dirname = os.path.abspath(dirname)\n self._filename = filename\n info = m.groupdict('')\n self.name = info['nm']\n self.version = info['vn']\n self.buildver = info['bn']\n self.pyver = info['py'].split('.')\n self.abi = info['bi'].split('.')\n self.arch = info['ar'].split('.')\n\n @property\n def filename(self):\n """\n Build and return a filename from the various components.\n """\n if self.buildver:\n buildver = '-' + self.buildver\n else:\n buildver = ''\n pyver = '.'.join(self.pyver)\n abi = '.'.join(self.abi)\n arch = '.'.join(self.arch)\n # replace - with _ as a local version separator\n version = self.version.replace('-', '_')\n return '%s-%s%s-%s-%s-%s.whl' 
% (self.name, version, buildver, pyver, abi, arch)\n\n @property\n def exists(self):\n path = os.path.join(self.dirname, self.filename)\n return os.path.isfile(path)\n\n @property\n def tags(self):\n for pyver in self.pyver:\n for abi in self.abi:\n for arch in self.arch:\n yield pyver, abi, arch\n\n @cached_property\n def metadata(self):\n pathname = os.path.join(self.dirname, self.filename)\n name_ver = '%s-%s' % (self.name, self.version)\n info_dir = '%s.dist-info' % name_ver\n wrapper = codecs.getreader('utf-8')\n with ZipFile(pathname, 'r') as zf:\n self.get_wheel_metadata(zf)\n # wv = wheel_metadata['Wheel-Version'].split('.', 1)\n # file_version = tuple([int(i) for i in wv])\n # if file_version < (1, 1):\n # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME,\n # LEGACY_METADATA_FILENAME]\n # else:\n # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]\n fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]\n result = None\n for fn in fns:\n try:\n metadata_filename = posixpath.join(info_dir, fn)\n with zf.open(metadata_filename) as bf:\n wf = wrapper(bf)\n result = Metadata(fileobj=wf)\n if result:\n break\n except KeyError:\n pass\n if not result:\n raise ValueError('Invalid wheel, because metadata is '\n 'missing: looked in %s' % ', '.join(fns))\n return result\n\n def get_wheel_metadata(self, zf):\n name_ver = '%s-%s' % (self.name, self.version)\n info_dir = '%s.dist-info' % name_ver\n metadata_filename = posixpath.join(info_dir, 'WHEEL')\n with zf.open(metadata_filename) as bf:\n wf = codecs.getreader('utf-8')(bf)\n message = message_from_file(wf)\n return dict(message)\n\n @cached_property\n def info(self):\n pathname = os.path.join(self.dirname, self.filename)\n with ZipFile(pathname, 'r') as zf:\n result = self.get_wheel_metadata(zf)\n return result\n\n def process_shebang(self, data):\n m = SHEBANG_RE.match(data)\n if m:\n end = m.end()\n shebang, data_after_shebang = data[:end], data[end:]\n # Preserve any arguments after the interpreter\n if 
b'pythonw' in shebang.lower():\n shebang_python = SHEBANG_PYTHONW\n else:\n shebang_python = SHEBANG_PYTHON\n m = SHEBANG_DETAIL_RE.match(shebang)\n if m:\n args = b' ' + m.groups()[-1]\n else:\n args = b''\n shebang = shebang_python + args\n data = shebang + data_after_shebang\n else:\n cr = data.find(b'\r')\n lf = data.find(b'\n')\n if cr < 0 or cr > lf:\n term = b'\n'\n else:\n if data[cr:cr + 2] == b'\r\n':\n term = b'\r\n'\n else:\n term = b'\r'\n data = SHEBANG_PYTHON + term + data\n return data\n\n def get_hash(self, data, hash_kind=None):\n if hash_kind is None:\n hash_kind = self.hash_kind\n try:\n hasher = getattr(hashlib, hash_kind)\n except AttributeError:\n raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)\n result = hasher(data).digest()\n result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')\n return hash_kind, result\n\n def write_record(self, records, record_path, archive_record_path):\n records = list(records) # make a copy, as mutated\n records.append((archive_record_path, '', ''))\n with CSVWriter(record_path) as writer:\n for row in records:\n writer.writerow(row)\n\n def write_records(self, info, libdir, archive_paths):\n records = []\n distinfo, info_dir = info\n # hasher = getattr(hashlib, self.hash_kind)\n for ap, p in archive_paths:\n with open(p, 'rb') as f:\n data = f.read()\n digest = '%s=%s' % self.get_hash(data)\n size = os.path.getsize(p)\n records.append((ap, digest, size))\n\n p = os.path.join(distinfo, 'RECORD')\n ap = to_posix(os.path.join(info_dir, 'RECORD'))\n self.write_record(records, p, ap)\n archive_paths.append((ap, p))\n\n def build_zip(self, pathname, archive_paths):\n with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:\n for ap, p in archive_paths:\n logger.debug('Wrote %s to %s in wheel', p, ap)\n zf.write(p, ap)\n\n def build(self, paths, tags=None, wheel_version=None):\n """\n Build a wheel from files in specified paths, and use any specified tags\n when determining the name 
of the wheel.\n """\n if tags is None:\n tags = {}\n\n libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]\n if libkey == 'platlib':\n is_pure = 'false'\n default_pyver = [IMPVER]\n default_abi = [ABI]\n default_arch = [ARCH]\n else:\n is_pure = 'true'\n default_pyver = [PYVER]\n default_abi = ['none']\n default_arch = ['any']\n\n self.pyver = tags.get('pyver', default_pyver)\n self.abi = tags.get('abi', default_abi)\n self.arch = tags.get('arch', default_arch)\n\n libdir = paths[libkey]\n\n name_ver = '%s-%s' % (self.name, self.version)\n data_dir = '%s.data' % name_ver\n info_dir = '%s.dist-info' % name_ver\n\n archive_paths = []\n\n # First, stuff which is not in site-packages\n for key in ('data', 'headers', 'scripts'):\n if key not in paths:\n continue\n path = paths[key]\n if os.path.isdir(path):\n for root, dirs, files in os.walk(path):\n for fn in files:\n p = fsdecode(os.path.join(root, fn))\n rp = os.path.relpath(p, path)\n ap = to_posix(os.path.join(data_dir, key, rp))\n archive_paths.append((ap, p))\n if key == 'scripts' and not p.endswith('.exe'):\n with open(p, 'rb') as f:\n data = f.read()\n data = self.process_shebang(data)\n with open(p, 'wb') as f:\n f.write(data)\n\n # Now, stuff which is in site-packages, other than the\n # distinfo stuff.\n path = libdir\n distinfo = None\n for root, dirs, files in os.walk(path):\n if root == path:\n # At the top level only, save distinfo for later\n # and skip it for now\n for i, dn in enumerate(dirs):\n dn = fsdecode(dn)\n if dn.endswith('.dist-info'):\n distinfo = os.path.join(root, dn)\n del dirs[i]\n break\n assert distinfo, '.dist-info directory expected, not found'\n\n for fn in files:\n # comment out next suite to leave .pyc files in\n if fsdecode(fn).endswith(('.pyc', '.pyo')):\n continue\n p = os.path.join(root, fn)\n rp = to_posix(os.path.relpath(p, path))\n archive_paths.append((rp, p))\n\n # Now distinfo. Assumed to be flat, i.e. 
os.listdir is enough.\n files = os.listdir(distinfo)\n for fn in files:\n if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):\n p = fsdecode(os.path.join(distinfo, fn))\n ap = to_posix(os.path.join(info_dir, fn))\n archive_paths.append((ap, p))\n\n wheel_metadata = [\n 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),\n 'Generator: distlib %s' % __version__,\n 'Root-Is-Purelib: %s' % is_pure,\n ]\n for pyver, abi, arch in self.tags:\n wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))\n p = os.path.join(distinfo, 'WHEEL')\n with open(p, 'w') as f:\n f.write('\n'.join(wheel_metadata))\n ap = to_posix(os.path.join(info_dir, 'WHEEL'))\n archive_paths.append((ap, p))\n\n # sort the entries by archive path. Not needed by any spec, but it\n # keeps the archive listing and RECORD tidier than they would otherwise\n # be. Use the number of path segments to keep directory entries together,\n # and keep the dist-info stuff at the end.\n def sorter(t):\n ap = t[0]\n n = ap.count('/')\n if '.dist-info' in ap:\n n += 10000\n return (n, ap)\n\n archive_paths = sorted(archive_paths, key=sorter)\n\n # Now, at last, RECORD.\n # Paths in here are archive paths - nothing else makes sense.\n self.write_records((distinfo, info_dir), libdir, archive_paths)\n # Now, ready to build the zip file\n pathname = os.path.join(self.dirname, self.filename)\n self.build_zip(pathname, archive_paths)\n return pathname\n\n def skip_entry(self, arcname):\n """\n Determine whether an archive entry should be skipped when verifying\n or installing.\n """\n # The signature file won't be in RECORD,\n # and we don't currently don't do anything with it\n # We also skip directories, as they won't be in RECORD\n # either. 
See:\n #\n # https://github.com/pypa/wheel/issues/294\n # https://github.com/pypa/wheel/issues/287\n # https://github.com/pypa/wheel/pull/289\n #\n return arcname.endswith(('/', '/RECORD.jws'))\n\n def install(self, paths, maker, **kwargs):\n """\n Install a wheel to the specified paths. If kwarg ``warner`` is\n specified, it should be a callable, which will be called with two\n tuples indicating the wheel version of this software and the wheel\n version in the file, if there is a discrepancy in the versions.\n This can be used to issue any warnings to raise any exceptions.\n If kwarg ``lib_only`` is True, only the purelib/platlib files are\n installed, and the headers, scripts, data and dist-info metadata are\n not written. If kwarg ``bytecode_hashed_invalidation`` is True, written\n bytecode will try to use file-hash based invalidation (PEP-552) on\n supported interpreter versions (CPython 3.7+).\n\n The return value is a :class:`InstalledDistribution` instance unless\n ``options.lib_only`` is True, in which case the return value is ``None``.\n """\n\n dry_run = maker.dry_run\n warner = kwargs.get('warner')\n lib_only = kwargs.get('lib_only', False)\n bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)\n\n pathname = os.path.join(self.dirname, self.filename)\n name_ver = '%s-%s' % (self.name, self.version)\n data_dir = '%s.data' % name_ver\n info_dir = '%s.dist-info' % name_ver\n\n metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)\n wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')\n record_name = posixpath.join(info_dir, 'RECORD')\n\n wrapper = codecs.getreader('utf-8')\n\n with ZipFile(pathname, 'r') as zf:\n with zf.open(wheel_metadata_name) as bwf:\n wf = wrapper(bwf)\n message = message_from_file(wf)\n wv = message['Wheel-Version'].split('.', 1)\n file_version = tuple([int(i) for i in wv])\n if (file_version != self.wheel_version) and warner:\n warner(self.wheel_version, file_version)\n\n if 
message['Root-Is-Purelib'] == 'true':\n libdir = paths['purelib']\n else:\n libdir = paths['platlib']\n\n records = {}\n with zf.open(record_name) as bf:\n with CSVReader(stream=bf) as reader:\n for row in reader:\n p = row[0]\n records[p] = row\n\n data_pfx = posixpath.join(data_dir, '')\n info_pfx = posixpath.join(info_dir, '')\n script_pfx = posixpath.join(data_dir, 'scripts', '')\n\n # make a new instance rather than a copy of maker's,\n # as we mutate it\n fileop = FileOperator(dry_run=dry_run)\n fileop.record = True # so we can rollback if needed\n\n bc = not sys.dont_write_bytecode # Double negatives. Lovely!\n\n outfiles = [] # for RECORD writing\n\n # for script copying/shebang processing\n workdir = tempfile.mkdtemp()\n # set target dir later\n # we default add_launchers to False, as the\n # Python Launcher should be used instead\n maker.source_dir = workdir\n maker.target_dir = None\n try:\n for zinfo in zf.infolist():\n arcname = zinfo.filename\n if isinstance(arcname, text_type):\n u_arcname = arcname\n else:\n u_arcname = arcname.decode('utf-8')\n if self.skip_entry(u_arcname):\n continue\n row = records[u_arcname]\n if row[2] and str(zinfo.file_size) != row[2]:\n raise DistlibException('size mismatch for '\n '%s' % u_arcname)\n if row[1]:\n kind, value = row[1].split('=', 1)\n with zf.open(arcname) as bf:\n data = bf.read()\n _, digest = self.get_hash(data, kind)\n if digest != value:\n raise DistlibException('digest mismatch for '\n '%s' % arcname)\n\n if lib_only and u_arcname.startswith((info_pfx, data_pfx)):\n logger.debug('lib_only: skipping %s', u_arcname)\n continue\n is_script = (u_arcname.startswith(script_pfx) and not u_arcname.endswith('.exe'))\n\n if u_arcname.startswith(data_pfx):\n _, where, rp = u_arcname.split('/', 2)\n outfile = os.path.join(paths[where], convert_path(rp))\n else:\n # meant for site-packages.\n if u_arcname in (wheel_metadata_name, record_name):\n continue\n outfile = os.path.join(libdir, convert_path(u_arcname))\n 
if not is_script:\n with zf.open(arcname) as bf:\n fileop.copy_stream(bf, outfile)\n # Issue #147: permission bits aren't preserved. Using\n # zf.extract(zinfo, libdir) should have worked, but didn't,\n # see https://www.thetopsites.net/article/53834422.shtml\n # So ... manually preserve permission bits as given in zinfo\n if os.name == 'posix':\n # just set the normal permission bits\n os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF)\n outfiles.append(outfile)\n # Double check the digest of the written file\n if not dry_run and row[1]:\n with open(outfile, 'rb') as bf:\n data = bf.read()\n _, newdigest = self.get_hash(data, kind)\n if newdigest != digest:\n raise DistlibException('digest mismatch '\n 'on write for '\n '%s' % outfile)\n if bc and outfile.endswith('.py'):\n try:\n pyc = fileop.byte_compile(outfile, hashed_invalidation=bc_hashed_invalidation)\n outfiles.append(pyc)\n except Exception:\n # Don't give up if byte-compilation fails,\n # but log it and perhaps warn the user\n logger.warning('Byte-compilation failed', exc_info=True)\n else:\n fn = os.path.basename(convert_path(arcname))\n workname = os.path.join(workdir, fn)\n with zf.open(arcname) as bf:\n fileop.copy_stream(bf, workname)\n\n dn, fn = os.path.split(outfile)\n maker.target_dir = dn\n filenames = maker.make(fn)\n fileop.set_executable_mode(filenames)\n outfiles.extend(filenames)\n\n if lib_only:\n logger.debug('lib_only: returning None')\n dist = None\n else:\n # Generate scripts\n\n # Try to get pydist.json so we can see if there are\n # any commands to generate. If this fails (e.g. 
because\n # of a legacy wheel), log a warning but don't give up.\n commands = None\n file_version = self.info['Wheel-Version']\n if file_version == '1.0':\n # Use legacy info\n ep = posixpath.join(info_dir, 'entry_points.txt')\n try:\n with zf.open(ep) as bwf:\n epdata = read_exports(bwf)\n commands = {}\n for key in ('console', 'gui'):\n k = '%s_scripts' % key\n if k in epdata:\n commands['wrap_%s' % key] = d = {}\n for v in epdata[k].values():\n s = '%s:%s' % (v.prefix, v.suffix)\n if v.flags:\n s += ' [%s]' % ','.join(v.flags)\n d[v.name] = s\n except Exception:\n logger.warning('Unable to read legacy script '\n 'metadata, so cannot generate '\n 'scripts')\n else:\n try:\n with zf.open(metadata_name) as bwf:\n wf = wrapper(bwf)\n commands = json.load(wf).get('extensions')\n if commands:\n commands = commands.get('python.commands')\n except Exception:\n logger.warning('Unable to read JSON metadata, so '\n 'cannot generate scripts')\n if commands:\n console_scripts = commands.get('wrap_console', {})\n gui_scripts = commands.get('wrap_gui', {})\n if console_scripts or gui_scripts:\n script_dir = paths.get('scripts', '')\n if not os.path.isdir(script_dir):\n raise ValueError('Valid script path not '\n 'specified')\n maker.target_dir = script_dir\n for k, v in console_scripts.items():\n script = '%s = %s' % (k, v)\n filenames = maker.make(script)\n fileop.set_executable_mode(filenames)\n\n if gui_scripts:\n options = {'gui': True}\n for k, v in gui_scripts.items():\n script = '%s = %s' % (k, v)\n filenames = maker.make(script, options)\n fileop.set_executable_mode(filenames)\n\n p = os.path.join(libdir, info_dir)\n dist = InstalledDistribution(p)\n\n # Write SHARED\n paths = dict(paths) # don't change passed in dict\n del paths['purelib']\n del paths['platlib']\n paths['lib'] = libdir\n p = dist.write_shared_locations(paths, dry_run)\n if p:\n outfiles.append(p)\n\n # Write RECORD\n dist.write_installed_files(outfiles, paths['prefix'], dry_run)\n return dist\n except 
Exception: # pragma: no cover\n logger.exception('installation failed.')\n fileop.rollback()\n raise\n finally:\n shutil.rmtree(workdir)\n\n def _get_dylib_cache(self):\n global cache\n if cache is None:\n # Use native string to avoid issues on 2.x: see Python #20140.\n base = os.path.join(get_cache_base(), str('dylib-cache'), '%s.%s' % sys.version_info[:2])\n cache = Cache(base)\n return cache\n\n def _get_extensions(self):\n pathname = os.path.join(self.dirname, self.filename)\n name_ver = '%s-%s' % (self.name, self.version)\n info_dir = '%s.dist-info' % name_ver\n arcname = posixpath.join(info_dir, 'EXTENSIONS')\n wrapper = codecs.getreader('utf-8')\n result = []\n with ZipFile(pathname, 'r') as zf:\n try:\n with zf.open(arcname) as bf:\n wf = wrapper(bf)\n extensions = json.load(wf)\n cache = self._get_dylib_cache()\n prefix = cache.prefix_to_dir(self.filename, use_abspath=False)\n cache_base = os.path.join(cache.base, prefix)\n if not os.path.isdir(cache_base):\n os.makedirs(cache_base)\n for name, relpath in extensions.items():\n dest = os.path.join(cache_base, convert_path(relpath))\n if not os.path.exists(dest):\n extract = True\n else:\n file_time = os.stat(dest).st_mtime\n file_time = datetime.datetime.fromtimestamp(file_time)\n info = zf.getinfo(relpath)\n wheel_time = datetime.datetime(*info.date_time)\n extract = wheel_time > file_time\n if extract:\n zf.extract(relpath, cache_base)\n result.append((name, dest))\n except KeyError:\n pass\n return result\n\n def is_compatible(self):\n """\n Determine if a wheel is compatible with the running system.\n """\n return is_compatible(self)\n\n def is_mountable(self):\n """\n Determine if a wheel is asserted as mountable by its metadata.\n """\n return True # for now - metadata details TBD\n\n def mount(self, append=False):\n pathname = os.path.abspath(os.path.join(self.dirname, self.filename))\n if not self.is_compatible():\n msg = 'Wheel %s not compatible with this Python.' 
% pathname\n raise DistlibException(msg)\n if not self.is_mountable():\n msg = 'Wheel %s is marked as not mountable.' % pathname\n raise DistlibException(msg)\n if pathname in sys.path:\n logger.debug('%s already in path', pathname)\n else:\n if append:\n sys.path.append(pathname)\n else:\n sys.path.insert(0, pathname)\n extensions = self._get_extensions()\n if extensions:\n if _hook not in sys.meta_path:\n sys.meta_path.append(_hook)\n _hook.add(pathname, extensions)\n\n def unmount(self):\n pathname = os.path.abspath(os.path.join(self.dirname, self.filename))\n if pathname not in sys.path:\n logger.debug('%s not in path', pathname)\n else:\n sys.path.remove(pathname)\n if pathname in _hook.impure_wheels:\n _hook.remove(pathname)\n if not _hook.impure_wheels:\n if _hook in sys.meta_path:\n sys.meta_path.remove(_hook)\n\n def verify(self):\n pathname = os.path.join(self.dirname, self.filename)\n name_ver = '%s-%s' % (self.name, self.version)\n # data_dir = '%s.data' % name_ver\n info_dir = '%s.dist-info' % name_ver\n\n # metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)\n wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')\n record_name = posixpath.join(info_dir, 'RECORD')\n\n wrapper = codecs.getreader('utf-8')\n\n with ZipFile(pathname, 'r') as zf:\n with zf.open(wheel_metadata_name) as bwf:\n wf = wrapper(bwf)\n message_from_file(wf)\n # wv = message['Wheel-Version'].split('.', 1)\n # file_version = tuple([int(i) for i in wv])\n # TODO version verification\n\n records = {}\n with zf.open(record_name) as bf:\n with CSVReader(stream=bf) as reader:\n for row in reader:\n p = row[0]\n records[p] = row\n\n for zinfo in zf.infolist():\n arcname = zinfo.filename\n if isinstance(arcname, text_type):\n u_arcname = arcname\n else:\n u_arcname = arcname.decode('utf-8')\n # See issue #115: some wheels have .. in their entries, but\n # in the filename ... e.g. __main__..py ! So the check is\n # updated to look for .. 
in the directory portions\n p = u_arcname.split('/')\n if '..' in p:\n raise DistlibException('invalid entry in '\n 'wheel: %r' % u_arcname)\n\n if self.skip_entry(u_arcname):\n continue\n row = records[u_arcname]\n if row[2] and str(zinfo.file_size) != row[2]:\n raise DistlibException('size mismatch for '\n '%s' % u_arcname)\n if row[1]:\n kind, value = row[1].split('=', 1)\n with zf.open(arcname) as bf:\n data = bf.read()\n _, digest = self.get_hash(data, kind)\n if digest != value:\n raise DistlibException('digest mismatch for '\n '%s' % arcname)\n\n def update(self, modifier, dest_dir=None, **kwargs):\n """\n Update the contents of a wheel in a generic way. The modifier should\n be a callable which expects a dictionary argument: its keys are\n archive-entry paths, and its values are absolute filesystem paths\n where the contents the corresponding archive entries can be found. The\n modifier is free to change the contents of the files pointed to, add\n new entries and remove entries, before returning. This method will\n extract the entire contents of the wheel to a temporary location, call\n the modifier, and then use the passed (and possibly updated)\n dictionary to write a new wheel. 
If ``dest_dir`` is specified, the new\n wheel is written there -- otherwise, the original wheel is overwritten.\n\n The modifier should return True if it updated the wheel, else False.\n This method returns the same value the modifier returns.\n """\n\n def get_version(path_map, info_dir):\n version = path = None\n key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)\n if key not in path_map:\n key = '%s/PKG-INFO' % info_dir\n if key in path_map:\n path = path_map[key]\n version = Metadata(path=path).version\n return version, path\n\n def update_version(version, path):\n updated = None\n try:\n NormalizedVersion(version)\n i = version.find('-')\n if i < 0:\n updated = '%s+1' % version\n else:\n parts = [int(s) for s in version[i + 1:].split('.')]\n parts[-1] += 1\n updated = '%s+%s' % (version[:i], '.'.join(str(i) for i in parts))\n except UnsupportedVersionError:\n logger.debug('Cannot update non-compliant (PEP-440) '\n 'version %r', version)\n if updated:\n md = Metadata(path=path)\n md.version = updated\n legacy = path.endswith(LEGACY_METADATA_FILENAME)\n md.write(path=path, legacy=legacy)\n logger.debug('Version updated from %r to %r', version, updated)\n\n pathname = os.path.join(self.dirname, self.filename)\n name_ver = '%s-%s' % (self.name, self.version)\n info_dir = '%s.dist-info' % name_ver\n record_name = posixpath.join(info_dir, 'RECORD')\n with tempdir() as workdir:\n with ZipFile(pathname, 'r') as zf:\n path_map = {}\n for zinfo in zf.infolist():\n arcname = zinfo.filename\n if isinstance(arcname, text_type):\n u_arcname = arcname\n else:\n u_arcname = arcname.decode('utf-8')\n if u_arcname == record_name:\n continue\n if '..' in u_arcname:\n raise DistlibException('invalid entry in '\n 'wheel: %r' % u_arcname)\n zf.extract(zinfo, workdir)\n path = os.path.join(workdir, convert_path(u_arcname))\n path_map[u_arcname] = path\n\n # Remember the version.\n original_version, _ = get_version(path_map, info_dir)\n # Files extracted. 
Call the modifier.\n modified = modifier(path_map, **kwargs)\n if modified:\n # Something changed - need to build a new wheel.\n current_version, path = get_version(path_map, info_dir)\n if current_version and (current_version == original_version):\n # Add or update local version to signify changes.\n update_version(current_version, path)\n # Decide where the new wheel goes.\n if dest_dir is None:\n fd, newpath = tempfile.mkstemp(suffix='.whl', prefix='wheel-update-', dir=workdir)\n os.close(fd)\n else:\n if not os.path.isdir(dest_dir):\n raise DistlibException('Not a directory: %r' % dest_dir)\n newpath = os.path.join(dest_dir, self.filename)\n archive_paths = list(path_map.items())\n distinfo = os.path.join(workdir, info_dir)\n info = distinfo, info_dir\n self.write_records(info, workdir, archive_paths)\n self.build_zip(newpath, archive_paths)\n if dest_dir is None:\n shutil.copyfile(newpath, pathname)\n return modified\n\n\ndef _get_glibc_version():\n import platform\n ver = platform.libc_ver()\n result = []\n if ver[0] == 'glibc':\n for s in ver[1].split('.'):\n result.append(int(s) if s.isdigit() else 0)\n result = tuple(result)\n return result\n\n\ndef compatible_tags():\n """\n Return (pyver, abi, arch) tuples compatible with this Python.\n """\n class _Version:\n def __init__(self, major, minor):\n self.major = major\n self.major_minor = (major, minor)\n self.string = ''.join((str(major), str(minor)))\n\n def __str__(self):\n return self.string\n\n\n versions = [\n _Version(sys.version_info.major, minor_version)\n for minor_version in range(sys.version_info.minor, -1, -1)\n ]\n abis = []\n for suffix in _get_suffixes():\n if suffix.startswith('.abi'):\n abis.append(suffix.split('.', 2)[1])\n abis.sort()\n if ABI != 'none':\n abis.insert(0, ABI)\n abis.append('none')\n result = []\n\n arches = [ARCH]\n if sys.platform == 'darwin':\n m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)\n if m:\n name, major, minor, arch = m.groups()\n minor = int(minor)\n matches 
= [arch]\n if arch in ('i386', 'ppc'):\n matches.append('fat')\n if arch in ('i386', 'ppc', 'x86_64'):\n matches.append('fat3')\n if arch in ('ppc64', 'x86_64'):\n matches.append('fat64')\n if arch in ('i386', 'x86_64'):\n matches.append('intel')\n if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):\n matches.append('universal')\n while minor >= 0:\n for match in matches:\n s = '%s_%s_%s_%s' % (name, major, minor, match)\n if s != ARCH: # already there\n arches.append(s)\n minor -= 1\n\n # Most specific - our Python version, ABI and arch\n for i, version_object in enumerate(versions):\n version = str(version_object)\n add_abis = []\n\n if i == 0:\n add_abis = abis\n\n if IMP_PREFIX == 'cp' and version_object.major_minor >= (3, 2):\n limited_api_abi = 'abi' + str(version_object.major)\n if limited_api_abi not in add_abis:\n add_abis.append(limited_api_abi)\n\n for abi in add_abis:\n for arch in arches:\n result.append((''.join((IMP_PREFIX, version)), abi, arch))\n # manylinux\n if abi != 'none' and sys.platform.startswith('linux'):\n arch = arch.replace('linux_', '')\n parts = _get_glibc_version()\n if len(parts) == 2:\n if parts >= (2, 5):\n result.append((''.join((IMP_PREFIX, version)), abi, 'manylinux1_%s' % arch))\n if parts >= (2, 12):\n result.append((''.join((IMP_PREFIX, version)), abi, 'manylinux2010_%s' % arch))\n if parts >= (2, 17):\n result.append((''.join((IMP_PREFIX, version)), abi, 'manylinux2014_%s' % arch))\n result.append((''.join(\n (IMP_PREFIX, version)), abi, 'manylinux_%s_%s_%s' % (parts[0], parts[1], arch)))\n\n # where no ABI / arch dependency, but IMP_PREFIX dependency\n for i, version_object in enumerate(versions):\n version = str(version_object)\n result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))\n if i == 0:\n result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))\n\n # no IMP_PREFIX, ABI or arch dependency\n for i, version_object in enumerate(versions):\n version = str(version_object)\n 
result.append((''.join(('py', version)), 'none', 'any'))\n if i == 0:\n result.append((''.join(('py', version[0])), 'none', 'any'))\n\n return set(result)\n\n\nCOMPATIBLE_TAGS = compatible_tags()\n\ndel compatible_tags\n\n\ndef is_compatible(wheel, tags=None):\n if not isinstance(wheel, Wheel):\n wheel = Wheel(wheel) # assume it's a filename\n result = False\n if tags is None:\n tags = COMPATIBLE_TAGS\n for ver, abi, arch in tags:\n if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:\n result = True\n break\n return result\n
.venv\Lib\site-packages\pip\_vendor\distlib\wheel.py
wheel.py
Python
43,979
0.95
0.218182
0.082737
react-lib
563
2024-07-20T10:34:18.309953
BSD-3-Clause
false
2ee92159fb5c8270cb7abb150ad4b548
# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012-2023 Vinay Sajip.\n# Licensed to the Python Software Foundation under a contributor agreement.\n# See LICENSE.txt and CONTRIBUTORS.txt.\n#\nimport logging\n\n__version__ = '0.3.9'\n\n\nclass DistlibException(Exception):\n pass\n\n\ntry:\n from logging import NullHandler\nexcept ImportError: # pragma: no cover\n\n class NullHandler(logging.Handler):\n\n def handle(self, record):\n pass\n\n def emit(self, record):\n pass\n\n def createLock(self):\n self.lock = None\n\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(NullHandler())\n
.venv\Lib\site-packages\pip\_vendor\distlib\__init__.py
__init__.py
Python
625
0.95
0.181818
0.272727
react-lib
385
2024-12-28T05:46:28.767627
GPL-3.0
false
c80896a013333c7f894bdea80a3d97e6
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\compat.cpython-313.pyc
compat.cpython-313.pyc
Other
45,868
0.95
0.082873
0.003012
node-utils
456
2025-06-02T03:24:47.440444
BSD-3-Clause
false
ce20f27941f4ccb8ecc7360b4240cf1d
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\database.cpython-313.pyc
database.cpython-313.pyc
Other
65,238
0.75
0.109231
0.011532
react-lib
931
2024-04-28T20:40:18.791578
BSD-3-Clause
false
333c06a12dcff17c1a973c5a2dd1104e
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\index.cpython-313.pyc
index.cpython-313.pyc
Other
23,555
0.95
0.099379
0
vue-tools
858
2025-04-01T18:50:09.201481
Apache-2.0
false
94488021f6d41b4336266e83ea78586c
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\locators.cpython-313.pyc
locators.cpython-313.pyc
Other
59,534
0.75
0.096563
0.012324
node-utils
408
2024-05-19T11:55:01.809724
GPL-3.0
false
8ffa73c671e88ba14968f1a3db847d79
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\manifest.cpython-313.pyc
manifest.cpython-313.pyc
Other
15,035
0.8
0.023392
0.013245
react-lib
701
2024-05-27T09:29:49.292287
Apache-2.0
false
94a3b9fd75bfac376d1bfcd47e3f94b1
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\markers.cpython-313.pyc
markers.cpython-313.pyc
Other
7,690
0.95
0.049383
0.026316
awesome-app
39
2023-08-01T22:45:08.607593
MIT
false
5b318cf26aec4599f22533650d4e88bd
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\metadata.cpython-313.pyc
metadata.cpython-313.pyc
Other
42,593
0.95
0.026667
0.021429
node-utils
425
2025-02-05T14:52:35.784959
Apache-2.0
false
e9c2fdab5ead16b1003d8c156aef945d
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\resources.cpython-313.pyc
resources.cpython-313.pyc
Other
17,660
0.95
0.105263
0
node-utils
715
2024-09-14T20:14:58.738337
Apache-2.0
false
c0027df08eeb5aa10f2f418661595529
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\scripts.cpython-313.pyc
scripts.cpython-313.pyc
Other
20,236
0.95
0.026178
0
vue-tools
928
2024-03-18T06:36:07.680658
BSD-3-Clause
false
044d7f38570f55e614f8a2c39d6aa8c7
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\util.cpython-313.pyc
util.cpython-313.pyc
Other
90,118
0.75
0.053672
0.010736
awesome-app
503
2024-05-22T03:03:54.117616
MIT
false
dd0e15fa797913b9d41403761f72caf9
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\version.cpython-313.pyc
version.cpython-313.pyc
Other
30,982
0.95
0.04461
0.011905
python-kit
412
2024-07-04T08:33:19.545219
MIT
false
9dbe7c96571f29fbc7fd4c32994a84cb
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\wheel.cpython-313.pyc
wheel.cpython-313.pyc
Other
53,782
0.95
0.021459
0.04
awesome-app
716
2023-10-09T12:46:38.235759
GPL-3.0
false
935f7c5c7426973f9df54baa6124e6c6
\n\n
.venv\Lib\site-packages\pip\_vendor\distlib\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,358
0.8
0
0
node-utils
318
2025-05-31T09:44:41.254198
GPL-3.0
false
af86f73584d576b5a7037f50bb983bc9
#!/usr/bin/env python\n# Copyright 2015-2021 Nir Cohen\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""\nThe ``distro`` package (``distro`` stands for Linux Distribution) provides\ninformation about the Linux distribution it runs on, such as a reliable\nmachine-readable distro ID, or version information.\n\nIt is the recommended replacement for Python's original\n:py:func:`platform.linux_distribution` function, but it provides much more\nfunctionality. An alternative implementation became necessary because Python\n3.5 deprecated this function, and Python 3.8 removed it altogether. Its\npredecessor function :py:func:`platform.dist` was already deprecated since\nPython 2.6 and removed in Python 3.8. Still, there are many cases in which\naccess to OS distribution information is needed. 
See `Python issue 1322\n<https://bugs.python.org/issue1322>`_ for more information.\n"""\n\nimport argparse\nimport json\nimport logging\nimport os\nimport re\nimport shlex\nimport subprocess\nimport sys\nimport warnings\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterable,\n Optional,\n Sequence,\n TextIO,\n Tuple,\n Type,\n)\n\ntry:\n from typing import TypedDict\nexcept ImportError:\n # Python 3.7\n TypedDict = dict\n\n__version__ = "1.9.0"\n\n\nclass VersionDict(TypedDict):\n major: str\n minor: str\n build_number: str\n\n\nclass InfoDict(TypedDict):\n id: str\n version: str\n version_parts: VersionDict\n like: str\n codename: str\n\n\n_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")\n_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib")\n_OS_RELEASE_BASENAME = "os-release"\n\n#: Translation table for normalizing the "ID" attribute defined in os-release\n#: files, for use by the :func:`distro.id` method.\n#:\n#: * Key: Value as defined in the os-release file, translated to lower case,\n#: with blanks translated to underscores.\n#:\n#: * Value: Normalized value.\nNORMALIZED_OS_ID = {\n "ol": "oracle", # Oracle Linux\n "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap\n}\n\n#: Translation table for normalizing the "Distributor ID" attribute returned by\n#: the lsb_release command, for use by the :func:`distro.id` method.\n#:\n#: * Key: Value as returned by the lsb_release command, translated to lower\n#: case, with blanks translated to underscores.\n#:\n#: * Value: Normalized value.\nNORMALIZED_LSB_ID = {\n "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4\n "enterpriseenterpriseserver": "oracle", # Oracle Linux 5\n "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation\n "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server\n "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode\n}\n\n#: Translation table for normalizing the distro ID derived from the file name\n#: of distro release 
files, for use by the :func:`distro.id` method.\n#:\n#: * Key: Value as derived from the file name of a distro release file,\n#: translated to lower case, with blanks translated to underscores.\n#:\n#: * Value: Normalized value.\nNORMALIZED_DISTRO_ID = {\n "redhat": "rhel", # RHEL 6.x, 7.x\n}\n\n# Pattern for content of distro release file (reversed)\n_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(\n r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"\n)\n\n# Pattern for base file name of distro release file\n_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")\n\n# Base file names to be looked up for if _UNIXCONFDIR is not readable.\n_DISTRO_RELEASE_BASENAMES = [\n "SuSE-release",\n "altlinux-release",\n "arch-release",\n "base-release",\n "centos-release",\n "fedora-release",\n "gentoo-release",\n "mageia-release",\n "mandrake-release",\n "mandriva-release",\n "mandrivalinux-release",\n "manjaro-release",\n "oracle-release",\n "redhat-release",\n "rocky-release",\n "sl-release",\n "slackware-version",\n]\n\n# Base file names to be ignored when searching for distro release file\n_DISTRO_RELEASE_IGNORE_BASENAMES = (\n "debian_version",\n "lsb-release",\n "oem-release",\n _OS_RELEASE_BASENAME,\n "system-release",\n "plesk-release",\n "iredmail-release",\n "board-release",\n "ec2_version",\n)\n\n\ndef linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]:\n """\n .. deprecated:: 1.6.0\n\n :func:`distro.linux_distribution()` is deprecated. It should only be\n used as a compatibility shim with Python's\n :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`,\n :func:`distro.version` and :func:`distro.name` instead.\n\n Return information about the current OS distribution as a tuple\n ``(id_name, version, codename)`` with items as follows:\n\n * ``id_name``: If *full_distribution_name* is false, the result of\n :func:`distro.id`. 
Otherwise, the result of :func:`distro.name`.\n\n * ``version``: The result of :func:`distro.version`.\n\n * ``codename``: The extra item (usually in parentheses) after the\n os-release version number, or the result of :func:`distro.codename`.\n\n The interface of this function is compatible with the original\n :py:func:`platform.linux_distribution` function, supporting a subset of\n its parameters.\n\n The data it returns may not exactly be the same, because it uses more data\n sources than the original function, and that may lead to different data if\n the OS distribution is not consistent across multiple data sources it\n provides (there are indeed such distributions ...).\n\n Another reason for differences is the fact that the :func:`distro.id`\n method normalizes the distro ID string to a reliable machine-readable value\n for a number of popular OS distributions.\n """\n warnings.warn(\n "distro.linux_distribution() is deprecated. It should only be used as a "\n "compatibility shim with Python's platform.linux_distribution(). 
Please use "\n "distro.id(), distro.version() and distro.name() instead.",\n DeprecationWarning,\n stacklevel=2,\n )\n return _distro.linux_distribution(full_distribution_name)\n\n\ndef id() -> str:\n """\n Return the distro ID of the current distribution, as a\n machine-readable string.\n\n For a number of OS distributions, the returned distro ID value is\n *reliable*, in the sense that it is documented and that it does not change\n across releases of the distribution.\n\n This package maintains the following reliable distro ID values:\n\n ============== =========================================\n Distro ID Distribution\n ============== =========================================\n "ubuntu" Ubuntu\n "debian" Debian\n "rhel" RedHat Enterprise Linux\n "centos" CentOS\n "fedora" Fedora\n "sles" SUSE Linux Enterprise Server\n "opensuse" openSUSE\n "amzn" Amazon Linux\n "arch" Arch Linux\n "buildroot" Buildroot\n "cloudlinux" CloudLinux OS\n "exherbo" Exherbo Linux\n "gentoo" GenToo Linux\n "ibm_powerkvm" IBM PowerKVM\n "kvmibm" KVM for IBM z Systems\n "linuxmint" Linux Mint\n "mageia" Mageia\n "mandriva" Mandriva Linux\n "parallels" Parallels\n "pidora" Pidora\n "raspbian" Raspbian\n "oracle" Oracle Linux (and Oracle Enterprise Linux)\n "scientific" Scientific Linux\n "slackware" Slackware\n "xenserver" XenServer\n "openbsd" OpenBSD\n "netbsd" NetBSD\n "freebsd" FreeBSD\n "midnightbsd" MidnightBSD\n "rocky" Rocky Linux\n "aix" AIX\n "guix" Guix System\n "altlinux" ALT Linux\n ============== =========================================\n\n If you have a need to get distros for reliable IDs added into this set,\n or if you find that the :func:`distro.id` function returns a different\n distro ID for one of the listed distros, please create an issue in the\n `distro issue tracker`_.\n\n **Lookup hierarchy and transformations:**\n\n First, the ID is obtained from the following sources, in the specified\n order. 
The first available and non-empty value is used:\n\n * the value of the "ID" attribute of the os-release file,\n\n * the value of the "Distributor ID" attribute returned by the lsb_release\n command,\n\n * the first part of the file name of the distro release file,\n\n The so determined ID value then passes the following transformations,\n before it is returned by this method:\n\n * it is translated to lower case,\n\n * blanks (which should not be there anyway) are translated to underscores,\n\n * a normalization of the ID is performed, based upon\n `normalization tables`_. The purpose of this normalization is to ensure\n that the ID is as reliable as possible, even across incompatible changes\n in the OS distributions. A common reason for an incompatible change is\n the addition of an os-release file, or the addition of the lsb_release\n command, with ID values that differ from what was previously determined\n from the distro release file name.\n """\n return _distro.id()\n\n\ndef name(pretty: bool = False) -> str:\n """\n Return the name of the current OS distribution, as a human-readable\n string.\n\n If *pretty* is false, the name is returned without version or codename.\n (e.g. "CentOS Linux")\n\n If *pretty* is true, the version and codename are appended.\n (e.g. 
"CentOS Linux 7.1.1503 (Core)")\n\n **Lookup hierarchy:**\n\n The name is obtained from the following sources, in the specified order.\n The first available and non-empty value is used:\n\n * If *pretty* is false:\n\n - the value of the "NAME" attribute of the os-release file,\n\n - the value of the "Distributor ID" attribute returned by the lsb_release\n command,\n\n - the value of the "<name>" field of the distro release file.\n\n * If *pretty* is true:\n\n - the value of the "PRETTY_NAME" attribute of the os-release file,\n\n - the value of the "Description" attribute returned by the lsb_release\n command,\n\n - the value of the "<name>" field of the distro release file, appended\n with the value of the pretty version ("<version_id>" and "<codename>"\n fields) of the distro release file, if available.\n """\n return _distro.name(pretty)\n\n\ndef version(pretty: bool = False, best: bool = False) -> str:\n """\n Return the version of the current OS distribution, as a human-readable\n string.\n\n If *pretty* is false, the version is returned without codename (e.g.\n "7.0").\n\n If *pretty* is true, the codename in parenthesis is appended, if the\n codename is non-empty (e.g. "7.0 (Maipo)").\n\n Some distributions provide version numbers with different precisions in\n the different sources of distribution information. Examining the different\n sources in a fixed priority order does not always yield the most precise\n version (e.g. for Debian 8.2, or CentOS 7.1).\n\n Some other distributions may not provide this kind of information. In these\n cases, an empty string would be returned. This behavior can be observed\n with rolling releases distributions (e.g. 
Arch Linux).\n\n The *best* parameter can be used to control the approach for the returned\n version:\n\n If *best* is false, the first non-empty version number in priority order of\n the examined sources is returned.\n\n If *best* is true, the most precise version number out of all examined\n sources is returned.\n\n **Lookup hierarchy:**\n\n In all cases, the version number is obtained from the following sources.\n If *best* is false, this order represents the priority order:\n\n * the value of the "VERSION_ID" attribute of the os-release file,\n * the value of the "Release" attribute returned by the lsb_release\n command,\n * the version number parsed from the "<version_id>" field of the first line\n of the distro release file,\n * the version number parsed from the "PRETTY_NAME" attribute of the\n os-release file, if it follows the format of the distro release files.\n * the version number parsed from the "Description" attribute returned by\n the lsb_release command, if it follows the format of the distro release\n files.\n """\n return _distro.version(pretty, best)\n\n\ndef version_parts(best: bool = False) -> Tuple[str, str, str]:\n """\n Return the version of the current OS distribution as a tuple\n ``(major, minor, build_number)`` with items as follows:\n\n * ``major``: The result of :func:`distro.major_version`.\n\n * ``minor``: The result of :func:`distro.minor_version`.\n\n * ``build_number``: The result of :func:`distro.build_number`.\n\n For a description of the *best* parameter, see the :func:`distro.version`\n method.\n """\n return _distro.version_parts(best)\n\n\ndef major_version(best: bool = False) -> str:\n """\n Return the major version of the current OS distribution, as a string,\n if provided.\n Otherwise, the empty string is returned. 
The major version is the first\n part of the dot-separated version string.\n\n For a description of the *best* parameter, see the :func:`distro.version`\n method.\n """\n return _distro.major_version(best)\n\n\ndef minor_version(best: bool = False) -> str:\n """\n Return the minor version of the current OS distribution, as a string,\n if provided.\n Otherwise, the empty string is returned. The minor version is the second\n part of the dot-separated version string.\n\n For a description of the *best* parameter, see the :func:`distro.version`\n method.\n """\n return _distro.minor_version(best)\n\n\ndef build_number(best: bool = False) -> str:\n """\n Return the build number of the current OS distribution, as a string,\n if provided.\n Otherwise, the empty string is returned. The build number is the third part\n of the dot-separated version string.\n\n For a description of the *best* parameter, see the :func:`distro.version`\n method.\n """\n return _distro.build_number(best)\n\n\ndef like() -> str:\n """\n Return a space-separated list of distro IDs of distributions that are\n closely related to the current OS distribution in regards to packaging\n and programming interfaces, for example distributions the current\n distribution is a derivative from.\n\n **Lookup hierarchy:**\n\n This information item is only provided by the os-release file.\n For details, see the description of the "ID_LIKE" attribute in the\n `os-release man page\n <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.\n """\n return _distro.like()\n\n\ndef codename() -> str:\n """\n Return the codename for the release of the current OS distribution,\n as a string.\n\n If the distribution does not have a codename, an empty string is returned.\n\n Note that the returned codename is not always really a codename. For\n example, openSUSE returns "x86_64". 
This function does not handle such\n cases in any special way and just returns the string it finds, if any.\n\n **Lookup hierarchy:**\n\n * the codename within the "VERSION" attribute of the os-release file, if\n provided,\n\n * the value of the "Codename" attribute returned by the lsb_release\n command,\n\n * the value of the "<codename>" field of the distro release file.\n """\n return _distro.codename()\n\n\ndef info(pretty: bool = False, best: bool = False) -> InfoDict:\n """\n Return certain machine-readable information items about the current OS\n distribution in a dictionary, as shown in the following example:\n\n .. sourcecode:: python\n\n {\n 'id': 'rhel',\n 'version': '7.0',\n 'version_parts': {\n 'major': '7',\n 'minor': '0',\n 'build_number': ''\n },\n 'like': 'fedora',\n 'codename': 'Maipo'\n }\n\n The dictionary structure and keys are always the same, regardless of which\n information items are available in the underlying data sources. The values\n for the various keys are as follows:\n\n * ``id``: The result of :func:`distro.id`.\n\n * ``version``: The result of :func:`distro.version`.\n\n * ``version_parts -> major``: The result of :func:`distro.major_version`.\n\n * ``version_parts -> minor``: The result of :func:`distro.minor_version`.\n\n * ``version_parts -> build_number``: The result of\n :func:`distro.build_number`.\n\n * ``like``: The result of :func:`distro.like`.\n\n * ``codename``: The result of :func:`distro.codename`.\n\n For a description of the *pretty* and *best* parameters, see the\n :func:`distro.version` method.\n """\n return _distro.info(pretty, best)\n\n\ndef os_release_info() -> Dict[str, str]:\n """\n Return a dictionary containing key-value pairs for the information items\n from the os-release file data source of the current OS distribution.\n\n See `os-release file`_ for details about these information items.\n """\n return _distro.os_release_info()\n\n\ndef lsb_release_info() -> Dict[str, str]:\n """\n Return a dictionary 
containing key-value pairs for the information items\n from the lsb_release command data source of the current OS distribution.\n\n See `lsb_release command output`_ for details about these information\n items.\n """\n return _distro.lsb_release_info()\n\n\ndef distro_release_info() -> Dict[str, str]:\n """\n Return a dictionary containing key-value pairs for the information items\n from the distro release file data source of the current OS distribution.\n\n See `distro release file`_ for details about these information items.\n """\n return _distro.distro_release_info()\n\n\ndef uname_info() -> Dict[str, str]:\n """\n Return a dictionary containing key-value pairs for the information items\n from the distro release file data source of the current OS distribution.\n """\n return _distro.uname_info()\n\n\ndef os_release_attr(attribute: str) -> str:\n """\n Return a single named information item from the os-release file data source\n of the current OS distribution.\n\n Parameters:\n\n * ``attribute`` (string): Key of the information item.\n\n Returns:\n\n * (string): Value of the information item, if the item exists.\n The empty string, if the item does not exist.\n\n See `os-release file`_ for details about these information items.\n """\n return _distro.os_release_attr(attribute)\n\n\ndef lsb_release_attr(attribute: str) -> str:\n """\n Return a single named information item from the lsb_release command output\n data source of the current OS distribution.\n\n Parameters:\n\n * ``attribute`` (string): Key of the information item.\n\n Returns:\n\n * (string): Value of the information item, if the item exists.\n The empty string, if the item does not exist.\n\n See `lsb_release command output`_ for details about these information\n items.\n """\n return _distro.lsb_release_attr(attribute)\n\n\ndef distro_release_attr(attribute: str) -> str:\n """\n Return a single named information item from the distro release file\n data source of the current OS distribution.\n\n 
Parameters:\n\n * ``attribute`` (string): Key of the information item.\n\n Returns:\n\n * (string): Value of the information item, if the item exists.\n The empty string, if the item does not exist.\n\n See `distro release file`_ for details about these information items.\n """\n return _distro.distro_release_attr(attribute)\n\n\ndef uname_attr(attribute: str) -> str:\n """\n Return a single named information item from the distro release file\n data source of the current OS distribution.\n\n Parameters:\n\n * ``attribute`` (string): Key of the information item.\n\n Returns:\n\n * (string): Value of the information item, if the item exists.\n The empty string, if the item does not exist.\n """\n return _distro.uname_attr(attribute)\n\n\ntry:\n from functools import cached_property\nexcept ImportError:\n # Python < 3.8\n class cached_property: # type: ignore\n """A version of @property which caches the value. On access, it calls the\n underlying function and sets the value in `__dict__` so future accesses\n will not re-call the property.\n """\n\n def __init__(self, f: Callable[[Any], Any]) -> None:\n self._fname = f.__name__\n self._f = f\n\n def __get__(self, obj: Any, owner: Type[Any]) -> Any:\n assert obj is not None, f"call {self._fname} on an instance"\n ret = obj.__dict__[self._fname] = self._f(obj)\n return ret\n\n\nclass LinuxDistribution:\n """\n Provides information about a OS distribution.\n\n This package creates a private module-global instance of this class with\n default initialization arguments, that is used by the\n `consolidated accessor functions`_ and `single source accessor functions`_.\n By using default initialization arguments, that module-global instance\n returns data about the current OS distribution (i.e. 
the distro this\n package runs on).\n\n Normally, it is not necessary to create additional instances of this class.\n However, in situations where control is needed over the exact data sources\n that are used, instances of this class can be created with a specific\n distro release file, or a specific os-release file, or without invoking the\n lsb_release command.\n """\n\n def __init__(\n self,\n include_lsb: Optional[bool] = None,\n os_release_file: str = "",\n distro_release_file: str = "",\n include_uname: Optional[bool] = None,\n root_dir: Optional[str] = None,\n include_oslevel: Optional[bool] = None,\n ) -> None:\n """\n The initialization method of this class gathers information from the\n available data sources, and stores that in private instance attributes.\n Subsequent access to the information items uses these private instance\n attributes, so that the data sources are read only once.\n\n Parameters:\n\n * ``include_lsb`` (bool): Controls whether the\n `lsb_release command output`_ is included as a data source.\n\n If the lsb_release command is not available in the program execution\n path, the data source for the lsb_release command will be empty.\n\n * ``os_release_file`` (string): The path name of the\n `os-release file`_ that is to be used as a data source.\n\n An empty string (the default) will cause the default path name to\n be used (see `os-release file`_ for details).\n\n If the specified or defaulted os-release file does not exist, the\n data source for the os-release file will be empty.\n\n * ``distro_release_file`` (string): The path name of the\n `distro release file`_ that is to be used as a data source.\n\n An empty string (the default) will cause a default search algorithm\n to be used (see `distro release file`_ for details).\n\n If the specified distro release file does not exist, or if no default\n distro release file can be found, the data source for the distro\n release file will be empty.\n\n * ``include_uname`` (bool): Controls 
whether uname command output is\n included as a data source. If the uname command is not available in\n the program execution path the data source for the uname command will\n be empty.\n\n * ``root_dir`` (string): The absolute path to the root directory to use\n to find distro-related information files. Note that ``include_*``\n parameters must not be enabled in combination with ``root_dir``.\n\n * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command\n output is included as a data source. If the oslevel command is not\n available in the program execution path the data source will be\n empty.\n\n Public instance attributes:\n\n * ``os_release_file`` (string): The path name of the\n `os-release file`_ that is actually used as a data source. The\n empty string if no distro release file is used as a data source.\n\n * ``distro_release_file`` (string): The path name of the\n `distro release file`_ that is actually used as a data source. The\n empty string if no distro release file is used as a data source.\n\n * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.\n This controls whether the lsb information will be loaded.\n\n * ``include_uname`` (bool): The result of the ``include_uname``\n parameter. This controls whether the uname information will\n be loaded.\n\n * ``include_oslevel`` (bool): The result of the ``include_oslevel``\n parameter. 
This controls whether (AIX) oslevel information will be\n loaded.\n\n * ``root_dir`` (string): The result of the ``root_dir`` parameter.\n The absolute path to the root directory to use to find distro-related\n information files.\n\n Raises:\n\n * :py:exc:`ValueError`: Initialization parameters combination is not\n supported.\n\n * :py:exc:`OSError`: Some I/O issue with an os-release file or distro\n release file.\n\n * :py:exc:`UnicodeError`: A data source has unexpected characters or\n uses an unexpected encoding.\n """\n self.root_dir = root_dir\n self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR\n self.usr_lib_dir = (\n os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR\n )\n\n if os_release_file:\n self.os_release_file = os_release_file\n else:\n etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME)\n usr_lib_os_release_file = os.path.join(\n self.usr_lib_dir, _OS_RELEASE_BASENAME\n )\n\n # NOTE: The idea is to respect order **and** have it set\n # at all times for API backwards compatibility.\n if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile(\n usr_lib_os_release_file\n ):\n self.os_release_file = etc_dir_os_release_file\n else:\n self.os_release_file = usr_lib_os_release_file\n\n self.distro_release_file = distro_release_file or "" # updated later\n\n is_root_dir_defined = root_dir is not None\n if is_root_dir_defined and (include_lsb or include_uname or include_oslevel):\n raise ValueError(\n "Including subprocess data sources from specific root_dir is disallowed"\n " to prevent false information"\n )\n self.include_lsb = (\n include_lsb if include_lsb is not None else not is_root_dir_defined\n )\n self.include_uname = (\n include_uname if include_uname is not None else not is_root_dir_defined\n )\n self.include_oslevel = (\n include_oslevel if include_oslevel is not None else not is_root_dir_defined\n )\n\n def __repr__(self) -> str:\n """Return repr of all info"""\n return (\n 
"LinuxDistribution("\n "os_release_file={self.os_release_file!r}, "\n "distro_release_file={self.distro_release_file!r}, "\n "include_lsb={self.include_lsb!r}, "\n "include_uname={self.include_uname!r}, "\n "include_oslevel={self.include_oslevel!r}, "\n "root_dir={self.root_dir!r}, "\n "_os_release_info={self._os_release_info!r}, "\n "_lsb_release_info={self._lsb_release_info!r}, "\n "_distro_release_info={self._distro_release_info!r}, "\n "_uname_info={self._uname_info!r}, "\n "_oslevel_info={self._oslevel_info!r})".format(self=self)\n )\n\n def linux_distribution(\n self, full_distribution_name: bool = True\n ) -> Tuple[str, str, str]:\n """\n Return information about the OS distribution that is compatible\n with Python's :func:`platform.linux_distribution`, supporting a subset\n of its parameters.\n\n For details, see :func:`distro.linux_distribution`.\n """\n return (\n self.name() if full_distribution_name else self.id(),\n self.version(),\n self._os_release_info.get("release_codename") or self.codename(),\n )\n\n def id(self) -> str:\n """Return the distro ID of the OS distribution, as a string.\n\n For details, see :func:`distro.id`.\n """\n\n def normalize(distro_id: str, table: Dict[str, str]) -> str:\n distro_id = distro_id.lower().replace(" ", "_")\n return table.get(distro_id, distro_id)\n\n distro_id = self.os_release_attr("id")\n if distro_id:\n return normalize(distro_id, NORMALIZED_OS_ID)\n\n distro_id = self.lsb_release_attr("distributor_id")\n if distro_id:\n return normalize(distro_id, NORMALIZED_LSB_ID)\n\n distro_id = self.distro_release_attr("id")\n if distro_id:\n return normalize(distro_id, NORMALIZED_DISTRO_ID)\n\n distro_id = self.uname_attr("id")\n if distro_id:\n return normalize(distro_id, NORMALIZED_DISTRO_ID)\n\n return ""\n\n def name(self, pretty: bool = False) -> str:\n """\n Return the name of the OS distribution, as a string.\n\n For details, see :func:`distro.name`.\n """\n name = (\n self.os_release_attr("name")\n or 
self.lsb_release_attr("distributor_id")\n or self.distro_release_attr("name")\n or self.uname_attr("name")\n )\n if pretty:\n name = self.os_release_attr("pretty_name") or self.lsb_release_attr(\n "description"\n )\n if not name:\n name = self.distro_release_attr("name") or self.uname_attr("name")\n version = self.version(pretty=True)\n if version:\n name = f"{name} {version}"\n return name or ""\n\n def version(self, pretty: bool = False, best: bool = False) -> str:\n """\n Return the version of the OS distribution, as a string.\n\n For details, see :func:`distro.version`.\n """\n versions = [\n self.os_release_attr("version_id"),\n self.lsb_release_attr("release"),\n self.distro_release_attr("version_id"),\n self._parse_distro_release_content(self.os_release_attr("pretty_name")).get(\n "version_id", ""\n ),\n self._parse_distro_release_content(\n self.lsb_release_attr("description")\n ).get("version_id", ""),\n self.uname_attr("release"),\n ]\n if self.uname_attr("id").startswith("aix"):\n # On AIX platforms, prefer oslevel command output.\n versions.insert(0, self.oslevel_info())\n elif self.id() == "debian" or "debian" in self.like().split():\n # On Debian-like, add debian_version file content to candidates list.\n versions.append(self._debian_version)\n version = ""\n if best:\n # This algorithm uses the last version in priority order that has\n # the best precision. 
If the versions are not in conflict, that\n # does not matter; otherwise, using the last one instead of the\n # first one might be considered a surprise.\n for v in versions:\n if v.count(".") > version.count(".") or version == "":\n version = v\n else:\n for v in versions:\n if v != "":\n version = v\n break\n if pretty and version and self.codename():\n version = f"{version} ({self.codename()})"\n return version\n\n def version_parts(self, best: bool = False) -> Tuple[str, str, str]:\n """\n Return the version of the OS distribution, as a tuple of version\n numbers.\n\n For details, see :func:`distro.version_parts`.\n """\n version_str = self.version(best=best)\n if version_str:\n version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")\n matches = version_regex.match(version_str)\n if matches:\n major, minor, build_number = matches.groups()\n return major, minor or "", build_number or ""\n return "", "", ""\n\n def major_version(self, best: bool = False) -> str:\n """\n Return the major version number of the current distribution.\n\n For details, see :func:`distro.major_version`.\n """\n return self.version_parts(best)[0]\n\n def minor_version(self, best: bool = False) -> str:\n """\n Return the minor version number of the current distribution.\n\n For details, see :func:`distro.minor_version`.\n """\n return self.version_parts(best)[1]\n\n def build_number(self, best: bool = False) -> str:\n """\n Return the build number of the current distribution.\n\n For details, see :func:`distro.build_number`.\n """\n return self.version_parts(best)[2]\n\n def like(self) -> str:\n """\n Return the IDs of distributions that are like the OS distribution.\n\n For details, see :func:`distro.like`.\n """\n return self.os_release_attr("id_like") or ""\n\n def codename(self) -> str:\n """\n Return the codename of the OS distribution.\n\n For details, see :func:`distro.codename`.\n """\n try:\n # Handle os_release specially since distros might purposefully set\n # this to empty 
string to have no codename\n return self._os_release_info["codename"]\n except KeyError:\n return (\n self.lsb_release_attr("codename")\n or self.distro_release_attr("codename")\n or ""\n )\n\n def info(self, pretty: bool = False, best: bool = False) -> InfoDict:\n """\n Return certain machine-readable information about the OS\n distribution.\n\n For details, see :func:`distro.info`.\n """\n return InfoDict(\n id=self.id(),\n version=self.version(pretty, best),\n version_parts=VersionDict(\n major=self.major_version(best),\n minor=self.minor_version(best),\n build_number=self.build_number(best),\n ),\n like=self.like(),\n codename=self.codename(),\n )\n\n def os_release_info(self) -> Dict[str, str]:\n """\n Return a dictionary containing key-value pairs for the information\n items from the os-release file data source of the OS distribution.\n\n For details, see :func:`distro.os_release_info`.\n """\n return self._os_release_info\n\n def lsb_release_info(self) -> Dict[str, str]:\n """\n Return a dictionary containing key-value pairs for the information\n items from the lsb_release command data source of the OS\n distribution.\n\n For details, see :func:`distro.lsb_release_info`.\n """\n return self._lsb_release_info\n\n def distro_release_info(self) -> Dict[str, str]:\n """\n Return a dictionary containing key-value pairs for the information\n items from the distro release file data source of the OS\n distribution.\n\n For details, see :func:`distro.distro_release_info`.\n """\n return self._distro_release_info\n\n def uname_info(self) -> Dict[str, str]:\n """\n Return a dictionary containing key-value pairs for the information\n items from the uname command data source of the OS distribution.\n\n For details, see :func:`distro.uname_info`.\n """\n return self._uname_info\n\n def oslevel_info(self) -> str:\n """\n Return AIX' oslevel command output.\n """\n return self._oslevel_info\n\n def os_release_attr(self, attribute: str) -> str:\n """\n Return a single named 
information item from the os-release file data\n source of the OS distribution.\n\n For details, see :func:`distro.os_release_attr`.\n """\n return self._os_release_info.get(attribute, "")\n\n def lsb_release_attr(self, attribute: str) -> str:\n """\n Return a single named information item from the lsb_release command\n output data source of the OS distribution.\n\n For details, see :func:`distro.lsb_release_attr`.\n """\n return self._lsb_release_info.get(attribute, "")\n\n def distro_release_attr(self, attribute: str) -> str:\n """\n Return a single named information item from the distro release file\n data source of the OS distribution.\n\n For details, see :func:`distro.distro_release_attr`.\n """\n return self._distro_release_info.get(attribute, "")\n\n def uname_attr(self, attribute: str) -> str:\n """\n Return a single named information item from the uname command\n output data source of the OS distribution.\n\n For details, see :func:`distro.uname_attr`.\n """\n return self._uname_info.get(attribute, "")\n\n @cached_property\n def _os_release_info(self) -> Dict[str, str]:\n """\n Get the information items from the specified os-release file.\n\n Returns:\n A dictionary containing all information items.\n """\n if os.path.isfile(self.os_release_file):\n with open(self.os_release_file, encoding="utf-8") as release_file:\n return self._parse_os_release_content(release_file)\n return {}\n\n @staticmethod\n def _parse_os_release_content(lines: TextIO) -> Dict[str, str]:\n """\n Parse the lines of an os-release file.\n\n Parameters:\n\n * lines: Iterable through the lines in the os-release file.\n Each line must be a unicode string or a UTF-8 encoded byte\n string.\n\n Returns:\n A dictionary containing all information items.\n """\n props = {}\n lexer = shlex.shlex(lines, posix=True)\n lexer.whitespace_split = True\n\n tokens = list(lexer)\n for token in tokens:\n # At this point, all shell-like parsing has been done (i.e.\n # comments processed, quotes and 
backslash escape sequences\n # processed, multi-line values assembled, trailing newlines\n # stripped, etc.), so the tokens are now either:\n # * variable assignments: var=value\n # * commands or their arguments (not allowed in os-release)\n # Ignore any tokens that are not variable assignments\n if "=" in token:\n k, v = token.split("=", 1)\n props[k.lower()] = v\n\n if "version" in props:\n # extract release codename (if any) from version attribute\n match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"])\n if match:\n release_codename = match.group(1) or match.group(2)\n props["codename"] = props["release_codename"] = release_codename\n\n if "version_codename" in props:\n # os-release added a version_codename field. Use that in\n # preference to anything else Note that some distros purposefully\n # do not have code names. They should be setting\n # version_codename=""\n props["codename"] = props["version_codename"]\n elif "ubuntu_codename" in props:\n # Same as above but a non-standard field name used on older Ubuntus\n props["codename"] = props["ubuntu_codename"]\n\n return props\n\n @cached_property\n def _lsb_release_info(self) -> Dict[str, str]:\n """\n Get the information items from the lsb_release command output.\n\n Returns:\n A dictionary containing all information items.\n """\n if not self.include_lsb:\n return {}\n try:\n cmd = ("lsb_release", "-a")\n stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)\n # Command not found or lsb_release returned error\n except (OSError, subprocess.CalledProcessError):\n return {}\n content = self._to_str(stdout).splitlines()\n return self._parse_lsb_release_content(content)\n\n @staticmethod\n def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]:\n """\n Parse the output of the lsb_release command.\n\n Parameters:\n\n * lines: Iterable through the lines of the lsb_release output.\n Each line must be a unicode string or a UTF-8 encoded byte\n string.\n\n Returns:\n A dictionary 
containing all information items.\n """\n props = {}\n for line in lines:\n kv = line.strip("\n").split(":", 1)\n if len(kv) != 2:\n # Ignore lines without colon.\n continue\n k, v = kv\n props.update({k.replace(" ", "_").lower(): v.strip()})\n return props\n\n @cached_property\n def _uname_info(self) -> Dict[str, str]:\n if not self.include_uname:\n return {}\n try:\n cmd = ("uname", "-rs")\n stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)\n except OSError:\n return {}\n content = self._to_str(stdout).splitlines()\n return self._parse_uname_content(content)\n\n @cached_property\n def _oslevel_info(self) -> str:\n if not self.include_oslevel:\n return ""\n try:\n stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL)\n except (OSError, subprocess.CalledProcessError):\n return ""\n return self._to_str(stdout).strip()\n\n @cached_property\n def _debian_version(self) -> str:\n try:\n with open(\n os.path.join(self.etc_dir, "debian_version"), encoding="ascii"\n ) as fp:\n return fp.readline().rstrip()\n except FileNotFoundError:\n return ""\n\n @staticmethod\n def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]:\n if not lines:\n return {}\n props = {}\n match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip())\n if match:\n name, version = match.groups()\n\n # This is to prevent the Linux kernel version from\n # appearing as the 'best' version on otherwise\n # identifiable distributions.\n if name == "Linux":\n return {}\n props["id"] = name.lower()\n props["name"] = name\n props["release"] = version\n return props\n\n @staticmethod\n def _to_str(bytestring: bytes) -> str:\n encoding = sys.getfilesystemencoding()\n return bytestring.decode(encoding)\n\n @cached_property\n def _distro_release_info(self) -> Dict[str, str]:\n """\n Get the information items from the specified distro release file.\n\n Returns:\n A dictionary containing all information items.\n """\n if self.distro_release_file:\n # If it was specified, we 
use it and parse what we can, even if\n # its file name or content does not match the expected pattern.\n distro_info = self._parse_distro_release_file(self.distro_release_file)\n basename = os.path.basename(self.distro_release_file)\n # The file name pattern for user-specified distro release files\n # is somewhat more tolerant (compared to when searching for the\n # file), because we want to use what was specified as best as\n # possible.\n match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)\n else:\n try:\n basenames = [\n basename\n for basename in os.listdir(self.etc_dir)\n if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES\n and os.path.isfile(os.path.join(self.etc_dir, basename))\n ]\n # We sort for repeatability in cases where there are multiple\n # distro specific files; e.g. CentOS, Oracle, Enterprise all\n # containing `redhat-release` on top of their own.\n basenames.sort()\n except OSError:\n # This may occur when /etc is not readable but we can't be\n # sure about the *-release files. Check common entries of\n # /etc for information. 
If they turn out to not be there the\n # error is handled in `_parse_distro_release_file()`.\n basenames = _DISTRO_RELEASE_BASENAMES\n for basename in basenames:\n match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)\n if match is None:\n continue\n filepath = os.path.join(self.etc_dir, basename)\n distro_info = self._parse_distro_release_file(filepath)\n # The name is always present if the pattern matches.\n if "name" not in distro_info:\n continue\n self.distro_release_file = filepath\n break\n else: # the loop didn't "break": no candidate.\n return {}\n\n if match is not None:\n distro_info["id"] = match.group(1)\n\n # CloudLinux < 7: manually enrich info with proper id.\n if "cloudlinux" in distro_info.get("name", "").lower():\n distro_info["id"] = "cloudlinux"\n\n return distro_info\n\n def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]:\n """\n Parse a distro release file.\n\n Parameters:\n\n * filepath: Path name of the distro release file.\n\n Returns:\n A dictionary containing all information items.\n """\n try:\n with open(filepath, encoding="utf-8") as fp:\n # Only parse the first line. For instance, on SLES there\n # are multiple lines. We don't want them...\n return self._parse_distro_release_content(fp.readline())\n except OSError:\n # Ignore not being able to read a specific, seemingly version\n # related file.\n # See https://github.com/python-distro/distro/issues/162\n return {}\n\n @staticmethod\n def _parse_distro_release_content(line: str) -> Dict[str, str]:\n """\n Parse a line from a distro release file.\n\n Parameters:\n * line: Line from the distro release file. 
Must be a unicode string\n or a UTF-8 encoded byte string.\n\n Returns:\n A dictionary containing all information items.\n """\n matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])\n distro_info = {}\n if matches:\n # regexp ensures non-None\n distro_info["name"] = matches.group(3)[::-1]\n if matches.group(2):\n distro_info["version_id"] = matches.group(2)[::-1]\n if matches.group(1):\n distro_info["codename"] = matches.group(1)[::-1]\n elif line:\n distro_info["name"] = line.strip()\n return distro_info\n\n\n_distro = LinuxDistribution()\n\n\ndef main() -> None:\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n\n parser = argparse.ArgumentParser(description="OS distro info tool")\n parser.add_argument(\n "--json", "-j", help="Output in machine readable format", action="store_true"\n )\n\n parser.add_argument(\n "--root-dir",\n "-r",\n type=str,\n dest="root_dir",\n help="Path to the root filesystem directory (defaults to /)",\n )\n\n args = parser.parse_args()\n\n if args.root_dir:\n dist = LinuxDistribution(\n include_lsb=False,\n include_uname=False,\n include_oslevel=False,\n root_dir=args.root_dir,\n )\n else:\n dist = _distro\n\n if args.json:\n logger.info(json.dumps(dist.info(), indent=4, sort_keys=True))\n else:\n logger.info("Name: %s", dist.name(pretty=True))\n distribution_version = dist.version(pretty=True)\n logger.info("Version: %s", distribution_version)\n distribution_codename = dist.codename()\n logger.info("Codename: %s", distribution_codename)\n\n\nif __name__ == "__main__":\n main()\n
.venv\Lib\site-packages\pip\_vendor\distro\distro.py
distro.py
Python
49,430
0.95
0.151105
0.134632
python-kit
409
2025-07-07T16:33:17.477490
Apache-2.0
false
7d770f8e9463818bf6b8ea645aac445e
from .distro import (\n NORMALIZED_DISTRO_ID,\n NORMALIZED_LSB_ID,\n NORMALIZED_OS_ID,\n LinuxDistribution,\n __version__,\n build_number,\n codename,\n distro_release_attr,\n distro_release_info,\n id,\n info,\n like,\n linux_distribution,\n lsb_release_attr,\n lsb_release_info,\n major_version,\n minor_version,\n name,\n os_release_attr,\n os_release_info,\n uname_attr,\n uname_info,\n version,\n version_parts,\n)\n\n__all__ = [\n "NORMALIZED_DISTRO_ID",\n "NORMALIZED_LSB_ID",\n "NORMALIZED_OS_ID",\n "LinuxDistribution",\n "build_number",\n "codename",\n "distro_release_attr",\n "distro_release_info",\n "id",\n "info",\n "like",\n "linux_distribution",\n "lsb_release_attr",\n "lsb_release_info",\n "major_version",\n "minor_version",\n "name",\n "os_release_attr",\n "os_release_info",\n "uname_attr",\n "uname_info",\n "version",\n "version_parts",\n]\n\n__version__ = __version__\n
.venv\Lib\site-packages\pip\_vendor\distro\__init__.py
__init__.py
Python
981
0.85
0
0
awesome-app
425
2023-11-07T16:26:09.019275
MIT
false
5b9b7efb166424292d033eb05b9de265
from .distro import main\n\nif __name__ == "__main__":\n main()\n
.venv\Lib\site-packages\pip\_vendor\distro\__main__.py
__main__.py
Python
64
0.65
0.25
0
vue-tools
393
2023-10-29T01:46:11.770770
GPL-3.0
false
9ba2b2b4dfc91b521f07858fc5547a23
\n\n
.venv\Lib\site-packages\pip\_vendor\distro\__pycache__\distro.cpython-313.pyc
distro.cpython-313.pyc
Other
52,146
0.95
0.07931
0.088483
vue-tools
833
2025-06-03T09:42:46.514615
MIT
false
bc428fd72c95bc5d83d332e218907a12
\n\n
.venv\Lib\site-packages\pip\_vendor\distro\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
963
0.7
0
0
vue-tools
10
2023-10-24T05:01:43.974215
GPL-3.0
false
9f5de52e2aafe90620ad1ba6d0adda73
\n\n
.venv\Lib\site-packages\pip\_vendor\distro\__pycache__\__main__.cpython-313.pyc
__main__.cpython-313.pyc
Other
297
0.7
0
0
node-utils
444
2024-04-17T17:00:21.737458
GPL-3.0
false
3c8120eddf3bc6aea28171d19dc94e66
import codecs\nimport re\nfrom typing import Any, Optional, Tuple\n\nfrom .core import IDNAError, alabel, decode, encode, ulabel\n\n_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]")\n\n\nclass Codec(codecs.Codec):\n def encode(self, data: str, errors: str = "strict") -> Tuple[bytes, int]:\n if errors != "strict":\n raise IDNAError('Unsupported error handling "{}"'.format(errors))\n\n if not data:\n return b"", 0\n\n return encode(data), len(data)\n\n def decode(self, data: bytes, errors: str = "strict") -> Tuple[str, int]:\n if errors != "strict":\n raise IDNAError('Unsupported error handling "{}"'.format(errors))\n\n if not data:\n return "", 0\n\n return decode(data), len(data)\n\n\nclass IncrementalEncoder(codecs.BufferedIncrementalEncoder):\n def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[bytes, int]:\n if errors != "strict":\n raise IDNAError('Unsupported error handling "{}"'.format(errors))\n\n if not data:\n return b"", 0\n\n labels = _unicode_dots_re.split(data)\n trailing_dot = b""\n if labels:\n if not labels[-1]:\n trailing_dot = b"."\n del labels[-1]\n elif not final:\n # Keep potentially unfinished label until the next call\n del labels[-1]\n if labels:\n trailing_dot = b"."\n\n result = []\n size = 0\n for label in labels:\n result.append(alabel(label))\n if size:\n size += 1\n size += len(label)\n\n # Join with U+002E\n result_bytes = b".".join(result) + trailing_dot\n size += len(trailing_dot)\n return result_bytes, size\n\n\nclass IncrementalDecoder(codecs.BufferedIncrementalDecoder):\n def _buffer_decode(self, data: Any, errors: str, final: bool) -> Tuple[str, int]:\n if errors != "strict":\n raise IDNAError('Unsupported error handling "{}"'.format(errors))\n\n if not data:\n return ("", 0)\n\n if not isinstance(data, str):\n data = str(data, "ascii")\n\n labels = _unicode_dots_re.split(data)\n trailing_dot = ""\n if labels:\n if not labels[-1]:\n trailing_dot = "."\n del labels[-1]\n elif not final:\n # Keep 
potentially unfinished label until the next call\n del labels[-1]\n if labels:\n trailing_dot = "."\n\n result = []\n size = 0\n for label in labels:\n result.append(ulabel(label))\n if size:\n size += 1\n size += len(label)\n\n result_str = ".".join(result) + trailing_dot\n size += len(trailing_dot)\n return (result_str, size)\n\n\nclass StreamWriter(Codec, codecs.StreamWriter):\n pass\n\n\nclass StreamReader(Codec, codecs.StreamReader):\n pass\n\n\ndef search_function(name: str) -> Optional[codecs.CodecInfo]:\n if name != "idna2008":\n return None\n return codecs.CodecInfo(\n name=name,\n encode=Codec().encode,\n decode=Codec().decode,\n incrementalencoder=IncrementalEncoder,\n incrementaldecoder=IncrementalDecoder,\n streamwriter=StreamWriter,\n streamreader=StreamReader,\n )\n\n\ncodecs.register(search_function)\n
.venv\Lib\site-packages\pip\_vendor\idna\codec.py
codec.py
Python
3,422
0.95
0.245902
0.032609
node-utils
92
2023-08-14T14:33:17.204245
BSD-3-Clause
false
abcf05aec6db6b1dcef409433f57fcd2
from typing import Any, Union\n\nfrom .core import decode, encode\n\n\ndef ToASCII(label: str) -> bytes:\n return encode(label)\n\n\ndef ToUnicode(label: Union[bytes, bytearray]) -> str:\n return decode(label)\n\n\ndef nameprep(s: Any) -> None:\n raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")\n
.venv\Lib\site-packages\pip\_vendor\idna\compat.py
compat.py
Python
316
0.85
0.2
0
vue-tools
215
2025-07-03T00:39:50.631007
BSD-3-Clause
false
481871cd052957124183a01fed88b799
"""Core IDNA 2008 encoding/decoding and label validation.

Implements the label checks of RFC 5891 (hyphen restrictions, NFC,
leading combining marks, CONTEXTJ/CONTEXTO rules), the Bidi rules of
RFC 5893, optional UTS #46 remapping, and the public ``encode`` /
``decode`` / ``alabel`` / ``ulabel`` entry points.
"""
import bisect
import re
import unicodedata
from typing import Optional, Union

from . import idnadata
from .intranges import intranges_contain

# Canonical combining class of virama characters (used by CONTEXTJ rules).
_virama_combining_class = 9
# ACE prefix that marks a Punycode-encoded (A-) label.
_alabel_prefix = b"xn--"
# The four code points treated as label separators.
_unicode_dots_re = re.compile("[\u002e\u3002\uff0e\uff61]")


class IDNAError(UnicodeError):
    """Base exception for all IDNA-encoding related problems"""

    pass


class IDNABidiError(IDNAError):
    """Exception when bidirectional requirements are not satisfied"""

    pass


class InvalidCodepoint(IDNAError):
    """Exception when a disallowed or unallocated codepoint is used"""

    pass


class InvalidCodepointContext(IDNAError):
    """Exception when the codepoint is not valid in the context it is used"""

    pass


def _combining_class(cp: int) -> int:
    # Combining class 0 is ambiguous: it is also returned for characters
    # unknown to unicodedata, so distinguish via unicodedata.name().
    v = unicodedata.combining(chr(cp))
    if v == 0:
        if not unicodedata.name(chr(cp)):
            raise ValueError("Unknown character in unicodedata")
    return v


def _is_script(cp: str, script: str) -> bool:
    # Membership test against the packed script ranges in idnadata.
    return intranges_contain(ord(cp), idnadata.scripts[script])


def _punycode(s: str) -> bytes:
    return s.encode("punycode")


def _unot(s: int) -> str:
    # Format a code point as "U+XXXX" for error messages.
    return "U+{:04X}".format(s)


def valid_label_length(label: Union[bytes, str]) -> bool:
    # DNS labels are limited to 63 octets.
    if len(label) > 63:
        return False
    return True


def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool:
    # Whole domain limit: 253 octets, or 254 counting a trailing dot.
    if len(label) > (254 if trailing_dot else 253):
        return False
    return True


def check_bidi(label: str, check_ltr: bool = False) -> bool:
    """Enforce the RFC 5893 Bidi rules; raises IDNABidiError on violation."""
    # Bidi rules should only be applied if string contains RTL characters
    bidi_label = False
    for idx, cp in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)
        if direction == "":
            # String likely comes from a newer version of Unicode
            raise IDNABidiError("Unknown directionality in label {} at position {}".format(repr(label), idx))
        if direction in ["R", "AL", "AN"]:
            bidi_label = True
    if not bidi_label and not check_ltr:
        return True

    # Bidi rule 1
    direction = unicodedata.bidirectional(label[0])
    if direction in ["R", "AL"]:
        rtl = True
    elif direction == "L":
        rtl = False
    else:
        raise IDNABidiError("First codepoint in label {} must be directionality L, R or AL".format(repr(label)))

    valid_ending = False
    number_type: Optional[str] = None
    for idx, cp in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)

        if rtl:
            # Bidi rule 2
            if direction not in [
                "R",
                "AL",
                "AN",
                "EN",
                "ES",
                "CS",
                "ET",
                "ON",
                "BN",
                "NSM",
            ]:
                raise IDNABidiError("Invalid direction for codepoint at position {} in a right-to-left label".format(idx))
            # Bidi rule 3
            if direction in ["R", "AL", "EN", "AN"]:
                valid_ending = True
            elif direction != "NSM":
                valid_ending = False
            # Bidi rule 4
            if direction in ["AN", "EN"]:
                if not number_type:
                    number_type = direction
                else:
                    if number_type != direction:
                        raise IDNABidiError("Can not mix numeral types in a right-to-left label")
        else:
            # Bidi rule 5
            if direction not in ["L", "EN", "ES", "CS", "ET", "ON", "BN", "NSM"]:
                raise IDNABidiError("Invalid direction for codepoint at position {} in a left-to-right label".format(idx))
            # Bidi rule 6
            if direction in ["L", "EN"]:
                valid_ending = True
            elif direction != "NSM":
                valid_ending = False

    if not valid_ending:
        raise IDNABidiError("Label ends with illegal codepoint directionality")

    return True


def check_initial_combiner(label: str) -> bool:
    # A label must not begin with a combining mark (general category M*).
    if unicodedata.category(label[0])[0] == "M":
        raise IDNAError("Label begins with an illegal combining character")
    return True


def check_hyphen_ok(label: str) -> bool:
    # Hyphens are forbidden in positions 3-4 (reserved for ACE prefixes)
    # and at either end of a label.
    if label[2:4] == "--":
        raise IDNAError("Label has disallowed hyphens in 3rd and 4th position")
    if label[0] == "-" or label[-1] == "-":
        raise IDNAError("Label must not start or end with a hyphen")
    return True


def check_nfc(label: str) -> None:
    if unicodedata.normalize("NFC", label) != label:
        raise IDNAError("Label must be in Normalization Form C")


def valid_contextj(label: str, pos: int) -> bool:
    """CONTEXTJ rule check for the joiner at *pos* (ZWNJ U+200C / ZWJ U+200D)."""
    cp_value = ord(label[pos])

    if cp_value == 0x200C:
        # ZWNJ is allowed after a virama ...
        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True

        # ... or between characters with suitable joining types:
        # scan left for L/D (skipping transparent T) ...
        ok = False
        for i in range(pos - 1, -1, -1):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord("T"):
                continue
            elif joining_type in [ord("L"), ord("D")]:
                ok = True
                break
            else:
                break

        if not ok:
            return False

        # ... and right for R/D (again skipping T).
        ok = False
        for i in range(pos + 1, len(label)):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord("T"):
                continue
            elif joining_type in [ord("R"), ord("D")]:
                ok = True
                break
            else:
                break
        return ok

    if cp_value == 0x200D:
        # ZWJ is only allowed directly after a virama.
        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True
        return False

    else:
        return False


def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
    """CONTEXTO rule check for the code point at *pos*."""
    cp_value = ord(label[pos])

    # MIDDLE DOT: only between two 'l' characters.
    if cp_value == 0x00B7:
        if 0 < pos < len(label) - 1:
            if ord(label[pos - 1]) == 0x006C and ord(label[pos + 1]) == 0x006C:
                return True
        return False

    # GREEK LOWER NUMERAL SIGN (keraia): must precede a Greek character.
    elif cp_value == 0x0375:
        if pos < len(label) - 1 and len(label) > 1:
            return _is_script(label[pos + 1], "Greek")
        return False

    # HEBREW PUNCTUATION GERESH / GERSHAYIM: must follow a Hebrew character.
    elif cp_value == 0x05F3 or cp_value == 0x05F4:
        if pos > 0:
            return _is_script(label[pos - 1], "Hebrew")
        return False

    # KATAKANA MIDDLE DOT: label must contain Hiragana, Katakana or Han.
    elif cp_value == 0x30FB:
        for cp in label:
            if cp == "\u30fb":
                continue
            if _is_script(cp, "Hiragana") or _is_script(cp, "Katakana") or _is_script(cp, "Han"):
                return True
        return False

    # ARABIC-INDIC digits must not mix with EXTENDED ARABIC-INDIC digits.
    elif 0x660 <= cp_value <= 0x669:
        for cp in label:
            if 0x6F0 <= ord(cp) <= 0x06F9:
                return False
        return True

    # ... and vice versa.
    elif 0x6F0 <= cp_value <= 0x6F9:
        for cp in label:
            if 0x660 <= ord(cp) <= 0x0669:
                return False
        return True

    return False


def check_label(label: Union[str, bytes, bytearray]) -> None:
    """Run the full RFC 5891 validity checks on a single label.

    Raises IDNAError (or a subclass) on any violation; returns None on
    success.
    """
    if isinstance(label, (bytes, bytearray)):
        label = label.decode("utf-8")
    if len(label) == 0:
        raise IDNAError("Empty Label")

    check_nfc(label)
    check_hyphen_ok(label)
    check_initial_combiner(label)

    # Classify every code point as PVALID, CONTEXTJ, CONTEXTO or disallowed.
    for pos, cp in enumerate(label):
        cp_value = ord(cp)
        if intranges_contain(cp_value, idnadata.codepoint_classes["PVALID"]):
            continue
        elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTJ"]):
            try:
                if not valid_contextj(label, pos):
                    raise InvalidCodepointContext(
                        "Joiner {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label))
                    )
            except ValueError:
                # _combining_class raises ValueError for characters unknown
                # to unicodedata; surface that as an IDNAError.
                raise IDNAError(
                    "Unknown codepoint adjacent to joiner {} at position {} in {}".format(
                        _unot(cp_value), pos + 1, repr(label)
                    )
                )
        elif intranges_contain(cp_value, idnadata.codepoint_classes["CONTEXTO"]):
            if not valid_contexto(label, pos):
                raise InvalidCodepointContext(
                    "Codepoint {} not allowed at position {} in {}".format(_unot(cp_value), pos + 1, repr(label))
                )
        else:
            raise InvalidCodepoint(
                "Codepoint {} at position {} of {} not allowed".format(_unot(cp_value), pos + 1, repr(label))
            )

    check_bidi(label)


def alabel(label: str) -> bytes:
    """Convert a single label to its A-label (ASCII/Punycode) form."""
    try:
        # Already-ASCII input is validated round-trip via ulabel() and
        # returned unchanged rather than re-encoded.
        label_bytes = label.encode("ascii")
        ulabel(label_bytes)
        if not valid_label_length(label_bytes):
            raise IDNAError("Label too long")
        return label_bytes
    except UnicodeEncodeError:
        pass

    check_label(label)
    label_bytes = _alabel_prefix + _punycode(label)

    if not valid_label_length(label_bytes):
        raise IDNAError("Label too long")

    return label_bytes


def ulabel(label: Union[str, bytes, bytearray]) -> str:
    """Convert a single label to its Unicode (U-label) form."""
    if not isinstance(label, (bytes, bytearray)):
        try:
            label_bytes = label.encode("ascii")
        except UnicodeEncodeError:
            # Non-ASCII str input is already a U-label: validate and return.
            check_label(label)
            return label
    else:
        label_bytes = label

    label_bytes = label_bytes.lower()
    if label_bytes.startswith(_alabel_prefix):
        label_bytes = label_bytes[len(_alabel_prefix) :]
        if not label_bytes:
            raise IDNAError("Malformed A-label, no Punycode eligible content found")
        if label_bytes.decode("ascii")[-1] == "-":
            raise IDNAError("A-label must not end with a hyphen")
    else:
        # Plain ASCII label (no ACE prefix): validate and return as-is.
        check_label(label_bytes)
        return label_bytes.decode("ascii")

    try:
        label = label_bytes.decode("punycode")
    except UnicodeError:
        raise IDNAError("Invalid A-label")
    check_label(label)
    return label


def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str:
    """Re-map the characters in the string according to UTS46 processing."""
    # Imported lazily: the uts46data table is large and only needed when
    # uts46 processing is requested.
    from .uts46data import uts46data

    output = ""

    for pos, char in enumerate(domain):
        code_point = ord(char)
        try:
            # Rows for code points < 256 are stored at their own index;
            # everything else is found by bisecting on (code_point, status).
            uts46row = uts46data[code_point if code_point < 256 else bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
            status = uts46row[1]
            replacement: Optional[str] = None
            if len(uts46row) == 3:
                replacement = uts46row[2]
            # Status codes: V=valid, D=deviation, M=mapped, 3=std3-dependent,
            # I=ignored; any other combination is disallowed.
            if (
                status == "V"
                or (status == "D" and not transitional)
                or (status == "3" and not std3_rules and replacement is None)
            ):
                output += char
            elif replacement is not None and (
                status == "M" or (status == "3" and not std3_rules) or (status == "D" and transitional)
            ):
                output += replacement
            elif status != "I":
                # Disallowed: reuse the IndexError path below for reporting.
                raise IndexError()
        except IndexError:
            raise InvalidCodepoint(
                "Codepoint {} not allowed at position {} in {}".format(_unot(code_point), pos + 1, repr(domain))
            )

    return unicodedata.normalize("NFC", output)


def encode(
    s: Union[str, bytes, bytearray],
    strict: bool = False,
    uts46: bool = False,
    std3_rules: bool = False,
    transitional: bool = False,
) -> bytes:
    """Encode a whole domain name to its ASCII form.

    With strict=True only U+002E separates labels; otherwise all four
    Unicode dot variants do.  Raises IDNAError on any invalid input.
    """
    if not isinstance(s, str):
        try:
            s = str(s, "ascii")
        except UnicodeDecodeError:
            raise IDNAError("should pass a unicode string to the function rather than a byte string.")
    if uts46:
        s = uts46_remap(s, std3_rules, transitional)
    trailing_dot = False
    result = []
    if strict:
        labels = s.split(".")
    else:
        labels = _unicode_dots_re.split(s)
    if not labels or labels == [""]:
        raise IDNAError("Empty domain")
    if labels[-1] == "":
        # A trailing separator yields an empty final label; remember it so
        # the dot can be re-appended after encoding.
        del labels[-1]
        trailing_dot = True
    for label in labels:
        s = alabel(label)
        if s:
            result.append(s)
        else:
            raise IDNAError("Empty label")
    if trailing_dot:
        result.append(b"")
    s = b".".join(result)
    if not valid_string_length(s, trailing_dot):
        raise IDNAError("Domain too long")
    return s


def decode(
    s: Union[str, bytes, bytearray],
    strict: bool = False,
    uts46: bool = False,
    std3_rules: bool = False,
) -> str:
    """Decode a whole ASCII domain name to its Unicode form.

    Raises IDNAError on invalid ASCII input or any invalid label.
    """
    try:
        if not isinstance(s, str):
            s = str(s, "ascii")
    except UnicodeDecodeError:
        raise IDNAError("Invalid ASCII in A-label")
    if uts46:
        s = uts46_remap(s, std3_rules, False)
    trailing_dot = False
    result = []
    if not strict:
        labels = _unicode_dots_re.split(s)
    else:
        labels = s.split(".")
    if not labels or labels == [""]:
        raise IDNAError("Empty domain")
    if not labels[-1]:
        # Trailing separator: drop the empty final label, re-add the dot
        # at the end.
        del labels[-1]
        trailing_dot = True
    for label in labels:
        s = ulabel(label)
        if s:
            result.append(s)
        else:
            raise IDNAError("Empty label")
    if trailing_dot:
        result.append("")
    return ".".join(result)
.venv\Lib\site-packages\pip\_vendor\idna\core.py
core.py
Python
13,239
0.95
0.26087
0.022346
react-lib
471
2024-11-20T09:55:47.896884
GPL-3.0
false
e34a706ba83f975803a2489d5252b049
# This file is automatically generated by tools/idna-data\n\n__version__ = "15.1.0"\nscripts = {\n "Greek": (\n 0x37000000374,\n 0x37500000378,\n 0x37A0000037E,\n 0x37F00000380,\n 0x38400000385,\n 0x38600000387,\n 0x3880000038B,\n 0x38C0000038D,\n 0x38E000003A2,\n 0x3A3000003E2,\n 0x3F000000400,\n 0x1D2600001D2B,\n 0x1D5D00001D62,\n 0x1D6600001D6B,\n 0x1DBF00001DC0,\n 0x1F0000001F16,\n 0x1F1800001F1E,\n 0x1F2000001F46,\n 0x1F4800001F4E,\n 0x1F5000001F58,\n 0x1F5900001F5A,\n 0x1F5B00001F5C,\n 0x1F5D00001F5E,\n 0x1F5F00001F7E,\n 0x1F8000001FB5,\n 0x1FB600001FC5,\n 0x1FC600001FD4,\n 0x1FD600001FDC,\n 0x1FDD00001FF0,\n 0x1FF200001FF5,\n 0x1FF600001FFF,\n 0x212600002127,\n 0xAB650000AB66,\n 0x101400001018F,\n 0x101A0000101A1,\n 0x1D2000001D246,\n ),\n "Han": (\n 0x2E8000002E9A,\n 0x2E9B00002EF4,\n 0x2F0000002FD6,\n 0x300500003006,\n 0x300700003008,\n 0x30210000302A,\n 0x30380000303C,\n 0x340000004DC0,\n 0x4E000000A000,\n 0xF9000000FA6E,\n 0xFA700000FADA,\n 0x16FE200016FE4,\n 0x16FF000016FF2,\n 0x200000002A6E0,\n 0x2A7000002B73A,\n 0x2B7400002B81E,\n 0x2B8200002CEA2,\n 0x2CEB00002EBE1,\n 0x2EBF00002EE5E,\n 0x2F8000002FA1E,\n 0x300000003134B,\n 0x31350000323B0,\n ),\n "Hebrew": (\n 0x591000005C8,\n 0x5D0000005EB,\n 0x5EF000005F5,\n 0xFB1D0000FB37,\n 0xFB380000FB3D,\n 0xFB3E0000FB3F,\n 0xFB400000FB42,\n 0xFB430000FB45,\n 0xFB460000FB50,\n ),\n "Hiragana": (\n 0x304100003097,\n 0x309D000030A0,\n 0x1B0010001B120,\n 0x1B1320001B133,\n 0x1B1500001B153,\n 0x1F2000001F201,\n ),\n "Katakana": (\n 0x30A1000030FB,\n 0x30FD00003100,\n 0x31F000003200,\n 0x32D0000032FF,\n 0x330000003358,\n 0xFF660000FF70,\n 0xFF710000FF9E,\n 0x1AFF00001AFF4,\n 0x1AFF50001AFFC,\n 0x1AFFD0001AFFF,\n 0x1B0000001B001,\n 0x1B1200001B123,\n 0x1B1550001B156,\n 0x1B1640001B168,\n ),\n}\njoining_types = {\n 0xAD: 84,\n 0x300: 84,\n 0x301: 84,\n 0x302: 84,\n 0x303: 84,\n 0x304: 84,\n 0x305: 84,\n 0x306: 84,\n 0x307: 84,\n 0x308: 84,\n 0x309: 84,\n 0x30A: 84,\n 0x30B: 84,\n 0x30C: 84,\n 0x30D: 84,\n 0x30E: 84,\n 
0x30F: 84,\n 0x310: 84,\n 0x311: 84,\n 0x312: 84,\n 0x313: 84,\n 0x314: 84,\n 0x315: 84,\n 0x316: 84,\n 0x317: 84,\n 0x318: 84,\n 0x319: 84,\n 0x31A: 84,\n 0x31B: 84,\n 0x31C: 84,\n 0x31D: 84,\n 0x31E: 84,\n 0x31F: 84,\n 0x320: 84,\n 0x321: 84,\n 0x322: 84,\n 0x323: 84,\n 0x324: 84,\n 0x325: 84,\n 0x326: 84,\n 0x327: 84,\n 0x328: 84,\n 0x329: 84,\n 0x32A: 84,\n 0x32B: 84,\n 0x32C: 84,\n 0x32D: 84,\n 0x32E: 84,\n 0x32F: 84,\n 0x330: 84,\n 0x331: 84,\n 0x332: 84,\n 0x333: 84,\n 0x334: 84,\n 0x335: 84,\n 0x336: 84,\n 0x337: 84,\n 0x338: 84,\n 0x339: 84,\n 0x33A: 84,\n 0x33B: 84,\n 0x33C: 84,\n 0x33D: 84,\n 0x33E: 84,\n 0x33F: 84,\n 0x340: 84,\n 0x341: 84,\n 0x342: 84,\n 0x343: 84,\n 0x344: 84,\n 0x345: 84,\n 0x346: 84,\n 0x347: 84,\n 0x348: 84,\n 0x349: 84,\n 0x34A: 84,\n 0x34B: 84,\n 0x34C: 84,\n 0x34D: 84,\n 0x34E: 84,\n 0x34F: 84,\n 0x350: 84,\n 0x351: 84,\n 0x352: 84,\n 0x353: 84,\n 0x354: 84,\n 0x355: 84,\n 0x356: 84,\n 0x357: 84,\n 0x358: 84,\n 0x359: 84,\n 0x35A: 84,\n 0x35B: 84,\n 0x35C: 84,\n 0x35D: 84,\n 0x35E: 84,\n 0x35F: 84,\n 0x360: 84,\n 0x361: 84,\n 0x362: 84,\n 0x363: 84,\n 0x364: 84,\n 0x365: 84,\n 0x366: 84,\n 0x367: 84,\n 0x368: 84,\n 0x369: 84,\n 0x36A: 84,\n 0x36B: 84,\n 0x36C: 84,\n 0x36D: 84,\n 0x36E: 84,\n 0x36F: 84,\n 0x483: 84,\n 0x484: 84,\n 0x485: 84,\n 0x486: 84,\n 0x487: 84,\n 0x488: 84,\n 0x489: 84,\n 0x591: 84,\n 0x592: 84,\n 0x593: 84,\n 0x594: 84,\n 0x595: 84,\n 0x596: 84,\n 0x597: 84,\n 0x598: 84,\n 0x599: 84,\n 0x59A: 84,\n 0x59B: 84,\n 0x59C: 84,\n 0x59D: 84,\n 0x59E: 84,\n 0x59F: 84,\n 0x5A0: 84,\n 0x5A1: 84,\n 0x5A2: 84,\n 0x5A3: 84,\n 0x5A4: 84,\n 0x5A5: 84,\n 0x5A6: 84,\n 0x5A7: 84,\n 0x5A8: 84,\n 0x5A9: 84,\n 0x5AA: 84,\n 0x5AB: 84,\n 0x5AC: 84,\n 0x5AD: 84,\n 0x5AE: 84,\n 0x5AF: 84,\n 0x5B0: 84,\n 0x5B1: 84,\n 0x5B2: 84,\n 0x5B3: 84,\n 0x5B4: 84,\n 0x5B5: 84,\n 0x5B6: 84,\n 0x5B7: 84,\n 0x5B8: 84,\n 0x5B9: 84,\n 0x5BA: 84,\n 0x5BB: 84,\n 0x5BC: 84,\n 0x5BD: 84,\n 0x5BF: 84,\n 0x5C1: 84,\n 0x5C2: 84,\n 0x5C4: 84,\n 0x5C5: 
84,\n 0x5C7: 84,\n 0x610: 84,\n 0x611: 84,\n 0x612: 84,\n 0x613: 84,\n 0x614: 84,\n 0x615: 84,\n 0x616: 84,\n 0x617: 84,\n 0x618: 84,\n 0x619: 84,\n 0x61A: 84,\n 0x61C: 84,\n 0x620: 68,\n 0x622: 82,\n 0x623: 82,\n 0x624: 82,\n 0x625: 82,\n 0x626: 68,\n 0x627: 82,\n 0x628: 68,\n 0x629: 82,\n 0x62A: 68,\n 0x62B: 68,\n 0x62C: 68,\n 0x62D: 68,\n 0x62E: 68,\n 0x62F: 82,\n 0x630: 82,\n 0x631: 82,\n 0x632: 82,\n 0x633: 68,\n 0x634: 68,\n 0x635: 68,\n 0x636: 68,\n 0x637: 68,\n 0x638: 68,\n 0x639: 68,\n 0x63A: 68,\n 0x63B: 68,\n 0x63C: 68,\n 0x63D: 68,\n 0x63E: 68,\n 0x63F: 68,\n 0x640: 67,\n 0x641: 68,\n 0x642: 68,\n 0x643: 68,\n 0x644: 68,\n 0x645: 68,\n 0x646: 68,\n 0x647: 68,\n 0x648: 82,\n 0x649: 68,\n 0x64A: 68,\n 0x64B: 84,\n 0x64C: 84,\n 0x64D: 84,\n 0x64E: 84,\n 0x64F: 84,\n 0x650: 84,\n 0x651: 84,\n 0x652: 84,\n 0x653: 84,\n 0x654: 84,\n 0x655: 84,\n 0x656: 84,\n 0x657: 84,\n 0x658: 84,\n 0x659: 84,\n 0x65A: 84,\n 0x65B: 84,\n 0x65C: 84,\n 0x65D: 84,\n 0x65E: 84,\n 0x65F: 84,\n 0x66E: 68,\n 0x66F: 68,\n 0x670: 84,\n 0x671: 82,\n 0x672: 82,\n 0x673: 82,\n 0x675: 82,\n 0x676: 82,\n 0x677: 82,\n 0x678: 68,\n 0x679: 68,\n 0x67A: 68,\n 0x67B: 68,\n 0x67C: 68,\n 0x67D: 68,\n 0x67E: 68,\n 0x67F: 68,\n 0x680: 68,\n 0x681: 68,\n 0x682: 68,\n 0x683: 68,\n 0x684: 68,\n 0x685: 68,\n 0x686: 68,\n 0x687: 68,\n 0x688: 82,\n 0x689: 82,\n 0x68A: 82,\n 0x68B: 82,\n 0x68C: 82,\n 0x68D: 82,\n 0x68E: 82,\n 0x68F: 82,\n 0x690: 82,\n 0x691: 82,\n 0x692: 82,\n 0x693: 82,\n 0x694: 82,\n 0x695: 82,\n 0x696: 82,\n 0x697: 82,\n 0x698: 82,\n 0x699: 82,\n 0x69A: 68,\n 0x69B: 68,\n 0x69C: 68,\n 0x69D: 68,\n 0x69E: 68,\n 0x69F: 68,\n 0x6A0: 68,\n 0x6A1: 68,\n 0x6A2: 68,\n 0x6A3: 68,\n 0x6A4: 68,\n 0x6A5: 68,\n 0x6A6: 68,\n 0x6A7: 68,\n 0x6A8: 68,\n 0x6A9: 68,\n 0x6AA: 68,\n 0x6AB: 68,\n 0x6AC: 68,\n 0x6AD: 68,\n 0x6AE: 68,\n 0x6AF: 68,\n 0x6B0: 68,\n 0x6B1: 68,\n 0x6B2: 68,\n 0x6B3: 68,\n 0x6B4: 68,\n 0x6B5: 68,\n 0x6B6: 68,\n 0x6B7: 68,\n 0x6B8: 68,\n 0x6B9: 68,\n 0x6BA: 68,\n 0x6BB: 68,\n 
0x6BC: 68,\n 0x6BD: 68,\n 0x6BE: 68,\n 0x6BF: 68,\n 0x6C0: 82,\n 0x6C1: 68,\n 0x6C2: 68,\n 0x6C3: 82,\n 0x6C4: 82,\n 0x6C5: 82,\n 0x6C6: 82,\n 0x6C7: 82,\n 0x6C8: 82,\n 0x6C9: 82,\n 0x6CA: 82,\n 0x6CB: 82,\n 0x6CC: 68,\n 0x6CD: 82,\n 0x6CE: 68,\n 0x6CF: 82,\n 0x6D0: 68,\n 0x6D1: 68,\n 0x6D2: 82,\n 0x6D3: 82,\n 0x6D5: 82,\n 0x6D6: 84,\n 0x6D7: 84,\n 0x6D8: 84,\n 0x6D9: 84,\n 0x6DA: 84,\n 0x6DB: 84,\n 0x6DC: 84,\n 0x6DF: 84,\n 0x6E0: 84,\n 0x6E1: 84,\n 0x6E2: 84,\n 0x6E3: 84,\n 0x6E4: 84,\n 0x6E7: 84,\n 0x6E8: 84,\n 0x6EA: 84,\n 0x6EB: 84,\n 0x6EC: 84,\n 0x6ED: 84,\n 0x6EE: 82,\n 0x6EF: 82,\n 0x6FA: 68,\n 0x6FB: 68,\n 0x6FC: 68,\n 0x6FF: 68,\n 0x70F: 84,\n 0x710: 82,\n 0x711: 84,\n 0x712: 68,\n 0x713: 68,\n 0x714: 68,\n 0x715: 82,\n 0x716: 82,\n 0x717: 82,\n 0x718: 82,\n 0x719: 82,\n 0x71A: 68,\n 0x71B: 68,\n 0x71C: 68,\n 0x71D: 68,\n 0x71E: 82,\n 0x71F: 68,\n 0x720: 68,\n 0x721: 68,\n 0x722: 68,\n 0x723: 68,\n 0x724: 68,\n 0x725: 68,\n 0x726: 68,\n 0x727: 68,\n 0x728: 82,\n 0x729: 68,\n 0x72A: 82,\n 0x72B: 68,\n 0x72C: 82,\n 0x72D: 68,\n 0x72E: 68,\n 0x72F: 82,\n 0x730: 84,\n 0x731: 84,\n 0x732: 84,\n 0x733: 84,\n 0x734: 84,\n 0x735: 84,\n 0x736: 84,\n 0x737: 84,\n 0x738: 84,\n 0x739: 84,\n 0x73A: 84,\n 0x73B: 84,\n 0x73C: 84,\n 0x73D: 84,\n 0x73E: 84,\n 0x73F: 84,\n 0x740: 84,\n 0x741: 84,\n 0x742: 84,\n 0x743: 84,\n 0x744: 84,\n 0x745: 84,\n 0x746: 84,\n 0x747: 84,\n 0x748: 84,\n 0x749: 84,\n 0x74A: 84,\n 0x74D: 82,\n 0x74E: 68,\n 0x74F: 68,\n 0x750: 68,\n 0x751: 68,\n 0x752: 68,\n 0x753: 68,\n 0x754: 68,\n 0x755: 68,\n 0x756: 68,\n 0x757: 68,\n 0x758: 68,\n 0x759: 82,\n 0x75A: 82,\n 0x75B: 82,\n 0x75C: 68,\n 0x75D: 68,\n 0x75E: 68,\n 0x75F: 68,\n 0x760: 68,\n 0x761: 68,\n 0x762: 68,\n 0x763: 68,\n 0x764: 68,\n 0x765: 68,\n 0x766: 68,\n 0x767: 68,\n 0x768: 68,\n 0x769: 68,\n 0x76A: 68,\n 0x76B: 82,\n 0x76C: 82,\n 0x76D: 68,\n 0x76E: 68,\n 0x76F: 68,\n 0x770: 68,\n 0x771: 82,\n 0x772: 68,\n 0x773: 82,\n 0x774: 82,\n 0x775: 68,\n 0x776: 68,\n 0x777: 68,\n 0x778: 
82,\n 0x779: 82,\n 0x77A: 68,\n 0x77B: 68,\n 0x77C: 68,\n 0x77D: 68,\n 0x77E: 68,\n 0x77F: 68,\n 0x7A6: 84,\n 0x7A7: 84,\n 0x7A8: 84,\n 0x7A9: 84,\n 0x7AA: 84,\n 0x7AB: 84,\n 0x7AC: 84,\n 0x7AD: 84,\n 0x7AE: 84,\n 0x7AF: 84,\n 0x7B0: 84,\n 0x7CA: 68,\n 0x7CB: 68,\n 0x7CC: 68,\n 0x7CD: 68,\n 0x7CE: 68,\n 0x7CF: 68,\n 0x7D0: 68,\n 0x7D1: 68,\n 0x7D2: 68,\n 0x7D3: 68,\n 0x7D4: 68,\n 0x7D5: 68,\n 0x7D6: 68,\n 0x7D7: 68,\n 0x7D8: 68,\n 0x7D9: 68,\n 0x7DA: 68,\n 0x7DB: 68,\n 0x7DC: 68,\n 0x7DD: 68,\n 0x7DE: 68,\n 0x7DF: 68,\n 0x7E0: 68,\n 0x7E1: 68,\n 0x7E2: 68,\n 0x7E3: 68,\n 0x7E4: 68,\n 0x7E5: 68,\n 0x7E6: 68,\n 0x7E7: 68,\n 0x7E8: 68,\n 0x7E9: 68,\n 0x7EA: 68,\n 0x7EB: 84,\n 0x7EC: 84,\n 0x7ED: 84,\n 0x7EE: 84,\n 0x7EF: 84,\n 0x7F0: 84,\n 0x7F1: 84,\n 0x7F2: 84,\n 0x7F3: 84,\n 0x7FA: 67,\n 0x7FD: 84,\n 0x816: 84,\n 0x817: 84,\n 0x818: 84,\n 0x819: 84,\n 0x81B: 84,\n 0x81C: 84,\n 0x81D: 84,\n 0x81E: 84,\n 0x81F: 84,\n 0x820: 84,\n 0x821: 84,\n 0x822: 84,\n 0x823: 84,\n 0x825: 84,\n 0x826: 84,\n 0x827: 84,\n 0x829: 84,\n 0x82A: 84,\n 0x82B: 84,\n 0x82C: 84,\n 0x82D: 84,\n 0x840: 82,\n 0x841: 68,\n 0x842: 68,\n 0x843: 68,\n 0x844: 68,\n 0x845: 68,\n 0x846: 82,\n 0x847: 82,\n 0x848: 68,\n 0x849: 82,\n 0x84A: 68,\n 0x84B: 68,\n 0x84C: 68,\n 0x84D: 68,\n 0x84E: 68,\n 0x84F: 68,\n 0x850: 68,\n 0x851: 68,\n 0x852: 68,\n 0x853: 68,\n 0x854: 82,\n 0x855: 68,\n 0x856: 82,\n 0x857: 82,\n 0x858: 82,\n 0x859: 84,\n 0x85A: 84,\n 0x85B: 84,\n 0x860: 68,\n 0x862: 68,\n 0x863: 68,\n 0x864: 68,\n 0x865: 68,\n 0x867: 82,\n 0x868: 68,\n 0x869: 82,\n 0x86A: 82,\n 0x870: 82,\n 0x871: 82,\n 0x872: 82,\n 0x873: 82,\n 0x874: 82,\n 0x875: 82,\n 0x876: 82,\n 0x877: 82,\n 0x878: 82,\n 0x879: 82,\n 0x87A: 82,\n 0x87B: 82,\n 0x87C: 82,\n 0x87D: 82,\n 0x87E: 82,\n 0x87F: 82,\n 0x880: 82,\n 0x881: 82,\n 0x882: 82,\n 0x883: 67,\n 0x884: 67,\n 0x885: 67,\n 0x886: 68,\n 0x889: 68,\n 0x88A: 68,\n 0x88B: 68,\n 0x88C: 68,\n 0x88D: 68,\n 0x88E: 82,\n 0x898: 84,\n 0x899: 84,\n 0x89A: 84,\n 0x89B: 84,\n 
0x89C: 84,\n 0x89D: 84,\n 0x89E: 84,\n 0x89F: 84,\n 0x8A0: 68,\n 0x8A1: 68,\n 0x8A2: 68,\n 0x8A3: 68,\n 0x8A4: 68,\n 0x8A5: 68,\n 0x8A6: 68,\n 0x8A7: 68,\n 0x8A8: 68,\n 0x8A9: 68,\n 0x8AA: 82,\n 0x8AB: 82,\n 0x8AC: 82,\n 0x8AE: 82,\n 0x8AF: 68,\n 0x8B0: 68,\n 0x8B1: 82,\n 0x8B2: 82,\n 0x8B3: 68,\n 0x8B4: 68,\n 0x8B5: 68,\n 0x8B6: 68,\n 0x8B7: 68,\n 0x8B8: 68,\n 0x8B9: 82,\n 0x8BA: 68,\n 0x8BB: 68,\n 0x8BC: 68,\n 0x8BD: 68,\n 0x8BE: 68,\n 0x8BF: 68,\n 0x8C0: 68,\n 0x8C1: 68,\n 0x8C2: 68,\n 0x8C3: 68,\n 0x8C4: 68,\n 0x8C5: 68,\n 0x8C6: 68,\n 0x8C7: 68,\n 0x8C8: 68,\n 0x8CA: 84,\n 0x8CB: 84,\n 0x8CC: 84,\n 0x8CD: 84,\n 0x8CE: 84,\n 0x8CF: 84,\n 0x8D0: 84,\n 0x8D1: 84,\n 0x8D2: 84,\n 0x8D3: 84,\n 0x8D4: 84,\n 0x8D5: 84,\n 0x8D6: 84,\n 0x8D7: 84,\n 0x8D8: 84,\n 0x8D9: 84,\n 0x8DA: 84,\n 0x8DB: 84,\n 0x8DC: 84,\n 0x8DD: 84,\n 0x8DE: 84,\n 0x8DF: 84,\n 0x8E0: 84,\n 0x8E1: 84,\n 0x8E3: 84,\n 0x8E4: 84,\n 0x8E5: 84,\n 0x8E6: 84,\n 0x8E7: 84,\n 0x8E8: 84,\n 0x8E9: 84,\n 0x8EA: 84,\n 0x8EB: 84,\n 0x8EC: 84,\n 0x8ED: 84,\n 0x8EE: 84,\n 0x8EF: 84,\n 0x8F0: 84,\n 0x8F1: 84,\n 0x8F2: 84,\n 0x8F3: 84,\n 0x8F4: 84,\n 0x8F5: 84,\n 0x8F6: 84,\n 0x8F7: 84,\n 0x8F8: 84,\n 0x8F9: 84,\n 0x8FA: 84,\n 0x8FB: 84,\n 0x8FC: 84,\n 0x8FD: 84,\n 0x8FE: 84,\n 0x8FF: 84,\n 0x900: 84,\n 0x901: 84,\n 0x902: 84,\n 0x93A: 84,\n 0x93C: 84,\n 0x941: 84,\n 0x942: 84,\n 0x943: 84,\n 0x944: 84,\n 0x945: 84,\n 0x946: 84,\n 0x947: 84,\n 0x948: 84,\n 0x94D: 84,\n 0x951: 84,\n 0x952: 84,\n 0x953: 84,\n 0x954: 84,\n 0x955: 84,\n 0x956: 84,\n 0x957: 84,\n 0x962: 84,\n 0x963: 84,\n 0x981: 84,\n 0x9BC: 84,\n 0x9C1: 84,\n 0x9C2: 84,\n 0x9C3: 84,\n 0x9C4: 84,\n 0x9CD: 84,\n 0x9E2: 84,\n 0x9E3: 84,\n 0x9FE: 84,\n 0xA01: 84,\n 0xA02: 84,\n 0xA3C: 84,\n 0xA41: 84,\n 0xA42: 84,\n 0xA47: 84,\n 0xA48: 84,\n 0xA4B: 84,\n 0xA4C: 84,\n 0xA4D: 84,\n 0xA51: 84,\n 0xA70: 84,\n 0xA71: 84,\n 0xA75: 84,\n 0xA81: 84,\n 0xA82: 84,\n 0xABC: 84,\n 0xAC1: 84,\n 0xAC2: 84,\n 0xAC3: 84,\n 0xAC4: 84,\n 0xAC5: 84,\n 0xAC7: 84,\n 0xAC8: 
84,\n 0xACD: 84,\n 0xAE2: 84,\n 0xAE3: 84,\n 0xAFA: 84,\n 0xAFB: 84,\n 0xAFC: 84,\n 0xAFD: 84,\n 0xAFE: 84,\n 0xAFF: 84,\n 0xB01: 84,\n 0xB3C: 84,\n 0xB3F: 84,\n 0xB41: 84,\n 0xB42: 84,\n 0xB43: 84,\n 0xB44: 84,\n 0xB4D: 84,\n 0xB55: 84,\n 0xB56: 84,\n 0xB62: 84,\n 0xB63: 84,\n 0xB82: 84,\n 0xBC0: 84,\n 0xBCD: 84,\n 0xC00: 84,\n 0xC04: 84,\n 0xC3C: 84,\n 0xC3E: 84,\n 0xC3F: 84,\n 0xC40: 84,\n 0xC46: 84,\n 0xC47: 84,\n 0xC48: 84,\n 0xC4A: 84,\n 0xC4B: 84,\n 0xC4C: 84,\n 0xC4D: 84,\n 0xC55: 84,\n 0xC56: 84,\n 0xC62: 84,\n 0xC63: 84,\n 0xC81: 84,\n 0xCBC: 84,\n 0xCBF: 84,\n 0xCC6: 84,\n 0xCCC: 84,\n 0xCCD: 84,\n 0xCE2: 84,\n 0xCE3: 84,\n 0xD00: 84,\n 0xD01: 84,\n 0xD3B: 84,\n 0xD3C: 84,\n 0xD41: 84,\n 0xD42: 84,\n 0xD43: 84,\n 0xD44: 84,\n 0xD4D: 84,\n 0xD62: 84,\n 0xD63: 84,\n 0xD81: 84,\n 0xDCA: 84,\n 0xDD2: 84,\n 0xDD3: 84,\n 0xDD4: 84,\n 0xDD6: 84,\n 0xE31: 84,\n 0xE34: 84,\n 0xE35: 84,\n 0xE36: 84,\n 0xE37: 84,\n 0xE38: 84,\n 0xE39: 84,\n 0xE3A: 84,\n 0xE47: 84,\n 0xE48: 84,\n 0xE49: 84,\n 0xE4A: 84,\n 0xE4B: 84,\n 0xE4C: 84,\n 0xE4D: 84,\n 0xE4E: 84,\n 0xEB1: 84,\n 0xEB4: 84,\n 0xEB5: 84,\n 0xEB6: 84,\n 0xEB7: 84,\n 0xEB8: 84,\n 0xEB9: 84,\n 0xEBA: 84,\n 0xEBB: 84,\n 0xEBC: 84,\n 0xEC8: 84,\n 0xEC9: 84,\n 0xECA: 84,\n 0xECB: 84,\n 0xECC: 84,\n 0xECD: 84,\n 0xECE: 84,\n 0xF18: 84,\n 0xF19: 84,\n 0xF35: 84,\n 0xF37: 84,\n 0xF39: 84,\n 0xF71: 84,\n 0xF72: 84,\n 0xF73: 84,\n 0xF74: 84,\n 0xF75: 84,\n 0xF76: 84,\n 0xF77: 84,\n 0xF78: 84,\n 0xF79: 84,\n 0xF7A: 84,\n 0xF7B: 84,\n 0xF7C: 84,\n 0xF7D: 84,\n 0xF7E: 84,\n 0xF80: 84,\n 0xF81: 84,\n 0xF82: 84,\n 0xF83: 84,\n 0xF84: 84,\n 0xF86: 84,\n 0xF87: 84,\n 0xF8D: 84,\n 0xF8E: 84,\n 0xF8F: 84,\n 0xF90: 84,\n 0xF91: 84,\n 0xF92: 84,\n 0xF93: 84,\n 0xF94: 84,\n 0xF95: 84,\n 0xF96: 84,\n 0xF97: 84,\n 0xF99: 84,\n 0xF9A: 84,\n 0xF9B: 84,\n 0xF9C: 84,\n 0xF9D: 84,\n 0xF9E: 84,\n 0xF9F: 84,\n 0xFA0: 84,\n 0xFA1: 84,\n 0xFA2: 84,\n 0xFA3: 84,\n 0xFA4: 84,\n 0xFA5: 84,\n 0xFA6: 84,\n 0xFA7: 84,\n 0xFA8: 84,\n 0xFA9: 84,\n 
0xFAA: 84,\n 0xFAB: 84,\n 0xFAC: 84,\n 0xFAD: 84,\n 0xFAE: 84,\n 0xFAF: 84,\n 0xFB0: 84,\n 0xFB1: 84,\n 0xFB2: 84,\n 0xFB3: 84,\n 0xFB4: 84,\n 0xFB5: 84,\n 0xFB6: 84,\n 0xFB7: 84,\n 0xFB8: 84,\n 0xFB9: 84,\n 0xFBA: 84,\n 0xFBB: 84,\n 0xFBC: 84,\n 0xFC6: 84,\n 0x102D: 84,\n 0x102E: 84,\n 0x102F: 84,\n 0x1030: 84,\n 0x1032: 84,\n 0x1033: 84,\n 0x1034: 84,\n 0x1035: 84,\n 0x1036: 84,\n 0x1037: 84,\n 0x1039: 84,\n 0x103A: 84,\n 0x103D: 84,\n 0x103E: 84,\n 0x1058: 84,\n 0x1059: 84,\n 0x105E: 84,\n 0x105F: 84,\n 0x1060: 84,\n 0x1071: 84,\n 0x1072: 84,\n 0x1073: 84,\n 0x1074: 84,\n 0x1082: 84,\n 0x1085: 84,\n 0x1086: 84,\n 0x108D: 84,\n 0x109D: 84,\n 0x135D: 84,\n 0x135E: 84,\n 0x135F: 84,\n 0x1712: 84,\n 0x1713: 84,\n 0x1714: 84,\n 0x1732: 84,\n 0x1733: 84,\n 0x1752: 84,\n 0x1753: 84,\n 0x1772: 84,\n 0x1773: 84,\n 0x17B4: 84,\n 0x17B5: 84,\n 0x17B7: 84,\n 0x17B8: 84,\n 0x17B9: 84,\n 0x17BA: 84,\n 0x17BB: 84,\n 0x17BC: 84,\n 0x17BD: 84,\n 0x17C6: 84,\n 0x17C9: 84,\n 0x17CA: 84,\n 0x17CB: 84,\n 0x17CC: 84,\n 0x17CD: 84,\n 0x17CE: 84,\n 0x17CF: 84,\n 0x17D0: 84,\n 0x17D1: 84,\n 0x17D2: 84,\n 0x17D3: 84,\n 0x17DD: 84,\n 0x1807: 68,\n 0x180A: 67,\n 0x180B: 84,\n 0x180C: 84,\n 0x180D: 84,\n 0x180F: 84,\n 0x1820: 68,\n 0x1821: 68,\n 0x1822: 68,\n 0x1823: 68,\n 0x1824: 68,\n 0x1825: 68,\n 0x1826: 68,\n 0x1827: 68,\n 0x1828: 68,\n 0x1829: 68,\n 0x182A: 68,\n 0x182B: 68,\n 0x182C: 68,\n 0x182D: 68,\n 0x182E: 68,\n 0x182F: 68,\n 0x1830: 68,\n 0x1831: 68,\n 0x1832: 68,\n 0x1833: 68,\n 0x1834: 68,\n 0x1835: 68,\n 0x1836: 68,\n 0x1837: 68,\n 0x1838: 68,\n 0x1839: 68,\n 0x183A: 68,\n 0x183B: 68,\n 0x183C: 68,\n 0x183D: 68,\n 0x183E: 68,\n 0x183F: 68,\n 0x1840: 68,\n 0x1841: 68,\n 0x1842: 68,\n 0x1843: 68,\n 0x1844: 68,\n 0x1845: 68,\n 0x1846: 68,\n 0x1847: 68,\n 0x1848: 68,\n 0x1849: 68,\n 0x184A: 68,\n 0x184B: 68,\n 0x184C: 68,\n 0x184D: 68,\n 0x184E: 68,\n 0x184F: 68,\n 0x1850: 68,\n 0x1851: 68,\n 0x1852: 68,\n 0x1853: 68,\n 0x1854: 68,\n 0x1855: 68,\n 0x1856: 68,\n 0x1857: 68,\n 
0x1858: 68,\n 0x1859: 68,\n 0x185A: 68,\n 0x185B: 68,\n 0x185C: 68,\n 0x185D: 68,\n 0x185E: 68,\n 0x185F: 68,\n 0x1860: 68,\n 0x1861: 68,\n 0x1862: 68,\n 0x1863: 68,\n 0x1864: 68,\n 0x1865: 68,\n 0x1866: 68,\n 0x1867: 68,\n 0x1868: 68,\n 0x1869: 68,\n 0x186A: 68,\n 0x186B: 68,\n 0x186C: 68,\n 0x186D: 68,\n 0x186E: 68,\n 0x186F: 68,\n 0x1870: 68,\n 0x1871: 68,\n 0x1872: 68,\n 0x1873: 68,\n 0x1874: 68,\n 0x1875: 68,\n 0x1876: 68,\n 0x1877: 68,\n 0x1878: 68,\n 0x1885: 84,\n 0x1886: 84,\n 0x1887: 68,\n 0x1888: 68,\n 0x1889: 68,\n 0x188A: 68,\n 0x188B: 68,\n 0x188C: 68,\n 0x188D: 68,\n 0x188E: 68,\n 0x188F: 68,\n 0x1890: 68,\n 0x1891: 68,\n 0x1892: 68,\n 0x1893: 68,\n 0x1894: 68,\n 0x1895: 68,\n 0x1896: 68,\n 0x1897: 68,\n 0x1898: 68,\n 0x1899: 68,\n 0x189A: 68,\n 0x189B: 68,\n 0x189C: 68,\n 0x189D: 68,\n 0x189E: 68,\n 0x189F: 68,\n 0x18A0: 68,\n 0x18A1: 68,\n 0x18A2: 68,\n 0x18A3: 68,\n 0x18A4: 68,\n 0x18A5: 68,\n 0x18A6: 68,\n 0x18A7: 68,\n 0x18A8: 68,\n 0x18A9: 84,\n 0x18AA: 68,\n 0x1920: 84,\n 0x1921: 84,\n 0x1922: 84,\n 0x1927: 84,\n 0x1928: 84,\n 0x1932: 84,\n 0x1939: 84,\n 0x193A: 84,\n 0x193B: 84,\n 0x1A17: 84,\n 0x1A18: 84,\n 0x1A1B: 84,\n 0x1A56: 84,\n 0x1A58: 84,\n 0x1A59: 84,\n 0x1A5A: 84,\n 0x1A5B: 84,\n 0x1A5C: 84,\n 0x1A5D: 84,\n 0x1A5E: 84,\n 0x1A60: 84,\n 0x1A62: 84,\n 0x1A65: 84,\n 0x1A66: 84,\n 0x1A67: 84,\n 0x1A68: 84,\n 0x1A69: 84,\n 0x1A6A: 84,\n 0x1A6B: 84,\n 0x1A6C: 84,\n 0x1A73: 84,\n 0x1A74: 84,\n 0x1A75: 84,\n 0x1A76: 84,\n 0x1A77: 84,\n 0x1A78: 84,\n 0x1A79: 84,\n 0x1A7A: 84,\n 0x1A7B: 84,\n 0x1A7C: 84,\n 0x1A7F: 84,\n 0x1AB0: 84,\n 0x1AB1: 84,\n 0x1AB2: 84,\n 0x1AB3: 84,\n 0x1AB4: 84,\n 0x1AB5: 84,\n 0x1AB6: 84,\n 0x1AB7: 84,\n 0x1AB8: 84,\n 0x1AB9: 84,\n 0x1ABA: 84,\n 0x1ABB: 84,\n 0x1ABC: 84,\n 0x1ABD: 84,\n 0x1ABE: 84,\n 0x1ABF: 84,\n 0x1AC0: 84,\n 0x1AC1: 84,\n 0x1AC2: 84,\n 0x1AC3: 84,\n 0x1AC4: 84,\n 0x1AC5: 84,\n 0x1AC6: 84,\n 0x1AC7: 84,\n 0x1AC8: 84,\n 0x1AC9: 84,\n 0x1ACA: 84,\n 0x1ACB: 84,\n 0x1ACC: 84,\n 0x1ACD: 84,\n 0x1ACE: 
84,\n 0x1B00: 84,\n 0x1B01: 84,\n 0x1B02: 84,\n 0x1B03: 84,\n 0x1B34: 84,\n 0x1B36: 84,\n 0x1B37: 84,\n 0x1B38: 84,\n 0x1B39: 84,\n 0x1B3A: 84,\n 0x1B3C: 84,\n 0x1B42: 84,\n 0x1B6B: 84,\n 0x1B6C: 84,\n 0x1B6D: 84,\n 0x1B6E: 84,\n 0x1B6F: 84,\n 0x1B70: 84,\n 0x1B71: 84,\n 0x1B72: 84,\n 0x1B73: 84,\n 0x1B80: 84,\n 0x1B81: 84,\n 0x1BA2: 84,\n 0x1BA3: 84,\n 0x1BA4: 84,\n 0x1BA5: 84,\n 0x1BA8: 84,\n 0x1BA9: 84,\n 0x1BAB: 84,\n 0x1BAC: 84,\n 0x1BAD: 84,\n 0x1BE6: 84,\n 0x1BE8: 84,\n 0x1BE9: 84,\n 0x1BED: 84,\n 0x1BEF: 84,\n 0x1BF0: 84,\n 0x1BF1: 84,\n 0x1C2C: 84,\n 0x1C2D: 84,\n 0x1C2E: 84,\n 0x1C2F: 84,\n 0x1C30: 84,\n 0x1C31: 84,\n 0x1C32: 84,\n 0x1C33: 84,\n 0x1C36: 84,\n 0x1C37: 84,\n 0x1CD0: 84,\n 0x1CD1: 84,\n 0x1CD2: 84,\n 0x1CD4: 84,\n 0x1CD5: 84,\n 0x1CD6: 84,\n 0x1CD7: 84,\n 0x1CD8: 84,\n 0x1CD9: 84,\n 0x1CDA: 84,\n 0x1CDB: 84,\n 0x1CDC: 84,\n 0x1CDD: 84,\n 0x1CDE: 84,\n 0x1CDF: 84,\n 0x1CE0: 84,\n 0x1CE2: 84,\n 0x1CE3: 84,\n 0x1CE4: 84,\n 0x1CE5: 84,\n 0x1CE6: 84,\n 0x1CE7: 84,\n 0x1CE8: 84,\n 0x1CED: 84,\n 0x1CF4: 84,\n 0x1CF8: 84,\n 0x1CF9: 84,\n 0x1DC0: 84,\n 0x1DC1: 84,\n 0x1DC2: 84,\n 0x1DC3: 84,\n 0x1DC4: 84,\n 0x1DC5: 84,\n 0x1DC6: 84,\n 0x1DC7: 84,\n 0x1DC8: 84,\n 0x1DC9: 84,\n 0x1DCA: 84,\n 0x1DCB: 84,\n 0x1DCC: 84,\n 0x1DCD: 84,\n 0x1DCE: 84,\n 0x1DCF: 84,\n 0x1DD0: 84,\n 0x1DD1: 84,\n 0x1DD2: 84,\n 0x1DD3: 84,\n 0x1DD4: 84,\n 0x1DD5: 84,\n 0x1DD6: 84,\n 0x1DD7: 84,\n 0x1DD8: 84,\n 0x1DD9: 84,\n 0x1DDA: 84,\n 0x1DDB: 84,\n 0x1DDC: 84,\n 0x1DDD: 84,\n 0x1DDE: 84,\n 0x1DDF: 84,\n 0x1DE0: 84,\n 0x1DE1: 84,\n 0x1DE2: 84,\n 0x1DE3: 84,\n 0x1DE4: 84,\n 0x1DE5: 84,\n 0x1DE6: 84,\n 0x1DE7: 84,\n 0x1DE8: 84,\n 0x1DE9: 84,\n 0x1DEA: 84,\n 0x1DEB: 84,\n 0x1DEC: 84,\n 0x1DED: 84,\n 0x1DEE: 84,\n 0x1DEF: 84,\n 0x1DF0: 84,\n 0x1DF1: 84,\n 0x1DF2: 84,\n 0x1DF3: 84,\n 0x1DF4: 84,\n 0x1DF5: 84,\n 0x1DF6: 84,\n 0x1DF7: 84,\n 0x1DF8: 84,\n 0x1DF9: 84,\n 0x1DFA: 84,\n 0x1DFB: 84,\n 0x1DFC: 84,\n 0x1DFD: 84,\n 0x1DFE: 84,\n 0x1DFF: 84,\n 0x200B: 84,\n 0x200D: 67,\n 
0x200E: 84,\n 0x200F: 84,\n 0x202A: 84,\n 0x202B: 84,\n 0x202C: 84,\n 0x202D: 84,\n 0x202E: 84,\n 0x2060: 84,\n 0x2061: 84,\n 0x2062: 84,\n 0x2063: 84,\n 0x2064: 84,\n 0x206A: 84,\n 0x206B: 84,\n 0x206C: 84,\n 0x206D: 84,\n 0x206E: 84,\n 0x206F: 84,\n 0x20D0: 84,\n 0x20D1: 84,\n 0x20D2: 84,\n 0x20D3: 84,\n 0x20D4: 84,\n 0x20D5: 84,\n 0x20D6: 84,\n 0x20D7: 84,\n 0x20D8: 84,\n 0x20D9: 84,\n 0x20DA: 84,\n 0x20DB: 84,\n 0x20DC: 84,\n 0x20DD: 84,\n 0x20DE: 84,\n 0x20DF: 84,\n 0x20E0: 84,\n 0x20E1: 84,\n 0x20E2: 84,\n 0x20E3: 84,\n 0x20E4: 84,\n 0x20E5: 84,\n 0x20E6: 84,\n 0x20E7: 84,\n 0x20E8: 84,\n 0x20E9: 84,\n 0x20EA: 84,\n 0x20EB: 84,\n 0x20EC: 84,\n 0x20ED: 84,\n 0x20EE: 84,\n 0x20EF: 84,\n 0x20F0: 84,\n 0x2CEF: 84,\n 0x2CF0: 84,\n 0x2CF1: 84,\n 0x2D7F: 84,\n 0x2DE0: 84,\n 0x2DE1: 84,\n 0x2DE2: 84,\n 0x2DE3: 84,\n 0x2DE4: 84,\n 0x2DE5: 84,\n 0x2DE6: 84,\n 0x2DE7: 84,\n 0x2DE8: 84,\n 0x2DE9: 84,\n 0x2DEA: 84,\n 0x2DEB: 84,\n 0x2DEC: 84,\n 0x2DED: 84,\n 0x2DEE: 84,\n 0x2DEF: 84,\n 0x2DF0: 84,\n 0x2DF1: 84,\n 0x2DF2: 84,\n 0x2DF3: 84,\n 0x2DF4: 84,\n 0x2DF5: 84,\n 0x2DF6: 84,\n 0x2DF7: 84,\n 0x2DF8: 84,\n 0x2DF9: 84,\n 0x2DFA: 84,\n 0x2DFB: 84,\n 0x2DFC: 84,\n 0x2DFD: 84,\n 0x2DFE: 84,\n 0x2DFF: 84,\n 0x302A: 84,\n 0x302B: 84,\n 0x302C: 84,\n 0x302D: 84,\n 0x3099: 84,\n 0x309A: 84,\n 0xA66F: 84,\n 0xA670: 84,\n 0xA671: 84,\n 0xA672: 84,\n 0xA674: 84,\n 0xA675: 84,\n 0xA676: 84,\n 0xA677: 84,\n 0xA678: 84,\n 0xA679: 84,\n 0xA67A: 84,\n 0xA67B: 84,\n 0xA67C: 84,\n 0xA67D: 84,\n 0xA69E: 84,\n 0xA69F: 84,\n 0xA6F0: 84,\n 0xA6F1: 84,\n 0xA802: 84,\n 0xA806: 84,\n 0xA80B: 84,\n 0xA825: 84,\n 0xA826: 84,\n 0xA82C: 84,\n 0xA840: 68,\n 0xA841: 68,\n 0xA842: 68,\n 0xA843: 68,\n 0xA844: 68,\n 0xA845: 68,\n 0xA846: 68,\n 0xA847: 68,\n 0xA848: 68,\n 0xA849: 68,\n 0xA84A: 68,\n 0xA84B: 68,\n 0xA84C: 68,\n 0xA84D: 68,\n 0xA84E: 68,\n 0xA84F: 68,\n 0xA850: 68,\n 0xA851: 68,\n 0xA852: 68,\n 0xA853: 68,\n 0xA854: 68,\n 0xA855: 68,\n 0xA856: 68,\n 0xA857: 68,\n 0xA858: 68,\n 0xA859: 
68,\n 0xA85A: 68,\n 0xA85B: 68,\n 0xA85C: 68,\n 0xA85D: 68,\n 0xA85E: 68,\n 0xA85F: 68,\n 0xA860: 68,\n 0xA861: 68,\n 0xA862: 68,\n 0xA863: 68,\n 0xA864: 68,\n 0xA865: 68,\n 0xA866: 68,\n 0xA867: 68,\n 0xA868: 68,\n 0xA869: 68,\n 0xA86A: 68,\n 0xA86B: 68,\n 0xA86C: 68,\n 0xA86D: 68,\n 0xA86E: 68,\n 0xA86F: 68,\n 0xA870: 68,\n 0xA871: 68,\n 0xA872: 76,\n 0xA8C4: 84,\n 0xA8C5: 84,\n 0xA8E0: 84,\n 0xA8E1: 84,\n 0xA8E2: 84,\n 0xA8E3: 84,\n 0xA8E4: 84,\n 0xA8E5: 84,\n 0xA8E6: 84,\n 0xA8E7: 84,\n 0xA8E8: 84,\n 0xA8E9: 84,\n 0xA8EA: 84,\n 0xA8EB: 84,\n 0xA8EC: 84,\n 0xA8ED: 84,\n 0xA8EE: 84,\n 0xA8EF: 84,\n 0xA8F0: 84,\n 0xA8F1: 84,\n 0xA8FF: 84,\n 0xA926: 84,\n 0xA927: 84,\n 0xA928: 84,\n 0xA929: 84,\n 0xA92A: 84,\n 0xA92B: 84,\n 0xA92C: 84,\n 0xA92D: 84,\n 0xA947: 84,\n 0xA948: 84,\n 0xA949: 84,\n 0xA94A: 84,\n 0xA94B: 84,\n 0xA94C: 84,\n 0xA94D: 84,\n 0xA94E: 84,\n 0xA94F: 84,\n 0xA950: 84,\n 0xA951: 84,\n 0xA980: 84,\n 0xA981: 84,\n 0xA982: 84,\n 0xA9B3: 84,\n 0xA9B6: 84,\n 0xA9B7: 84,\n 0xA9B8: 84,\n 0xA9B9: 84,\n 0xA9BC: 84,\n 0xA9BD: 84,\n 0xA9E5: 84,\n 0xAA29: 84,\n 0xAA2A: 84,\n 0xAA2B: 84,\n 0xAA2C: 84,\n 0xAA2D: 84,\n 0xAA2E: 84,\n 0xAA31: 84,\n 0xAA32: 84,\n 0xAA35: 84,\n 0xAA36: 84,\n 0xAA43: 84,\n 0xAA4C: 84,\n 0xAA7C: 84,\n 0xAAB0: 84,\n 0xAAB2: 84,\n 0xAAB3: 84,\n 0xAAB4: 84,\n 0xAAB7: 84,\n 0xAAB8: 84,\n 0xAABE: 84,\n 0xAABF: 84,\n 0xAAC1: 84,\n 0xAAEC: 84,\n 0xAAED: 84,\n 0xAAF6: 84,\n 0xABE5: 84,\n 0xABE8: 84,\n 0xABED: 84,\n 0xFB1E: 84,\n 0xFE00: 84,\n 0xFE01: 84,\n 0xFE02: 84,\n 0xFE03: 84,\n 0xFE04: 84,\n 0xFE05: 84,\n 0xFE06: 84,\n 0xFE07: 84,\n 0xFE08: 84,\n 0xFE09: 84,\n 0xFE0A: 84,\n 0xFE0B: 84,\n 0xFE0C: 84,\n 0xFE0D: 84,\n 0xFE0E: 84,\n 0xFE0F: 84,\n 0xFE20: 84,\n 0xFE21: 84,\n 0xFE22: 84,\n 0xFE23: 84,\n 0xFE24: 84,\n 0xFE25: 84,\n 0xFE26: 84,\n 0xFE27: 84,\n 0xFE28: 84,\n 0xFE29: 84,\n 0xFE2A: 84,\n 0xFE2B: 84,\n 0xFE2C: 84,\n 0xFE2D: 84,\n 0xFE2E: 84,\n 0xFE2F: 84,\n 0xFEFF: 84,\n 0xFFF9: 84,\n 0xFFFA: 84,\n 0xFFFB: 84,\n 0x101FD: 84,\n 
0x102E0: 84,\n 0x10376: 84,\n 0x10377: 84,\n 0x10378: 84,\n 0x10379: 84,\n 0x1037A: 84,\n 0x10A01: 84,\n 0x10A02: 84,\n 0x10A03: 84,\n 0x10A05: 84,\n 0x10A06: 84,\n 0x10A0C: 84,\n 0x10A0D: 84,\n 0x10A0E: 84,\n 0x10A0F: 84,\n 0x10A38: 84,\n 0x10A39: 84,\n 0x10A3A: 84,\n 0x10A3F: 84,\n 0x10AC0: 68,\n 0x10AC1: 68,\n 0x10AC2: 68,\n 0x10AC3: 68,\n 0x10AC4: 68,\n 0x10AC5: 82,\n 0x10AC7: 82,\n 0x10AC9: 82,\n 0x10ACA: 82,\n 0x10ACD: 76,\n 0x10ACE: 82,\n 0x10ACF: 82,\n 0x10AD0: 82,\n 0x10AD1: 82,\n 0x10AD2: 82,\n 0x10AD3: 68,\n 0x10AD4: 68,\n 0x10AD5: 68,\n 0x10AD6: 68,\n 0x10AD7: 76,\n 0x10AD8: 68,\n 0x10AD9: 68,\n 0x10ADA: 68,\n 0x10ADB: 68,\n 0x10ADC: 68,\n 0x10ADD: 82,\n 0x10ADE: 68,\n 0x10ADF: 68,\n 0x10AE0: 68,\n 0x10AE1: 82,\n 0x10AE4: 82,\n 0x10AE5: 84,\n 0x10AE6: 84,\n 0x10AEB: 68,\n 0x10AEC: 68,\n 0x10AED: 68,\n 0x10AEE: 68,\n 0x10AEF: 82,\n 0x10B80: 68,\n 0x10B81: 82,\n 0x10B82: 68,\n 0x10B83: 82,\n 0x10B84: 82,\n 0x10B85: 82,\n 0x10B86: 68,\n 0x10B87: 68,\n 0x10B88: 68,\n 0x10B89: 82,\n 0x10B8A: 68,\n 0x10B8B: 68,\n 0x10B8C: 82,\n 0x10B8D: 68,\n 0x10B8E: 82,\n 0x10B8F: 82,\n 0x10B90: 68,\n 0x10B91: 82,\n 0x10BA9: 82,\n 0x10BAA: 82,\n 0x10BAB: 82,\n 0x10BAC: 82,\n 0x10BAD: 68,\n 0x10BAE: 68,\n 0x10D00: 76,\n 0x10D01: 68,\n 0x10D02: 68,\n 0x10D03: 68,\n 0x10D04: 68,\n 0x10D05: 68,\n 0x10D06: 68,\n 0x10D07: 68,\n 0x10D08: 68,\n 0x10D09: 68,\n 0x10D0A: 68,\n 0x10D0B: 68,\n 0x10D0C: 68,\n 0x10D0D: 68,\n 0x10D0E: 68,\n 0x10D0F: 68,\n 0x10D10: 68,\n 0x10D11: 68,\n 0x10D12: 68,\n 0x10D13: 68,\n 0x10D14: 68,\n 0x10D15: 68,\n 0x10D16: 68,\n 0x10D17: 68,\n 0x10D18: 68,\n 0x10D19: 68,\n 0x10D1A: 68,\n 0x10D1B: 68,\n 0x10D1C: 68,\n 0x10D1D: 68,\n 0x10D1E: 68,\n 0x10D1F: 68,\n 0x10D20: 68,\n 0x10D21: 68,\n 0x10D22: 82,\n 0x10D23: 68,\n 0x10D24: 84,\n 0x10D25: 84,\n 0x10D26: 84,\n 0x10D27: 84,\n 0x10EAB: 84,\n 0x10EAC: 84,\n 0x10EFD: 84,\n 0x10EFE: 84,\n 0x10EFF: 84,\n 0x10F30: 68,\n 0x10F31: 68,\n 0x10F32: 68,\n 0x10F33: 82,\n 0x10F34: 68,\n 0x10F35: 68,\n 0x10F36: 68,\n 
0x10F37: 68,\n 0x10F38: 68,\n 0x10F39: 68,\n 0x10F3A: 68,\n 0x10F3B: 68,\n 0x10F3C: 68,\n 0x10F3D: 68,\n 0x10F3E: 68,\n 0x10F3F: 68,\n 0x10F40: 68,\n 0x10F41: 68,\n 0x10F42: 68,\n 0x10F43: 68,\n 0x10F44: 68,\n 0x10F46: 84,\n 0x10F47: 84,\n 0x10F48: 84,\n 0x10F49: 84,\n 0x10F4A: 84,\n 0x10F4B: 84,\n 0x10F4C: 84,\n 0x10F4D: 84,\n 0x10F4E: 84,\n 0x10F4F: 84,\n 0x10F50: 84,\n 0x10F51: 68,\n 0x10F52: 68,\n 0x10F53: 68,\n 0x10F54: 82,\n 0x10F70: 68,\n 0x10F71: 68,\n 0x10F72: 68,\n 0x10F73: 68,\n 0x10F74: 82,\n 0x10F75: 82,\n 0x10F76: 68,\n 0x10F77: 68,\n 0x10F78: 68,\n 0x10F79: 68,\n 0x10F7A: 68,\n 0x10F7B: 68,\n 0x10F7C: 68,\n 0x10F7D: 68,\n 0x10F7E: 68,\n 0x10F7F: 68,\n 0x10F80: 68,\n 0x10F81: 68,\n 0x10F82: 84,\n 0x10F83: 84,\n 0x10F84: 84,\n 0x10F85: 84,\n 0x10FB0: 68,\n 0x10FB2: 68,\n 0x10FB3: 68,\n 0x10FB4: 82,\n 0x10FB5: 82,\n 0x10FB6: 82,\n 0x10FB8: 68,\n 0x10FB9: 82,\n 0x10FBA: 82,\n 0x10FBB: 68,\n 0x10FBC: 68,\n 0x10FBD: 82,\n 0x10FBE: 68,\n 0x10FBF: 68,\n 0x10FC1: 68,\n 0x10FC2: 82,\n 0x10FC3: 82,\n 0x10FC4: 68,\n 0x10FC9: 82,\n 0x10FCA: 68,\n 0x10FCB: 76,\n 0x11001: 84,\n 0x11038: 84,\n 0x11039: 84,\n 0x1103A: 84,\n 0x1103B: 84,\n 0x1103C: 84,\n 0x1103D: 84,\n 0x1103E: 84,\n 0x1103F: 84,\n 0x11040: 84,\n 0x11041: 84,\n 0x11042: 84,\n 0x11043: 84,\n 0x11044: 84,\n 0x11045: 84,\n 0x11046: 84,\n 0x11070: 84,\n 0x11073: 84,\n 0x11074: 84,\n 0x1107F: 84,\n 0x11080: 84,\n 0x11081: 84,\n 0x110B3: 84,\n 0x110B4: 84,\n 0x110B5: 84,\n 0x110B6: 84,\n 0x110B9: 84,\n 0x110BA: 84,\n 0x110C2: 84,\n 0x11100: 84,\n 0x11101: 84,\n 0x11102: 84,\n 0x11127: 84,\n 0x11128: 84,\n 0x11129: 84,\n 0x1112A: 84,\n 0x1112B: 84,\n 0x1112D: 84,\n 0x1112E: 84,\n 0x1112F: 84,\n 0x11130: 84,\n 0x11131: 84,\n 0x11132: 84,\n 0x11133: 84,\n 0x11134: 84,\n 0x11173: 84,\n 0x11180: 84,\n 0x11181: 84,\n 0x111B6: 84,\n 0x111B7: 84,\n 0x111B8: 84,\n 0x111B9: 84,\n 0x111BA: 84,\n 0x111BB: 84,\n 0x111BC: 84,\n 0x111BD: 84,\n 0x111BE: 84,\n 0x111C9: 84,\n 0x111CA: 84,\n 0x111CB: 84,\n 0x111CC: 84,\n 
0x111CF: 84,\n 0x1122F: 84,\n 0x11230: 84,\n 0x11231: 84,\n 0x11234: 84,\n 0x11236: 84,\n 0x11237: 84,\n 0x1123E: 84,\n 0x11241: 84,\n 0x112DF: 84,\n 0x112E3: 84,\n 0x112E4: 84,\n 0x112E5: 84,\n 0x112E6: 84,\n 0x112E7: 84,\n 0x112E8: 84,\n 0x112E9: 84,\n 0x112EA: 84,\n 0x11300: 84,\n 0x11301: 84,\n 0x1133B: 84,\n 0x1133C: 84,\n 0x11340: 84,\n 0x11366: 84,\n 0x11367: 84,\n 0x11368: 84,\n 0x11369: 84,\n 0x1136A: 84,\n 0x1136B: 84,\n 0x1136C: 84,\n 0x11370: 84,\n 0x11371: 84,\n 0x11372: 84,\n 0x11373: 84,\n 0x11374: 84,\n 0x11438: 84,\n 0x11439: 84,\n 0x1143A: 84,\n 0x1143B: 84,\n 0x1143C: 84,\n 0x1143D: 84,\n 0x1143E: 84,\n 0x1143F: 84,\n 0x11442: 84,\n 0x11443: 84,\n 0x11444: 84,\n 0x11446: 84,\n 0x1145E: 84,\n 0x114B3: 84,\n 0x114B4: 84,\n 0x114B5: 84,\n 0x114B6: 84,\n 0x114B7: 84,\n 0x114B8: 84,\n 0x114BA: 84,\n 0x114BF: 84,\n 0x114C0: 84,\n 0x114C2: 84,\n 0x114C3: 84,\n 0x115B2: 84,\n 0x115B3: 84,\n 0x115B4: 84,\n 0x115B5: 84,\n 0x115BC: 84,\n 0x115BD: 84,\n 0x115BF: 84,\n 0x115C0: 84,\n 0x115DC: 84,\n 0x115DD: 84,\n 0x11633: 84,\n 0x11634: 84,\n 0x11635: 84,\n 0x11636: 84,\n 0x11637: 84,\n 0x11638: 84,\n 0x11639: 84,\n 0x1163A: 84,\n 0x1163D: 84,\n 0x1163F: 84,\n 0x11640: 84,\n 0x116AB: 84,\n 0x116AD: 84,\n 0x116B0: 84,\n 0x116B1: 84,\n 0x116B2: 84,\n 0x116B3: 84,\n 0x116B4: 84,\n 0x116B5: 84,\n 0x116B7: 84,\n 0x1171D: 84,\n 0x1171E: 84,\n 0x1171F: 84,\n 0x11722: 84,\n 0x11723: 84,\n 0x11724: 84,\n 0x11725: 84,\n 0x11727: 84,\n 0x11728: 84,\n 0x11729: 84,\n 0x1172A: 84,\n 0x1172B: 84,\n 0x1182F: 84,\n 0x11830: 84,\n 0x11831: 84,\n 0x11832: 84,\n 0x11833: 84,\n 0x11834: 84,\n 0x11835: 84,\n 0x11836: 84,\n 0x11837: 84,\n 0x11839: 84,\n 0x1183A: 84,\n 0x1193B: 84,\n 0x1193C: 84,\n 0x1193E: 84,\n 0x11943: 84,\n 0x119D4: 84,\n 0x119D5: 84,\n 0x119D6: 84,\n 0x119D7: 84,\n 0x119DA: 84,\n 0x119DB: 84,\n 0x119E0: 84,\n 0x11A01: 84,\n 0x11A02: 84,\n 0x11A03: 84,\n 0x11A04: 84,\n 0x11A05: 84,\n 0x11A06: 84,\n 0x11A07: 84,\n 0x11A08: 84,\n 0x11A09: 84,\n 0x11A0A: 84,\n 
0x11A33: 84,\n 0x11A34: 84,\n 0x11A35: 84,\n 0x11A36: 84,\n 0x11A37: 84,\n 0x11A38: 84,\n 0x11A3B: 84,\n 0x11A3C: 84,\n 0x11A3D: 84,\n 0x11A3E: 84,\n 0x11A47: 84,\n 0x11A51: 84,\n 0x11A52: 84,\n 0x11A53: 84,\n 0x11A54: 84,\n 0x11A55: 84,\n 0x11A56: 84,\n 0x11A59: 84,\n 0x11A5A: 84,\n 0x11A5B: 84,\n 0x11A8A: 84,\n 0x11A8B: 84,\n 0x11A8C: 84,\n 0x11A8D: 84,\n 0x11A8E: 84,\n 0x11A8F: 84,\n 0x11A90: 84,\n 0x11A91: 84,\n 0x11A92: 84,\n 0x11A93: 84,\n 0x11A94: 84,\n 0x11A95: 84,\n 0x11A96: 84,\n 0x11A98: 84,\n 0x11A99: 84,\n 0x11C30: 84,\n 0x11C31: 84,\n 0x11C32: 84,\n 0x11C33: 84,\n 0x11C34: 84,\n 0x11C35: 84,\n 0x11C36: 84,\n 0x11C38: 84,\n 0x11C39: 84,\n 0x11C3A: 84,\n 0x11C3B: 84,\n 0x11C3C: 84,\n 0x11C3D: 84,\n 0x11C3F: 84,\n 0x11C92: 84,\n 0x11C93: 84,\n 0x11C94: 84,\n 0x11C95: 84,\n 0x11C96: 84,\n 0x11C97: 84,\n 0x11C98: 84,\n 0x11C99: 84,\n 0x11C9A: 84,\n 0x11C9B: 84,\n 0x11C9C: 84,\n 0x11C9D: 84,\n 0x11C9E: 84,\n 0x11C9F: 84,\n 0x11CA0: 84,\n 0x11CA1: 84,\n 0x11CA2: 84,\n 0x11CA3: 84,\n 0x11CA4: 84,\n 0x11CA5: 84,\n 0x11CA6: 84,\n 0x11CA7: 84,\n 0x11CAA: 84,\n 0x11CAB: 84,\n 0x11CAC: 84,\n 0x11CAD: 84,\n 0x11CAE: 84,\n 0x11CAF: 84,\n 0x11CB0: 84,\n 0x11CB2: 84,\n 0x11CB3: 84,\n 0x11CB5: 84,\n 0x11CB6: 84,\n 0x11D31: 84,\n 0x11D32: 84,\n 0x11D33: 84,\n 0x11D34: 84,\n 0x11D35: 84,\n 0x11D36: 84,\n 0x11D3A: 84,\n 0x11D3C: 84,\n 0x11D3D: 84,\n 0x11D3F: 84,\n 0x11D40: 84,\n 0x11D41: 84,\n 0x11D42: 84,\n 0x11D43: 84,\n 0x11D44: 84,\n 0x11D45: 84,\n 0x11D47: 84,\n 0x11D90: 84,\n 0x11D91: 84,\n 0x11D95: 84,\n 0x11D97: 84,\n 0x11EF3: 84,\n 0x11EF4: 84,\n 0x11F00: 84,\n 0x11F01: 84,\n 0x11F36: 84,\n 0x11F37: 84,\n 0x11F38: 84,\n 0x11F39: 84,\n 0x11F3A: 84,\n 0x11F40: 84,\n 0x11F42: 84,\n 0x13430: 84,\n 0x13431: 84,\n 0x13432: 84,\n 0x13433: 84,\n 0x13434: 84,\n 0x13435: 84,\n 0x13436: 84,\n 0x13437: 84,\n 0x13438: 84,\n 0x13439: 84,\n 0x1343A: 84,\n 0x1343B: 84,\n 0x1343C: 84,\n 0x1343D: 84,\n 0x1343E: 84,\n 0x1343F: 84,\n 0x13440: 84,\n 0x13447: 84,\n 0x13448: 84,\n 
0x13449: 84,\n 0x1344A: 84,\n 0x1344B: 84,\n 0x1344C: 84,\n 0x1344D: 84,\n 0x1344E: 84,\n 0x1344F: 84,\n 0x13450: 84,\n 0x13451: 84,\n 0x13452: 84,\n 0x13453: 84,\n 0x13454: 84,\n 0x13455: 84,\n 0x16AF0: 84,\n 0x16AF1: 84,\n 0x16AF2: 84,\n 0x16AF3: 84,\n 0x16AF4: 84,\n 0x16B30: 84,\n 0x16B31: 84,\n 0x16B32: 84,\n 0x16B33: 84,\n 0x16B34: 84,\n 0x16B35: 84,\n 0x16B36: 84,\n 0x16F4F: 84,\n 0x16F8F: 84,\n 0x16F90: 84,\n 0x16F91: 84,\n 0x16F92: 84,\n 0x16FE4: 84,\n 0x1BC9D: 84,\n 0x1BC9E: 84,\n 0x1BCA0: 84,\n 0x1BCA1: 84,\n 0x1BCA2: 84,\n 0x1BCA3: 84,\n 0x1CF00: 84,\n 0x1CF01: 84,\n 0x1CF02: 84,\n 0x1CF03: 84,\n 0x1CF04: 84,\n 0x1CF05: 84,\n 0x1CF06: 84,\n 0x1CF07: 84,\n 0x1CF08: 84,\n 0x1CF09: 84,\n 0x1CF0A: 84,\n 0x1CF0B: 84,\n 0x1CF0C: 84,\n 0x1CF0D: 84,\n 0x1CF0E: 84,\n 0x1CF0F: 84,\n 0x1CF10: 84,\n 0x1CF11: 84,\n 0x1CF12: 84,\n 0x1CF13: 84,\n 0x1CF14: 84,\n 0x1CF15: 84,\n 0x1CF16: 84,\n 0x1CF17: 84,\n 0x1CF18: 84,\n 0x1CF19: 84,\n 0x1CF1A: 84,\n 0x1CF1B: 84,\n 0x1CF1C: 84,\n 0x1CF1D: 84,\n 0x1CF1E: 84,\n 0x1CF1F: 84,\n 0x1CF20: 84,\n 0x1CF21: 84,\n 0x1CF22: 84,\n 0x1CF23: 84,\n 0x1CF24: 84,\n 0x1CF25: 84,\n 0x1CF26: 84,\n 0x1CF27: 84,\n 0x1CF28: 84,\n 0x1CF29: 84,\n 0x1CF2A: 84,\n 0x1CF2B: 84,\n 0x1CF2C: 84,\n 0x1CF2D: 84,\n 0x1CF30: 84,\n 0x1CF31: 84,\n 0x1CF32: 84,\n 0x1CF33: 84,\n 0x1CF34: 84,\n 0x1CF35: 84,\n 0x1CF36: 84,\n 0x1CF37: 84,\n 0x1CF38: 84,\n 0x1CF39: 84,\n 0x1CF3A: 84,\n 0x1CF3B: 84,\n 0x1CF3C: 84,\n 0x1CF3D: 84,\n 0x1CF3E: 84,\n 0x1CF3F: 84,\n 0x1CF40: 84,\n 0x1CF41: 84,\n 0x1CF42: 84,\n 0x1CF43: 84,\n 0x1CF44: 84,\n 0x1CF45: 84,\n 0x1CF46: 84,\n 0x1D167: 84,\n 0x1D168: 84,\n 0x1D169: 84,\n 0x1D173: 84,\n 0x1D174: 84,\n 0x1D175: 84,\n 0x1D176: 84,\n 0x1D177: 84,\n 0x1D178: 84,\n 0x1D179: 84,\n 0x1D17A: 84,\n 0x1D17B: 84,\n 0x1D17C: 84,\n 0x1D17D: 84,\n 0x1D17E: 84,\n 0x1D17F: 84,\n 0x1D180: 84,\n 0x1D181: 84,\n 0x1D182: 84,\n 0x1D185: 84,\n 0x1D186: 84,\n 0x1D187: 84,\n 0x1D188: 84,\n 0x1D189: 84,\n 0x1D18A: 84,\n 0x1D18B: 84,\n 0x1D1AA: 84,\n 
0x1D1AB: 84,\n 0x1D1AC: 84,\n 0x1D1AD: 84,\n 0x1D242: 84,\n 0x1D243: 84,\n 0x1D244: 84,\n 0x1DA00: 84,\n 0x1DA01: 84,\n 0x1DA02: 84,\n 0x1DA03: 84,\n 0x1DA04: 84,\n 0x1DA05: 84,\n 0x1DA06: 84,\n 0x1DA07: 84,\n 0x1DA08: 84,\n 0x1DA09: 84,\n 0x1DA0A: 84,\n 0x1DA0B: 84,\n 0x1DA0C: 84,\n 0x1DA0D: 84,\n 0x1DA0E: 84,\n 0x1DA0F: 84,\n 0x1DA10: 84,\n 0x1DA11: 84,\n 0x1DA12: 84,\n 0x1DA13: 84,\n 0x1DA14: 84,\n 0x1DA15: 84,\n 0x1DA16: 84,\n 0x1DA17: 84,\n 0x1DA18: 84,\n 0x1DA19: 84,\n 0x1DA1A: 84,\n 0x1DA1B: 84,\n 0x1DA1C: 84,\n 0x1DA1D: 84,\n 0x1DA1E: 84,\n 0x1DA1F: 84,\n 0x1DA20: 84,\n 0x1DA21: 84,\n 0x1DA22: 84,\n 0x1DA23: 84,\n 0x1DA24: 84,\n 0x1DA25: 84,\n 0x1DA26: 84,\n 0x1DA27: 84,\n 0x1DA28: 84,\n 0x1DA29: 84,\n 0x1DA2A: 84,\n 0x1DA2B: 84,\n 0x1DA2C: 84,\n 0x1DA2D: 84,\n 0x1DA2E: 84,\n 0x1DA2F: 84,\n 0x1DA30: 84,\n 0x1DA31: 84,\n 0x1DA32: 84,\n 0x1DA33: 84,\n 0x1DA34: 84,\n 0x1DA35: 84,\n 0x1DA36: 84,\n 0x1DA3B: 84,\n 0x1DA3C: 84,\n 0x1DA3D: 84,\n 0x1DA3E: 84,\n 0x1DA3F: 84,\n 0x1DA40: 84,\n 0x1DA41: 84,\n 0x1DA42: 84,\n 0x1DA43: 84,\n 0x1DA44: 84,\n 0x1DA45: 84,\n 0x1DA46: 84,\n 0x1DA47: 84,\n 0x1DA48: 84,\n 0x1DA49: 84,\n 0x1DA4A: 84,\n 0x1DA4B: 84,\n 0x1DA4C: 84,\n 0x1DA4D: 84,\n 0x1DA4E: 84,\n 0x1DA4F: 84,\n 0x1DA50: 84,\n 0x1DA51: 84,\n 0x1DA52: 84,\n 0x1DA53: 84,\n 0x1DA54: 84,\n 0x1DA55: 84,\n 0x1DA56: 84,\n 0x1DA57: 84,\n 0x1DA58: 84,\n 0x1DA59: 84,\n 0x1DA5A: 84,\n 0x1DA5B: 84,\n 0x1DA5C: 84,\n 0x1DA5D: 84,\n 0x1DA5E: 84,\n 0x1DA5F: 84,\n 0x1DA60: 84,\n 0x1DA61: 84,\n 0x1DA62: 84,\n 0x1DA63: 84,\n 0x1DA64: 84,\n 0x1DA65: 84,\n 0x1DA66: 84,\n 0x1DA67: 84,\n 0x1DA68: 84,\n 0x1DA69: 84,\n 0x1DA6A: 84,\n 0x1DA6B: 84,\n 0x1DA6C: 84,\n 0x1DA75: 84,\n 0x1DA84: 84,\n 0x1DA9B: 84,\n 0x1DA9C: 84,\n 0x1DA9D: 84,\n 0x1DA9E: 84,\n 0x1DA9F: 84,\n 0x1DAA1: 84,\n 0x1DAA2: 84,\n 0x1DAA3: 84,\n 0x1DAA4: 84,\n 0x1DAA5: 84,\n 0x1DAA6: 84,\n 0x1DAA7: 84,\n 0x1DAA8: 84,\n 0x1DAA9: 84,\n 0x1DAAA: 84,\n 0x1DAAB: 84,\n 0x1DAAC: 84,\n 0x1DAAD: 84,\n 0x1DAAE: 84,\n 0x1DAAF: 84,\n 
0x1E000: 84,\n 0x1E001: 84,\n 0x1E002: 84,\n 0x1E003: 84,\n 0x1E004: 84,\n 0x1E005: 84,\n 0x1E006: 84,\n 0x1E008: 84,\n 0x1E009: 84,\n 0x1E00A: 84,\n 0x1E00B: 84,\n 0x1E00C: 84,\n 0x1E00D: 84,\n 0x1E00E: 84,\n 0x1E00F: 84,\n 0x1E010: 84,\n 0x1E011: 84,\n 0x1E012: 84,\n 0x1E013: 84,\n 0x1E014: 84,\n 0x1E015: 84,\n 0x1E016: 84,\n 0x1E017: 84,\n 0x1E018: 84,\n 0x1E01B: 84,\n 0x1E01C: 84,\n 0x1E01D: 84,\n 0x1E01E: 84,\n 0x1E01F: 84,\n 0x1E020: 84,\n 0x1E021: 84,\n 0x1E023: 84,\n 0x1E024: 84,\n 0x1E026: 84,\n 0x1E027: 84,\n 0x1E028: 84,\n 0x1E029: 84,\n 0x1E02A: 84,\n 0x1E08F: 84,\n 0x1E130: 84,\n 0x1E131: 84,\n 0x1E132: 84,\n 0x1E133: 84,\n 0x1E134: 84,\n 0x1E135: 84,\n 0x1E136: 84,\n 0x1E2AE: 84,\n 0x1E2EC: 84,\n 0x1E2ED: 84,\n 0x1E2EE: 84,\n 0x1E2EF: 84,\n 0x1E4EC: 84,\n 0x1E4ED: 84,\n 0x1E4EE: 84,\n 0x1E4EF: 84,\n 0x1E8D0: 84,\n 0x1E8D1: 84,\n 0x1E8D2: 84,\n 0x1E8D3: 84,\n 0x1E8D4: 84,\n 0x1E8D5: 84,\n 0x1E8D6: 84,\n 0x1E900: 68,\n 0x1E901: 68,\n 0x1E902: 68,\n 0x1E903: 68,\n 0x1E904: 68,\n 0x1E905: 68,\n 0x1E906: 68,\n 0x1E907: 68,\n 0x1E908: 68,\n 0x1E909: 68,\n 0x1E90A: 68,\n 0x1E90B: 68,\n 0x1E90C: 68,\n 0x1E90D: 68,\n 0x1E90E: 68,\n 0x1E90F: 68,\n 0x1E910: 68,\n 0x1E911: 68,\n 0x1E912: 68,\n 0x1E913: 68,\n 0x1E914: 68,\n 0x1E915: 68,\n 0x1E916: 68,\n 0x1E917: 68,\n 0x1E918: 68,\n 0x1E919: 68,\n 0x1E91A: 68,\n 0x1E91B: 68,\n 0x1E91C: 68,\n 0x1E91D: 68,\n 0x1E91E: 68,\n 0x1E91F: 68,\n 0x1E920: 68,\n 0x1E921: 68,\n 0x1E922: 68,\n 0x1E923: 68,\n 0x1E924: 68,\n 0x1E925: 68,\n 0x1E926: 68,\n 0x1E927: 68,\n 0x1E928: 68,\n 0x1E929: 68,\n 0x1E92A: 68,\n 0x1E92B: 68,\n 0x1E92C: 68,\n 0x1E92D: 68,\n 0x1E92E: 68,\n 0x1E92F: 68,\n 0x1E930: 68,\n 0x1E931: 68,\n 0x1E932: 68,\n 0x1E933: 68,\n 0x1E934: 68,\n 0x1E935: 68,\n 0x1E936: 68,\n 0x1E937: 68,\n 0x1E938: 68,\n 0x1E939: 68,\n 0x1E93A: 68,\n 0x1E93B: 68,\n 0x1E93C: 68,\n 0x1E93D: 68,\n 0x1E93E: 68,\n 0x1E93F: 68,\n 0x1E940: 68,\n 0x1E941: 68,\n 0x1E942: 68,\n 0x1E943: 68,\n 0x1E944: 84,\n 0x1E945: 84,\n 0x1E946: 84,\n 
0x1E947: 84,\n 0x1E948: 84,\n 0x1E949: 84,\n 0x1E94A: 84,\n 0x1E94B: 84,\n 0xE0001: 84,\n 0xE0020: 84,\n 0xE0021: 84,\n 0xE0022: 84,\n 0xE0023: 84,\n 0xE0024: 84,\n 0xE0025: 84,\n 0xE0026: 84,\n 0xE0027: 84,\n 0xE0028: 84,\n 0xE0029: 84,\n 0xE002A: 84,\n 0xE002B: 84,\n 0xE002C: 84,\n 0xE002D: 84,\n 0xE002E: 84,\n 0xE002F: 84,\n 0xE0030: 84,\n 0xE0031: 84,\n 0xE0032: 84,\n 0xE0033: 84,\n 0xE0034: 84,\n 0xE0035: 84,\n 0xE0036: 84,\n 0xE0037: 84,\n 0xE0038: 84,\n 0xE0039: 84,\n 0xE003A: 84,\n 0xE003B: 84,\n 0xE003C: 84,\n 0xE003D: 84,\n 0xE003E: 84,\n 0xE003F: 84,\n 0xE0040: 84,\n 0xE0041: 84,\n 0xE0042: 84,\n 0xE0043: 84,\n 0xE0044: 84,\n 0xE0045: 84,\n 0xE0046: 84,\n 0xE0047: 84,\n 0xE0048: 84,\n 0xE0049: 84,\n 0xE004A: 84,\n 0xE004B: 84,\n 0xE004C: 84,\n 0xE004D: 84,\n 0xE004E: 84,\n 0xE004F: 84,\n 0xE0050: 84,\n 0xE0051: 84,\n 0xE0052: 84,\n 0xE0053: 84,\n 0xE0054: 84,\n 0xE0055: 84,\n 0xE0056: 84,\n 0xE0057: 84,\n 0xE0058: 84,\n 0xE0059: 84,\n 0xE005A: 84,\n 0xE005B: 84,\n 0xE005C: 84,\n 0xE005D: 84,\n 0xE005E: 84,\n 0xE005F: 84,\n 0xE0060: 84,\n 0xE0061: 84,\n 0xE0062: 84,\n 0xE0063: 84,\n 0xE0064: 84,\n 0xE0065: 84,\n 0xE0066: 84,\n 0xE0067: 84,\n 0xE0068: 84,\n 0xE0069: 84,\n 0xE006A: 84,\n 0xE006B: 84,\n 0xE006C: 84,\n 0xE006D: 84,\n 0xE006E: 84,\n 0xE006F: 84,\n 0xE0070: 84,\n 0xE0071: 84,\n 0xE0072: 84,\n 0xE0073: 84,\n 0xE0074: 84,\n 0xE0075: 84,\n 0xE0076: 84,\n 0xE0077: 84,\n 0xE0078: 84,\n 0xE0079: 84,\n 0xE007A: 84,\n 0xE007B: 84,\n 0xE007C: 84,\n 0xE007D: 84,\n 0xE007E: 84,\n 0xE007F: 84,\n 0xE0100: 84,\n 0xE0101: 84,\n 0xE0102: 84,\n 0xE0103: 84,\n 0xE0104: 84,\n 0xE0105: 84,\n 0xE0106: 84,\n 0xE0107: 84,\n 0xE0108: 84,\n 0xE0109: 84,\n 0xE010A: 84,\n 0xE010B: 84,\n 0xE010C: 84,\n 0xE010D: 84,\n 0xE010E: 84,\n 0xE010F: 84,\n 0xE0110: 84,\n 0xE0111: 84,\n 0xE0112: 84,\n 0xE0113: 84,\n 0xE0114: 84,\n 0xE0115: 84,\n 0xE0116: 84,\n 0xE0117: 84,\n 0xE0118: 84,\n 0xE0119: 84,\n 0xE011A: 84,\n 0xE011B: 84,\n 0xE011C: 84,\n 0xE011D: 84,\n 0xE011E: 84,\n 
0xE011F: 84,\n 0xE0120: 84,\n 0xE0121: 84,\n 0xE0122: 84,\n 0xE0123: 84,\n 0xE0124: 84,\n 0xE0125: 84,\n 0xE0126: 84,\n 0xE0127: 84,\n 0xE0128: 84,\n 0xE0129: 84,\n 0xE012A: 84,\n 0xE012B: 84,\n 0xE012C: 84,\n 0xE012D: 84,\n 0xE012E: 84,\n 0xE012F: 84,\n 0xE0130: 84,\n 0xE0131: 84,\n 0xE0132: 84,\n 0xE0133: 84,\n 0xE0134: 84,\n 0xE0135: 84,\n 0xE0136: 84,\n 0xE0137: 84,\n 0xE0138: 84,\n 0xE0139: 84,\n 0xE013A: 84,\n 0xE013B: 84,\n 0xE013C: 84,\n 0xE013D: 84,\n 0xE013E: 84,\n 0xE013F: 84,\n 0xE0140: 84,\n 0xE0141: 84,\n 0xE0142: 84,\n 0xE0143: 84,\n 0xE0144: 84,\n 0xE0145: 84,\n 0xE0146: 84,\n 0xE0147: 84,\n 0xE0148: 84,\n 0xE0149: 84,\n 0xE014A: 84,\n 0xE014B: 84,\n 0xE014C: 84,\n 0xE014D: 84,\n 0xE014E: 84,\n 0xE014F: 84,\n 0xE0150: 84,\n 0xE0151: 84,\n 0xE0152: 84,\n 0xE0153: 84,\n 0xE0154: 84,\n 0xE0155: 84,\n 0xE0156: 84,\n 0xE0157: 84,\n 0xE0158: 84,\n 0xE0159: 84,\n 0xE015A: 84,\n 0xE015B: 84,\n 0xE015C: 84,\n 0xE015D: 84,\n 0xE015E: 84,\n 0xE015F: 84,\n 0xE0160: 84,\n 0xE0161: 84,\n 0xE0162: 84,\n 0xE0163: 84,\n 0xE0164: 84,\n 0xE0165: 84,\n 0xE0166: 84,\n 0xE0167: 84,\n 0xE0168: 84,\n 0xE0169: 84,\n 0xE016A: 84,\n 0xE016B: 84,\n 0xE016C: 84,\n 0xE016D: 84,\n 0xE016E: 84,\n 0xE016F: 84,\n 0xE0170: 84,\n 0xE0171: 84,\n 0xE0172: 84,\n 0xE0173: 84,\n 0xE0174: 84,\n 0xE0175: 84,\n 0xE0176: 84,\n 0xE0177: 84,\n 0xE0178: 84,\n 0xE0179: 84,\n 0xE017A: 84,\n 0xE017B: 84,\n 0xE017C: 84,\n 0xE017D: 84,\n 0xE017E: 84,\n 0xE017F: 84,\n 0xE0180: 84,\n 0xE0181: 84,\n 0xE0182: 84,\n 0xE0183: 84,\n 0xE0184: 84,\n 0xE0185: 84,\n 0xE0186: 84,\n 0xE0187: 84,\n 0xE0188: 84,\n 0xE0189: 84,\n 0xE018A: 84,\n 0xE018B: 84,\n 0xE018C: 84,\n 0xE018D: 84,\n 0xE018E: 84,\n 0xE018F: 84,\n 0xE0190: 84,\n 0xE0191: 84,\n 0xE0192: 84,\n 0xE0193: 84,\n 0xE0194: 84,\n 0xE0195: 84,\n 0xE0196: 84,\n 0xE0197: 84,\n 0xE0198: 84,\n 0xE0199: 84,\n 0xE019A: 84,\n 0xE019B: 84,\n 0xE019C: 84,\n 0xE019D: 84,\n 0xE019E: 84,\n 0xE019F: 84,\n 0xE01A0: 84,\n 0xE01A1: 84,\n 0xE01A2: 84,\n 0xE01A3: 84,\n 
0xE01A4: 84,\n 0xE01A5: 84,\n 0xE01A6: 84,\n 0xE01A7: 84,\n 0xE01A8: 84,\n 0xE01A9: 84,\n 0xE01AA: 84,\n 0xE01AB: 84,\n 0xE01AC: 84,\n 0xE01AD: 84,\n 0xE01AE: 84,\n 0xE01AF: 84,\n 0xE01B0: 84,\n 0xE01B1: 84,\n 0xE01B2: 84,\n 0xE01B3: 84,\n 0xE01B4: 84,\n 0xE01B5: 84,\n 0xE01B6: 84,\n 0xE01B7: 84,\n 0xE01B8: 84,\n 0xE01B9: 84,\n 0xE01BA: 84,\n 0xE01BB: 84,\n 0xE01BC: 84,\n 0xE01BD: 84,\n 0xE01BE: 84,\n 0xE01BF: 84,\n 0xE01C0: 84,\n 0xE01C1: 84,\n 0xE01C2: 84,\n 0xE01C3: 84,\n 0xE01C4: 84,\n 0xE01C5: 84,\n 0xE01C6: 84,\n 0xE01C7: 84,\n 0xE01C8: 84,\n 0xE01C9: 84,\n 0xE01CA: 84,\n 0xE01CB: 84,\n 0xE01CC: 84,\n 0xE01CD: 84,\n 0xE01CE: 84,\n 0xE01CF: 84,\n 0xE01D0: 84,\n 0xE01D1: 84,\n 0xE01D2: 84,\n 0xE01D3: 84,\n 0xE01D4: 84,\n 0xE01D5: 84,\n 0xE01D6: 84,\n 0xE01D7: 84,\n 0xE01D8: 84,\n 0xE01D9: 84,\n 0xE01DA: 84,\n 0xE01DB: 84,\n 0xE01DC: 84,\n 0xE01DD: 84,\n 0xE01DE: 84,\n 0xE01DF: 84,\n 0xE01E0: 84,\n 0xE01E1: 84,\n 0xE01E2: 84,\n 0xE01E3: 84,\n 0xE01E4: 84,\n 0xE01E5: 84,\n 0xE01E6: 84,\n 0xE01E7: 84,\n 0xE01E8: 84,\n 0xE01E9: 84,\n 0xE01EA: 84,\n 0xE01EB: 84,\n 0xE01EC: 84,\n 0xE01ED: 84,\n 0xE01EE: 84,\n 0xE01EF: 84,\n}\ncodepoint_classes = {\n "PVALID": (\n 0x2D0000002E,\n 0x300000003A,\n 0x610000007B,\n 0xDF000000F7,\n 0xF800000100,\n 0x10100000102,\n 0x10300000104,\n 0x10500000106,\n 0x10700000108,\n 0x1090000010A,\n 0x10B0000010C,\n 0x10D0000010E,\n 0x10F00000110,\n 0x11100000112,\n 0x11300000114,\n 0x11500000116,\n 0x11700000118,\n 0x1190000011A,\n 0x11B0000011C,\n 0x11D0000011E,\n 0x11F00000120,\n 0x12100000122,\n 0x12300000124,\n 0x12500000126,\n 0x12700000128,\n 0x1290000012A,\n 0x12B0000012C,\n 0x12D0000012E,\n 0x12F00000130,\n 0x13100000132,\n 0x13500000136,\n 0x13700000139,\n 0x13A0000013B,\n 0x13C0000013D,\n 0x13E0000013F,\n 0x14200000143,\n 0x14400000145,\n 0x14600000147,\n 0x14800000149,\n 0x14B0000014C,\n 0x14D0000014E,\n 0x14F00000150,\n 0x15100000152,\n 0x15300000154,\n 0x15500000156,\n 0x15700000158,\n 0x1590000015A,\n 0x15B0000015C,\n 
0x15D0000015E,\n 0x15F00000160,\n 0x16100000162,\n 0x16300000164,\n 0x16500000166,\n 0x16700000168,\n 0x1690000016A,\n 0x16B0000016C,\n 0x16D0000016E,\n 0x16F00000170,\n 0x17100000172,\n 0x17300000174,\n 0x17500000176,\n 0x17700000178,\n 0x17A0000017B,\n 0x17C0000017D,\n 0x17E0000017F,\n 0x18000000181,\n 0x18300000184,\n 0x18500000186,\n 0x18800000189,\n 0x18C0000018E,\n 0x19200000193,\n 0x19500000196,\n 0x1990000019C,\n 0x19E0000019F,\n 0x1A1000001A2,\n 0x1A3000001A4,\n 0x1A5000001A6,\n 0x1A8000001A9,\n 0x1AA000001AC,\n 0x1AD000001AE,\n 0x1B0000001B1,\n 0x1B4000001B5,\n 0x1B6000001B7,\n 0x1B9000001BC,\n 0x1BD000001C4,\n 0x1CE000001CF,\n 0x1D0000001D1,\n 0x1D2000001D3,\n 0x1D4000001D5,\n 0x1D6000001D7,\n 0x1D8000001D9,\n 0x1DA000001DB,\n 0x1DC000001DE,\n 0x1DF000001E0,\n 0x1E1000001E2,\n 0x1E3000001E4,\n 0x1E5000001E6,\n 0x1E7000001E8,\n 0x1E9000001EA,\n 0x1EB000001EC,\n 0x1ED000001EE,\n 0x1EF000001F1,\n 0x1F5000001F6,\n 0x1F9000001FA,\n 0x1FB000001FC,\n 0x1FD000001FE,\n 0x1FF00000200,\n 0x20100000202,\n 0x20300000204,\n 0x20500000206,\n 0x20700000208,\n 0x2090000020A,\n 0x20B0000020C,\n 0x20D0000020E,\n 0x20F00000210,\n 0x21100000212,\n 0x21300000214,\n 0x21500000216,\n 0x21700000218,\n 0x2190000021A,\n 0x21B0000021C,\n 0x21D0000021E,\n 0x21F00000220,\n 0x22100000222,\n 0x22300000224,\n 0x22500000226,\n 0x22700000228,\n 0x2290000022A,\n 0x22B0000022C,\n 0x22D0000022E,\n 0x22F00000230,\n 0x23100000232,\n 0x2330000023A,\n 0x23C0000023D,\n 0x23F00000241,\n 0x24200000243,\n 0x24700000248,\n 0x2490000024A,\n 0x24B0000024C,\n 0x24D0000024E,\n 0x24F000002B0,\n 0x2B9000002C2,\n 0x2C6000002D2,\n 0x2EC000002ED,\n 0x2EE000002EF,\n 0x30000000340,\n 0x34200000343,\n 0x3460000034F,\n 0x35000000370,\n 0x37100000372,\n 0x37300000374,\n 0x37700000378,\n 0x37B0000037E,\n 0x39000000391,\n 0x3AC000003CF,\n 0x3D7000003D8,\n 0x3D9000003DA,\n 0x3DB000003DC,\n 0x3DD000003DE,\n 0x3DF000003E0,\n 0x3E1000003E2,\n 0x3E3000003E4,\n 0x3E5000003E6,\n 0x3E7000003E8,\n 0x3E9000003EA,\n 
0x3EB000003EC,\n 0x3ED000003EE,\n 0x3EF000003F0,\n 0x3F3000003F4,\n 0x3F8000003F9,\n 0x3FB000003FD,\n 0x43000000460,\n 0x46100000462,\n 0x46300000464,\n 0x46500000466,\n 0x46700000468,\n 0x4690000046A,\n 0x46B0000046C,\n 0x46D0000046E,\n 0x46F00000470,\n 0x47100000472,\n 0x47300000474,\n 0x47500000476,\n 0x47700000478,\n 0x4790000047A,\n 0x47B0000047C,\n 0x47D0000047E,\n 0x47F00000480,\n 0x48100000482,\n 0x48300000488,\n 0x48B0000048C,\n 0x48D0000048E,\n 0x48F00000490,\n 0x49100000492,\n 0x49300000494,\n 0x49500000496,\n 0x49700000498,\n 0x4990000049A,\n 0x49B0000049C,\n 0x49D0000049E,\n 0x49F000004A0,\n 0x4A1000004A2,\n 0x4A3000004A4,\n 0x4A5000004A6,\n 0x4A7000004A8,\n 0x4A9000004AA,\n 0x4AB000004AC,\n 0x4AD000004AE,\n 0x4AF000004B0,\n 0x4B1000004B2,\n 0x4B3000004B4,\n 0x4B5000004B6,\n 0x4B7000004B8,\n 0x4B9000004BA,\n 0x4BB000004BC,\n 0x4BD000004BE,\n 0x4BF000004C0,\n 0x4C2000004C3,\n 0x4C4000004C5,\n 0x4C6000004C7,\n 0x4C8000004C9,\n 0x4CA000004CB,\n 0x4CC000004CD,\n 0x4CE000004D0,\n 0x4D1000004D2,\n 0x4D3000004D4,\n 0x4D5000004D6,\n 0x4D7000004D8,\n 0x4D9000004DA,\n 0x4DB000004DC,\n 0x4DD000004DE,\n 0x4DF000004E0,\n 0x4E1000004E2,\n 0x4E3000004E4,\n 0x4E5000004E6,\n 0x4E7000004E8,\n 0x4E9000004EA,\n 0x4EB000004EC,\n 0x4ED000004EE,\n 0x4EF000004F0,\n 0x4F1000004F2,\n 0x4F3000004F4,\n 0x4F5000004F6,\n 0x4F7000004F8,\n 0x4F9000004FA,\n 0x4FB000004FC,\n 0x4FD000004FE,\n 0x4FF00000500,\n 0x50100000502,\n 0x50300000504,\n 0x50500000506,\n 0x50700000508,\n 0x5090000050A,\n 0x50B0000050C,\n 0x50D0000050E,\n 0x50F00000510,\n 0x51100000512,\n 0x51300000514,\n 0x51500000516,\n 0x51700000518,\n 0x5190000051A,\n 0x51B0000051C,\n 0x51D0000051E,\n 0x51F00000520,\n 0x52100000522,\n 0x52300000524,\n 0x52500000526,\n 0x52700000528,\n 0x5290000052A,\n 0x52B0000052C,\n 0x52D0000052E,\n 0x52F00000530,\n 0x5590000055A,\n 0x56000000587,\n 0x58800000589,\n 0x591000005BE,\n 0x5BF000005C0,\n 0x5C1000005C3,\n 0x5C4000005C6,\n 0x5C7000005C8,\n 0x5D0000005EB,\n 0x5EF000005F3,\n 
0x6100000061B,\n 0x62000000640,\n 0x64100000660,\n 0x66E00000675,\n 0x679000006D4,\n 0x6D5000006DD,\n 0x6DF000006E9,\n 0x6EA000006F0,\n 0x6FA00000700,\n 0x7100000074B,\n 0x74D000007B2,\n 0x7C0000007F6,\n 0x7FD000007FE,\n 0x8000000082E,\n 0x8400000085C,\n 0x8600000086B,\n 0x87000000888,\n 0x8890000088F,\n 0x898000008E2,\n 0x8E300000958,\n 0x96000000964,\n 0x96600000970,\n 0x97100000984,\n 0x9850000098D,\n 0x98F00000991,\n 0x993000009A9,\n 0x9AA000009B1,\n 0x9B2000009B3,\n 0x9B6000009BA,\n 0x9BC000009C5,\n 0x9C7000009C9,\n 0x9CB000009CF,\n 0x9D7000009D8,\n 0x9E0000009E4,\n 0x9E6000009F2,\n 0x9FC000009FD,\n 0x9FE000009FF,\n 0xA0100000A04,\n 0xA0500000A0B,\n 0xA0F00000A11,\n 0xA1300000A29,\n 0xA2A00000A31,\n 0xA3200000A33,\n 0xA3500000A36,\n 0xA3800000A3A,\n 0xA3C00000A3D,\n 0xA3E00000A43,\n 0xA4700000A49,\n 0xA4B00000A4E,\n 0xA5100000A52,\n 0xA5C00000A5D,\n 0xA6600000A76,\n 0xA8100000A84,\n 0xA8500000A8E,\n 0xA8F00000A92,\n 0xA9300000AA9,\n 0xAAA00000AB1,\n 0xAB200000AB4,\n 0xAB500000ABA,\n 0xABC00000AC6,\n 0xAC700000ACA,\n 0xACB00000ACE,\n 0xAD000000AD1,\n 0xAE000000AE4,\n 0xAE600000AF0,\n 0xAF900000B00,\n 0xB0100000B04,\n 0xB0500000B0D,\n 0xB0F00000B11,\n 0xB1300000B29,\n 0xB2A00000B31,\n 0xB3200000B34,\n 0xB3500000B3A,\n 0xB3C00000B45,\n 0xB4700000B49,\n 0xB4B00000B4E,\n 0xB5500000B58,\n 0xB5F00000B64,\n 0xB6600000B70,\n 0xB7100000B72,\n 0xB8200000B84,\n 0xB8500000B8B,\n 0xB8E00000B91,\n 0xB9200000B96,\n 0xB9900000B9B,\n 0xB9C00000B9D,\n 0xB9E00000BA0,\n 0xBA300000BA5,\n 0xBA800000BAB,\n 0xBAE00000BBA,\n 0xBBE00000BC3,\n 0xBC600000BC9,\n 0xBCA00000BCE,\n 0xBD000000BD1,\n 0xBD700000BD8,\n 0xBE600000BF0,\n 0xC0000000C0D,\n 0xC0E00000C11,\n 0xC1200000C29,\n 0xC2A00000C3A,\n 0xC3C00000C45,\n 0xC4600000C49,\n 0xC4A00000C4E,\n 0xC5500000C57,\n 0xC5800000C5B,\n 0xC5D00000C5E,\n 0xC6000000C64,\n 0xC6600000C70,\n 0xC8000000C84,\n 0xC8500000C8D,\n 0xC8E00000C91,\n 0xC9200000CA9,\n 0xCAA00000CB4,\n 0xCB500000CBA,\n 0xCBC00000CC5,\n 0xCC600000CC9,\n 0xCCA00000CCE,\n 
0xCD500000CD7,\n 0xCDD00000CDF,\n 0xCE000000CE4,\n 0xCE600000CF0,\n 0xCF100000CF4,\n 0xD0000000D0D,\n 0xD0E00000D11,\n 0xD1200000D45,\n 0xD4600000D49,\n 0xD4A00000D4F,\n 0xD5400000D58,\n 0xD5F00000D64,\n 0xD6600000D70,\n 0xD7A00000D80,\n 0xD8100000D84,\n 0xD8500000D97,\n 0xD9A00000DB2,\n 0xDB300000DBC,\n 0xDBD00000DBE,\n 0xDC000000DC7,\n 0xDCA00000DCB,\n 0xDCF00000DD5,\n 0xDD600000DD7,\n 0xDD800000DE0,\n 0xDE600000DF0,\n 0xDF200000DF4,\n 0xE0100000E33,\n 0xE3400000E3B,\n 0xE4000000E4F,\n 0xE5000000E5A,\n 0xE8100000E83,\n 0xE8400000E85,\n 0xE8600000E8B,\n 0xE8C00000EA4,\n 0xEA500000EA6,\n 0xEA700000EB3,\n 0xEB400000EBE,\n 0xEC000000EC5,\n 0xEC600000EC7,\n 0xEC800000ECF,\n 0xED000000EDA,\n 0xEDE00000EE0,\n 0xF0000000F01,\n 0xF0B00000F0C,\n 0xF1800000F1A,\n 0xF2000000F2A,\n 0xF3500000F36,\n 0xF3700000F38,\n 0xF3900000F3A,\n 0xF3E00000F43,\n 0xF4400000F48,\n 0xF4900000F4D,\n 0xF4E00000F52,\n 0xF5300000F57,\n 0xF5800000F5C,\n 0xF5D00000F69,\n 0xF6A00000F6D,\n 0xF7100000F73,\n 0xF7400000F75,\n 0xF7A00000F81,\n 0xF8200000F85,\n 0xF8600000F93,\n 0xF9400000F98,\n 0xF9900000F9D,\n 0xF9E00000FA2,\n 0xFA300000FA7,\n 0xFA800000FAC,\n 0xFAD00000FB9,\n 0xFBA00000FBD,\n 0xFC600000FC7,\n 0x10000000104A,\n 0x10500000109E,\n 0x10D0000010FB,\n 0x10FD00001100,\n 0x120000001249,\n 0x124A0000124E,\n 0x125000001257,\n 0x125800001259,\n 0x125A0000125E,\n 0x126000001289,\n 0x128A0000128E,\n 0x1290000012B1,\n 0x12B2000012B6,\n 0x12B8000012BF,\n 0x12C0000012C1,\n 0x12C2000012C6,\n 0x12C8000012D7,\n 0x12D800001311,\n 0x131200001316,\n 0x13180000135B,\n 0x135D00001360,\n 0x138000001390,\n 0x13A0000013F6,\n 0x14010000166D,\n 0x166F00001680,\n 0x16810000169B,\n 0x16A0000016EB,\n 0x16F1000016F9,\n 0x170000001716,\n 0x171F00001735,\n 0x174000001754,\n 0x17600000176D,\n 0x176E00001771,\n 0x177200001774,\n 0x1780000017B4,\n 0x17B6000017D4,\n 0x17D7000017D8,\n 0x17DC000017DE,\n 0x17E0000017EA,\n 0x18100000181A,\n 0x182000001879,\n 0x1880000018AB,\n 0x18B0000018F6,\n 0x19000000191F,\n 0x19200000192C,\n 
0x19300000193C,\n 0x19460000196E,\n 0x197000001975,\n 0x1980000019AC,\n 0x19B0000019CA,\n 0x19D0000019DA,\n 0x1A0000001A1C,\n 0x1A2000001A5F,\n 0x1A6000001A7D,\n 0x1A7F00001A8A,\n 0x1A9000001A9A,\n 0x1AA700001AA8,\n 0x1AB000001ABE,\n 0x1ABF00001ACF,\n 0x1B0000001B4D,\n 0x1B5000001B5A,\n 0x1B6B00001B74,\n 0x1B8000001BF4,\n 0x1C0000001C38,\n 0x1C4000001C4A,\n 0x1C4D00001C7E,\n 0x1CD000001CD3,\n 0x1CD400001CFB,\n 0x1D0000001D2C,\n 0x1D2F00001D30,\n 0x1D3B00001D3C,\n 0x1D4E00001D4F,\n 0x1D6B00001D78,\n 0x1D7900001D9B,\n 0x1DC000001E00,\n 0x1E0100001E02,\n 0x1E0300001E04,\n 0x1E0500001E06,\n 0x1E0700001E08,\n 0x1E0900001E0A,\n 0x1E0B00001E0C,\n 0x1E0D00001E0E,\n 0x1E0F00001E10,\n 0x1E1100001E12,\n 0x1E1300001E14,\n 0x1E1500001E16,\n 0x1E1700001E18,\n 0x1E1900001E1A,\n 0x1E1B00001E1C,\n 0x1E1D00001E1E,\n 0x1E1F00001E20,\n 0x1E2100001E22,\n 0x1E2300001E24,\n 0x1E2500001E26,\n 0x1E2700001E28,\n 0x1E2900001E2A,\n 0x1E2B00001E2C,\n 0x1E2D00001E2E,\n 0x1E2F00001E30,\n 0x1E3100001E32,\n 0x1E3300001E34,\n 0x1E3500001E36,\n 0x1E3700001E38,\n 0x1E3900001E3A,\n 0x1E3B00001E3C,\n 0x1E3D00001E3E,\n 0x1E3F00001E40,\n 0x1E4100001E42,\n 0x1E4300001E44,\n 0x1E4500001E46,\n 0x1E4700001E48,\n 0x1E4900001E4A,\n 0x1E4B00001E4C,\n 0x1E4D00001E4E,\n 0x1E4F00001E50,\n 0x1E5100001E52,\n 0x1E5300001E54,\n 0x1E5500001E56,\n 0x1E5700001E58,\n 0x1E5900001E5A,\n 0x1E5B00001E5C,\n 0x1E5D00001E5E,\n 0x1E5F00001E60,\n 0x1E6100001E62,\n 0x1E6300001E64,\n 0x1E6500001E66,\n 0x1E6700001E68,\n 0x1E6900001E6A,\n 0x1E6B00001E6C,\n 0x1E6D00001E6E,\n 0x1E6F00001E70,\n 0x1E7100001E72,\n 0x1E7300001E74,\n 0x1E7500001E76,\n 0x1E7700001E78,\n 0x1E7900001E7A,\n 0x1E7B00001E7C,\n 0x1E7D00001E7E,\n 0x1E7F00001E80,\n 0x1E8100001E82,\n 0x1E8300001E84,\n 0x1E8500001E86,\n 0x1E8700001E88,\n 0x1E8900001E8A,\n 0x1E8B00001E8C,\n 0x1E8D00001E8E,\n 0x1E8F00001E90,\n 0x1E9100001E92,\n 0x1E9300001E94,\n 0x1E9500001E9A,\n 0x1E9C00001E9E,\n 0x1E9F00001EA0,\n 0x1EA100001EA2,\n 0x1EA300001EA4,\n 0x1EA500001EA6,\n 0x1EA700001EA8,\n 
0x1EA900001EAA,\n 0x1EAB00001EAC,\n 0x1EAD00001EAE,\n 0x1EAF00001EB0,\n 0x1EB100001EB2,\n 0x1EB300001EB4,\n 0x1EB500001EB6,\n 0x1EB700001EB8,\n 0x1EB900001EBA,\n 0x1EBB00001EBC,\n 0x1EBD00001EBE,\n 0x1EBF00001EC0,\n 0x1EC100001EC2,\n 0x1EC300001EC4,\n 0x1EC500001EC6,\n 0x1EC700001EC8,\n 0x1EC900001ECA,\n 0x1ECB00001ECC,\n 0x1ECD00001ECE,\n 0x1ECF00001ED0,\n 0x1ED100001ED2,\n 0x1ED300001ED4,\n 0x1ED500001ED6,\n 0x1ED700001ED8,\n 0x1ED900001EDA,\n 0x1EDB00001EDC,\n 0x1EDD00001EDE,\n 0x1EDF00001EE0,\n 0x1EE100001EE2,\n 0x1EE300001EE4,\n 0x1EE500001EE6,\n 0x1EE700001EE8,\n 0x1EE900001EEA,\n 0x1EEB00001EEC,\n 0x1EED00001EEE,\n 0x1EEF00001EF0,\n 0x1EF100001EF2,\n 0x1EF300001EF4,\n 0x1EF500001EF6,\n 0x1EF700001EF8,\n 0x1EF900001EFA,\n 0x1EFB00001EFC,\n 0x1EFD00001EFE,\n 0x1EFF00001F08,\n 0x1F1000001F16,\n 0x1F2000001F28,\n 0x1F3000001F38,\n 0x1F4000001F46,\n 0x1F5000001F58,\n 0x1F6000001F68,\n 0x1F7000001F71,\n 0x1F7200001F73,\n 0x1F7400001F75,\n 0x1F7600001F77,\n 0x1F7800001F79,\n 0x1F7A00001F7B,\n 0x1F7C00001F7D,\n 0x1FB000001FB2,\n 0x1FB600001FB7,\n 0x1FC600001FC7,\n 0x1FD000001FD3,\n 0x1FD600001FD8,\n 0x1FE000001FE3,\n 0x1FE400001FE8,\n 0x1FF600001FF7,\n 0x214E0000214F,\n 0x218400002185,\n 0x2C3000002C60,\n 0x2C6100002C62,\n 0x2C6500002C67,\n 0x2C6800002C69,\n 0x2C6A00002C6B,\n 0x2C6C00002C6D,\n 0x2C7100002C72,\n 0x2C7300002C75,\n 0x2C7600002C7C,\n 0x2C8100002C82,\n 0x2C8300002C84,\n 0x2C8500002C86,\n 0x2C8700002C88,\n 0x2C8900002C8A,\n 0x2C8B00002C8C,\n 0x2C8D00002C8E,\n 0x2C8F00002C90,\n 0x2C9100002C92,\n 0x2C9300002C94,\n 0x2C9500002C96,\n 0x2C9700002C98,\n 0x2C9900002C9A,\n 0x2C9B00002C9C,\n 0x2C9D00002C9E,\n 0x2C9F00002CA0,\n 0x2CA100002CA2,\n 0x2CA300002CA4,\n 0x2CA500002CA6,\n 0x2CA700002CA8,\n 0x2CA900002CAA,\n 0x2CAB00002CAC,\n 0x2CAD00002CAE,\n 0x2CAF00002CB0,\n 0x2CB100002CB2,\n 0x2CB300002CB4,\n 0x2CB500002CB6,\n 0x2CB700002CB8,\n 0x2CB900002CBA,\n 0x2CBB00002CBC,\n 0x2CBD00002CBE,\n 0x2CBF00002CC0,\n 0x2CC100002CC2,\n 0x2CC300002CC4,\n 0x2CC500002CC6,\n 
0x2CC700002CC8,\n 0x2CC900002CCA,\n 0x2CCB00002CCC,\n 0x2CCD00002CCE,\n 0x2CCF00002CD0,\n 0x2CD100002CD2,\n 0x2CD300002CD4,\n 0x2CD500002CD6,\n 0x2CD700002CD8,\n 0x2CD900002CDA,\n 0x2CDB00002CDC,\n 0x2CDD00002CDE,\n 0x2CDF00002CE0,\n 0x2CE100002CE2,\n 0x2CE300002CE5,\n 0x2CEC00002CED,\n 0x2CEE00002CF2,\n 0x2CF300002CF4,\n 0x2D0000002D26,\n 0x2D2700002D28,\n 0x2D2D00002D2E,\n 0x2D3000002D68,\n 0x2D7F00002D97,\n 0x2DA000002DA7,\n 0x2DA800002DAF,\n 0x2DB000002DB7,\n 0x2DB800002DBF,\n 0x2DC000002DC7,\n 0x2DC800002DCF,\n 0x2DD000002DD7,\n 0x2DD800002DDF,\n 0x2DE000002E00,\n 0x2E2F00002E30,\n 0x300500003008,\n 0x302A0000302E,\n 0x303C0000303D,\n 0x304100003097,\n 0x30990000309B,\n 0x309D0000309F,\n 0x30A1000030FB,\n 0x30FC000030FF,\n 0x310500003130,\n 0x31A0000031C0,\n 0x31F000003200,\n 0x340000004DC0,\n 0x4E000000A48D,\n 0xA4D00000A4FE,\n 0xA5000000A60D,\n 0xA6100000A62C,\n 0xA6410000A642,\n 0xA6430000A644,\n 0xA6450000A646,\n 0xA6470000A648,\n 0xA6490000A64A,\n 0xA64B0000A64C,\n 0xA64D0000A64E,\n 0xA64F0000A650,\n 0xA6510000A652,\n 0xA6530000A654,\n 0xA6550000A656,\n 0xA6570000A658,\n 0xA6590000A65A,\n 0xA65B0000A65C,\n 0xA65D0000A65E,\n 0xA65F0000A660,\n 0xA6610000A662,\n 0xA6630000A664,\n 0xA6650000A666,\n 0xA6670000A668,\n 0xA6690000A66A,\n 0xA66B0000A66C,\n 0xA66D0000A670,\n 0xA6740000A67E,\n 0xA67F0000A680,\n 0xA6810000A682,\n 0xA6830000A684,\n 0xA6850000A686,\n 0xA6870000A688,\n 0xA6890000A68A,\n 0xA68B0000A68C,\n 0xA68D0000A68E,\n 0xA68F0000A690,\n 0xA6910000A692,\n 0xA6930000A694,\n 0xA6950000A696,\n 0xA6970000A698,\n 0xA6990000A69A,\n 0xA69B0000A69C,\n 0xA69E0000A6E6,\n 0xA6F00000A6F2,\n 0xA7170000A720,\n 0xA7230000A724,\n 0xA7250000A726,\n 0xA7270000A728,\n 0xA7290000A72A,\n 0xA72B0000A72C,\n 0xA72D0000A72E,\n 0xA72F0000A732,\n 0xA7330000A734,\n 0xA7350000A736,\n 0xA7370000A738,\n 0xA7390000A73A,\n 0xA73B0000A73C,\n 0xA73D0000A73E,\n 0xA73F0000A740,\n 0xA7410000A742,\n 0xA7430000A744,\n 0xA7450000A746,\n 0xA7470000A748,\n 0xA7490000A74A,\n 0xA74B0000A74C,\n 
0xA74D0000A74E,\n 0xA74F0000A750,\n 0xA7510000A752,\n 0xA7530000A754,\n 0xA7550000A756,\n 0xA7570000A758,\n 0xA7590000A75A,\n 0xA75B0000A75C,\n 0xA75D0000A75E,\n 0xA75F0000A760,\n 0xA7610000A762,\n 0xA7630000A764,\n 0xA7650000A766,\n 0xA7670000A768,\n 0xA7690000A76A,\n 0xA76B0000A76C,\n 0xA76D0000A76E,\n 0xA76F0000A770,\n 0xA7710000A779,\n 0xA77A0000A77B,\n 0xA77C0000A77D,\n 0xA77F0000A780,\n 0xA7810000A782,\n 0xA7830000A784,\n 0xA7850000A786,\n 0xA7870000A789,\n 0xA78C0000A78D,\n 0xA78E0000A790,\n 0xA7910000A792,\n 0xA7930000A796,\n 0xA7970000A798,\n 0xA7990000A79A,\n 0xA79B0000A79C,\n 0xA79D0000A79E,\n 0xA79F0000A7A0,\n 0xA7A10000A7A2,\n 0xA7A30000A7A4,\n 0xA7A50000A7A6,\n 0xA7A70000A7A8,\n 0xA7A90000A7AA,\n 0xA7AF0000A7B0,\n 0xA7B50000A7B6,\n 0xA7B70000A7B8,\n 0xA7B90000A7BA,\n 0xA7BB0000A7BC,\n 0xA7BD0000A7BE,\n 0xA7BF0000A7C0,\n 0xA7C10000A7C2,\n 0xA7C30000A7C4,\n 0xA7C80000A7C9,\n 0xA7CA0000A7CB,\n 0xA7D10000A7D2,\n 0xA7D30000A7D4,\n 0xA7D50000A7D6,\n 0xA7D70000A7D8,\n 0xA7D90000A7DA,\n 0xA7F60000A7F8,\n 0xA7FA0000A828,\n 0xA82C0000A82D,\n 0xA8400000A874,\n 0xA8800000A8C6,\n 0xA8D00000A8DA,\n 0xA8E00000A8F8,\n 0xA8FB0000A8FC,\n 0xA8FD0000A92E,\n 0xA9300000A954,\n 0xA9800000A9C1,\n 0xA9CF0000A9DA,\n 0xA9E00000A9FF,\n 0xAA000000AA37,\n 0xAA400000AA4E,\n 0xAA500000AA5A,\n 0xAA600000AA77,\n 0xAA7A0000AAC3,\n 0xAADB0000AADE,\n 0xAAE00000AAF0,\n 0xAAF20000AAF7,\n 0xAB010000AB07,\n 0xAB090000AB0F,\n 0xAB110000AB17,\n 0xAB200000AB27,\n 0xAB280000AB2F,\n 0xAB300000AB5B,\n 0xAB600000AB69,\n 0xABC00000ABEB,\n 0xABEC0000ABEE,\n 0xABF00000ABFA,\n 0xAC000000D7A4,\n 0xFA0E0000FA10,\n 0xFA110000FA12,\n 0xFA130000FA15,\n 0xFA1F0000FA20,\n 0xFA210000FA22,\n 0xFA230000FA25,\n 0xFA270000FA2A,\n 0xFB1E0000FB1F,\n 0xFE200000FE30,\n 0xFE730000FE74,\n 0x100000001000C,\n 0x1000D00010027,\n 0x100280001003B,\n 0x1003C0001003E,\n 0x1003F0001004E,\n 0x100500001005E,\n 0x10080000100FB,\n 0x101FD000101FE,\n 0x102800001029D,\n 0x102A0000102D1,\n 0x102E0000102E1,\n 0x1030000010320,\n 
0x1032D00010341,\n 0x103420001034A,\n 0x103500001037B,\n 0x103800001039E,\n 0x103A0000103C4,\n 0x103C8000103D0,\n 0x104280001049E,\n 0x104A0000104AA,\n 0x104D8000104FC,\n 0x1050000010528,\n 0x1053000010564,\n 0x10597000105A2,\n 0x105A3000105B2,\n 0x105B3000105BA,\n 0x105BB000105BD,\n 0x1060000010737,\n 0x1074000010756,\n 0x1076000010768,\n 0x1078000010781,\n 0x1080000010806,\n 0x1080800010809,\n 0x1080A00010836,\n 0x1083700010839,\n 0x1083C0001083D,\n 0x1083F00010856,\n 0x1086000010877,\n 0x108800001089F,\n 0x108E0000108F3,\n 0x108F4000108F6,\n 0x1090000010916,\n 0x109200001093A,\n 0x10980000109B8,\n 0x109BE000109C0,\n 0x10A0000010A04,\n 0x10A0500010A07,\n 0x10A0C00010A14,\n 0x10A1500010A18,\n 0x10A1900010A36,\n 0x10A3800010A3B,\n 0x10A3F00010A40,\n 0x10A6000010A7D,\n 0x10A8000010A9D,\n 0x10AC000010AC8,\n 0x10AC900010AE7,\n 0x10B0000010B36,\n 0x10B4000010B56,\n 0x10B6000010B73,\n 0x10B8000010B92,\n 0x10C0000010C49,\n 0x10CC000010CF3,\n 0x10D0000010D28,\n 0x10D3000010D3A,\n 0x10E8000010EAA,\n 0x10EAB00010EAD,\n 0x10EB000010EB2,\n 0x10EFD00010F1D,\n 0x10F2700010F28,\n 0x10F3000010F51,\n 0x10F7000010F86,\n 0x10FB000010FC5,\n 0x10FE000010FF7,\n 0x1100000011047,\n 0x1106600011076,\n 0x1107F000110BB,\n 0x110C2000110C3,\n 0x110D0000110E9,\n 0x110F0000110FA,\n 0x1110000011135,\n 0x1113600011140,\n 0x1114400011148,\n 0x1115000011174,\n 0x1117600011177,\n 0x11180000111C5,\n 0x111C9000111CD,\n 0x111CE000111DB,\n 0x111DC000111DD,\n 0x1120000011212,\n 0x1121300011238,\n 0x1123E00011242,\n 0x1128000011287,\n 0x1128800011289,\n 0x1128A0001128E,\n 0x1128F0001129E,\n 0x1129F000112A9,\n 0x112B0000112EB,\n 0x112F0000112FA,\n 0x1130000011304,\n 0x113050001130D,\n 0x1130F00011311,\n 0x1131300011329,\n 0x1132A00011331,\n 0x1133200011334,\n 0x113350001133A,\n 0x1133B00011345,\n 0x1134700011349,\n 0x1134B0001134E,\n 0x1135000011351,\n 0x1135700011358,\n 0x1135D00011364,\n 0x113660001136D,\n 0x1137000011375,\n 0x114000001144B,\n 0x114500001145A,\n 0x1145E00011462,\n 0x11480000114C6,\n 
0x114C7000114C8,\n 0x114D0000114DA,\n 0x11580000115B6,\n 0x115B8000115C1,\n 0x115D8000115DE,\n 0x1160000011641,\n 0x1164400011645,\n 0x116500001165A,\n 0x11680000116B9,\n 0x116C0000116CA,\n 0x117000001171B,\n 0x1171D0001172C,\n 0x117300001173A,\n 0x1174000011747,\n 0x118000001183B,\n 0x118C0000118EA,\n 0x118FF00011907,\n 0x119090001190A,\n 0x1190C00011914,\n 0x1191500011917,\n 0x1191800011936,\n 0x1193700011939,\n 0x1193B00011944,\n 0x119500001195A,\n 0x119A0000119A8,\n 0x119AA000119D8,\n 0x119DA000119E2,\n 0x119E3000119E5,\n 0x11A0000011A3F,\n 0x11A4700011A48,\n 0x11A5000011A9A,\n 0x11A9D00011A9E,\n 0x11AB000011AF9,\n 0x11C0000011C09,\n 0x11C0A00011C37,\n 0x11C3800011C41,\n 0x11C5000011C5A,\n 0x11C7200011C90,\n 0x11C9200011CA8,\n 0x11CA900011CB7,\n 0x11D0000011D07,\n 0x11D0800011D0A,\n 0x11D0B00011D37,\n 0x11D3A00011D3B,\n 0x11D3C00011D3E,\n 0x11D3F00011D48,\n 0x11D5000011D5A,\n 0x11D6000011D66,\n 0x11D6700011D69,\n 0x11D6A00011D8F,\n 0x11D9000011D92,\n 0x11D9300011D99,\n 0x11DA000011DAA,\n 0x11EE000011EF7,\n 0x11F0000011F11,\n 0x11F1200011F3B,\n 0x11F3E00011F43,\n 0x11F5000011F5A,\n 0x11FB000011FB1,\n 0x120000001239A,\n 0x1248000012544,\n 0x12F9000012FF1,\n 0x1300000013430,\n 0x1344000013456,\n 0x1440000014647,\n 0x1680000016A39,\n 0x16A4000016A5F,\n 0x16A6000016A6A,\n 0x16A7000016ABF,\n 0x16AC000016ACA,\n 0x16AD000016AEE,\n 0x16AF000016AF5,\n 0x16B0000016B37,\n 0x16B4000016B44,\n 0x16B5000016B5A,\n 0x16B6300016B78,\n 0x16B7D00016B90,\n 0x16E6000016E80,\n 0x16F0000016F4B,\n 0x16F4F00016F88,\n 0x16F8F00016FA0,\n 0x16FE000016FE2,\n 0x16FE300016FE5,\n 0x16FF000016FF2,\n 0x17000000187F8,\n 0x1880000018CD6,\n 0x18D0000018D09,\n 0x1AFF00001AFF4,\n 0x1AFF50001AFFC,\n 0x1AFFD0001AFFF,\n 0x1B0000001B123,\n 0x1B1320001B133,\n 0x1B1500001B153,\n 0x1B1550001B156,\n 0x1B1640001B168,\n 0x1B1700001B2FC,\n 0x1BC000001BC6B,\n 0x1BC700001BC7D,\n 0x1BC800001BC89,\n 0x1BC900001BC9A,\n 0x1BC9D0001BC9F,\n 0x1CF000001CF2E,\n 0x1CF300001CF47,\n 0x1DA000001DA37,\n 0x1DA3B0001DA6D,\n 
0x1DA750001DA76,\n 0x1DA840001DA85,\n 0x1DA9B0001DAA0,\n 0x1DAA10001DAB0,\n 0x1DF000001DF1F,\n 0x1DF250001DF2B,\n 0x1E0000001E007,\n 0x1E0080001E019,\n 0x1E01B0001E022,\n 0x1E0230001E025,\n 0x1E0260001E02B,\n 0x1E08F0001E090,\n 0x1E1000001E12D,\n 0x1E1300001E13E,\n 0x1E1400001E14A,\n 0x1E14E0001E14F,\n 0x1E2900001E2AF,\n 0x1E2C00001E2FA,\n 0x1E4D00001E4FA,\n 0x1E7E00001E7E7,\n 0x1E7E80001E7EC,\n 0x1E7ED0001E7EF,\n 0x1E7F00001E7FF,\n 0x1E8000001E8C5,\n 0x1E8D00001E8D7,\n 0x1E9220001E94C,\n 0x1E9500001E95A,\n 0x200000002A6E0,\n 0x2A7000002B73A,\n 0x2B7400002B81E,\n 0x2B8200002CEA2,\n 0x2CEB00002EBE1,\n 0x2EBF00002EE5E,\n 0x300000003134B,\n 0x31350000323B0,\n ),\n "CONTEXTJ": (0x200C0000200E,),\n "CONTEXTO": (\n 0xB7000000B8,\n 0x37500000376,\n 0x5F3000005F5,\n 0x6600000066A,\n 0x6F0000006FA,\n 0x30FB000030FC,\n ),\n}\n
.venv\Lib\site-packages\pip\_vendor\idna\idnadata.py
idnadata.py
Python
78,306
0.6
0
0.000236
node-utils
354
2024-05-13T07:41:37.252724
MIT
false
6299ac3c46a725d3d2f781b45bc86823
"""\nGiven a list of integers, made up of (hopefully) a small number of long runs\nof consecutive integers, compute a representation of the form\n((start1, end1), (start2, end2) ...). Then answer the question "was x present\nin the original list?" in time O(log(# runs)).\n"""\n\nimport bisect\nfrom typing import List, Tuple\n\n\ndef intranges_from_list(list_: List[int]) -> Tuple[int, ...]:\n """Represent a list of integers as a sequence of ranges:\n ((start_0, end_0), (start_1, end_1), ...), such that the original\n integers are exactly those x such that start_i <= x < end_i for some i.\n\n Ranges are encoded as single integers (start << 32 | end), not as tuples.\n """\n\n sorted_list = sorted(list_)\n ranges = []\n last_write = -1\n for i in range(len(sorted_list)):\n if i + 1 < len(sorted_list):\n if sorted_list[i] == sorted_list[i + 1] - 1:\n continue\n current_range = sorted_list[last_write + 1 : i + 1]\n ranges.append(_encode_range(current_range[0], current_range[-1] + 1))\n last_write = i\n\n return tuple(ranges)\n\n\ndef _encode_range(start: int, end: int) -> int:\n return (start << 32) | end\n\n\ndef _decode_range(r: int) -> Tuple[int, int]:\n return (r >> 32), (r & ((1 << 32) - 1))\n\n\ndef intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool:\n """Determine if `int_` falls into one of the ranges in `ranges`."""\n tuple_ = _encode_range(int_, 0)\n pos = bisect.bisect_left(ranges, tuple_)\n # we could be immediately ahead of a tuple (start, end)\n # with start < int_ <= end\n if pos > 0:\n left, right = _decode_range(ranges[pos - 1])\n if left <= int_ < right:\n return True\n # or we could be immediately behind a tuple (int_, end)\n if pos < len(ranges):\n left, _ = _decode_range(ranges[pos])\n if left == int_:\n return True\n return False\n
.venv\Lib\site-packages\pip\_vendor\idna\intranges.py
intranges.py
Python
1,898
0.95
0.22807
0.066667
awesome-app
653
2024-10-20T22:59:42.705212
GPL-3.0
false
1b295d1420a220f7472fbe79ec1eb0c1
__version__ = "3.10"\n
.venv\Lib\site-packages\pip\_vendor\idna\package_data.py
package_data.py
Python
21
0.5
0
0
react-lib
350
2023-11-19T01:04:17.293489
MIT
false
c3dfa00426f33a0ab9a2309e1bab1dc9
from .core import (\n IDNABidiError,\n IDNAError,\n InvalidCodepoint,\n InvalidCodepointContext,\n alabel,\n check_bidi,\n check_hyphen_ok,\n check_initial_combiner,\n check_label,\n check_nfc,\n decode,\n encode,\n ulabel,\n uts46_remap,\n valid_contextj,\n valid_contexto,\n valid_label_length,\n valid_string_length,\n)\nfrom .intranges import intranges_contain\nfrom .package_data import __version__\n\n__all__ = [\n "__version__",\n "IDNABidiError",\n "IDNAError",\n "InvalidCodepoint",\n "InvalidCodepointContext",\n "alabel",\n "check_bidi",\n "check_hyphen_ok",\n "check_initial_combiner",\n "check_label",\n "check_nfc",\n "decode",\n "encode",\n "intranges_contain",\n "ulabel",\n "uts46_remap",\n "valid_contextj",\n "valid_contexto",\n "valid_label_length",\n "valid_string_length",\n]\n
.venv\Lib\site-packages\pip\_vendor\idna\__init__.py
__init__.py
Python
868
0.85
0
0
react-lib
787
2023-08-18T21:57:08.621561
BSD-3-Clause
false
813a3685e48b6dc4359acf6ede226d5f
\n\n
.venv\Lib\site-packages\pip\_vendor\idna\__pycache__\codec.cpython-313.pyc
codec.cpython-313.pyc
Other
5,313
0.95
0
0
awesome-app
708
2024-03-17T04:58:39.905725
MIT
false
e443bdd76c52b619650d069eeb336092
\n\n
.venv\Lib\site-packages\pip\_vendor\idna\__pycache__\compat.cpython-313.pyc
compat.cpython-313.pyc
Other
899
0.7
0
0
node-utils
574
2023-09-04T10:19:51.910829
MIT
false
bd3b7c728e45c1474e8a23a4c6c6565d
\n\n
.venv\Lib\site-packages\pip\_vendor\idna\__pycache__\core.cpython-313.pyc
core.cpython-313.pyc
Other
16,935
0.95
0.027586
0
node-utils
923
2025-05-21T19:33:12.903579
GPL-3.0
false
0561404568fa4e8429d873ad6ef3affc
\n\n
.venv\Lib\site-packages\pip\_vendor\idna\__pycache__\idnadata.cpython-313.pyc
idnadata.cpython-313.pyc
Other
99,479
0.6
0.005478
0
python-kit
921
2025-02-03T22:36:22.007084
GPL-3.0
false
5417ffd6ddc88939afe79117951a9b73
\n\n
.venv\Lib\site-packages\pip\_vendor\idna\__pycache__\intranges.cpython-313.pyc
intranges.cpython-313.pyc
Other
2,612
0.8
0.055556
0.032258
awesome-app
818
2025-06-01T06:16:39.024722
BSD-3-Clause
false
1fbe4c376b5fe26c6652e672e57dec7c
\n\n
.venv\Lib\site-packages\pip\_vendor\idna\__pycache__\package_data.cpython-313.pyc
package_data.cpython-313.pyc
Other
220
0.7
0
0
vue-tools
305
2025-03-15T13:29:50.421449
BSD-3-Clause
false
3c67f59f17289d5abe7c26179e2fd7b1
\n\n
.venv\Lib\site-packages\pip\_vendor\idna\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
889
0.7
0
0
awesome-app
689
2024-01-01T18:27:25.967647
BSD-3-Clause
false
204d9d2593a11b5f0d7ef120433eac23
class UnpackException(Exception):\n """Base class for some exceptions raised while unpacking.\n\n NOTE: unpack may raise exception other than subclass of\n UnpackException. If you want to catch all error, catch\n Exception instead.\n """\n\n\nclass BufferFull(UnpackException):\n pass\n\n\nclass OutOfData(UnpackException):\n pass\n\n\nclass FormatError(ValueError, UnpackException):\n """Invalid msgpack format"""\n\n\nclass StackError(ValueError, UnpackException):\n """Too nested"""\n\n\n# Deprecated. Use ValueError instead\nUnpackValueError = ValueError\n\n\nclass ExtraData(UnpackValueError):\n """ExtraData is raised when there is trailing data.\n\n This exception is raised while only one-shot (not streaming)\n unpack.\n """\n\n def __init__(self, unpacked, extra):\n self.unpacked = unpacked\n self.extra = extra\n\n def __str__(self):\n return "unpack(b) received extra data."\n\n\n# Deprecated. Use Exception instead to catch all exception during packing.\nPackException = Exception\nPackValueError = ValueError\nPackOverflowError = OverflowError\n
.venv\Lib\site-packages\pip\_vendor\msgpack\exceptions.py
exceptions.py
Python
1,081
0.95
0.3125
0.066667
node-utils
491
2024-09-25T21:13:46.515584
GPL-3.0
false
741a33042796dcc6a1c101898f38e87e
import datetime\nimport struct\nfrom collections import namedtuple\n\n\nclass ExtType(namedtuple("ExtType", "code data")):\n """ExtType represents ext type in msgpack."""\n\n def __new__(cls, code, data):\n if not isinstance(code, int):\n raise TypeError("code must be int")\n if not isinstance(data, bytes):\n raise TypeError("data must be bytes")\n if not 0 <= code <= 127:\n raise ValueError("code must be 0~127")\n return super().__new__(cls, code, data)\n\n\nclass Timestamp:\n """Timestamp represents the Timestamp extension type in msgpack.\n\n When built with Cython, msgpack uses C methods to pack and unpack `Timestamp`.\n When using pure-Python msgpack, :func:`to_bytes` and :func:`from_bytes` are used to pack and\n unpack `Timestamp`.\n\n This class is immutable: Do not override seconds and nanoseconds.\n """\n\n __slots__ = ["seconds", "nanoseconds"]\n\n def __init__(self, seconds, nanoseconds=0):\n """Initialize a Timestamp object.\n\n :param int seconds:\n Number of seconds since the UNIX epoch (00:00:00 UTC Jan 1 1970, minus leap seconds).\n May be negative.\n\n :param int nanoseconds:\n Number of nanoseconds to add to `seconds` to get fractional time.\n Maximum is 999_999_999. Default is 0.\n\n Note: Negative times (before the UNIX epoch) are represented as neg. seconds + pos. 
ns.\n """\n if not isinstance(seconds, int):\n raise TypeError("seconds must be an integer")\n if not isinstance(nanoseconds, int):\n raise TypeError("nanoseconds must be an integer")\n if not (0 <= nanoseconds < 10**9):\n raise ValueError("nanoseconds must be a non-negative integer less than 999999999.")\n self.seconds = seconds\n self.nanoseconds = nanoseconds\n\n def __repr__(self):\n """String representation of Timestamp."""\n return f"Timestamp(seconds={self.seconds}, nanoseconds={self.nanoseconds})"\n\n def __eq__(self, other):\n """Check for equality with another Timestamp object"""\n if type(other) is self.__class__:\n return self.seconds == other.seconds and self.nanoseconds == other.nanoseconds\n return False\n\n def __ne__(self, other):\n """not-equals method (see :func:`__eq__()`)"""\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.seconds, self.nanoseconds))\n\n @staticmethod\n def from_bytes(b):\n """Unpack bytes into a `Timestamp` object.\n\n Used for pure-Python msgpack unpacking.\n\n :param b: Payload from msgpack ext message with code -1\n :type b: bytes\n\n :returns: Timestamp object unpacked from msgpack ext payload\n :rtype: Timestamp\n """\n if len(b) == 4:\n seconds = struct.unpack("!L", b)[0]\n nanoseconds = 0\n elif len(b) == 8:\n data64 = struct.unpack("!Q", b)[0]\n seconds = data64 & 0x00000003FFFFFFFF\n nanoseconds = data64 >> 34\n elif len(b) == 12:\n nanoseconds, seconds = struct.unpack("!Iq", b)\n else:\n raise ValueError(\n "Timestamp type can only be created from 32, 64, or 96-bit byte objects"\n )\n return Timestamp(seconds, nanoseconds)\n\n def to_bytes(self):\n """Pack this Timestamp object into bytes.\n\n Used for pure-Python msgpack packing.\n\n :returns data: Payload for EXT message with code -1 (timestamp type)\n :rtype: bytes\n """\n if (self.seconds >> 34) == 0: # seconds is non-negative and fits in 34 bits\n data64 = self.nanoseconds << 34 | self.seconds\n if data64 & 0xFFFFFFFF00000000 == 0:\n # 
nanoseconds is zero and seconds < 2**32, so timestamp 32\n data = struct.pack("!L", data64)\n else:\n # timestamp 64\n data = struct.pack("!Q", data64)\n else:\n # timestamp 96\n data = struct.pack("!Iq", self.nanoseconds, self.seconds)\n return data\n\n @staticmethod\n def from_unix(unix_sec):\n """Create a Timestamp from posix timestamp in seconds.\n\n :param unix_float: Posix timestamp in seconds.\n :type unix_float: int or float\n """\n seconds = int(unix_sec // 1)\n nanoseconds = int((unix_sec % 1) * 10**9)\n return Timestamp(seconds, nanoseconds)\n\n def to_unix(self):\n """Get the timestamp as a floating-point value.\n\n :returns: posix timestamp\n :rtype: float\n """\n return self.seconds + self.nanoseconds / 1e9\n\n @staticmethod\n def from_unix_nano(unix_ns):\n """Create a Timestamp from posix timestamp in nanoseconds.\n\n :param int unix_ns: Posix timestamp in nanoseconds.\n :rtype: Timestamp\n """\n return Timestamp(*divmod(unix_ns, 10**9))\n\n def to_unix_nano(self):\n """Get the timestamp as a unixtime in nanoseconds.\n\n :returns: posix timestamp in nanoseconds\n :rtype: int\n """\n return self.seconds * 10**9 + self.nanoseconds\n\n def to_datetime(self):\n """Get the timestamp as a UTC datetime.\n\n :rtype: `datetime.datetime`\n """\n utc = datetime.timezone.utc\n return datetime.datetime.fromtimestamp(0, utc) + datetime.timedelta(\n seconds=self.seconds, microseconds=self.nanoseconds // 1000\n )\n\n @staticmethod\n def from_datetime(dt):\n """Create a Timestamp from datetime with tzinfo.\n\n :rtype: Timestamp\n """\n return Timestamp(seconds=int(dt.timestamp()), nanoseconds=dt.microsecond * 1000)\n
.venv\Lib\site-packages\pip\_vendor\msgpack\ext.py
ext.py
Python
5,726
0.95
0.182353
0.022222
awesome-app
809
2024-09-24T04:53:27.126391
GPL-3.0
false
99c33251d41a5a3270188f3f6259da30
"""Fallback pure Python implementation of msgpack"""\n\nimport struct\nimport sys\nfrom datetime import datetime as _DateTime\n\nif hasattr(sys, "pypy_version_info"):\n from __pypy__ import newlist_hint\n from __pypy__.builders import BytesBuilder\n\n _USING_STRINGBUILDER = True\n\n class BytesIO:\n def __init__(self, s=b""):\n if s:\n self.builder = BytesBuilder(len(s))\n self.builder.append(s)\n else:\n self.builder = BytesBuilder()\n\n def write(self, s):\n if isinstance(s, memoryview):\n s = s.tobytes()\n elif isinstance(s, bytearray):\n s = bytes(s)\n self.builder.append(s)\n\n def getvalue(self):\n return self.builder.build()\n\nelse:\n from io import BytesIO\n\n _USING_STRINGBUILDER = False\n\n def newlist_hint(size):\n return []\n\n\nfrom .exceptions import BufferFull, ExtraData, FormatError, OutOfData, StackError\nfrom .ext import ExtType, Timestamp\n\nEX_SKIP = 0\nEX_CONSTRUCT = 1\nEX_READ_ARRAY_HEADER = 2\nEX_READ_MAP_HEADER = 3\n\nTYPE_IMMEDIATE = 0\nTYPE_ARRAY = 1\nTYPE_MAP = 2\nTYPE_RAW = 3\nTYPE_BIN = 4\nTYPE_EXT = 5\n\nDEFAULT_RECURSE_LIMIT = 511\n\n\ndef _check_type_strict(obj, t, type=type, tuple=tuple):\n if type(t) is tuple:\n return type(obj) in t\n else:\n return type(obj) is t\n\n\ndef _get_data_from_buffer(obj):\n view = memoryview(obj)\n if view.itemsize != 1:\n raise ValueError("cannot unpack from multi-byte object")\n return view\n\n\ndef unpackb(packed, **kwargs):\n """\n Unpack an object from `packed`.\n\n Raises ``ExtraData`` when *packed* contains extra bytes.\n Raises ``ValueError`` when *packed* is incomplete.\n Raises ``FormatError`` when *packed* is not valid msgpack.\n Raises ``StackError`` when *packed* contains too nested.\n Other exceptions can be raised during unpacking.\n\n See :class:`Unpacker` for options.\n """\n unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs)\n unpacker.feed(packed)\n try:\n ret = unpacker._unpack()\n except OutOfData:\n raise ValueError("Unpack failed: incomplete input")\n except 
RecursionError:\n raise StackError\n if unpacker._got_extradata():\n raise ExtraData(ret, unpacker._get_extradata())\n return ret\n\n\n_NO_FORMAT_USED = ""\n_MSGPACK_HEADERS = {\n 0xC4: (1, _NO_FORMAT_USED, TYPE_BIN),\n 0xC5: (2, ">H", TYPE_BIN),\n 0xC6: (4, ">I", TYPE_BIN),\n 0xC7: (2, "Bb", TYPE_EXT),\n 0xC8: (3, ">Hb", TYPE_EXT),\n 0xC9: (5, ">Ib", TYPE_EXT),\n 0xCA: (4, ">f"),\n 0xCB: (8, ">d"),\n 0xCC: (1, _NO_FORMAT_USED),\n 0xCD: (2, ">H"),\n 0xCE: (4, ">I"),\n 0xCF: (8, ">Q"),\n 0xD0: (1, "b"),\n 0xD1: (2, ">h"),\n 0xD2: (4, ">i"),\n 0xD3: (8, ">q"),\n 0xD4: (1, "b1s", TYPE_EXT),\n 0xD5: (2, "b2s", TYPE_EXT),\n 0xD6: (4, "b4s", TYPE_EXT),\n 0xD7: (8, "b8s", TYPE_EXT),\n 0xD8: (16, "b16s", TYPE_EXT),\n 0xD9: (1, _NO_FORMAT_USED, TYPE_RAW),\n 0xDA: (2, ">H", TYPE_RAW),\n 0xDB: (4, ">I", TYPE_RAW),\n 0xDC: (2, ">H", TYPE_ARRAY),\n 0xDD: (4, ">I", TYPE_ARRAY),\n 0xDE: (2, ">H", TYPE_MAP),\n 0xDF: (4, ">I", TYPE_MAP),\n}\n\n\nclass Unpacker:\n """Streaming unpacker.\n\n Arguments:\n\n :param file_like:\n File-like object having `.read(n)` method.\n If specified, unpacker reads serialized data from it and `.feed()` is not usable.\n\n :param int read_size:\n Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)\n\n :param bool use_list:\n If true, unpack msgpack array to Python list.\n Otherwise, unpack to Python tuple. 
(default: True)\n\n :param bool raw:\n If true, unpack msgpack raw to Python bytes.\n Otherwise, unpack to Python str by decoding with UTF-8 encoding (default).\n\n :param int timestamp:\n Control how timestamp type is unpacked:\n\n 0 - Timestamp\n 1 - float (Seconds from the EPOCH)\n 2 - int (Nanoseconds from the EPOCH)\n 3 - datetime.datetime (UTC).\n\n :param bool strict_map_key:\n If true (default), only str or bytes are accepted for map (dict) keys.\n\n :param object_hook:\n When specified, it should be callable.\n Unpacker calls it with a dict argument after unpacking msgpack map.\n (See also simplejson)\n\n :param object_pairs_hook:\n When specified, it should be callable.\n Unpacker calls it with a list of key-value pairs after unpacking msgpack map.\n (See also simplejson)\n\n :param str unicode_errors:\n The error handler for decoding unicode. (default: 'strict')\n This option should be used only when you have msgpack data which\n contains invalid UTF-8 string.\n\n :param int max_buffer_size:\n Limits size of data waiting unpacked. 0 means 2**32-1.\n The default value is 100*1024*1024 (100MiB).\n Raises `BufferFull` exception when it is insufficient.\n You should set this parameter when unpacking data from untrusted source.\n\n :param int max_str_len:\n Deprecated, use *max_buffer_size* instead.\n Limits max length of str. (default: max_buffer_size)\n\n :param int max_bin_len:\n Deprecated, use *max_buffer_size* instead.\n Limits max length of bin. (default: max_buffer_size)\n\n :param int max_array_len:\n Limits max length of array.\n (default: max_buffer_size)\n\n :param int max_map_len:\n Limits max length of map.\n (default: max_buffer_size//2)\n\n :param int max_ext_len:\n Deprecated, use *max_buffer_size* instead.\n Limits max size of ext type. 
(default: max_buffer_size)\n\n Example of streaming deserialize from file-like object::\n\n unpacker = Unpacker(file_like)\n for o in unpacker:\n process(o)\n\n Example of streaming deserialize from socket::\n\n unpacker = Unpacker()\n while True:\n buf = sock.recv(1024**2)\n if not buf:\n break\n unpacker.feed(buf)\n for o in unpacker:\n process(o)\n\n Raises ``ExtraData`` when *packed* contains extra bytes.\n Raises ``OutOfData`` when *packed* is incomplete.\n Raises ``FormatError`` when *packed* is not valid msgpack.\n Raises ``StackError`` when *packed* contains too nested.\n Other exceptions can be raised during unpacking.\n """\n\n def __init__(\n self,\n file_like=None,\n *,\n read_size=0,\n use_list=True,\n raw=False,\n timestamp=0,\n strict_map_key=True,\n object_hook=None,\n object_pairs_hook=None,\n list_hook=None,\n unicode_errors=None,\n max_buffer_size=100 * 1024 * 1024,\n ext_hook=ExtType,\n max_str_len=-1,\n max_bin_len=-1,\n max_array_len=-1,\n max_map_len=-1,\n max_ext_len=-1,\n ):\n if unicode_errors is None:\n unicode_errors = "strict"\n\n if file_like is None:\n self._feeding = True\n else:\n if not callable(file_like.read):\n raise TypeError("`file_like.read` must be callable")\n self.file_like = file_like\n self._feeding = False\n\n #: array of bytes fed.\n self._buffer = bytearray()\n #: Which position we currently reads\n self._buff_i = 0\n\n # When Unpacker is used as an iterable, between the calls to next(),\n # the buffer is not "consumed" completely, for efficiency sake.\n # Instead, it is done sloppily. 
To make sure we raise BufferFull at\n # the correct moments, we have to keep track of how sloppy we were.\n # Furthermore, when the buffer is incomplete (that is: in the case\n # we raise an OutOfData) we need to rollback the buffer to the correct\n # state, which _buf_checkpoint records.\n self._buf_checkpoint = 0\n\n if not max_buffer_size:\n max_buffer_size = 2**31 - 1\n if max_str_len == -1:\n max_str_len = max_buffer_size\n if max_bin_len == -1:\n max_bin_len = max_buffer_size\n if max_array_len == -1:\n max_array_len = max_buffer_size\n if max_map_len == -1:\n max_map_len = max_buffer_size // 2\n if max_ext_len == -1:\n max_ext_len = max_buffer_size\n\n self._max_buffer_size = max_buffer_size\n if read_size > self._max_buffer_size:\n raise ValueError("read_size must be smaller than max_buffer_size")\n self._read_size = read_size or min(self._max_buffer_size, 16 * 1024)\n self._raw = bool(raw)\n self._strict_map_key = bool(strict_map_key)\n self._unicode_errors = unicode_errors\n self._use_list = use_list\n if not (0 <= timestamp <= 3):\n raise ValueError("timestamp must be 0..3")\n self._timestamp = timestamp\n self._list_hook = list_hook\n self._object_hook = object_hook\n self._object_pairs_hook = object_pairs_hook\n self._ext_hook = ext_hook\n self._max_str_len = max_str_len\n self._max_bin_len = max_bin_len\n self._max_array_len = max_array_len\n self._max_map_len = max_map_len\n self._max_ext_len = max_ext_len\n self._stream_offset = 0\n\n if list_hook is not None and not callable(list_hook):\n raise TypeError("`list_hook` is not callable")\n if object_hook is not None and not callable(object_hook):\n raise TypeError("`object_hook` is not callable")\n if object_pairs_hook is not None and not callable(object_pairs_hook):\n raise TypeError("`object_pairs_hook` is not callable")\n if object_hook is not None and object_pairs_hook is not None:\n raise TypeError("object_pairs_hook and object_hook are mutually exclusive")\n if not callable(ext_hook):\n raise 
TypeError("`ext_hook` is not callable")\n\n def feed(self, next_bytes):\n assert self._feeding\n view = _get_data_from_buffer(next_bytes)\n if len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size:\n raise BufferFull\n\n # Strip buffer before checkpoint before reading file.\n if self._buf_checkpoint > 0:\n del self._buffer[: self._buf_checkpoint]\n self._buff_i -= self._buf_checkpoint\n self._buf_checkpoint = 0\n\n # Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython\n self._buffer.extend(view)\n view.release()\n\n def _consume(self):\n """Gets rid of the used parts of the buffer."""\n self._stream_offset += self._buff_i - self._buf_checkpoint\n self._buf_checkpoint = self._buff_i\n\n def _got_extradata(self):\n return self._buff_i < len(self._buffer)\n\n def _get_extradata(self):\n return self._buffer[self._buff_i :]\n\n def read_bytes(self, n):\n ret = self._read(n, raise_outofdata=False)\n self._consume()\n return ret\n\n def _read(self, n, raise_outofdata=True):\n # (int) -> bytearray\n self._reserve(n, raise_outofdata=raise_outofdata)\n i = self._buff_i\n ret = self._buffer[i : i + n]\n self._buff_i = i + len(ret)\n return ret\n\n def _reserve(self, n, raise_outofdata=True):\n remain_bytes = len(self._buffer) - self._buff_i - n\n\n # Fast path: buffer has n bytes already\n if remain_bytes >= 0:\n return\n\n if self._feeding:\n self._buff_i = self._buf_checkpoint\n raise OutOfData\n\n # Strip buffer before checkpoint before reading file.\n if self._buf_checkpoint > 0:\n del self._buffer[: self._buf_checkpoint]\n self._buff_i -= self._buf_checkpoint\n self._buf_checkpoint = 0\n\n # Read from file\n remain_bytes = -remain_bytes\n if remain_bytes + len(self._buffer) > self._max_buffer_size:\n raise BufferFull\n while remain_bytes > 0:\n to_read_bytes = max(self._read_size, remain_bytes)\n read_data = self.file_like.read(to_read_bytes)\n if not read_data:\n break\n assert isinstance(read_data, bytes)\n self._buffer += 
read_data\n remain_bytes -= len(read_data)\n\n if len(self._buffer) < n + self._buff_i and raise_outofdata:\n self._buff_i = 0 # rollback\n raise OutOfData\n\n def _read_header(self):\n typ = TYPE_IMMEDIATE\n n = 0\n obj = None\n self._reserve(1)\n b = self._buffer[self._buff_i]\n self._buff_i += 1\n if b & 0b10000000 == 0:\n obj = b\n elif b & 0b11100000 == 0b11100000:\n obj = -1 - (b ^ 0xFF)\n elif b & 0b11100000 == 0b10100000:\n n = b & 0b00011111\n typ = TYPE_RAW\n if n > self._max_str_len:\n raise ValueError(f"{n} exceeds max_str_len({self._max_str_len})")\n obj = self._read(n)\n elif b & 0b11110000 == 0b10010000:\n n = b & 0b00001111\n typ = TYPE_ARRAY\n if n > self._max_array_len:\n raise ValueError(f"{n} exceeds max_array_len({self._max_array_len})")\n elif b & 0b11110000 == 0b10000000:\n n = b & 0b00001111\n typ = TYPE_MAP\n if n > self._max_map_len:\n raise ValueError(f"{n} exceeds max_map_len({self._max_map_len})")\n elif b == 0xC0:\n obj = None\n elif b == 0xC2:\n obj = False\n elif b == 0xC3:\n obj = True\n elif 0xC4 <= b <= 0xC6:\n size, fmt, typ = _MSGPACK_HEADERS[b]\n self._reserve(size)\n if len(fmt) > 0:\n n = struct.unpack_from(fmt, self._buffer, self._buff_i)[0]\n else:\n n = self._buffer[self._buff_i]\n self._buff_i += size\n if n > self._max_bin_len:\n raise ValueError(f"{n} exceeds max_bin_len({self._max_bin_len})")\n obj = self._read(n)\n elif 0xC7 <= b <= 0xC9:\n size, fmt, typ = _MSGPACK_HEADERS[b]\n self._reserve(size)\n L, n = struct.unpack_from(fmt, self._buffer, self._buff_i)\n self._buff_i += size\n if L > self._max_ext_len:\n raise ValueError(f"{L} exceeds max_ext_len({self._max_ext_len})")\n obj = self._read(L)\n elif 0xCA <= b <= 0xD3:\n size, fmt = _MSGPACK_HEADERS[b]\n self._reserve(size)\n if len(fmt) > 0:\n obj = struct.unpack_from(fmt, self._buffer, self._buff_i)[0]\n else:\n obj = self._buffer[self._buff_i]\n self._buff_i += size\n elif 0xD4 <= b <= 0xD8:\n size, fmt, typ = _MSGPACK_HEADERS[b]\n if self._max_ext_len < size:\n 
raise ValueError(f"{size} exceeds max_ext_len({self._max_ext_len})")\n self._reserve(size + 1)\n n, obj = struct.unpack_from(fmt, self._buffer, self._buff_i)\n self._buff_i += size + 1\n elif 0xD9 <= b <= 0xDB:\n size, fmt, typ = _MSGPACK_HEADERS[b]\n self._reserve(size)\n if len(fmt) > 0:\n (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i)\n else:\n n = self._buffer[self._buff_i]\n self._buff_i += size\n if n > self._max_str_len:\n raise ValueError(f"{n} exceeds max_str_len({self._max_str_len})")\n obj = self._read(n)\n elif 0xDC <= b <= 0xDD:\n size, fmt, typ = _MSGPACK_HEADERS[b]\n self._reserve(size)\n (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i)\n self._buff_i += size\n if n > self._max_array_len:\n raise ValueError(f"{n} exceeds max_array_len({self._max_array_len})")\n elif 0xDE <= b <= 0xDF:\n size, fmt, typ = _MSGPACK_HEADERS[b]\n self._reserve(size)\n (n,) = struct.unpack_from(fmt, self._buffer, self._buff_i)\n self._buff_i += size\n if n > self._max_map_len:\n raise ValueError(f"{n} exceeds max_map_len({self._max_map_len})")\n else:\n raise FormatError("Unknown header: 0x%x" % b)\n return typ, n, obj\n\n def _unpack(self, execute=EX_CONSTRUCT):\n typ, n, obj = self._read_header()\n\n if execute == EX_READ_ARRAY_HEADER:\n if typ != TYPE_ARRAY:\n raise ValueError("Expected array")\n return n\n if execute == EX_READ_MAP_HEADER:\n if typ != TYPE_MAP:\n raise ValueError("Expected map")\n return n\n # TODO should we eliminate the recursion?\n if typ == TYPE_ARRAY:\n if execute == EX_SKIP:\n for i in range(n):\n # TODO check whether we need to call `list_hook`\n self._unpack(EX_SKIP)\n return\n ret = newlist_hint(n)\n for i in range(n):\n ret.append(self._unpack(EX_CONSTRUCT))\n if self._list_hook is not None:\n ret = self._list_hook(ret)\n # TODO is the interaction between `list_hook` and `use_list` ok?\n return ret if self._use_list else tuple(ret)\n if typ == TYPE_MAP:\n if execute == EX_SKIP:\n for i in range(n):\n # TODO check whether 
we need to call hooks\n self._unpack(EX_SKIP)\n self._unpack(EX_SKIP)\n return\n if self._object_pairs_hook is not None:\n ret = self._object_pairs_hook(\n (self._unpack(EX_CONSTRUCT), self._unpack(EX_CONSTRUCT)) for _ in range(n)\n )\n else:\n ret = {}\n for _ in range(n):\n key = self._unpack(EX_CONSTRUCT)\n if self._strict_map_key and type(key) not in (str, bytes):\n raise ValueError("%s is not allowed for map key" % str(type(key)))\n if isinstance(key, str):\n key = sys.intern(key)\n ret[key] = self._unpack(EX_CONSTRUCT)\n if self._object_hook is not None:\n ret = self._object_hook(ret)\n return ret\n if execute == EX_SKIP:\n return\n if typ == TYPE_RAW:\n if self._raw:\n obj = bytes(obj)\n else:\n obj = obj.decode("utf_8", self._unicode_errors)\n return obj\n if typ == TYPE_BIN:\n return bytes(obj)\n if typ == TYPE_EXT:\n if n == -1: # timestamp\n ts = Timestamp.from_bytes(bytes(obj))\n if self._timestamp == 1:\n return ts.to_unix()\n elif self._timestamp == 2:\n return ts.to_unix_nano()\n elif self._timestamp == 3:\n return ts.to_datetime()\n else:\n return ts\n else:\n return self._ext_hook(n, bytes(obj))\n assert typ == TYPE_IMMEDIATE\n return obj\n\n def __iter__(self):\n return self\n\n def __next__(self):\n try:\n ret = self._unpack(EX_CONSTRUCT)\n self._consume()\n return ret\n except OutOfData:\n self._consume()\n raise StopIteration\n except RecursionError:\n raise StackError\n\n next = __next__\n\n def skip(self):\n self._unpack(EX_SKIP)\n self._consume()\n\n def unpack(self):\n try:\n ret = self._unpack(EX_CONSTRUCT)\n except RecursionError:\n raise StackError\n self._consume()\n return ret\n\n def read_array_header(self):\n ret = self._unpack(EX_READ_ARRAY_HEADER)\n self._consume()\n return ret\n\n def read_map_header(self):\n ret = self._unpack(EX_READ_MAP_HEADER)\n self._consume()\n return ret\n\n def tell(self):\n return self._stream_offset\n\n\nclass Packer:\n """\n MessagePack Packer\n\n Usage::\n\n packer = Packer()\n 
astream.write(packer.pack(a))\n astream.write(packer.pack(b))\n\n Packer's constructor has some keyword arguments:\n\n :param default:\n When specified, it should be callable.\n Convert user type to builtin type that Packer supports.\n See also simplejson's document.\n\n :param bool use_single_float:\n Use single precision float type for float. (default: False)\n\n :param bool autoreset:\n Reset buffer after each pack and return its content as `bytes`. (default: True).\n If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.\n\n :param bool use_bin_type:\n Use bin type introduced in msgpack spec 2.0 for bytes.\n It also enables str8 type for unicode. (default: True)\n\n :param bool strict_types:\n If set to true, types will be checked to be exact. Derived classes\n from serializable types will not be serialized and will be\n treated as unsupported type and forwarded to default.\n Additionally tuples will not be serialized as lists.\n This is useful when trying to implement accurate serialization\n for python types.\n\n :param bool datetime:\n If set to true, datetime with tzinfo is packed into Timestamp type.\n Note that the tzinfo is stripped in the timestamp.\n You can get UTC datetime with `timestamp=3` option of the Unpacker.\n\n :param str unicode_errors:\n The error handler for encoding unicode. (default: 'strict')\n DO NOT USE THIS!! This option is kept for very specific usage.\n\n :param int buf_size:\n Internal buffer size. 
This option is used only for C implementation.\n """\n\n def __init__(\n self,\n *,\n default=None,\n use_single_float=False,\n autoreset=True,\n use_bin_type=True,\n strict_types=False,\n datetime=False,\n unicode_errors=None,\n buf_size=None,\n ):\n self._strict_types = strict_types\n self._use_float = use_single_float\n self._autoreset = autoreset\n self._use_bin_type = use_bin_type\n self._buffer = BytesIO()\n self._datetime = bool(datetime)\n self._unicode_errors = unicode_errors or "strict"\n if default is not None and not callable(default):\n raise TypeError("default must be callable")\n self._default = default\n\n def _pack(\n self,\n obj,\n nest_limit=DEFAULT_RECURSE_LIMIT,\n check=isinstance,\n check_type_strict=_check_type_strict,\n ):\n default_used = False\n if self._strict_types:\n check = check_type_strict\n list_types = list\n else:\n list_types = (list, tuple)\n while True:\n if nest_limit < 0:\n raise ValueError("recursion limit exceeded")\n if obj is None:\n return self._buffer.write(b"\xc0")\n if check(obj, bool):\n if obj:\n return self._buffer.write(b"\xc3")\n return self._buffer.write(b"\xc2")\n if check(obj, int):\n if 0 <= obj < 0x80:\n return self._buffer.write(struct.pack("B", obj))\n if -0x20 <= obj < 0:\n return self._buffer.write(struct.pack("b", obj))\n if 0x80 <= obj <= 0xFF:\n return self._buffer.write(struct.pack("BB", 0xCC, obj))\n if -0x80 <= obj < 0:\n return self._buffer.write(struct.pack(">Bb", 0xD0, obj))\n if 0xFF < obj <= 0xFFFF:\n return self._buffer.write(struct.pack(">BH", 0xCD, obj))\n if -0x8000 <= obj < -0x80:\n return self._buffer.write(struct.pack(">Bh", 0xD1, obj))\n if 0xFFFF < obj <= 0xFFFFFFFF:\n return self._buffer.write(struct.pack(">BI", 0xCE, obj))\n if -0x80000000 <= obj < -0x8000:\n return self._buffer.write(struct.pack(">Bi", 0xD2, obj))\n if 0xFFFFFFFF < obj <= 0xFFFFFFFFFFFFFFFF:\n return self._buffer.write(struct.pack(">BQ", 0xCF, obj))\n if -0x8000000000000000 <= obj < -0x80000000:\n return 
self._buffer.write(struct.pack(">Bq", 0xD3, obj))\n if not default_used and self._default is not None:\n obj = self._default(obj)\n default_used = True\n continue\n raise OverflowError("Integer value out of range")\n if check(obj, (bytes, bytearray)):\n n = len(obj)\n if n >= 2**32:\n raise ValueError("%s is too large" % type(obj).__name__)\n self._pack_bin_header(n)\n return self._buffer.write(obj)\n if check(obj, str):\n obj = obj.encode("utf-8", self._unicode_errors)\n n = len(obj)\n if n >= 2**32:\n raise ValueError("String is too large")\n self._pack_raw_header(n)\n return self._buffer.write(obj)\n if check(obj, memoryview):\n n = obj.nbytes\n if n >= 2**32:\n raise ValueError("Memoryview is too large")\n self._pack_bin_header(n)\n return self._buffer.write(obj)\n if check(obj, float):\n if self._use_float:\n return self._buffer.write(struct.pack(">Bf", 0xCA, obj))\n return self._buffer.write(struct.pack(">Bd", 0xCB, obj))\n if check(obj, (ExtType, Timestamp)):\n if check(obj, Timestamp):\n code = -1\n data = obj.to_bytes()\n else:\n code = obj.code\n data = obj.data\n assert isinstance(code, int)\n assert isinstance(data, bytes)\n L = len(data)\n if L == 1:\n self._buffer.write(b"\xd4")\n elif L == 2:\n self._buffer.write(b"\xd5")\n elif L == 4:\n self._buffer.write(b"\xd6")\n elif L == 8:\n self._buffer.write(b"\xd7")\n elif L == 16:\n self._buffer.write(b"\xd8")\n elif L <= 0xFF:\n self._buffer.write(struct.pack(">BB", 0xC7, L))\n elif L <= 0xFFFF:\n self._buffer.write(struct.pack(">BH", 0xC8, L))\n else:\n self._buffer.write(struct.pack(">BI", 0xC9, L))\n self._buffer.write(struct.pack("b", code))\n self._buffer.write(data)\n return\n if check(obj, list_types):\n n = len(obj)\n self._pack_array_header(n)\n for i in range(n):\n self._pack(obj[i], nest_limit - 1)\n return\n if check(obj, dict):\n return self._pack_map_pairs(len(obj), obj.items(), nest_limit - 1)\n\n if self._datetime and check(obj, _DateTime) and obj.tzinfo is not None:\n obj = 
Timestamp.from_datetime(obj)\n default_used = 1\n continue\n\n if not default_used and self._default is not None:\n obj = self._default(obj)\n default_used = 1\n continue\n\n if self._datetime and check(obj, _DateTime):\n raise ValueError(f"Cannot serialize {obj!r} where tzinfo=None")\n\n raise TypeError(f"Cannot serialize {obj!r}")\n\n def pack(self, obj):\n try:\n self._pack(obj)\n except:\n self._buffer = BytesIO() # force reset\n raise\n if self._autoreset:\n ret = self._buffer.getvalue()\n self._buffer = BytesIO()\n return ret\n\n def pack_map_pairs(self, pairs):\n self._pack_map_pairs(len(pairs), pairs)\n if self._autoreset:\n ret = self._buffer.getvalue()\n self._buffer = BytesIO()\n return ret\n\n def pack_array_header(self, n):\n if n >= 2**32:\n raise ValueError\n self._pack_array_header(n)\n if self._autoreset:\n ret = self._buffer.getvalue()\n self._buffer = BytesIO()\n return ret\n\n def pack_map_header(self, n):\n if n >= 2**32:\n raise ValueError\n self._pack_map_header(n)\n if self._autoreset:\n ret = self._buffer.getvalue()\n self._buffer = BytesIO()\n return ret\n\n def pack_ext_type(self, typecode, data):\n if not isinstance(typecode, int):\n raise TypeError("typecode must have int type.")\n if not 0 <= typecode <= 127:\n raise ValueError("typecode should be 0-127")\n if not isinstance(data, bytes):\n raise TypeError("data must have bytes type")\n L = len(data)\n if L > 0xFFFFFFFF:\n raise ValueError("Too large data")\n if L == 1:\n self._buffer.write(b"\xd4")\n elif L == 2:\n self._buffer.write(b"\xd5")\n elif L == 4:\n self._buffer.write(b"\xd6")\n elif L == 8:\n self._buffer.write(b"\xd7")\n elif L == 16:\n self._buffer.write(b"\xd8")\n elif L <= 0xFF:\n self._buffer.write(b"\xc7" + struct.pack("B", L))\n elif L <= 0xFFFF:\n self._buffer.write(b"\xc8" + struct.pack(">H", L))\n else:\n self._buffer.write(b"\xc9" + struct.pack(">I", L))\n self._buffer.write(struct.pack("B", typecode))\n self._buffer.write(data)\n\n def _pack_array_header(self, 
n):\n if n <= 0x0F:\n return self._buffer.write(struct.pack("B", 0x90 + n))\n if n <= 0xFFFF:\n return self._buffer.write(struct.pack(">BH", 0xDC, n))\n if n <= 0xFFFFFFFF:\n return self._buffer.write(struct.pack(">BI", 0xDD, n))\n raise ValueError("Array is too large")\n\n def _pack_map_header(self, n):\n if n <= 0x0F:\n return self._buffer.write(struct.pack("B", 0x80 + n))\n if n <= 0xFFFF:\n return self._buffer.write(struct.pack(">BH", 0xDE, n))\n if n <= 0xFFFFFFFF:\n return self._buffer.write(struct.pack(">BI", 0xDF, n))\n raise ValueError("Dict is too large")\n\n def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):\n self._pack_map_header(n)\n for k, v in pairs:\n self._pack(k, nest_limit - 1)\n self._pack(v, nest_limit - 1)\n\n def _pack_raw_header(self, n):\n if n <= 0x1F:\n self._buffer.write(struct.pack("B", 0xA0 + n))\n elif self._use_bin_type and n <= 0xFF:\n self._buffer.write(struct.pack(">BB", 0xD9, n))\n elif n <= 0xFFFF:\n self._buffer.write(struct.pack(">BH", 0xDA, n))\n elif n <= 0xFFFFFFFF:\n self._buffer.write(struct.pack(">BI", 0xDB, n))\n else:\n raise ValueError("Raw is too large")\n\n def _pack_bin_header(self, n):\n if not self._use_bin_type:\n return self._pack_raw_header(n)\n elif n <= 0xFF:\n return self._buffer.write(struct.pack(">BB", 0xC4, n))\n elif n <= 0xFFFF:\n return self._buffer.write(struct.pack(">BH", 0xC5, n))\n elif n <= 0xFFFFFFFF:\n return self._buffer.write(struct.pack(">BI", 0xC6, n))\n else:\n raise ValueError("Bin is too large")\n\n def bytes(self):\n """Return internal buffer contents as bytes object"""\n return self._buffer.getvalue()\n\n def reset(self):\n """Reset internal buffer.\n\n This method is useful only when autoreset=False.\n """\n self._buffer = BytesIO()\n\n def getbuffer(self):\n """Return view of internal buffer."""\n if _USING_STRINGBUILDER:\n return memoryview(self.bytes())\n else:\n return self._buffer.getbuffer()\n
.venv\Lib\site-packages\pip\_vendor\msgpack\fallback.py
fallback.py
Python
32,390
0.95
0.204521
0.025735
awesome-app
692
2024-07-19T14:46:09.235981
BSD-3-Clause
false
052694bc7c6a9f969ffac186e4a5cdc7
# ruff: noqa: F401\nimport os\n\nfrom .exceptions import * # noqa: F403\nfrom .ext import ExtType, Timestamp\n\nversion = (1, 1, 0)\n__version__ = "1.1.0"\n\n\nif os.environ.get("MSGPACK_PUREPYTHON"):\n from .fallback import Packer, Unpacker, unpackb\nelse:\n try:\n from ._cmsgpack import Packer, Unpacker, unpackb\n except ImportError:\n from .fallback import Packer, Unpacker, unpackb\n\n\ndef pack(o, stream, **kwargs):\n """\n Pack object `o` and write it to `stream`\n\n See :class:`Packer` for options.\n """\n packer = Packer(**kwargs)\n stream.write(packer.pack(o))\n\n\ndef packb(o, **kwargs):\n """\n Pack object `o` and return packed bytes\n\n See :class:`Packer` for options.\n """\n return Packer(**kwargs).pack(o)\n\n\ndef unpack(stream, **kwargs):\n """\n Unpack an object from `stream`.\n\n Raises `ExtraData` when `stream` contains extra bytes.\n See :class:`Unpacker` for options.\n """\n data = stream.read()\n return unpackb(data, **kwargs)\n\n\n# alias for compatibility to simplejson/marshal/pickle.\nload = unpack\nloads = unpackb\n\ndump = pack\ndumps = packb\n
.venv\Lib\site-packages\pip\_vendor\msgpack\__init__.py
__init__.py
Python
1,109
0.95
0.218182
0.051282
python-kit
557
2024-04-05T13:07:00.973003
BSD-3-Clause
false
3b925fad3eff80b05a379e2e03b43a6b
\n\n
.venv\Lib\site-packages\pip\_vendor\msgpack\__pycache__\exceptions.cpython-313.pyc
exceptions.cpython-313.pyc
Other
2,167
0.95
0.133333
0
python-kit
965
2024-01-19T02:53:38.924320
MIT
false
badadb5e05c77e7574dceed4614683b5
\n\n
.venv\Lib\site-packages\pip\_vendor\msgpack\__pycache__\ext.cpython-313.pyc
ext.cpython-313.pyc
Other
8,096
0.95
0.054945
0
awesome-app
647
2024-12-03T14:01:40.408033
Apache-2.0
false
41fcaea16828101306e5d9e86a2e3a9d
\n\n
.venv\Lib\site-packages\pip\_vendor\msgpack\__pycache__\fallback.cpython-313.pyc
fallback.cpython-313.pyc
Other
42,071
0.8
0.038186
0.00271
vue-tools
493
2023-08-19T17:26:55.839357
Apache-2.0
false
e4ac4061097a9730482f593caa163057
\n\n
.venv\Lib\site-packages\pip\_vendor\msgpack\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,718
0.8
0.176471
0
react-lib
542
2024-10-20T08:29:26.097234
MIT
false
4633748a330d77897a0d2ba2ee148253
# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import annotations\n\nimport operator\nimport os\nimport platform\nimport sys\nfrom typing import AbstractSet, Any, Callable, Literal, TypedDict, Union, cast\n\nfrom ._parser import MarkerAtom, MarkerList, Op, Value, Variable\nfrom ._parser import parse_marker as _parse_marker\nfrom ._tokenizer import ParserSyntaxError\nfrom .specifiers import InvalidSpecifier, Specifier\nfrom .utils import canonicalize_name\n\n__all__ = [\n "EvaluateContext",\n "InvalidMarker",\n "Marker",\n "UndefinedComparison",\n "UndefinedEnvironmentName",\n "default_environment",\n]\n\nOperator = Callable[[str, Union[str, AbstractSet[str]]], bool]\nEvaluateContext = Literal["metadata", "lock_file", "requirement"]\nMARKERS_ALLOWING_SET = {"extras", "dependency_groups"}\n\n\nclass InvalidMarker(ValueError):\n """\n An invalid marker was found, users should refer to PEP 508.\n """\n\n\nclass UndefinedComparison(ValueError):\n """\n An invalid operation was attempted on a value that doesn't support it.\n """\n\n\nclass UndefinedEnvironmentName(ValueError):\n """\n A name was attempted to be used that does not exist inside of the\n environment.\n """\n\n\nclass Environment(TypedDict):\n implementation_name: str\n """The implementation's identifier, e.g. ``'cpython'``."""\n\n implementation_version: str\n """\n The implementation's version, e.g. ``'3.13.0a2'`` for CPython 3.13.0a2, or\n ``'7.3.13'`` for PyPy3.10 v7.3.13.\n """\n\n os_name: str\n """\n The value of :py:data:`os.name`. The name of the operating system dependent module\n imported, e.g. ``'posix'``.\n """\n\n platform_machine: str\n """\n Returns the machine type, e.g. ``'i386'``.\n\n An empty string if the value cannot be determined.\n """\n\n platform_release: str\n """\n The system's release, e.g. 
``'2.2.0'`` or ``'NT'``.\n\n An empty string if the value cannot be determined.\n """\n\n platform_system: str\n """\n The system/OS name, e.g. ``'Linux'``, ``'Windows'`` or ``'Java'``.\n\n An empty string if the value cannot be determined.\n """\n\n platform_version: str\n """\n The system's release version, e.g. ``'#3 on degas'``.\n\n An empty string if the value cannot be determined.\n """\n\n python_full_version: str\n """\n The Python version as string ``'major.minor.patchlevel'``.\n\n Note that unlike the Python :py:data:`sys.version`, this value will always include\n the patchlevel (it defaults to 0).\n """\n\n platform_python_implementation: str\n """\n A string identifying the Python implementation, e.g. ``'CPython'``.\n """\n\n python_version: str\n """The Python version as string ``'major.minor'``."""\n\n sys_platform: str\n """\n This string contains a platform identifier that can be used to append\n platform-specific components to :py:data:`sys.path`, for instance.\n\n For Unix systems, except on Linux and AIX, this is the lowercased OS name as\n returned by ``uname -s`` with the first part of the version as returned by\n ``uname -r`` appended, e.g. 
``'sunos5'`` or ``'freebsd8'``, at the time when Python\n was built.\n """\n\n\ndef _normalize_extra_values(results: Any) -> Any:\n """\n Normalize extra values.\n """\n if isinstance(results[0], tuple):\n lhs, op, rhs = results[0]\n if isinstance(lhs, Variable) and lhs.value == "extra":\n normalized_extra = canonicalize_name(rhs.value)\n rhs = Value(normalized_extra)\n elif isinstance(rhs, Variable) and rhs.value == "extra":\n normalized_extra = canonicalize_name(lhs.value)\n lhs = Value(normalized_extra)\n results[0] = lhs, op, rhs\n return results\n\n\ndef _format_marker(\n marker: list[str] | MarkerAtom | str, first: bool | None = True\n) -> str:\n assert isinstance(marker, (list, tuple, str))\n\n # Sometimes we have a structure like [[...]] which is a single item list\n # where the single item is itself it's own list. In that case we want skip\n # the rest of this function so that we don't get extraneous () on the\n # outside.\n if (\n isinstance(marker, list)\n and len(marker) == 1\n and isinstance(marker[0], (list, tuple))\n ):\n return _format_marker(marker[0])\n\n if isinstance(marker, list):\n inner = (_format_marker(m, first=False) for m in marker)\n if first:\n return " ".join(inner)\n else:\n return "(" + " ".join(inner) + ")"\n elif isinstance(marker, tuple):\n return " ".join([m.serialize() for m in marker])\n else:\n return marker\n\n\n_operators: dict[str, Operator] = {\n "in": lambda lhs, rhs: lhs in rhs,\n "not in": lambda lhs, rhs: lhs not in rhs,\n "<": operator.lt,\n "<=": operator.le,\n "==": operator.eq,\n "!=": operator.ne,\n ">=": operator.ge,\n ">": operator.gt,\n}\n\n\ndef _eval_op(lhs: str, op: Op, rhs: str | AbstractSet[str]) -> bool:\n if isinstance(rhs, str):\n try:\n spec = Specifier("".join([op.serialize(), rhs]))\n except InvalidSpecifier:\n pass\n else:\n return spec.contains(lhs, prereleases=True)\n\n oper: Operator | None = _operators.get(op.serialize())\n if oper is None:\n raise UndefinedComparison(f"Undefined {op!r} on 
{lhs!r} and {rhs!r}.")\n\n return oper(lhs, rhs)\n\n\ndef _normalize(\n lhs: str, rhs: str | AbstractSet[str], key: str\n) -> tuple[str, str | AbstractSet[str]]:\n # PEP 685 – Comparison of extra names for optional distribution dependencies\n # https://peps.python.org/pep-0685/\n # > When comparing extra names, tools MUST normalize the names being\n # > compared using the semantics outlined in PEP 503 for names\n if key == "extra":\n assert isinstance(rhs, str), "extra value must be a string"\n return (canonicalize_name(lhs), canonicalize_name(rhs))\n if key in MARKERS_ALLOWING_SET:\n if isinstance(rhs, str): # pragma: no cover\n return (canonicalize_name(lhs), canonicalize_name(rhs))\n else:\n return (canonicalize_name(lhs), {canonicalize_name(v) for v in rhs})\n\n # other environment markers don't have such standards\n return lhs, rhs\n\n\ndef _evaluate_markers(\n markers: MarkerList, environment: dict[str, str | AbstractSet[str]]\n) -> bool:\n groups: list[list[bool]] = [[]]\n\n for marker in markers:\n assert isinstance(marker, (list, tuple, str))\n\n if isinstance(marker, list):\n groups[-1].append(_evaluate_markers(marker, environment))\n elif isinstance(marker, tuple):\n lhs, op, rhs = marker\n\n if isinstance(lhs, Variable):\n environment_key = lhs.value\n lhs_value = environment[environment_key]\n rhs_value = rhs.value\n else:\n lhs_value = lhs.value\n environment_key = rhs.value\n rhs_value = environment[environment_key]\n assert isinstance(lhs_value, str), "lhs must be a string"\n lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)\n groups[-1].append(_eval_op(lhs_value, op, rhs_value))\n else:\n assert marker in ["and", "or"]\n if marker == "or":\n groups.append([])\n\n return any(all(item) for item in groups)\n\n\ndef format_full_version(info: sys._version_info) -> str:\n version = f"{info.major}.{info.minor}.{info.micro}"\n kind = info.releaselevel\n if kind != "final":\n version += kind[0] + str(info.serial)\n return 
version\n\n\ndef default_environment() -> Environment:\n iver = format_full_version(sys.implementation.version)\n implementation_name = sys.implementation.name\n return {\n "implementation_name": implementation_name,\n "implementation_version": iver,\n "os_name": os.name,\n "platform_machine": platform.machine(),\n "platform_release": platform.release(),\n "platform_system": platform.system(),\n "platform_version": platform.version(),\n "python_full_version": platform.python_version(),\n "platform_python_implementation": platform.python_implementation(),\n "python_version": ".".join(platform.python_version_tuple()[:2]),\n "sys_platform": sys.platform,\n }\n\n\nclass Marker:\n def __init__(self, marker: str) -> None:\n # Note: We create a Marker object without calling this constructor in\n # packaging.requirements.Requirement. If any additional logic is\n # added here, make sure to mirror/adapt Requirement.\n try:\n self._markers = _normalize_extra_values(_parse_marker(marker))\n # The attribute `_markers` can be described in terms of a recursive type:\n # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]\n #\n # For example, the following expression:\n # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")\n #\n # is parsed into:\n # [\n # (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),\n # 'and',\n # [\n # (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),\n # 'or',\n # (<Variable('os_name')>, <Op('==')>, <Value('unix')>)\n # ]\n # ]\n except ParserSyntaxError as e:\n raise InvalidMarker(str(e)) from e\n\n def __str__(self) -> str:\n return _format_marker(self._markers)\n\n def __repr__(self) -> str:\n return f"<Marker('{self}')>"\n\n def __hash__(self) -> int:\n return hash((self.__class__.__name__, str(self)))\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Marker):\n return NotImplemented\n\n return str(self) == str(other)\n\n def evaluate(\n self,\n environment: dict[str, str] | None = 
None,\n context: EvaluateContext = "metadata",\n ) -> bool:\n """Evaluate a marker.\n\n Return the boolean from evaluating the given marker against the\n environment. environment is an optional argument to override all or\n part of the determined environment. The *context* parameter specifies what\n context the markers are being evaluated for, which influences what markers\n are considered valid. Acceptable values are "metadata" (for core metadata;\n default), "lock_file", and "requirement" (i.e. all other situations).\n\n The environment is determined from the current Python process.\n """\n current_environment = cast(\n "dict[str, str | AbstractSet[str]]", default_environment()\n )\n if context == "lock_file":\n current_environment.update(\n extras=frozenset(), dependency_groups=frozenset()\n )\n elif context == "metadata":\n current_environment["extra"] = ""\n if environment is not None:\n current_environment.update(environment)\n # The API used to allow setting extra to None. We need to handle this\n # case for backwards compatibility.\n if "extra" in current_environment and current_environment["extra"] is None:\n current_environment["extra"] = ""\n\n return _evaluate_markers(\n self._markers, _repair_python_full_version(current_environment)\n )\n\n\ndef _repair_python_full_version(\n env: dict[str, str | AbstractSet[str]],\n) -> dict[str, str | AbstractSet[str]]:\n """\n Work around platform.python_version() returning something that is not PEP 440\n compliant for non-tagged Python builds.\n """\n python_full_version = cast(str, env["python_full_version"])\n if python_full_version.endswith("+"):\n env["python_full_version"] = f"{python_full_version}local"\n return env\n
.venv\Lib\site-packages\pip\_vendor\packaging\markers.py
markers.py
Python
12,049
0.95
0.165746
0.111864
vue-tools
946
2024-10-14T06:55:29.678576
MIT
false
1be7f129d91388653f23a0fc7414bbfe
from __future__ import annotations\n\nimport email.feedparser\nimport email.header\nimport email.message\nimport email.parser\nimport email.policy\nimport pathlib\nimport sys\nimport typing\nfrom typing import (\n Any,\n Callable,\n Generic,\n Literal,\n TypedDict,\n cast,\n)\n\nfrom . import licenses, requirements, specifiers, utils\nfrom . import version as version_module\nfrom .licenses import NormalizedLicenseExpression\n\nT = typing.TypeVar("T")\n\n\nif sys.version_info >= (3, 11): # pragma: no cover\n ExceptionGroup = ExceptionGroup\nelse: # pragma: no cover\n\n class ExceptionGroup(Exception):\n """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11.\n\n If :external:exc:`ExceptionGroup` is already defined by Python itself,\n that version is used instead.\n """\n\n message: str\n exceptions: list[Exception]\n\n def __init__(self, message: str, exceptions: list[Exception]) -> None:\n self.message = message\n self.exceptions = exceptions\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})"\n\n\nclass InvalidMetadata(ValueError):\n """A metadata field contains invalid data."""\n\n field: str\n """The name of the field that contains invalid data."""\n\n def __init__(self, field: str, message: str) -> None:\n self.field = field\n super().__init__(message)\n\n\n# The RawMetadata class attempts to make as few assumptions about the underlying\n# serialization formats as possible. The idea is that as long as a serialization\n# formats offer some very basic primitives in *some* way then we can support\n# serializing to and from that format.\nclass RawMetadata(TypedDict, total=False):\n """A dictionary of raw core metadata.\n\n Each field in core metadata maps to a key of this dictionary (when data is\n provided). The key is lower-case and underscores are used instead of dashes\n compared to the equivalent core metadata field. 
Any core metadata field that\n can be specified multiple times or can hold multiple values in a single\n field have a key with a plural name. See :class:`Metadata` whose attributes\n match the keys of this dictionary.\n\n Core metadata fields that can be specified multiple times are stored as a\n list or dict depending on which is appropriate for the field. Any fields\n which hold multiple values in a single field are stored as a list.\n\n """\n\n # Metadata 1.0 - PEP 241\n metadata_version: str\n name: str\n version: str\n platforms: list[str]\n summary: str\n description: str\n keywords: list[str]\n home_page: str\n author: str\n author_email: str\n license: str\n\n # Metadata 1.1 - PEP 314\n supported_platforms: list[str]\n download_url: str\n classifiers: list[str]\n requires: list[str]\n provides: list[str]\n obsoletes: list[str]\n\n # Metadata 1.2 - PEP 345\n maintainer: str\n maintainer_email: str\n requires_dist: list[str]\n provides_dist: list[str]\n obsoletes_dist: list[str]\n requires_python: str\n requires_external: list[str]\n project_urls: dict[str, str]\n\n # Metadata 2.0\n # PEP 426 attempted to completely revamp the metadata format\n # but got stuck without ever being able to build consensus on\n # it and ultimately ended up withdrawn.\n #\n # However, a number of tools had started emitting METADATA with\n # `2.0` Metadata-Version, so for historical reasons, this version\n # was skipped.\n\n # Metadata 2.1 - PEP 566\n description_content_type: str\n provides_extra: list[str]\n\n # Metadata 2.2 - PEP 643\n dynamic: list[str]\n\n # Metadata 2.3 - PEP 685\n # No new fields were added in PEP 685, just some edge case were\n # tightened up to provide better interoptability.\n\n # Metadata 2.4 - PEP 639\n license_expression: str\n license_files: list[str]\n\n\n_STRING_FIELDS = {\n "author",\n "author_email",\n "description",\n "description_content_type",\n "download_url",\n "home_page",\n "license",\n "license_expression",\n "maintainer",\n 
"maintainer_email",\n "metadata_version",\n "name",\n "requires_python",\n "summary",\n "version",\n}\n\n_LIST_FIELDS = {\n "classifiers",\n "dynamic",\n "license_files",\n "obsoletes",\n "obsoletes_dist",\n "platforms",\n "provides",\n "provides_dist",\n "provides_extra",\n "requires",\n "requires_dist",\n "requires_external",\n "supported_platforms",\n}\n\n_DICT_FIELDS = {\n "project_urls",\n}\n\n\ndef _parse_keywords(data: str) -> list[str]:\n """Split a string of comma-separated keywords into a list of keywords."""\n return [k.strip() for k in data.split(",")]\n\n\ndef _parse_project_urls(data: list[str]) -> dict[str, str]:\n """Parse a list of label/URL string pairings separated by a comma."""\n urls = {}\n for pair in data:\n # Our logic is slightly tricky here as we want to try and do\n # *something* reasonable with malformed data.\n #\n # The main thing that we have to worry about, is data that does\n # not have a ',' at all to split the label from the Value. There\n # isn't a singular right answer here, and we will fail validation\n # later on (if the caller is validating) so it doesn't *really*\n # matter, but since the missing value has to be an empty str\n # and our return value is dict[str, str], if we let the key\n # be the missing value, then they'd have multiple '' values that\n # overwrite each other in a accumulating dict.\n #\n # The other potentional issue is that it's possible to have the\n # same label multiple times in the metadata, with no solid "right"\n # answer with what to do in that case. As such, we'll do the only\n # thing we can, which is treat the field as unparseable and add it\n # to our list of unparsed fields.\n parts = [p.strip() for p in pair.split(",", 1)]\n parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items\n\n # TODO: The spec doesn't say anything about if the keys should be\n # considered case sensitive or not... 
logically they should\n # be case-preserving and case-insensitive, but doing that\n # would open up more cases where we might have duplicate\n # entries.\n label, url = parts\n if label in urls:\n # The label already exists in our set of urls, so this field\n # is unparseable, and we can just add the whole thing to our\n # unparseable data and stop processing it.\n raise KeyError("duplicate labels in project urls")\n urls[label] = url\n\n return urls\n\n\ndef _get_payload(msg: email.message.Message, source: bytes | str) -> str:\n """Get the body of the message."""\n # If our source is a str, then our caller has managed encodings for us,\n # and we don't need to deal with it.\n if isinstance(source, str):\n payload = msg.get_payload()\n assert isinstance(payload, str)\n return payload\n # If our source is a bytes, then we're managing the encoding and we need\n # to deal with it.\n else:\n bpayload = msg.get_payload(decode=True)\n assert isinstance(bpayload, bytes)\n try:\n return bpayload.decode("utf8", "strict")\n except UnicodeDecodeError as exc:\n raise ValueError("payload in an invalid encoding") from exc\n\n\n# The various parse_FORMAT functions here are intended to be as lenient as\n# possible in their parsing, while still returning a correctly typed\n# RawMetadata.\n#\n# To aid in this, we also generally want to do as little touching of the\n# data as possible, except where there are possibly some historic holdovers\n# that make valid data awkward to work with.\n#\n# While this is a lower level, intermediate format than our ``Metadata``\n# class, some light touch ups can make a massive difference in usability.\n\n# Map METADATA fields to RawMetadata.\n_EMAIL_TO_RAW_MAPPING = {\n "author": "author",\n "author-email": "author_email",\n "classifier": "classifiers",\n "description": "description",\n "description-content-type": "description_content_type",\n "download-url": "download_url",\n "dynamic": "dynamic",\n "home-page": "home_page",\n "keywords": 
"keywords",\n "license": "license",\n "license-expression": "license_expression",\n "license-file": "license_files",\n "maintainer": "maintainer",\n "maintainer-email": "maintainer_email",\n "metadata-version": "metadata_version",\n "name": "name",\n "obsoletes": "obsoletes",\n "obsoletes-dist": "obsoletes_dist",\n "platform": "platforms",\n "project-url": "project_urls",\n "provides": "provides",\n "provides-dist": "provides_dist",\n "provides-extra": "provides_extra",\n "requires": "requires",\n "requires-dist": "requires_dist",\n "requires-external": "requires_external",\n "requires-python": "requires_python",\n "summary": "summary",\n "supported-platform": "supported_platforms",\n "version": "version",\n}\n_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()}\n\n\ndef parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]:\n """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``).\n\n This function returns a two-item tuple of dicts. The first dict is of\n recognized fields from the core metadata specification. Fields that can be\n parsed and translated into Python's built-in types are converted\n appropriately. All other fields are left as-is. Fields that are allowed to\n appear multiple times are stored as lists.\n\n The second dict contains all other fields from the metadata. This includes\n any unrecognized fields. It also includes any fields which are expected to\n be parsed into a built-in type but were not formatted appropriately. 
Finally,\n any fields that are expected to appear only once but are repeated are\n included in this dict.\n\n """\n raw: dict[str, str | list[str] | dict[str, str]] = {}\n unparsed: dict[str, list[str]] = {}\n\n if isinstance(data, str):\n parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)\n else:\n parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)\n\n # We have to wrap parsed.keys() in a set, because in the case of multiple\n # values for a key (a list), the key will appear multiple times in the\n # list of keys, but we're avoiding that by using get_all().\n for name in frozenset(parsed.keys()):\n # Header names in RFC are case insensitive, so we'll normalize to all\n # lower case to make comparisons easier.\n name = name.lower()\n\n # We use get_all() here, even for fields that aren't multiple use,\n # because otherwise someone could have e.g. two Name fields, and we\n # would just silently ignore it rather than doing something about it.\n headers = parsed.get_all(name) or []\n\n # The way the email module works when parsing bytes is that it\n # unconditionally decodes the bytes as ascii using the surrogateescape\n # handler. When you pull that data back out (such as with get_all() ),\n # it looks to see if the str has any surrogate escapes, and if it does\n # it wraps it in a Header object instead of returning the string.\n #\n # As such, we'll look for those Header objects, and fix up the encoding.\n value = []\n # Flag if we have run into any issues processing the headers, thus\n # signalling that the data belongs in 'unparsed'.\n valid_encoding = True\n for h in headers:\n # It's unclear if this can return more types than just a Header or\n # a str, so we'll just assert here to make sure.\n assert isinstance(h, (email.header.Header, str))\n\n # If it's a header object, we need to do our little dance to get\n # the real data out of it. 
In cases where there is invalid data\n # we're going to end up with mojibake, but there's no obvious, good\n # way around that without reimplementing parts of the Header object\n # ourselves.\n #\n # That should be fine since, if mojibacked happens, this key is\n # going into the unparsed dict anyways.\n if isinstance(h, email.header.Header):\n # The Header object stores it's data as chunks, and each chunk\n # can be independently encoded, so we'll need to check each\n # of them.\n chunks: list[tuple[bytes, str | None]] = []\n for bin, encoding in email.header.decode_header(h):\n try:\n bin.decode("utf8", "strict")\n except UnicodeDecodeError:\n # Enable mojibake.\n encoding = "latin1"\n valid_encoding = False\n else:\n encoding = "utf8"\n chunks.append((bin, encoding))\n\n # Turn our chunks back into a Header object, then let that\n # Header object do the right thing to turn them into a\n # string for us.\n value.append(str(email.header.make_header(chunks)))\n # This is already a string, so just add it.\n else:\n value.append(h)\n\n # We've processed all of our values to get them into a list of str,\n # but we may have mojibake data, in which case this is an unparsed\n # field.\n if not valid_encoding:\n unparsed[name] = value\n continue\n\n raw_name = _EMAIL_TO_RAW_MAPPING.get(name)\n if raw_name is None:\n # This is a bit of a weird situation, we've encountered a key that\n # we don't know what it means, so we don't know whether it's meant\n # to be a list or not.\n #\n # Since we can't really tell one way or another, we'll just leave it\n # as a list, even though it may be a single item list, because that's\n # what makes the most sense for email headers.\n unparsed[name] = value\n continue\n\n # If this is one of our string fields, then we'll check to see if our\n # value is a list of a single item. 
If it is then we'll assume that\n # it was emitted as a single string, and unwrap the str from inside\n # the list.\n #\n # If it's any other kind of data, then we haven't the faintest clue\n # what we should parse it as, and we have to just add it to our list\n # of unparsed stuff.\n if raw_name in _STRING_FIELDS and len(value) == 1:\n raw[raw_name] = value[0]\n # If this is one of our list of string fields, then we can just assign\n # the value, since email *only* has strings, and our get_all() call\n # above ensures that this is a list.\n elif raw_name in _LIST_FIELDS:\n raw[raw_name] = value\n # Special Case: Keywords\n # The keywords field is implemented in the metadata spec as a str,\n # but it conceptually is a list of strings, and is serialized using\n # ", ".join(keywords), so we'll do some light data massaging to turn\n # this into what it logically is.\n elif raw_name == "keywords" and len(value) == 1:\n raw[raw_name] = _parse_keywords(value[0])\n # Special Case: Project-URL\n # The project urls is implemented in the metadata spec as a list of\n # specially-formatted strings that represent a key and a value, which\n # is fundamentally a mapping, however the email format doesn't support\n # mappings in a sane way, so it was crammed into a list of strings\n # instead.\n #\n # We will do a little light data massaging to turn this into a map as\n # it logically should be.\n elif raw_name == "project_urls":\n try:\n raw[raw_name] = _parse_project_urls(value)\n except KeyError:\n unparsed[name] = value\n # Nothing that we've done has managed to parse this, so it'll just\n # throw it in our unparseable data and move on.\n else:\n unparsed[name] = value\n\n # We need to support getting the Description from the message payload in\n # addition to getting it from the the headers. 
This does mean, though, there\n # is the possibility of it being set both ways, in which case we put both\n # in 'unparsed' since we don't know which is right.\n try:\n payload = _get_payload(parsed, data)\n except ValueError:\n unparsed.setdefault("description", []).append(\n parsed.get_payload(decode=isinstance(data, bytes)) # type: ignore[call-overload]\n )\n else:\n if payload:\n # Check to see if we've already got a description, if so then both\n # it, and this body move to unparseable.\n if "description" in raw:\n description_header = cast(str, raw.pop("description"))\n unparsed.setdefault("description", []).extend(\n [description_header, payload]\n )\n elif "description" in unparsed:\n unparsed["description"].append(payload)\n else:\n raw["description"] = payload\n\n # We need to cast our `raw` to a metadata, because a TypedDict only support\n # literal key names, but we're computing our key names on purpose, but the\n # way this function is implemented, our `TypedDict` can only have valid key\n # names.\n return cast(RawMetadata, raw), unparsed\n\n\n_NOT_FOUND = object()\n\n\n# Keep the two values in sync.\n_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"]\n_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3", "2.4"]\n\n_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])\n\n\nclass _Validator(Generic[T]):\n """Validate a metadata field.\n\n All _process_*() methods correspond to a core metadata field. The method is\n called with the field's raw value. If the raw value is valid it is returned\n in its "enriched" form (e.g. 
``version.Version`` for the ``Version`` field).\n If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause\n as appropriate).\n """\n\n name: str\n raw_name: str\n added: _MetadataVersion\n\n def __init__(\n self,\n *,\n added: _MetadataVersion = "1.0",\n ) -> None:\n self.added = added\n\n def __set_name__(self, _owner: Metadata, name: str) -> None:\n self.name = name\n self.raw_name = _RAW_TO_EMAIL_MAPPING[name]\n\n def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T:\n # With Python 3.8, the caching can be replaced with functools.cached_property().\n # No need to check the cache as attribute lookup will resolve into the\n # instance's __dict__ before __get__ is called.\n cache = instance.__dict__\n value = instance._raw.get(self.name)\n\n # To make the _process_* methods easier, we'll check if the value is None\n # and if this field is NOT a required attribute, and if both of those\n # things are true, we'll skip the the converter. This will mean that the\n # converters never have to deal with the None union.\n if self.name in _REQUIRED_ATTRS or value is not None:\n try:\n converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}")\n except AttributeError:\n pass\n else:\n value = converter(value)\n\n cache[self.name] = value\n try:\n del instance._raw[self.name] # type: ignore[misc]\n except KeyError:\n pass\n\n return cast(T, value)\n\n def _invalid_metadata(\n self, msg: str, cause: Exception | None = None\n ) -> InvalidMetadata:\n exc = InvalidMetadata(\n self.raw_name, msg.format_map({"field": repr(self.raw_name)})\n )\n exc.__cause__ = cause\n return exc\n\n def _process_metadata_version(self, value: str) -> _MetadataVersion:\n # Implicitly makes Metadata-Version required.\n if value not in _VALID_METADATA_VERSIONS:\n raise self._invalid_metadata(f"{value!r} is not a valid metadata version")\n return cast(_MetadataVersion, value)\n\n def _process_name(self, value: str) -> str:\n if not value:\n raise 
self._invalid_metadata("{field} is a required field")\n # Validate the name as a side-effect.\n try:\n utils.canonicalize_name(value, validate=True)\n except utils.InvalidName as exc:\n raise self._invalid_metadata(\n f"{value!r} is invalid for {{field}}", cause=exc\n ) from exc\n else:\n return value\n\n def _process_version(self, value: str) -> version_module.Version:\n if not value:\n raise self._invalid_metadata("{field} is a required field")\n try:\n return version_module.parse(value)\n except version_module.InvalidVersion as exc:\n raise self._invalid_metadata(\n f"{value!r} is invalid for {{field}}", cause=exc\n ) from exc\n\n def _process_summary(self, value: str) -> str:\n """Check the field contains no newlines."""\n if "\n" in value:\n raise self._invalid_metadata("{field} must be a single line")\n return value\n\n def _process_description_content_type(self, value: str) -> str:\n content_types = {"text/plain", "text/x-rst", "text/markdown"}\n message = email.message.EmailMessage()\n message["content-type"] = value\n\n content_type, parameters = (\n # Defaults to `text/plain` if parsing failed.\n message.get_content_type().lower(),\n message["content-type"].params,\n )\n # Check if content-type is valid or defaulted to `text/plain` and thus was\n # not parseable.\n if content_type not in content_types or content_type not in value.lower():\n raise self._invalid_metadata(\n f"{{field}} must be one of {list(content_types)}, not {value!r}"\n )\n\n charset = parameters.get("charset", "UTF-8")\n if charset != "UTF-8":\n raise self._invalid_metadata(\n f"{{field}} can only specify the UTF-8 charset, not {list(charset)}"\n )\n\n markdown_variants = {"GFM", "CommonMark"}\n variant = parameters.get("variant", "GFM") # Use an acceptable default.\n if content_type == "text/markdown" and variant not in markdown_variants:\n raise self._invalid_metadata(\n f"valid Markdown variants for {{field}} are {list(markdown_variants)}, "\n f"not {variant!r}",\n )\n return 
value\n\n def _process_dynamic(self, value: list[str]) -> list[str]:\n for dynamic_field in map(str.lower, value):\n if dynamic_field in {"name", "version", "metadata-version"}:\n raise self._invalid_metadata(\n f"{dynamic_field!r} is not allowed as a dynamic field"\n )\n elif dynamic_field not in _EMAIL_TO_RAW_MAPPING:\n raise self._invalid_metadata(\n f"{dynamic_field!r} is not a valid dynamic field"\n )\n return list(map(str.lower, value))\n\n def _process_provides_extra(\n self,\n value: list[str],\n ) -> list[utils.NormalizedName]:\n normalized_names = []\n try:\n for name in value:\n normalized_names.append(utils.canonicalize_name(name, validate=True))\n except utils.InvalidName as exc:\n raise self._invalid_metadata(\n f"{name!r} is invalid for {{field}}", cause=exc\n ) from exc\n else:\n return normalized_names\n\n def _process_requires_python(self, value: str) -> specifiers.SpecifierSet:\n try:\n return specifiers.SpecifierSet(value)\n except specifiers.InvalidSpecifier as exc:\n raise self._invalid_metadata(\n f"{value!r} is invalid for {{field}}", cause=exc\n ) from exc\n\n def _process_requires_dist(\n self,\n value: list[str],\n ) -> list[requirements.Requirement]:\n reqs = []\n try:\n for req in value:\n reqs.append(requirements.Requirement(req))\n except requirements.InvalidRequirement as exc:\n raise self._invalid_metadata(\n f"{req!r} is invalid for {{field}}", cause=exc\n ) from exc\n else:\n return reqs\n\n def _process_license_expression(\n self, value: str\n ) -> NormalizedLicenseExpression | None:\n try:\n return licenses.canonicalize_license_expression(value)\n except ValueError as exc:\n raise self._invalid_metadata(\n f"{value!r} is invalid for {{field}}", cause=exc\n ) from exc\n\n def _process_license_files(self, value: list[str]) -> list[str]:\n paths = []\n for path in value:\n if ".." 
in path:\n raise self._invalid_metadata(\n f"{path!r} is invalid for {{field}}, "\n "parent directory indicators are not allowed"\n )\n if "*" in path:\n raise self._invalid_metadata(\n f"{path!r} is invalid for {{field}}, paths must be resolved"\n )\n if (\n pathlib.PurePosixPath(path).is_absolute()\n or pathlib.PureWindowsPath(path).is_absolute()\n ):\n raise self._invalid_metadata(\n f"{path!r} is invalid for {{field}}, paths must be relative"\n )\n if pathlib.PureWindowsPath(path).as_posix() != path:\n raise self._invalid_metadata(\n f"{path!r} is invalid for {{field}}, paths must use '/' delimiter"\n )\n paths.append(path)\n return paths\n\n\nclass Metadata:\n """Representation of distribution metadata.\n\n Compared to :class:`RawMetadata`, this class provides objects representing\n metadata fields instead of only using built-in types. Any invalid metadata\n will cause :exc:`InvalidMetadata` to be raised (with a\n :py:attr:`~BaseException.__cause__` attribute as appropriate).\n """\n\n _raw: RawMetadata\n\n @classmethod\n def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata:\n """Create an instance from :class:`RawMetadata`.\n\n If *validate* is true, all metadata will be validated. 
All exceptions\n related to validation will be gathered and raised as an :class:`ExceptionGroup`.\n """\n ins = cls()\n ins._raw = data.copy() # Mutations occur due to caching enriched values.\n\n if validate:\n exceptions: list[Exception] = []\n try:\n metadata_version = ins.metadata_version\n metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version)\n except InvalidMetadata as metadata_version_exc:\n exceptions.append(metadata_version_exc)\n metadata_version = None\n\n # Make sure to check for the fields that are present, the required\n # fields (so their absence can be reported).\n fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS\n # Remove fields that have already been checked.\n fields_to_check -= {"metadata_version"}\n\n for key in fields_to_check:\n try:\n if metadata_version:\n # Can't use getattr() as that triggers descriptor protocol which\n # will fail due to no value for the instance argument.\n try:\n field_metadata_version = cls.__dict__[key].added\n except KeyError:\n exc = InvalidMetadata(key, f"unrecognized field: {key!r}")\n exceptions.append(exc)\n continue\n field_age = _VALID_METADATA_VERSIONS.index(\n field_metadata_version\n )\n if field_age > metadata_age:\n field = _RAW_TO_EMAIL_MAPPING[key]\n exc = InvalidMetadata(\n field,\n f"{field} introduced in metadata version "\n f"{field_metadata_version}, not {metadata_version}",\n )\n exceptions.append(exc)\n continue\n getattr(ins, key)\n except InvalidMetadata as exc:\n exceptions.append(exc)\n\n if exceptions:\n raise ExceptionGroup("invalid metadata", exceptions)\n\n return ins\n\n @classmethod\n def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata:\n """Parse metadata from email headers.\n\n If *validate* is true, the metadata will be validated. 
All exceptions\n related to validation will be gathered and raised as an :class:`ExceptionGroup`.\n """\n raw, unparsed = parse_email(data)\n\n if validate:\n exceptions: list[Exception] = []\n for unparsed_key in unparsed:\n if unparsed_key in _EMAIL_TO_RAW_MAPPING:\n message = f"{unparsed_key!r} has invalid data"\n else:\n message = f"unrecognized field: {unparsed_key!r}"\n exceptions.append(InvalidMetadata(unparsed_key, message))\n\n if exceptions:\n raise ExceptionGroup("unparsed", exceptions)\n\n try:\n return cls.from_raw(raw, validate=validate)\n except ExceptionGroup as exc_group:\n raise ExceptionGroup(\n "invalid or unparsed metadata", exc_group.exceptions\n ) from None\n\n metadata_version: _Validator[_MetadataVersion] = _Validator()\n """:external:ref:`core-metadata-metadata-version`\n (required; validated to be a valid metadata version)"""\n # `name` is not normalized/typed to NormalizedName so as to provide access to\n # the original/raw name.\n name: _Validator[str] = _Validator()\n """:external:ref:`core-metadata-name`\n (required; validated using :func:`~packaging.utils.canonicalize_name` and its\n *validate* parameter)"""\n version: _Validator[version_module.Version] = _Validator()\n """:external:ref:`core-metadata-version` (required)"""\n dynamic: _Validator[list[str] | None] = _Validator(\n added="2.2",\n )\n """:external:ref:`core-metadata-dynamic`\n (validated against core metadata field names and lowercased)"""\n platforms: _Validator[list[str] | None] = _Validator()\n """:external:ref:`core-metadata-platform`"""\n supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1")\n """:external:ref:`core-metadata-supported-platform`"""\n summary: _Validator[str | None] = _Validator()\n """:external:ref:`core-metadata-summary` (validated to contain no newlines)"""\n description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body\n """:external:ref:`core-metadata-description`"""\n description_content_type: _Validator[str 
| None] = _Validator(added="2.1")\n """:external:ref:`core-metadata-description-content-type` (validated)"""\n keywords: _Validator[list[str] | None] = _Validator()\n """:external:ref:`core-metadata-keywords`"""\n home_page: _Validator[str | None] = _Validator()\n """:external:ref:`core-metadata-home-page`"""\n download_url: _Validator[str | None] = _Validator(added="1.1")\n """:external:ref:`core-metadata-download-url`"""\n author: _Validator[str | None] = _Validator()\n """:external:ref:`core-metadata-author`"""\n author_email: _Validator[str | None] = _Validator()\n """:external:ref:`core-metadata-author-email`"""\n maintainer: _Validator[str | None] = _Validator(added="1.2")\n """:external:ref:`core-metadata-maintainer`"""\n maintainer_email: _Validator[str | None] = _Validator(added="1.2")\n """:external:ref:`core-metadata-maintainer-email`"""\n license: _Validator[str | None] = _Validator()\n """:external:ref:`core-metadata-license`"""\n license_expression: _Validator[NormalizedLicenseExpression | None] = _Validator(\n added="2.4"\n )\n """:external:ref:`core-metadata-license-expression`"""\n license_files: _Validator[list[str] | None] = _Validator(added="2.4")\n """:external:ref:`core-metadata-license-file`"""\n classifiers: _Validator[list[str] | None] = _Validator(added="1.1")\n """:external:ref:`core-metadata-classifier`"""\n requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator(\n added="1.2"\n )\n """:external:ref:`core-metadata-requires-dist`"""\n requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator(\n added="1.2"\n )\n """:external:ref:`core-metadata-requires-python`"""\n # Because `Requires-External` allows for non-PEP 440 version specifiers, we\n # don't do any processing on the values.\n requires_external: _Validator[list[str] | None] = _Validator(added="1.2")\n """:external:ref:`core-metadata-requires-external`"""\n project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2")\n 
""":external:ref:`core-metadata-project-url`"""\n # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation\n # regardless of metadata version.\n provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator(\n added="2.1",\n )\n """:external:ref:`core-metadata-provides-extra`"""\n provides_dist: _Validator[list[str] | None] = _Validator(added="1.2")\n """:external:ref:`core-metadata-provides-dist`"""\n obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2")\n """:external:ref:`core-metadata-obsoletes-dist`"""\n requires: _Validator[list[str] | None] = _Validator(added="1.1")\n """``Requires`` (deprecated)"""\n provides: _Validator[list[str] | None] = _Validator(added="1.1")\n """``Provides`` (deprecated)"""\n obsoletes: _Validator[list[str] | None] = _Validator(added="1.1")\n """``Obsoletes`` (deprecated)"""\n
.venv\Lib\site-packages\pip\_vendor\packaging\metadata.py
metadata.py
Python
34,739
0.95
0.162413
0.22325
vue-tools
713
2023-10-08T14:59:07.137879
GPL-3.0
false
7cfeeeeb4a2be7848c92ae82b5c9f6a1
# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\nfrom __future__ import annotations\n\nfrom typing import Any, Iterator\n\nfrom ._parser import parse_requirement as _parse_requirement\nfrom ._tokenizer import ParserSyntaxError\nfrom .markers import Marker, _normalize_extra_values\nfrom .specifiers import SpecifierSet\nfrom .utils import canonicalize_name\n\n\nclass InvalidRequirement(ValueError):\n """\n An invalid requirement was found, users should refer to PEP 508.\n """\n\n\nclass Requirement:\n """Parse a requirement.\n\n Parse a given requirement string into its parts, such as name, specifier,\n URL, and extras. Raises InvalidRequirement on a badly-formed requirement\n string.\n """\n\n # TODO: Can we test whether something is contained within a requirement?\n # If so how do we do that? Do we need to test against the _name_ of\n # the thing as well as the version? 
What about the markers?\n # TODO: Can we normalize the name and extra name?\n\n def __init__(self, requirement_string: str) -> None:\n try:\n parsed = _parse_requirement(requirement_string)\n except ParserSyntaxError as e:\n raise InvalidRequirement(str(e)) from e\n\n self.name: str = parsed.name\n self.url: str | None = parsed.url or None\n self.extras: set[str] = set(parsed.extras or [])\n self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)\n self.marker: Marker | None = None\n if parsed.marker is not None:\n self.marker = Marker.__new__(Marker)\n self.marker._markers = _normalize_extra_values(parsed.marker)\n\n def _iter_parts(self, name: str) -> Iterator[str]:\n yield name\n\n if self.extras:\n formatted_extras = ",".join(sorted(self.extras))\n yield f"[{formatted_extras}]"\n\n if self.specifier:\n yield str(self.specifier)\n\n if self.url:\n yield f"@ {self.url}"\n if self.marker:\n yield " "\n\n if self.marker:\n yield f"; {self.marker}"\n\n def __str__(self) -> str:\n return "".join(self._iter_parts(self.name))\n\n def __repr__(self) -> str:\n return f"<Requirement('{self}')>"\n\n def __hash__(self) -> int:\n return hash(\n (\n self.__class__.__name__,\n *self._iter_parts(canonicalize_name(self.name)),\n )\n )\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Requirement):\n return NotImplemented\n\n return (\n canonicalize_name(self.name) == canonicalize_name(other.name)\n and self.extras == other.extras\n and self.specifier == other.specifier\n and self.url == other.url\n and self.marker == other.marker\n )\n
.venv\Lib\site-packages\pip\_vendor\packaging\requirements.py
requirements.py
Python
2,947
0.95
0.186813
0.112676
node-utils
735
2024-10-06T07:57:49.987062
MIT
false
2fc711cf5b4a1a8ac92aab0bd4e13284
# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n"""\n.. testsetup::\n\n from pip._vendor.packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier\n from pip._vendor.packaging.version import Version\n"""\n\nfrom __future__ import annotations\n\nimport abc\nimport itertools\nimport re\nfrom typing import Callable, Iterable, Iterator, TypeVar, Union\n\nfrom .utils import canonicalize_version\nfrom .version import Version\n\nUnparsedVersion = Union[Version, str]\nUnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)\nCallableOperator = Callable[[Version, str], bool]\n\n\ndef _coerce_version(version: UnparsedVersion) -> Version:\n if not isinstance(version, Version):\n version = Version(version)\n return version\n\n\nclass InvalidSpecifier(ValueError):\n """\n Raised when attempting to create a :class:`Specifier` with a specifier\n string that is invalid.\n\n >>> Specifier("lolwat")\n Traceback (most recent call last):\n ...\n packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'\n """\n\n\nclass BaseSpecifier(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def __str__(self) -> str:\n """\n Returns the str representation of this Specifier-like object. 
This\n should be representative of the Specifier itself.\n """\n\n @abc.abstractmethod\n def __hash__(self) -> int:\n """\n Returns a hash value for this Specifier-like object.\n """\n\n @abc.abstractmethod\n def __eq__(self, other: object) -> bool:\n """\n Returns a boolean representing whether or not the two Specifier-like\n objects are equal.\n\n :param other: The other object to check against.\n """\n\n @property\n @abc.abstractmethod\n def prereleases(self) -> bool | None:\n """Whether or not pre-releases as a whole are allowed.\n\n This can be set to either ``True`` or ``False`` to explicitly enable or disable\n prereleases or it can be set to ``None`` (the default) to use default semantics.\n """\n\n @prereleases.setter\n def prereleases(self, value: bool) -> None:\n """Setter for :attr:`prereleases`.\n\n :param value: The value to set.\n """\n\n @abc.abstractmethod\n def contains(self, item: str, prereleases: bool | None = None) -> bool:\n """\n Determines if the given item is contained within this specifier.\n """\n\n @abc.abstractmethod\n def filter(\n self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None\n ) -> Iterator[UnparsedVersionVar]:\n """\n Takes an iterable of items and filters them so that only items which\n are contained within this specifier are allowed in it.\n """\n\n\nclass Specifier(BaseSpecifier):\n """This class abstracts handling of version specifiers.\n\n .. tip::\n\n It is generally not required to instantiate this manually. 
You should instead\n prefer to work with :class:`SpecifierSet` instead, which can parse\n comma-separated version specifiers (which is what package metadata contains).\n """\n\n _operator_regex_str = r"""\n (?P<operator>(~=|==|!=|<=|>=|<|>|===))\n """\n _version_regex_str = r"""\n (?P<version>\n (?:\n # The identity operators allow for an escape hatch that will\n # do an exact string match of the version you wish to install.\n # This will not be parsed by PEP 440 and we cannot determine\n # any semantic meaning from it. This operator is discouraged\n # but included entirely as an escape hatch.\n (?<====) # Only match for the identity operator\n \s*\n [^\s;)]* # The arbitrary version can be just about anything,\n # we match everything except for whitespace, a\n # semi-colon for marker support, and a closing paren\n # since versions can be enclosed in them.\n )\n |\n (?:\n # The (non)equality operators allow for wild card and local\n # versions to be specified so we have to define these two\n # operators separately to enable that.\n (?<===|!=) # Only match for equals and not equals\n\n \s*\n v?\n (?:[0-9]+!)? # epoch\n [0-9]+(?:\.[0-9]+)* # release\n\n # You cannot use a wild card and a pre-release, post-release, a dev or\n # local version together so group them with a | and make them optional.\n (?:\n \.\* # Wild card syntax of .*\n |\n (?: # pre release\n [-_\.]?\n (alpha|beta|preview|pre|a|b|c|rc)\n [-_\.]?\n [0-9]*\n )?\n (?: # post release\n (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)\n )?\n (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release\n (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local\n )?\n )\n |\n (?:\n # The compatible operator requires at least two digits in the\n # release segment.\n (?<=~=) # Only match for the compatible operator\n\n \s*\n v?\n (?:[0-9]+!)? 
# epoch\n [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)\n (?: # pre release\n [-_\.]?\n (alpha|beta|preview|pre|a|b|c|rc)\n [-_\.]?\n [0-9]*\n )?\n (?: # post release\n (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)\n )?\n (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release\n )\n |\n (?:\n # All other operators only allow a sub set of what the\n # (non)equality operators do. Specifically they do not allow\n # local versions to be specified nor do they allow the prefix\n # matching wild cards.\n (?<!==|!=|~=) # We have special cases for these\n # operators so we want to make sure they\n # don't match here.\n\n \s*\n v?\n (?:[0-9]+!)? # epoch\n [0-9]+(?:\.[0-9]+)* # release\n (?: # pre release\n [-_\.]?\n (alpha|beta|preview|pre|a|b|c|rc)\n [-_\.]?\n [0-9]*\n )?\n (?: # post release\n (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)\n )?\n (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release\n )\n )\n """\n\n _regex = re.compile(\n r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",\n re.VERBOSE | re.IGNORECASE,\n )\n\n _operators = {\n "~=": "compatible",\n "==": "equal",\n "!=": "not_equal",\n "<=": "less_than_equal",\n ">=": "greater_than_equal",\n "<": "less_than",\n ">": "greater_than",\n "===": "arbitrary",\n }\n\n def __init__(self, spec: str = "", prereleases: bool | None = None) -> None:\n """Initialize a Specifier instance.\n\n :param spec:\n The string representation of a specifier which will be parsed and\n normalized before use.\n :param prereleases:\n This tells the specifier if it should accept prerelease versions if\n applicable or not. The default of ``None`` will autodetect it from the\n given specifiers.\n :raises InvalidSpecifier:\n If the given specifier is invalid (i.e. 
bad syntax).\n """\n match = self._regex.search(spec)\n if not match:\n raise InvalidSpecifier(f"Invalid specifier: {spec!r}")\n\n self._spec: tuple[str, str] = (\n match.group("operator").strip(),\n match.group("version").strip(),\n )\n\n # Store whether or not this Specifier should accept prereleases\n self._prereleases = prereleases\n\n # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515\n @property # type: ignore[override]\n def prereleases(self) -> bool:\n # If there is an explicit prereleases set for this, then we'll just\n # blindly use that.\n if self._prereleases is not None:\n return self._prereleases\n\n # Look at all of our specifiers and determine if they are inclusive\n # operators, and if they are if they are including an explicit\n # prerelease.\n operator, version = self._spec\n if operator in ["==", ">=", "<=", "~=", "===", ">", "<"]:\n # The == specifier can include a trailing .*, if it does we\n # want to remove before parsing.\n if operator == "==" and version.endswith(".*"):\n version = version[:-2]\n\n # Parse the version, and if it is a pre-release than this\n # specifier allows pre-releases.\n if Version(version).is_prerelease:\n return True\n\n return False\n\n @prereleases.setter\n def prereleases(self, value: bool) -> None:\n self._prereleases = value\n\n @property\n def operator(self) -> str:\n """The operator of this specifier.\n\n >>> Specifier("==1.2.3").operator\n '=='\n """\n return self._spec[0]\n\n @property\n def version(self) -> str:\n """The version of this specifier.\n\n >>> Specifier("==1.2.3").version\n '1.2.3'\n """\n return self._spec[1]\n\n def __repr__(self) -> str:\n """A representation of the Specifier that shows all internal state.\n\n >>> Specifier('>=1.0.0')\n <Specifier('>=1.0.0')>\n >>> Specifier('>=1.0.0', prereleases=False)\n <Specifier('>=1.0.0', prereleases=False)>\n >>> Specifier('>=1.0.0', prereleases=True)\n <Specifier('>=1.0.0', prereleases=True)>\n """\n pre = (\n f", 
prereleases={self.prereleases!r}"\n if self._prereleases is not None\n else ""\n )\n\n return f"<{self.__class__.__name__}({str(self)!r}{pre})>"\n\n def __str__(self) -> str:\n """A string representation of the Specifier that can be round-tripped.\n\n >>> str(Specifier('>=1.0.0'))\n '>=1.0.0'\n >>> str(Specifier('>=1.0.0', prereleases=False))\n '>=1.0.0'\n """\n return "{}{}".format(*self._spec)\n\n @property\n def _canonical_spec(self) -> tuple[str, str]:\n canonical_version = canonicalize_version(\n self._spec[1],\n strip_trailing_zero=(self._spec[0] != "~="),\n )\n return self._spec[0], canonical_version\n\n def __hash__(self) -> int:\n return hash(self._canonical_spec)\n\n def __eq__(self, other: object) -> bool:\n """Whether or not the two Specifier-like objects are equal.\n\n :param other: The other object to check against.\n\n The value of :attr:`prereleases` is ignored.\n\n >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")\n True\n >>> (Specifier("==1.2.3", prereleases=False) ==\n ... Specifier("==1.2.3", prereleases=True))\n True\n >>> Specifier("==1.2.3") == "==1.2.3"\n True\n >>> Specifier("==1.2.3") == Specifier("==1.2.4")\n False\n >>> Specifier("==1.2.3") == Specifier("~=1.2.3")\n False\n """\n if isinstance(other, str):\n try:\n other = self.__class__(str(other))\n except InvalidSpecifier:\n return NotImplemented\n elif not isinstance(other, self.__class__):\n return NotImplemented\n\n return self._canonical_spec == other._canonical_spec\n\n def _get_operator(self, op: str) -> CallableOperator:\n operator_callable: CallableOperator = getattr(\n self, f"_compare_{self._operators[op]}"\n )\n return operator_callable\n\n def _compare_compatible(self, prospective: Version, spec: str) -> bool:\n # Compatible releases have an equivalent combination of >= and ==. That\n # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to\n # implement this in terms of the other specifiers instead of\n # implementing it ourselves. 
The only thing we need to do is construct\n # the other specifiers.\n\n # We want everything but the last item in the version, but we want to\n # ignore suffix segments.\n prefix = _version_join(\n list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]\n )\n\n # Add the prefix notation to the end of our string\n prefix += ".*"\n\n return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(\n prospective, prefix\n )\n\n def _compare_equal(self, prospective: Version, spec: str) -> bool:\n # We need special logic to handle prefix matching\n if spec.endswith(".*"):\n # In the case of prefix matching we want to ignore local segment.\n normalized_prospective = canonicalize_version(\n prospective.public, strip_trailing_zero=False\n )\n # Get the normalized version string ignoring the trailing .*\n normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)\n # Split the spec out by bangs and dots, and pretend that there is\n # an implicit dot in between a release segment and a pre-release segment.\n split_spec = _version_split(normalized_spec)\n\n # Split the prospective version out by bangs and dots, and pretend\n # that there is an implicit dot in between a release segment and\n # a pre-release segment.\n split_prospective = _version_split(normalized_prospective)\n\n # 0-pad the prospective version before shortening it to get the correct\n # shortened version.\n padded_prospective, _ = _pad_version(split_prospective, split_spec)\n\n # Shorten the prospective version to be the same length as the spec\n # so that we can determine if the specifier is a prefix of the\n # prospective version or not.\n shortened_prospective = padded_prospective[: len(split_spec)]\n\n return shortened_prospective == split_spec\n else:\n # Convert our spec string into a Version\n spec_version = Version(spec)\n\n # If the specifier does not have a local segment, then we want to\n # act as if the prospective version also does not have a local\n 
# segment.\n if not spec_version.local:\n prospective = Version(prospective.public)\n\n return prospective == spec_version\n\n def _compare_not_equal(self, prospective: Version, spec: str) -> bool:\n return not self._compare_equal(prospective, spec)\n\n def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:\n # NB: Local version identifiers are NOT permitted in the version\n # specifier, so local version labels can be universally removed from\n # the prospective version.\n return Version(prospective.public) <= Version(spec)\n\n def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:\n # NB: Local version identifiers are NOT permitted in the version\n # specifier, so local version labels can be universally removed from\n # the prospective version.\n return Version(prospective.public) >= Version(spec)\n\n def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:\n # Convert our spec to a Version instance, since we'll want to work with\n # it as a version.\n spec = Version(spec_str)\n\n # Check to see if the prospective version is less than the spec\n # version. If it's not we can short circuit and just return False now\n # instead of doing extra unneeded work.\n if not prospective < spec:\n return False\n\n # This special case is here so that, unless the specifier itself\n # includes is a pre-release version, that we do not accept pre-release\n # versions for the version mentioned in the specifier (e.g. 
<3.1 should\n # not match 3.1.dev0, but should match 3.0.dev0).\n if not spec.is_prerelease and prospective.is_prerelease:\n if Version(prospective.base_version) == Version(spec.base_version):\n return False\n\n # If we've gotten to here, it means that prospective version is both\n # less than the spec version *and* it's not a pre-release of the same\n # version in the spec.\n return True\n\n def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:\n # Convert our spec to a Version instance, since we'll want to work with\n # it as a version.\n spec = Version(spec_str)\n\n # Check to see if the prospective version is greater than the spec\n # version. If it's not we can short circuit and just return False now\n # instead of doing extra unneeded work.\n if not prospective > spec:\n return False\n\n # This special case is here so that, unless the specifier itself\n # includes is a post-release version, that we do not accept\n # post-release versions for the version mentioned in the specifier\n # (e.g. 
>3.1 should not match 3.0.post0, but should match 3.2.post0).\n if not spec.is_postrelease and prospective.is_postrelease:\n if Version(prospective.base_version) == Version(spec.base_version):\n return False\n\n # Ensure that we do not allow a local version of the version mentioned\n # in the specifier, which is technically greater than, to match.\n if prospective.local is not None:\n if Version(prospective.base_version) == Version(spec.base_version):\n return False\n\n # If we've gotten to here, it means that prospective version is both\n # greater than the spec version *and* it's not a pre-release of the\n # same version in the spec.\n return True\n\n def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:\n return str(prospective).lower() == str(spec).lower()\n\n def __contains__(self, item: str | Version) -> bool:\n """Return whether or not the item is contained in this specifier.\n\n :param item: The item to check for.\n\n This is used for the ``in`` operator and behaves the same as\n :meth:`contains` with no ``prereleases`` argument passed.\n\n >>> "1.2.3" in Specifier(">=1.2.3")\n True\n >>> Version("1.2.3") in Specifier(">=1.2.3")\n True\n >>> "1.0.0" in Specifier(">=1.2.3")\n False\n >>> "1.3.0a1" in Specifier(">=1.2.3")\n False\n >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)\n True\n """\n return self.contains(item)\n\n def contains(self, item: UnparsedVersion, prereleases: bool | None = None) -> bool:\n """Return whether or not the item is contained in this specifier.\n\n :param item:\n The item to check for, which can be a version string or a\n :class:`Version` instance.\n :param prereleases:\n Whether or not to match prereleases with this Specifier. 
If set to\n ``None`` (the default), it uses :attr:`prereleases` to determine\n whether or not prereleases are allowed.\n\n >>> Specifier(">=1.2.3").contains("1.2.3")\n True\n >>> Specifier(">=1.2.3").contains(Version("1.2.3"))\n True\n >>> Specifier(">=1.2.3").contains("1.0.0")\n False\n >>> Specifier(">=1.2.3").contains("1.3.0a1")\n False\n >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")\n True\n >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)\n True\n """\n\n # Determine if prereleases are to be allowed or not.\n if prereleases is None:\n prereleases = self.prereleases\n\n # Normalize item to a Version, this allows us to have a shortcut for\n # "2.0" in Specifier(">=2")\n normalized_item = _coerce_version(item)\n\n # Determine if we should be supporting prereleases in this specifier\n # or not, if we do not support prereleases than we can short circuit\n # logic if this version is a prereleases.\n if normalized_item.is_prerelease and not prereleases:\n return False\n\n # Actually do the comparison to determine if this item is contained\n # within this Specifier or not.\n operator_callable: CallableOperator = self._get_operator(self.operator)\n return operator_callable(normalized_item, self.version)\n\n def filter(\n self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None\n ) -> Iterator[UnparsedVersionVar]:\n """Filter items in the given iterable, that match the specifier.\n\n :param iterable:\n An iterable that can contain version strings and :class:`Version` instances.\n The items in the iterable will be filtered according to the specifier.\n :param prereleases:\n Whether or not to allow prereleases in the returned iterator. 
If set to\n ``None`` (the default), it will be intelligently decide whether to allow\n prereleases or not (based on the :attr:`prereleases` attribute, and\n whether the only versions matching are prereleases).\n\n This method is smarter than just ``filter(Specifier().contains, [...])``\n because it implements the rule from :pep:`440` that a prerelease item\n SHOULD be accepted if no other versions match the given specifier.\n\n >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))\n ['1.3']\n >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))\n ['1.2.3', '1.3', <Version('1.4')>]\n >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))\n ['1.5a1']\n >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))\n ['1.3', '1.5a1']\n >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))\n ['1.3', '1.5a1']\n """\n\n yielded = False\n found_prereleases = []\n\n kw = {"prereleases": prereleases if prereleases is not None else True}\n\n # Attempt to iterate over all the values in the iterable and if any of\n # them match, yield them.\n for version in iterable:\n parsed_version = _coerce_version(version)\n\n if self.contains(parsed_version, **kw):\n # If our version is a prerelease, and we were not set to allow\n # prereleases, then we'll store it for later in case nothing\n # else matches this specifier.\n if parsed_version.is_prerelease and not (\n prereleases or self.prereleases\n ):\n found_prereleases.append(version)\n # Either this is not a prerelease, or we should have been\n # accepting prereleases from the beginning.\n else:\n yielded = True\n yield version\n\n # Now that we've iterated over everything, determine if we've yielded\n # any values, and if we have not and we have any prereleases stored up\n # then we will go ahead and yield the prereleases.\n if not yielded and found_prereleases:\n for version in found_prereleases:\n yield version\n\n\n_prefix_regex = 
re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")\n\n\ndef _version_split(version: str) -> list[str]:\n """Split version into components.\n\n The split components are intended for version comparison. The logic does\n not attempt to retain the original version string, so joining the\n components back with :func:`_version_join` may not produce the original\n version string.\n """\n result: list[str] = []\n\n epoch, _, rest = version.rpartition("!")\n result.append(epoch or "0")\n\n for item in rest.split("."):\n match = _prefix_regex.search(item)\n if match:\n result.extend(match.groups())\n else:\n result.append(item)\n return result\n\n\ndef _version_join(components: list[str]) -> str:\n """Join split version components into a version string.\n\n This function assumes the input came from :func:`_version_split`, where the\n first component must be the epoch (either empty or numeric), and all other\n components numeric.\n """\n epoch, *rest = components\n return f"{epoch}!{'.'.join(rest)}"\n\n\ndef _is_not_suffix(segment: str) -> bool:\n return not any(\n segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")\n )\n\n\ndef _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]:\n left_split, right_split = [], []\n\n # Get the release segment of our versions\n left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))\n right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))\n\n # Get the rest of our versions\n left_split.append(left[len(left_split[0]) :])\n right_split.append(right[len(right_split[0]) :])\n\n # Insert our padding\n left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))\n right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))\n\n return (\n list(itertools.chain.from_iterable(left_split)),\n list(itertools.chain.from_iterable(right_split)),\n )\n\n\nclass SpecifierSet(BaseSpecifier):\n """This class abstracts handling of a set of 
version specifiers.\n\n It can be passed a single specifier (``>=3.0``), a comma-separated list of\n specifiers (``>=3.0,!=3.1``), or no specifier at all.\n """\n\n def __init__(\n self,\n specifiers: str | Iterable[Specifier] = "",\n prereleases: bool | None = None,\n ) -> None:\n """Initialize a SpecifierSet instance.\n\n :param specifiers:\n The string representation of a specifier or a comma-separated list of\n specifiers which will be parsed and normalized before use.\n May also be an iterable of ``Specifier`` instances, which will be used\n as is.\n :param prereleases:\n This tells the SpecifierSet if it should accept prerelease versions if\n applicable or not. The default of ``None`` will autodetect it from the\n given specifiers.\n\n :raises InvalidSpecifier:\n If the given ``specifiers`` are not parseable than this exception will be\n raised.\n """\n\n if isinstance(specifiers, str):\n # Split on `,` to break each individual specifier into its own item, and\n # strip each item to remove leading/trailing whitespace.\n split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]\n\n # Make each individual specifier a Specifier and save in a frozen set\n # for later.\n self._specs = frozenset(map(Specifier, split_specifiers))\n else:\n # Save the supplied specifiers in a frozen set.\n self._specs = frozenset(specifiers)\n\n # Store our prereleases value so we can use it later to determine if\n # we accept prereleases or not.\n self._prereleases = prereleases\n\n @property\n def prereleases(self) -> bool | None:\n # If we have been given an explicit prerelease modifier, then we'll\n # pass that through here.\n if self._prereleases is not None:\n return self._prereleases\n\n # If we don't have any specifiers, and we don't have a forced value,\n # then we'll just return None since we don't know if this should have\n # pre-releases or not.\n if not self._specs:\n return None\n\n # Otherwise we'll see if any of the given specifiers accept\n # 
prereleases, if any of them do we'll return True, otherwise False.\n return any(s.prereleases for s in self._specs)\n\n @prereleases.setter\n def prereleases(self, value: bool) -> None:\n self._prereleases = value\n\n def __repr__(self) -> str:\n """A representation of the specifier set that shows all internal state.\n\n Note that the ordering of the individual specifiers within the set may not\n match the input string.\n\n >>> SpecifierSet('>=1.0.0,!=2.0.0')\n <SpecifierSet('!=2.0.0,>=1.0.0')>\n >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)\n <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>\n >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)\n <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>\n """\n pre = (\n f", prereleases={self.prereleases!r}"\n if self._prereleases is not None\n else ""\n )\n\n return f"<SpecifierSet({str(self)!r}{pre})>"\n\n def __str__(self) -> str:\n """A string representation of the specifier set that can be round-tripped.\n\n Note that the ordering of the individual specifiers within the set may not\n match the input string.\n\n >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))\n '!=1.0.1,>=1.0.0'\n >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))\n '!=1.0.1,>=1.0.0'\n """\n return ",".join(sorted(str(s) for s in self._specs))\n\n def __hash__(self) -> int:\n return hash(self._specs)\n\n def __and__(self, other: SpecifierSet | str) -> SpecifierSet:\n """Return a SpecifierSet which is a combination of the two sets.\n\n :param other: The other object to combine with.\n\n >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'\n <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>\n >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')\n <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>\n """\n if isinstance(other, str):\n other = SpecifierSet(other)\n elif not isinstance(other, SpecifierSet):\n return NotImplemented\n\n specifier = SpecifierSet()\n specifier._specs = frozenset(self._specs | 
other._specs)\n\n if self._prereleases is None and other._prereleases is not None:\n specifier._prereleases = other._prereleases\n elif self._prereleases is not None and other._prereleases is None:\n specifier._prereleases = self._prereleases\n elif self._prereleases == other._prereleases:\n specifier._prereleases = self._prereleases\n else:\n raise ValueError(\n "Cannot combine SpecifierSets with True and False prerelease overrides."\n )\n\n return specifier\n\n def __eq__(self, other: object) -> bool:\n """Whether or not the two SpecifierSet-like objects are equal.\n\n :param other: The other object to check against.\n\n The value of :attr:`prereleases` is ignored.\n\n >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")\n True\n >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==\n ... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))\n True\n >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"\n True\n >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")\n False\n >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")\n False\n """\n if isinstance(other, (str, Specifier)):\n other = SpecifierSet(str(other))\n elif not isinstance(other, SpecifierSet):\n return NotImplemented\n\n return self._specs == other._specs\n\n def __len__(self) -> int:\n """Returns the number of specifiers in this specifier set."""\n return len(self._specs)\n\n def __iter__(self) -> Iterator[Specifier]:\n """\n Returns an iterator over all the underlying :class:`Specifier` instances\n in this specifier set.\n\n >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)\n [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]\n """\n return iter(self._specs)\n\n def __contains__(self, item: UnparsedVersion) -> bool:\n """Return whether or not the item is contained in this specifier.\n\n :param item: The item to check for.\n\n This is used for the ``in`` operator and behaves the same as\n :meth:`contains` with no ``prereleases`` argument 
passed.\n\n >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")\n True\n >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")\n True\n >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")\n False\n >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")\n False\n >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)\n True\n """\n return self.contains(item)\n\n def contains(\n self,\n item: UnparsedVersion,\n prereleases: bool | None = None,\n installed: bool | None = None,\n ) -> bool:\n """Return whether or not the item is contained in this SpecifierSet.\n\n :param item:\n The item to check for, which can be a version string or a\n :class:`Version` instance.\n :param prereleases:\n Whether or not to match prereleases with this SpecifierSet. If set to\n ``None`` (the default), it uses :attr:`prereleases` to determine\n whether or not prereleases are allowed.\n\n >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")\n True\n >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))\n True\n >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")\n False\n >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")\n False\n >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")\n True\n >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)\n True\n """\n # Ensure that our item is a Version instance.\n if not isinstance(item, Version):\n item = Version(item)\n\n # Determine if we're forcing a prerelease or not, if we're not forcing\n # one for this particular filter call, then we'll use whatever the\n # SpecifierSet thinks for whether or not we should support prereleases.\n if prereleases is None:\n prereleases = self.prereleases\n\n # We can determine if we're going to allow pre-releases by looking to\n # see if any of the underlying items supports them. 
If none of them do\n # and this item is a pre-release then we do not allow it and we can\n # short circuit that here.\n # Note: This means that 1.0.dev1 would not be contained in something\n # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0\n if not prereleases and item.is_prerelease:\n return False\n\n if installed and item.is_prerelease:\n item = Version(item.base_version)\n\n # We simply dispatch to the underlying specs here to make sure that the\n # given version is contained within all of them.\n # Note: This use of all() here means that an empty set of specifiers\n # will always return True, this is an explicit design decision.\n return all(s.contains(item, prereleases=prereleases) for s in self._specs)\n\n def filter(\n self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None\n ) -> Iterator[UnparsedVersionVar]:\n """Filter items in the given iterable, that match the specifiers in this set.\n\n :param iterable:\n An iterable that can contain version strings and :class:`Version` instances.\n The items in the iterable will be filtered according to the specifier.\n :param prereleases:\n Whether or not to allow prereleases in the returned iterator. 
If set to\n ``None`` (the default), it will be intelligently decide whether to allow\n prereleases or not (based on the :attr:`prereleases` attribute, and\n whether the only versions matching are prereleases).\n\n This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``\n because it implements the rule from :pep:`440` that a prerelease item\n SHOULD be accepted if no other versions match the given specifier.\n\n >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))\n ['1.3']\n >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))\n ['1.3', <Version('1.4')>]\n >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))\n []\n >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))\n ['1.3', '1.5a1']\n >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))\n ['1.3', '1.5a1']\n\n An "empty" SpecifierSet will filter items based on the presence of prerelease\n versions in the set.\n\n >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))\n ['1.3']\n >>> list(SpecifierSet("").filter(["1.5a1"]))\n ['1.5a1']\n >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))\n ['1.3', '1.5a1']\n >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))\n ['1.3', '1.5a1']\n """\n # Determine if we're forcing a prerelease or not, if we're not forcing\n # one for this particular filter call, then we'll use whatever the\n # SpecifierSet thinks for whether or not we should support prereleases.\n if prereleases is None:\n prereleases = self.prereleases\n\n # If we have any specifiers, then we want to wrap our iterable in the\n # filter method for each one, this will act as a logical AND amongst\n # each specifier.\n if self._specs:\n for spec in self._specs:\n iterable = spec.filter(iterable, prereleases=bool(prereleases))\n return iter(iterable)\n # If we do not have any specifiers, then we need to have a rough filter\n # which will filter out any pre-releases, unless 
there are no final\n # releases.\n else:\n filtered: list[UnparsedVersionVar] = []\n found_prereleases: list[UnparsedVersionVar] = []\n\n for item in iterable:\n parsed_version = _coerce_version(item)\n\n # Store any item which is a pre-release for later unless we've\n # already found a final version or we are accepting prereleases\n if parsed_version.is_prerelease and not prereleases:\n if not filtered:\n found_prereleases.append(item)\n else:\n filtered.append(item)\n\n # If we've found no items except for pre-releases, then we'll go\n # ahead and use the pre-releases\n if not filtered and found_prereleases and prereleases is None:\n return iter(found_prereleases)\n\n return iter(filtered)\n
.venv\Lib\site-packages\pip\_vendor\packaging\specifiers.py
specifiers.py
Python
40,079
0.95
0.175662
0.182898
node-utils
5
2024-08-15T19:07:04.631935
Apache-2.0
false
93e7022c49b04a9a34e89a6261b4e6e1
# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import annotations\n\nimport logging\nimport platform\nimport re\nimport struct\nimport subprocess\nimport sys\nimport sysconfig\nfrom importlib.machinery import EXTENSION_SUFFIXES\nfrom typing import (\n Iterable,\n Iterator,\n Sequence,\n Tuple,\n cast,\n)\n\nfrom . import _manylinux, _musllinux\n\nlogger = logging.getLogger(__name__)\n\nPythonVersion = Sequence[int]\nAppleVersion = Tuple[int, int]\n\nINTERPRETER_SHORT_NAMES: dict[str, str] = {\n "python": "py", # Generic.\n "cpython": "cp",\n "pypy": "pp",\n "ironpython": "ip",\n "jython": "jy",\n}\n\n\n_32_BIT_INTERPRETER = struct.calcsize("P") == 4\n\n\nclass Tag:\n """\n A representation of the tag triple for a wheel.\n\n Instances are considered immutable and thus are hashable. Equality checking\n is also supported.\n """\n\n __slots__ = ["_abi", "_hash", "_interpreter", "_platform"]\n\n def __init__(self, interpreter: str, abi: str, platform: str) -> None:\n self._interpreter = interpreter.lower()\n self._abi = abi.lower()\n self._platform = platform.lower()\n # The __hash__ of every single element in a Set[Tag] will be evaluated each time\n # that a set calls its `.disjoint()` method, which may be called hundreds of\n # times when scanning a page of links for packages with tags matching that\n # Set[Tag]. 
Pre-computing the value here produces significant speedups for\n # downstream consumers.\n self._hash = hash((self._interpreter, self._abi, self._platform))\n\n @property\n def interpreter(self) -> str:\n return self._interpreter\n\n @property\n def abi(self) -> str:\n return self._abi\n\n @property\n def platform(self) -> str:\n return self._platform\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Tag):\n return NotImplemented\n\n return (\n (self._hash == other._hash) # Short-circuit ASAP for perf reasons.\n and (self._platform == other._platform)\n and (self._abi == other._abi)\n and (self._interpreter == other._interpreter)\n )\n\n def __hash__(self) -> int:\n return self._hash\n\n def __str__(self) -> str:\n return f"{self._interpreter}-{self._abi}-{self._platform}"\n\n def __repr__(self) -> str:\n return f"<{self} @ {id(self)}>"\n\n\ndef parse_tag(tag: str) -> frozenset[Tag]:\n """\n Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.\n\n Returning a set is required due to the possibility that the tag is a\n compressed tag set.\n """\n tags = set()\n interpreters, abis, platforms = tag.split("-")\n for interpreter in interpreters.split("."):\n for abi in abis.split("."):\n for platform_ in platforms.split("."):\n tags.add(Tag(interpreter, abi, platform_))\n return frozenset(tags)\n\n\ndef _get_config_var(name: str, warn: bool = False) -> int | str | None:\n value: int | str | None = sysconfig.get_config_var(name)\n if value is None and warn:\n logger.debug(\n "Config variable '%s' is unset, Python ABI tag may be incorrect", name\n )\n return value\n\n\ndef _normalize_string(string: str) -> str:\n return string.replace(".", "_").replace("-", "_").replace(" ", "_")\n\n\ndef _is_threaded_cpython(abis: list[str]) -> bool:\n """\n Determine if the ABI corresponds to a threaded (`--disable-gil`) build.\n\n The threaded builds are indicated by a "t" in the abiflags.\n """\n if len(abis) == 0:\n return False\n # 
expect e.g., cp313\n m = re.match(r"cp\d+(.*)", abis[0])\n if not m:\n return False\n abiflags = m.group(1)\n return "t" in abiflags\n\n\ndef _abi3_applies(python_version: PythonVersion, threading: bool) -> bool:\n """\n Determine if the Python version supports abi3.\n\n PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`)\n builds do not support abi3.\n """\n return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading\n\n\ndef _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]:\n py_version = tuple(py_version) # To allow for version comparison.\n abis = []\n version = _version_nodot(py_version[:2])\n threading = debug = pymalloc = ucs4 = ""\n with_debug = _get_config_var("Py_DEBUG", warn)\n has_refcount = hasattr(sys, "gettotalrefcount")\n # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled\n # extension modules is the best option.\n # https://github.com/pypa/pip/issues/3383#issuecomment-173267692\n has_ext = "_d.pyd" in EXTENSION_SUFFIXES\n if with_debug or (with_debug is None and (has_refcount or has_ext)):\n debug = "d"\n if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn):\n threading = "t"\n if py_version < (3, 8):\n with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)\n if with_pymalloc or with_pymalloc is None:\n pymalloc = "m"\n if py_version < (3, 3):\n unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)\n if unicode_size == 4 or (\n unicode_size is None and sys.maxunicode == 0x10FFFF\n ):\n ucs4 = "u"\n elif debug:\n # Debug builds can also load "normal" extension modules.\n # We can also assume no UCS-4 or pymalloc requirement.\n abis.append(f"cp{version}{threading}")\n abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}")\n return abis\n\n\ndef cpython_tags(\n python_version: PythonVersion | None = None,\n abis: Iterable[str] | None = None,\n platforms: Iterable[str] | None = None,\n *,\n warn: bool = False,\n) -> 
Iterator[Tag]:\n """\n Yields the tags for a CPython interpreter.\n\n The tags consist of:\n - cp<python_version>-<abi>-<platform>\n - cp<python_version>-abi3-<platform>\n - cp<python_version>-none-<platform>\n - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.\n\n If python_version only specifies a major version then user-provided ABIs and\n the 'none' ABItag will be used.\n\n If 'abi3' or 'none' are specified in 'abis' then they will be yielded at\n their normal position and not at the beginning.\n """\n if not python_version:\n python_version = sys.version_info[:2]\n\n interpreter = f"cp{_version_nodot(python_version[:2])}"\n\n if abis is None:\n if len(python_version) > 1:\n abis = _cpython_abis(python_version, warn)\n else:\n abis = []\n abis = list(abis)\n # 'abi3' and 'none' are explicitly handled later.\n for explicit_abi in ("abi3", "none"):\n try:\n abis.remove(explicit_abi)\n except ValueError:\n pass\n\n platforms = list(platforms or platform_tags())\n for abi in abis:\n for platform_ in platforms:\n yield Tag(interpreter, abi, platform_)\n\n threading = _is_threaded_cpython(abis)\n use_abi3 = _abi3_applies(python_version, threading)\n if use_abi3:\n yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)\n yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)\n\n if use_abi3:\n for minor_version in range(python_version[1] - 1, 1, -1):\n for platform_ in platforms:\n version = _version_nodot((python_version[0], minor_version))\n interpreter = f"cp{version}"\n yield Tag(interpreter, "abi3", platform_)\n\n\ndef _generic_abi() -> list[str]:\n """\n Return the ABI tag based on EXT_SUFFIX.\n """\n # The following are examples of `EXT_SUFFIX`.\n # We want to keep the parts which are related to the ABI and remove the\n # parts which are related to the platform:\n # - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310\n # - mac: '.cpython-310-darwin.so' => cp310\n # - win: 
'.cp310-win_amd64.pyd' => cp310\n # - win: '.pyd' => cp37 (uses _cpython_abis())\n # - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73\n # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'\n # => graalpy_38_native\n\n ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)\n if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":\n raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")\n parts = ext_suffix.split(".")\n if len(parts) < 3:\n # CPython3.7 and earlier uses ".pyd" on Windows.\n return _cpython_abis(sys.version_info[:2])\n soabi = parts[1]\n if soabi.startswith("cpython"):\n # non-windows\n abi = "cp" + soabi.split("-")[1]\n elif soabi.startswith("cp"):\n # windows\n abi = soabi.split("-")[0]\n elif soabi.startswith("pypy"):\n abi = "-".join(soabi.split("-")[:2])\n elif soabi.startswith("graalpy"):\n abi = "-".join(soabi.split("-")[:3])\n elif soabi:\n # pyston, ironpython, others?\n abi = soabi\n else:\n return []\n return [_normalize_string(abi)]\n\n\ndef generic_tags(\n interpreter: str | None = None,\n abis: Iterable[str] | None = None,\n platforms: Iterable[str] | None = None,\n *,\n warn: bool = False,\n) -> Iterator[Tag]:\n """\n Yields the tags for a generic interpreter.\n\n The tags consist of:\n - <interpreter>-<abi>-<platform>\n\n The "none" ABI will be added if it was not explicitly provided.\n """\n if not interpreter:\n interp_name = interpreter_name()\n interp_version = interpreter_version(warn=warn)\n interpreter = "".join([interp_name, interp_version])\n if abis is None:\n abis = _generic_abi()\n else:\n abis = list(abis)\n platforms = list(platforms or platform_tags())\n if "none" not in abis:\n abis.append("none")\n for abi in abis:\n for platform_ in platforms:\n yield Tag(interpreter, abi, platform_)\n\n\ndef _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:\n """\n Yields Python versions in descending order.\n\n After the latest version, the major-only version will be yielded, and then\n all 
previous versions of that major version.\n """\n if len(py_version) > 1:\n yield f"py{_version_nodot(py_version[:2])}"\n yield f"py{py_version[0]}"\n if len(py_version) > 1:\n for minor in range(py_version[1] - 1, -1, -1):\n yield f"py{_version_nodot((py_version[0], minor))}"\n\n\ndef compatible_tags(\n python_version: PythonVersion | None = None,\n interpreter: str | None = None,\n platforms: Iterable[str] | None = None,\n) -> Iterator[Tag]:\n """\n Yields the sequence of tags that are compatible with a specific version of Python.\n\n The tags consist of:\n - py*-none-<platform>\n - <interpreter>-none-any # ... if `interpreter` is provided.\n - py*-none-any\n """\n if not python_version:\n python_version = sys.version_info[:2]\n platforms = list(platforms or platform_tags())\n for version in _py_interpreter_range(python_version):\n for platform_ in platforms:\n yield Tag(version, "none", platform_)\n if interpreter:\n yield Tag(interpreter, "none", "any")\n for version in _py_interpreter_range(python_version):\n yield Tag(version, "none", "any")\n\n\ndef _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:\n if not is_32bit:\n return arch\n\n if arch.startswith("ppc"):\n return "ppc"\n\n return "i386"\n\n\ndef _mac_binary_formats(version: AppleVersion, cpu_arch: str) -> list[str]:\n formats = [cpu_arch]\n if cpu_arch == "x86_64":\n if version < (10, 4):\n return []\n formats.extend(["intel", "fat64", "fat32"])\n\n elif cpu_arch == "i386":\n if version < (10, 4):\n return []\n formats.extend(["intel", "fat32", "fat"])\n\n elif cpu_arch == "ppc64":\n # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?\n if version > (10, 5) or version < (10, 4):\n return []\n formats.append("fat64")\n\n elif cpu_arch == "ppc":\n if version > (10, 6):\n return []\n formats.extend(["fat32", "fat"])\n\n if cpu_arch in {"arm64", "x86_64"}:\n formats.append("universal2")\n\n if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:\n 
formats.append("universal")\n\n return formats\n\n\ndef mac_platforms(\n version: AppleVersion | None = None, arch: str | None = None\n) -> Iterator[str]:\n """\n Yields the platform tags for a macOS system.\n\n The `version` parameter is a two-item tuple specifying the macOS version to\n generate platform tags for. The `arch` parameter is the CPU architecture to\n generate platform tags for. Both parameters default to the appropriate value\n for the current system.\n """\n version_str, _, cpu_arch = platform.mac_ver()\n if version is None:\n version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))\n if version == (10, 16):\n # When built against an older macOS SDK, Python will report macOS 10.16\n # instead of the real version.\n version_str = subprocess.run(\n [\n sys.executable,\n "-sS",\n "-c",\n "import platform; print(platform.mac_ver()[0])",\n ],\n check=True,\n env={"SYSTEM_VERSION_COMPAT": "0"},\n stdout=subprocess.PIPE,\n text=True,\n ).stdout\n version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))\n else:\n version = version\n if arch is None:\n arch = _mac_arch(cpu_arch)\n else:\n arch = arch\n\n if (10, 0) <= version and version < (11, 0):\n # Prior to Mac OS 11, each yearly release of Mac OS bumped the\n # "minor" version number. The major version was always 10.\n major_version = 10\n for minor_version in range(version[1], -1, -1):\n compat_version = major_version, minor_version\n binary_formats = _mac_binary_formats(compat_version, arch)\n for binary_format in binary_formats:\n yield f"macosx_{major_version}_{minor_version}_{binary_format}"\n\n if version >= (11, 0):\n # Starting with Mac OS 11, each yearly release bumps the major version\n # number. 
The minor versions are now the midyear updates.\n minor_version = 0\n for major_version in range(version[0], 10, -1):\n compat_version = major_version, minor_version\n binary_formats = _mac_binary_formats(compat_version, arch)\n for binary_format in binary_formats:\n yield f"macosx_{major_version}_{minor_version}_{binary_format}"\n\n if version >= (11, 0):\n # Mac OS 11 on x86_64 is compatible with binaries from previous releases.\n # Arm64 support was introduced in 11.0, so no Arm binaries from previous\n # releases exist.\n #\n # However, the "universal2" binary format can have a\n # macOS version earlier than 11.0 when the x86_64 part of the binary supports\n # that version of macOS.\n major_version = 10\n if arch == "x86_64":\n for minor_version in range(16, 3, -1):\n compat_version = major_version, minor_version\n binary_formats = _mac_binary_formats(compat_version, arch)\n for binary_format in binary_formats:\n yield f"macosx_{major_version}_{minor_version}_{binary_format}"\n else:\n for minor_version in range(16, 3, -1):\n compat_version = major_version, minor_version\n binary_format = "universal2"\n yield f"macosx_{major_version}_{minor_version}_{binary_format}"\n\n\ndef ios_platforms(\n version: AppleVersion | None = None, multiarch: str | None = None\n) -> Iterator[str]:\n """\n Yields the platform tags for an iOS system.\n\n :param version: A two-item tuple specifying the iOS version to generate\n platform tags for. Defaults to the current iOS version.\n :param multiarch: The CPU architecture+ABI to generate platform tags for -\n (the value used by `sys.implementation._multiarch` e.g.,\n `arm64_iphoneos` or `x84_64_iphonesimulator`). Defaults to the current\n multiarch value.\n """\n if version is None:\n # if iOS is the current platform, ios_ver *must* be defined. 
However,\n # it won't exist for CPython versions before 3.13, which causes a mypy\n # error.\n _, release, _, _ = platform.ios_ver() # type: ignore[attr-defined, unused-ignore]\n version = cast("AppleVersion", tuple(map(int, release.split(".")[:2])))\n\n if multiarch is None:\n multiarch = sys.implementation._multiarch\n multiarch = multiarch.replace("-", "_")\n\n ios_platform_template = "ios_{major}_{minor}_{multiarch}"\n\n # Consider any iOS major.minor version from the version requested, down to\n # 12.0. 12.0 is the first iOS version that is known to have enough features\n # to support CPython. Consider every possible minor release up to X.9. There\n # highest the minor has ever gone is 8 (14.8 and 15.8) but having some extra\n # candidates that won't ever match doesn't really hurt, and it saves us from\n # having to keep an explicit list of known iOS versions in the code. Return\n # the results descending order of version number.\n\n # If the requested major version is less than 12, there won't be any matches.\n if version[0] < 12:\n return\n\n # Consider the actual X.Y version that was requested.\n yield ios_platform_template.format(\n major=version[0], minor=version[1], multiarch=multiarch\n )\n\n # Consider every minor version from X.0 to the minor version prior to the\n # version requested by the platform.\n for minor in range(version[1] - 1, -1, -1):\n yield ios_platform_template.format(\n major=version[0], minor=minor, multiarch=multiarch\n )\n\n for major in range(version[0] - 1, 11, -1):\n for minor in range(9, -1, -1):\n yield ios_platform_template.format(\n major=major, minor=minor, multiarch=multiarch\n )\n\n\ndef android_platforms(\n api_level: int | None = None, abi: str | None = None\n) -> Iterator[str]:\n """\n Yields the :attr:`~Tag.platform` tags for Android. 
If this function is invoked on\n non-Android platforms, the ``api_level`` and ``abi`` arguments are required.\n\n :param int api_level: The maximum `API level\n <https://developer.android.com/tools/releases/platforms>`__ to return. Defaults\n to the current system's version, as returned by ``platform.android_ver``.\n :param str abi: The `Android ABI <https://developer.android.com/ndk/guides/abis>`__,\n e.g. ``arm64_v8a``. Defaults to the current system's ABI , as returned by\n ``sysconfig.get_platform``. Hyphens and periods will be replaced with\n underscores.\n """\n if platform.system() != "Android" and (api_level is None or abi is None):\n raise TypeError(\n "on non-Android platforms, the api_level and abi arguments are required"\n )\n\n if api_level is None:\n # Python 3.13 was the first version to return platform.system() == "Android",\n # and also the first version to define platform.android_ver().\n api_level = platform.android_ver().api_level # type: ignore[attr-defined]\n\n if abi is None:\n abi = sysconfig.get_platform().split("-")[-1]\n abi = _normalize_string(abi)\n\n # 16 is the minimum API level known to have enough features to support CPython\n # without major patching. 
Yield every API level from the maximum down to the\n # minimum, inclusive.\n min_api_level = 16\n for ver in range(api_level, min_api_level - 1, -1):\n yield f"android_{ver}_{abi}"\n\n\ndef _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:\n linux = _normalize_string(sysconfig.get_platform())\n if not linux.startswith("linux_"):\n # we should never be here, just yield the sysconfig one and return\n yield linux\n return\n if is_32bit:\n if linux == "linux_x86_64":\n linux = "linux_i686"\n elif linux == "linux_aarch64":\n linux = "linux_armv8l"\n _, arch = linux.split("_", 1)\n archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch])\n yield from _manylinux.platform_tags(archs)\n yield from _musllinux.platform_tags(archs)\n for arch in archs:\n yield f"linux_{arch}"\n\n\ndef _generic_platforms() -> Iterator[str]:\n yield _normalize_string(sysconfig.get_platform())\n\n\ndef platform_tags() -> Iterator[str]:\n """\n Provides the platform tags for this installation.\n """\n if platform.system() == "Darwin":\n return mac_platforms()\n elif platform.system() == "iOS":\n return ios_platforms()\n elif platform.system() == "Android":\n return android_platforms()\n elif platform.system() == "Linux":\n return _linux_platforms()\n else:\n return _generic_platforms()\n\n\ndef interpreter_name() -> str:\n """\n Returns the name of the running interpreter.\n\n Some implementations have a reserved, two-letter abbreviation which will\n be returned when appropriate.\n """\n name = sys.implementation.name\n return INTERPRETER_SHORT_NAMES.get(name) or name\n\n\ndef interpreter_version(*, warn: bool = False) -> str:\n """\n Returns the version of the running interpreter.\n """\n version = _get_config_var("py_version_nodot", warn=warn)\n if version:\n version = str(version)\n else:\n version = _version_nodot(sys.version_info[:2])\n return version\n\n\ndef _version_nodot(version: PythonVersion) -> str:\n return "".join(map(str, version))\n\n\ndef sys_tags(*, warn: 
bool = False) -> Iterator[Tag]:\n """\n Returns the sequence of tag triples for the running interpreter.\n\n The order of the sequence corresponds to priority order for the\n interpreter, from most to least important.\n """\n\n interp_name = interpreter_name()\n if interp_name == "cp":\n yield from cpython_tags(warn=warn)\n else:\n yield from generic_tags()\n\n if interp_name == "pp":\n interp = "pp3"\n elif interp_name == "cp":\n interp = "cp" + interpreter_version(warn=warn)\n else:\n interp = None\n yield from compatible_tags(interpreter=interp)\n
.venv\Lib\site-packages\pip\_vendor\packaging\tags.py
tags.py
Python
22,745
0.95
0.217988
0.119266
node-utils
978
2025-05-17T15:18:48.681158
BSD-3-Clause
false
343d067796e4b905805026a1740edf57
# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import annotations\n\nimport functools\nimport re\nfrom typing import NewType, Tuple, Union, cast\n\nfrom .tags import Tag, parse_tag\nfrom .version import InvalidVersion, Version, _TrimmedRelease\n\nBuildTag = Union[Tuple[()], Tuple[int, str]]\nNormalizedName = NewType("NormalizedName", str)\n\n\nclass InvalidName(ValueError):\n """\n An invalid distribution name; users should refer to the packaging user guide.\n """\n\n\nclass InvalidWheelFilename(ValueError):\n """\n An invalid wheel filename was found, users should refer to PEP 427.\n """\n\n\nclass InvalidSdistFilename(ValueError):\n """\n An invalid sdist filename was found, users should refer to the packaging user guide.\n """\n\n\n# Core metadata spec for `Name`\n_validate_regex = re.compile(\n r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE\n)\n_canonicalize_regex = re.compile(r"[-_.]+")\n_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")\n# PEP 427: The build number must start with a digit.\n_build_tag_regex = re.compile(r"(\d+)(.*)")\n\n\ndef canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:\n if validate and not _validate_regex.match(name):\n raise InvalidName(f"name is invalid: {name!r}")\n # This is taken from PEP 503.\n value = _canonicalize_regex.sub("-", name).lower()\n return cast(NormalizedName, value)\n\n\ndef is_normalized_name(name: str) -> bool:\n return _normalized_regex.match(name) is not None\n\n\n@functools.singledispatch\ndef canonicalize_version(\n version: Version | str, *, strip_trailing_zero: bool = True\n) -> str:\n """\n Return a canonical form of a version as a string.\n\n >>> canonicalize_version('1.0.1')\n '1.0.1'\n\n Per PEP 625, versions may have multiple canonical forms, differing\n only by trailing zeros.\n\n 
>>> canonicalize_version('1.0.0')\n '1'\n >>> canonicalize_version('1.0.0', strip_trailing_zero=False)\n '1.0.0'\n\n Invalid versions are returned unaltered.\n\n >>> canonicalize_version('foo bar baz')\n 'foo bar baz'\n """\n return str(_TrimmedRelease(str(version)) if strip_trailing_zero else version)\n\n\n@canonicalize_version.register\ndef _(version: str, *, strip_trailing_zero: bool = True) -> str:\n try:\n parsed = Version(version)\n except InvalidVersion:\n # Legacy versions cannot be normalized\n return version\n return canonicalize_version(parsed, strip_trailing_zero=strip_trailing_zero)\n\n\ndef parse_wheel_filename(\n filename: str,\n) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]:\n if not filename.endswith(".whl"):\n raise InvalidWheelFilename(\n f"Invalid wheel filename (extension must be '.whl'): {filename!r}"\n )\n\n filename = filename[:-4]\n dashes = filename.count("-")\n if dashes not in (4, 5):\n raise InvalidWheelFilename(\n f"Invalid wheel filename (wrong number of parts): {filename!r}"\n )\n\n parts = filename.split("-", dashes - 2)\n name_part = parts[0]\n # See PEP 427 for the rules on escaping the project name.\n if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:\n raise InvalidWheelFilename(f"Invalid project name: {filename!r}")\n name = canonicalize_name(name_part)\n\n try:\n version = Version(parts[1])\n except InvalidVersion as e:\n raise InvalidWheelFilename(\n f"Invalid wheel filename (invalid version): {filename!r}"\n ) from e\n\n if dashes == 5:\n build_part = parts[2]\n build_match = _build_tag_regex.match(build_part)\n if build_match is None:\n raise InvalidWheelFilename(\n f"Invalid build number: {build_part} in {filename!r}"\n )\n build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))\n else:\n build = ()\n tags = parse_tag(parts[-1])\n return (name, version, build, tags)\n\n\ndef parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]:\n if 
filename.endswith(".tar.gz"):\n file_stem = filename[: -len(".tar.gz")]\n elif filename.endswith(".zip"):\n file_stem = filename[: -len(".zip")]\n else:\n raise InvalidSdistFilename(\n f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"\n f" {filename!r}"\n )\n\n # We are requiring a PEP 440 version, which cannot contain dashes,\n # so we split on the last dash.\n name_part, sep, version_part = file_stem.rpartition("-")\n if not sep:\n raise InvalidSdistFilename(f"Invalid sdist filename: {filename!r}")\n\n name = canonicalize_name(name_part)\n\n try:\n version = Version(version_part)\n except InvalidVersion as e:\n raise InvalidSdistFilename(\n f"Invalid sdist filename (invalid version): {filename!r}"\n ) from e\n\n return (name, version)\n
.venv\Lib\site-packages\pip\_vendor\packaging\utils.py
utils.py
Python
5,050
0.95
0.147239
0.079365
awesome-app
38
2024-03-26T02:20:44.811134
GPL-3.0
false
f6d73a168977560761887d65c7e9ed18
# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n"""\n.. testsetup::\n\n from pip._vendor.packaging.version import parse, Version\n"""\n\nfrom __future__ import annotations\n\nimport itertools\nimport re\nfrom typing import Any, Callable, NamedTuple, SupportsInt, Tuple, Union\n\nfrom ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType\n\n__all__ = ["VERSION_PATTERN", "InvalidVersion", "Version", "parse"]\n\nLocalType = Tuple[Union[int, str], ...]\n\nCmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]\nCmpLocalType = Union[\n NegativeInfinityType,\n Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],\n]\nCmpKey = Tuple[\n int,\n Tuple[int, ...],\n CmpPrePostDevType,\n CmpPrePostDevType,\n CmpPrePostDevType,\n CmpLocalType,\n]\nVersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]\n\n\nclass _Version(NamedTuple):\n epoch: int\n release: tuple[int, ...]\n dev: tuple[str, int] | None\n pre: tuple[str, int] | None\n post: tuple[str, int] | None\n local: LocalType | None\n\n\ndef parse(version: str) -> Version:\n """Parse the given version string.\n\n >>> parse('1.0.dev1')\n <Version('1.0.dev1')>\n\n :param version: The version string to parse.\n :raises InvalidVersion: When the version string is not a valid version.\n """\n return Version(version)\n\n\nclass InvalidVersion(ValueError):\n """Raised when a version string is not a valid version.\n\n >>> Version("invalid")\n Traceback (most recent call last):\n ...\n packaging.version.InvalidVersion: Invalid version: 'invalid'\n """\n\n\nclass _BaseVersion:\n _key: tuple[Any, ...]\n\n def __hash__(self) -> int:\n return hash(self._key)\n\n # Please keep the duplicated `isinstance` check\n # in the six comparisons hereunder\n # unless you find a way to avoid adding overhead function calls.\n def 
__lt__(self, other: _BaseVersion) -> bool:\n if not isinstance(other, _BaseVersion):\n return NotImplemented\n\n return self._key < other._key\n\n def __le__(self, other: _BaseVersion) -> bool:\n if not isinstance(other, _BaseVersion):\n return NotImplemented\n\n return self._key <= other._key\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, _BaseVersion):\n return NotImplemented\n\n return self._key == other._key\n\n def __ge__(self, other: _BaseVersion) -> bool:\n if not isinstance(other, _BaseVersion):\n return NotImplemented\n\n return self._key >= other._key\n\n def __gt__(self, other: _BaseVersion) -> bool:\n if not isinstance(other, _BaseVersion):\n return NotImplemented\n\n return self._key > other._key\n\n def __ne__(self, other: object) -> bool:\n if not isinstance(other, _BaseVersion):\n return NotImplemented\n\n return self._key != other._key\n\n\n# Deliberately not anchored to the start and end of the string, to make it\n# easier for 3rd party code to reuse\n_VERSION_PATTERN = r"""\n v?\n (?:\n (?:(?P<epoch>[0-9]+)!)? # epoch\n (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment\n (?P<pre> # pre-release\n [-_\.]?\n (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)\n [-_\.]?\n (?P<pre_n>[0-9]+)?\n )?\n (?P<post> # post release\n (?:-(?P<post_n1>[0-9]+))\n |\n (?:\n [-_\.]?\n (?P<post_l>post|rev|r)\n [-_\.]?\n (?P<post_n2>[0-9]+)?\n )\n )?\n (?P<dev> # dev release\n [-_\.]?\n (?P<dev_l>dev)\n [-_\.]?\n (?P<dev_n>[0-9]+)?\n )?\n )\n (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version\n"""\n\nVERSION_PATTERN = _VERSION_PATTERN\n"""\nA string containing the regular expression used to match a valid version.\n\nThe pattern is not anchored at either end, and is intended for embedding in larger\nexpressions (for example, matching a version number as part of a file name). 
The\nregular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``\nflags set.\n\n:meta hide-value:\n"""\n\n\nclass Version(_BaseVersion):\n """This class abstracts handling of a project's versions.\n\n A :class:`Version` instance is comparison aware and can be compared and\n sorted using the standard Python interfaces.\n\n >>> v1 = Version("1.0a5")\n >>> v2 = Version("1.0")\n >>> v1\n <Version('1.0a5')>\n >>> v2\n <Version('1.0')>\n >>> v1 < v2\n True\n >>> v1 == v2\n False\n >>> v1 > v2\n False\n >>> v1 >= v2\n False\n >>> v1 <= v2\n True\n """\n\n _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)\n _key: CmpKey\n\n def __init__(self, version: str) -> None:\n """Initialize a Version object.\n\n :param version:\n The string representation of a version which will be parsed and normalized\n before use.\n :raises InvalidVersion:\n If the ``version`` does not conform to PEP 440 in any way then this\n exception will be raised.\n """\n\n # Validate the version and parse it into pieces\n match = self._regex.search(version)\n if not match:\n raise InvalidVersion(f"Invalid version: {version!r}")\n\n # Store the parsed out pieces of the version\n self._version = _Version(\n epoch=int(match.group("epoch")) if match.group("epoch") else 0,\n release=tuple(int(i) for i in match.group("release").split(".")),\n pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),\n post=_parse_letter_version(\n match.group("post_l"), match.group("post_n1") or match.group("post_n2")\n ),\n dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),\n local=_parse_local_version(match.group("local")),\n )\n\n # Generate a key which will be used for sorting\n self._key = _cmpkey(\n self._version.epoch,\n self._version.release,\n self._version.pre,\n self._version.post,\n self._version.dev,\n self._version.local,\n )\n\n def __repr__(self) -> str:\n """A representation of the Version that shows all internal 
state.\n\n >>> Version('1.0.0')\n <Version('1.0.0')>\n """\n return f"<Version('{self}')>"\n\n def __str__(self) -> str:\n """A string representation of the version that can be round-tripped.\n\n >>> str(Version("1.0a5"))\n '1.0a5'\n """\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f"{self.epoch}!")\n\n # Release segment\n parts.append(".".join(str(x) for x in self.release))\n\n # Pre-release\n if self.pre is not None:\n parts.append("".join(str(x) for x in self.pre))\n\n # Post-release\n if self.post is not None:\n parts.append(f".post{self.post}")\n\n # Development release\n if self.dev is not None:\n parts.append(f".dev{self.dev}")\n\n # Local version segment\n if self.local is not None:\n parts.append(f"+{self.local}")\n\n return "".join(parts)\n\n @property\n def epoch(self) -> int:\n """The epoch of the version.\n\n >>> Version("2.0.0").epoch\n 0\n >>> Version("1!2.0.0").epoch\n 1\n """\n return self._version.epoch\n\n @property\n def release(self) -> tuple[int, ...]:\n """The components of the "release" segment of the version.\n\n >>> Version("1.2.3").release\n (1, 2, 3)\n >>> Version("2.0.0").release\n (2, 0, 0)\n >>> Version("1!2.0.0.post0").release\n (2, 0, 0)\n\n Includes trailing zeroes but not the epoch or any pre-release / development /\n post-release suffixes.\n """\n return self._version.release\n\n @property\n def pre(self) -> tuple[str, int] | None:\n """The pre-release segment of the version.\n\n >>> print(Version("1.2.3").pre)\n None\n >>> Version("1.2.3a1").pre\n ('a', 1)\n >>> Version("1.2.3b1").pre\n ('b', 1)\n >>> Version("1.2.3rc1").pre\n ('rc', 1)\n """\n return self._version.pre\n\n @property\n def post(self) -> int | None:\n """The post-release number of the version.\n\n >>> print(Version("1.2.3").post)\n None\n >>> Version("1.2.3.post1").post\n 1\n """\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> int | None:\n """The development number of the version.\n\n >>> 
print(Version("1.2.3").dev)\n None\n >>> Version("1.2.3.dev1").dev\n 1\n """\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> str | None:\n """The local version segment of the version.\n\n >>> print(Version("1.2.3").local)\n None\n >>> Version("1.2.3+abc").local\n 'abc'\n """\n if self._version.local:\n return ".".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n """The public portion of the version.\n\n >>> Version("1.2.3").public\n '1.2.3'\n >>> Version("1.2.3+abc").public\n '1.2.3'\n >>> Version("1!1.2.3dev1+abc").public\n '1!1.2.3.dev1'\n """\n return str(self).split("+", 1)[0]\n\n @property\n def base_version(self) -> str:\n """The "base version" of the version.\n\n >>> Version("1.2.3").base_version\n '1.2.3'\n >>> Version("1.2.3+abc").base_version\n '1.2.3'\n >>> Version("1!1.2.3dev1+abc").base_version\n '1!1.2.3'\n\n The "base version" is the public version of the project without any pre or post\n release markers.\n """\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f"{self.epoch}!")\n\n # Release segment\n parts.append(".".join(str(x) for x in self.release))\n\n return "".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n """Whether this version is a pre-release.\n\n >>> Version("1.2.3").is_prerelease\n False\n >>> Version("1.2.3a1").is_prerelease\n True\n >>> Version("1.2.3b1").is_prerelease\n True\n >>> Version("1.2.3rc1").is_prerelease\n True\n >>> Version("1.2.3dev1").is_prerelease\n True\n """\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n """Whether this version is a post-release.\n\n >>> Version("1.2.3").is_postrelease\n False\n >>> Version("1.2.3.post1").is_postrelease\n True\n """\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n """Whether this version is a development release.\n\n >>> Version("1.2.3").is_devrelease\n False\n >>> 
Version("1.2.3.dev1").is_devrelease\n True\n """\n return self.dev is not None\n\n @property\n def major(self) -> int:\n """The first item of :attr:`release` or ``0`` if unavailable.\n\n >>> Version("1.2.3").major\n 1\n """\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n """The second item of :attr:`release` or ``0`` if unavailable.\n\n >>> Version("1.2.3").minor\n 2\n >>> Version("1").minor\n 0\n """\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n """The third item of :attr:`release` or ``0`` if unavailable.\n\n >>> Version("1.2.3").micro\n 3\n >>> Version("1").micro\n 0\n """\n return self.release[2] if len(self.release) >= 3 else 0\n\n\nclass _TrimmedRelease(Version):\n @property\n def release(self) -> tuple[int, ...]:\n """\n Release segment without any trailing zeros.\n\n >>> _TrimmedRelease('1.0.0').release\n (1,)\n >>> _TrimmedRelease('0.0').release\n (0,)\n """\n rel = super().release\n nonzeros = (index for index, val in enumerate(rel) if val)\n last_nonzero = max(nonzeros, default=0)\n return rel[: last_nonzero + 1]\n\n\ndef _parse_letter_version(\n letter: str | None, number: str | bytes | SupportsInt | None\n) -> tuple[str, int] | None:\n if letter:\n # We consider there to be an implicit 0 in a pre-release if there is\n # not a numeral associated with it.\n if number is None:\n number = 0\n\n # We normalize any letters to their lower case form\n letter = letter.lower()\n\n # We consider some words to be alternate spellings of other words and\n # in those cases we want to normalize the spellings to our preferred\n # spelling.\n if letter == "alpha":\n letter = "a"\n elif letter == "beta":\n letter = "b"\n elif letter in ["c", "pre", "preview"]:\n letter = "rc"\n elif letter in ["rev", "r"]:\n letter = "post"\n\n return letter, int(number)\n\n assert not letter\n if number:\n # We assume if we are given a number, but we are not given a letter\n # then 
this is using the implicit post release syntax (e.g. 1.0-1)\n letter = "post"\n\n return letter, int(number)\n\n return None\n\n\n_local_version_separators = re.compile(r"[\._-]")\n\n\ndef _parse_local_version(local: str | None) -> LocalType | None:\n """\n Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").\n """\n if local is not None:\n return tuple(\n part.lower() if not part.isdigit() else int(part)\n for part in _local_version_separators.split(local)\n )\n return None\n\n\ndef _cmpkey(\n epoch: int,\n release: tuple[int, ...],\n pre: tuple[str, int] | None,\n post: tuple[str, int] | None,\n dev: tuple[str, int] | None,\n local: LocalType | None,\n) -> CmpKey:\n # When we compare a release version, we want to compare it with all of the\n # trailing zeros removed. So we'll use a reverse the list, drop all the now\n # leading zeros until we come to something non zero, then take the rest\n # re-reverse it back into the correct order and make it a tuple and use\n # that for our sorting key.\n _release = tuple(\n reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))\n )\n\n # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.\n # We'll do this by abusing the pre segment, but we _only_ want to do this\n # if there is not a pre or a post segment. 
If we have one of those then\n # the normal sorting rules will handle this case correctly.\n if pre is None and post is None and dev is not None:\n _pre: CmpPrePostDevType = NegativeInfinity\n # Versions without a pre-release (except as noted above) should sort after\n # those with one.\n elif pre is None:\n _pre = Infinity\n else:\n _pre = pre\n\n # Versions without a post segment should sort before those with one.\n if post is None:\n _post: CmpPrePostDevType = NegativeInfinity\n\n else:\n _post = post\n\n # Versions without a development segment should sort after those with one.\n if dev is None:\n _dev: CmpPrePostDevType = Infinity\n\n else:\n _dev = dev\n\n if local is None:\n # Versions without a local segment should sort before those with one.\n _local: CmpLocalType = NegativeInfinity\n else:\n # Versions with a local segment need that segment parsed to implement\n # the sorting rules in PEP440.\n # - Alpha numeric segments sort before numeric segments\n # - Alpha numeric segments sort lexicographically\n # - Numeric segments sort numerically\n # - Shorter versions sort before longer versions when the prefixes\n # match exactly\n _local = tuple(\n (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local\n )\n\n return epoch, _release, _pre, _post, _dev, _local\n
.venv\Lib\site-packages\pip\_vendor\packaging\version.py
version.py
Python
16,688
0.95
0.152921
0.102564
python-kit
486
2023-08-03T08:10:14.721839
MIT
false
ae4ab2671139e954281fc08ae5845559
"""\nELF file parser.\n\nThis provides a class ``ELFFile`` that parses an ELF executable in a similar\ninterface to ``ZipFile``. Only the read interface is implemented.\n\nBased on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca\nELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html\n"""\n\nfrom __future__ import annotations\n\nimport enum\nimport os\nimport struct\nfrom typing import IO\n\n\nclass ELFInvalid(ValueError):\n pass\n\n\nclass EIClass(enum.IntEnum):\n C32 = 1\n C64 = 2\n\n\nclass EIData(enum.IntEnum):\n Lsb = 1\n Msb = 2\n\n\nclass EMachine(enum.IntEnum):\n I386 = 3\n S390 = 22\n Arm = 40\n X8664 = 62\n AArc64 = 183\n\n\nclass ELFFile:\n """\n Representation of an ELF executable.\n """\n\n def __init__(self, f: IO[bytes]) -> None:\n self._f = f\n\n try:\n ident = self._read("16B")\n except struct.error as e:\n raise ELFInvalid("unable to parse identification") from e\n magic = bytes(ident[:4])\n if magic != b"\x7fELF":\n raise ELFInvalid(f"invalid magic: {magic!r}")\n\n self.capacity = ident[4] # Format for program header (bitness).\n self.encoding = ident[5] # Data structure encoding (endianness).\n\n try:\n # e_fmt: Format for program header.\n # p_fmt: Format for section header.\n # p_idx: Indexes to find p_type, p_offset, and p_filesz.\n e_fmt, self._p_fmt, self._p_idx = {\n (1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.\n (1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.\n (2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.\n (2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.\n }[(self.capacity, self.encoding)]\n except KeyError as e:\n raise ELFInvalid(\n f"unrecognized capacity ({self.capacity}) or encoding ({self.encoding})"\n ) from e\n\n try:\n (\n _,\n self.machine, # Architecture type.\n _,\n _,\n self._e_phoff, # Offset of program header.\n _,\n self.flags, # Processor-specific flags.\n _,\n self._e_phentsize, # Size of section.\n self._e_phnum, # 
Number of sections.\n ) = self._read(e_fmt)\n except struct.error as e:\n raise ELFInvalid("unable to parse machine and section information") from e\n\n def _read(self, fmt: str) -> tuple[int, ...]:\n return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))\n\n @property\n def interpreter(self) -> str | None:\n """\n The path recorded in the ``PT_INTERP`` section header.\n """\n for index in range(self._e_phnum):\n self._f.seek(self._e_phoff + self._e_phentsize * index)\n try:\n data = self._read(self._p_fmt)\n except struct.error:\n continue\n if data[self._p_idx[0]] != 3: # Not PT_INTERP.\n continue\n self._f.seek(data[self._p_idx[1]])\n return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")\n return None\n
.venv\Lib\site-packages\pip\_vendor\packaging\_elffile.py
_elffile.py
Python
3,286
0.95
0.174312
0.034091
vue-tools
778
2023-07-21T01:46:36.161747
BSD-3-Clause
false
e83ac3c80a6482b83578c3ef6cfed4b9
from __future__ import annotations\n\nimport collections\nimport contextlib\nimport functools\nimport os\nimport re\nimport sys\nimport warnings\nfrom typing import Generator, Iterator, NamedTuple, Sequence\n\nfrom ._elffile import EIClass, EIData, ELFFile, EMachine\n\nEF_ARM_ABIMASK = 0xFF000000\nEF_ARM_ABI_VER5 = 0x05000000\nEF_ARM_ABI_FLOAT_HARD = 0x00000400\n\n\n# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`\n# as the type for `path` until then.\n@contextlib.contextmanager\ndef _parse_elf(path: str) -> Generator[ELFFile | None, None, None]:\n try:\n with open(path, "rb") as f:\n yield ELFFile(f)\n except (OSError, TypeError, ValueError):\n yield None\n\n\ndef _is_linux_armhf(executable: str) -> bool:\n # hard-float ABI can be detected from the ELF header of the running\n # process\n # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf\n with _parse_elf(executable) as f:\n return (\n f is not None\n and f.capacity == EIClass.C32\n and f.encoding == EIData.Lsb\n and f.machine == EMachine.Arm\n and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5\n and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD\n )\n\n\ndef _is_linux_i686(executable: str) -> bool:\n with _parse_elf(executable) as f:\n return (\n f is not None\n and f.capacity == EIClass.C32\n and f.encoding == EIData.Lsb\n and f.machine == EMachine.I386\n )\n\n\ndef _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool:\n if "armv7l" in archs:\n return _is_linux_armhf(executable)\n if "i686" in archs:\n return _is_linux_i686(executable)\n allowed_archs = {\n "x86_64",\n "aarch64",\n "ppc64",\n "ppc64le",\n "s390x",\n "loongarch64",\n "riscv64",\n }\n return any(arch in allowed_archs for arch in archs)\n\n\n# If glibc ever changes its major version, we need to know what the last\n# minor version was, so we can build the complete list of all versions.\n# For now, guess what the highest minor version might be, assume it will\n# be 50 for testing. 
Once this actually happens, update the dictionary\n# with the actual value.\n_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50)\n\n\nclass _GLibCVersion(NamedTuple):\n major: int\n minor: int\n\n\ndef _glibc_version_string_confstr() -> str | None:\n """\n Primary implementation of glibc_version_string using os.confstr.\n """\n # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely\n # to be broken or missing. This strategy is used in the standard library\n # platform module.\n # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183\n try:\n # Should be a string like "glibc 2.17".\n version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION")\n assert version_string is not None\n _, version = version_string.rsplit()\n except (AssertionError, AttributeError, OSError, ValueError):\n # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...\n return None\n return version\n\n\ndef _glibc_version_string_ctypes() -> str | None:\n """\n Fallback implementation of glibc_version_string using ctypes.\n """\n try:\n import ctypes\n except ImportError:\n return None\n\n # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen\n # manpage says, "If filename is NULL, then the returned handle is for the\n # main program". This way we can let the linker do the work to figure out\n # which libc our process is actually using.\n #\n # We must also handle the special case where the executable is not a\n # dynamically linked executable. This can occur when using musl libc,\n # for example. In this situation, dlopen() will error, leading to an\n # OSError. Interestingly, at least in the case of musl, there is no\n # errno set on the OSError. The single string argument used to construct\n # OSError comes from libc itself and is therefore not portable to\n # hard code here. 
In any case, failure to call dlopen() means we\n # can proceed, so we bail on our attempt.\n try:\n process_namespace = ctypes.CDLL(None)\n except OSError:\n return None\n\n try:\n gnu_get_libc_version = process_namespace.gnu_get_libc_version\n except AttributeError:\n # Symbol doesn't exist -> therefore, we are not linked to\n # glibc.\n return None\n\n # Call gnu_get_libc_version, which returns a string like "2.5"\n gnu_get_libc_version.restype = ctypes.c_char_p\n version_str: str = gnu_get_libc_version()\n # py2 / py3 compatibility:\n if not isinstance(version_str, str):\n version_str = version_str.decode("ascii")\n\n return version_str\n\n\ndef _glibc_version_string() -> str | None:\n """Returns glibc version string, or None if not using glibc."""\n return _glibc_version_string_confstr() or _glibc_version_string_ctypes()\n\n\ndef _parse_glibc_version(version_str: str) -> tuple[int, int]:\n """Parse glibc version.\n\n We use a regexp instead of str.split because we want to discard any\n random junk that might come after the minor version -- this might happen\n in patched/forked versions of glibc (e.g. Linaro's version of glibc\n uses version strings like "2.20-2014.11"). 
See gh-3588.\n """\n m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)\n if not m:\n warnings.warn(\n f"Expected glibc version with 2 components major.minor, got: {version_str}",\n RuntimeWarning,\n stacklevel=2,\n )\n return -1, -1\n return int(m.group("major")), int(m.group("minor"))\n\n\n@functools.lru_cache\ndef _get_glibc_version() -> tuple[int, int]:\n version_str = _glibc_version_string()\n if version_str is None:\n return (-1, -1)\n return _parse_glibc_version(version_str)\n\n\n# From PEP 513, PEP 600\ndef _is_compatible(arch: str, version: _GLibCVersion) -> bool:\n sys_glibc = _get_glibc_version()\n if sys_glibc < version:\n return False\n # Check for presence of _manylinux module.\n try:\n import _manylinux\n except ImportError:\n return True\n if hasattr(_manylinux, "manylinux_compatible"):\n result = _manylinux.manylinux_compatible(version[0], version[1], arch)\n if result is not None:\n return bool(result)\n return True\n if version == _GLibCVersion(2, 5):\n if hasattr(_manylinux, "manylinux1_compatible"):\n return bool(_manylinux.manylinux1_compatible)\n if version == _GLibCVersion(2, 12):\n if hasattr(_manylinux, "manylinux2010_compatible"):\n return bool(_manylinux.manylinux2010_compatible)\n if version == _GLibCVersion(2, 17):\n if hasattr(_manylinux, "manylinux2014_compatible"):\n return bool(_manylinux.manylinux2014_compatible)\n return True\n\n\n_LEGACY_MANYLINUX_MAP = {\n # CentOS 7 w/ glibc 2.17 (PEP 599)\n (2, 17): "manylinux2014",\n # CentOS 6 w/ glibc 2.12 (PEP 571)\n (2, 12): "manylinux2010",\n # CentOS 5 w/ glibc 2.5 (PEP 513)\n (2, 5): "manylinux1",\n}\n\n\ndef platform_tags(archs: Sequence[str]) -> Iterator[str]:\n """Generate manylinux tags compatible to the current platform.\n\n :param archs: Sequence of compatible architectures.\n The first one shall be the closest to the actual architecture and be the part of\n platform tag after the ``linux_`` prefix, e.g. 
``x86_64``.\n The ``linux_`` prefix is assumed as a prerequisite for the current platform to\n be manylinux-compatible.\n\n :returns: An iterator of compatible manylinux tags.\n """\n if not _have_compatible_abi(sys.executable, archs):\n return\n # Oldest glibc to be supported regardless of architecture is (2, 17).\n too_old_glibc2 = _GLibCVersion(2, 16)\n if set(archs) & {"x86_64", "i686"}:\n # On x86/i686 also oldest glibc to be supported is (2, 5).\n too_old_glibc2 = _GLibCVersion(2, 4)\n current_glibc = _GLibCVersion(*_get_glibc_version())\n glibc_max_list = [current_glibc]\n # We can assume compatibility across glibc major versions.\n # https://sourceware.org/bugzilla/show_bug.cgi?id=24636\n #\n # Build a list of maximum glibc versions so that we can\n # output the canonical list of all glibc from current_glibc\n # down to too_old_glibc2, including all intermediary versions.\n for glibc_major in range(current_glibc.major - 1, 1, -1):\n glibc_minor = _LAST_GLIBC_MINOR[glibc_major]\n glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))\n for arch in archs:\n for glibc_max in glibc_max_list:\n if glibc_max.major == too_old_glibc2.major:\n min_minor = too_old_glibc2.minor\n else:\n # For other glibc major versions oldest supported is (x, 0).\n min_minor = -1\n for glibc_minor in range(glibc_max.minor, min_minor, -1):\n glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)\n tag = "manylinux_{}_{}".format(*glibc_version)\n if _is_compatible(arch, glibc_version):\n yield f"{tag}_{arch}"\n # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.\n if glibc_version in _LEGACY_MANYLINUX_MAP:\n legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]\n if _is_compatible(arch, glibc_version):\n yield f"{legacy_tag}_{arch}"\n
.venv\Lib\site-packages\pip\_vendor\packaging\_manylinux.py
_manylinux.py
Python
9,596
0.95
0.19084
0.214286
awesome-app
297
2025-04-27T09:40:14.803214
GPL-3.0
false
46426bd482848dbd15d36e0807583368
"""PEP 656 support.\n\nThis module implements logic to detect if the currently running Python is\nlinked against musl, and what musl version is used.\n"""\n\nfrom __future__ import annotations\n\nimport functools\nimport re\nimport subprocess\nimport sys\nfrom typing import Iterator, NamedTuple, Sequence\n\nfrom ._elffile import ELFFile\n\n\nclass _MuslVersion(NamedTuple):\n major: int\n minor: int\n\n\ndef _parse_musl_version(output: str) -> _MuslVersion | None:\n lines = [n for n in (n.strip() for n in output.splitlines()) if n]\n if len(lines) < 2 or lines[0][:4] != "musl":\n return None\n m = re.match(r"Version (\d+)\.(\d+)", lines[1])\n if not m:\n return None\n return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))\n\n\n@functools.lru_cache\ndef _get_musl_version(executable: str) -> _MuslVersion | None:\n """Detect currently-running musl runtime version.\n\n This is done by checking the specified executable's dynamic linking\n information, and invoking the loader to parse its output for a version\n string. If the loader is musl, the output would be something like::\n\n musl libc (x86_64)\n Version 1.2.2\n Dynamic Program Loader\n """\n try:\n with open(executable, "rb") as f:\n ld = ELFFile(f).interpreter\n except (OSError, TypeError, ValueError):\n return None\n if ld is None or "musl" not in ld:\n return None\n proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True)\n return _parse_musl_version(proc.stderr)\n\n\ndef platform_tags(archs: Sequence[str]) -> Iterator[str]:\n """Generate musllinux tags compatible to the current platform.\n\n :param archs: Sequence of compatible architectures.\n The first one shall be the closest to the actual architecture and be the part of\n platform tag after the ``linux_`` prefix, e.g. 
``x86_64``.\n The ``linux_`` prefix is assumed as a prerequisite for the current platform to\n be musllinux-compatible.\n\n :returns: An iterator of compatible musllinux tags.\n """\n sys_musl = _get_musl_version(sys.executable)\n if sys_musl is None: # Python not dynamically linked against musl.\n return\n for arch in archs:\n for minor in range(sys_musl.minor, -1, -1):\n yield f"musllinux_{sys_musl.major}_{minor}_{arch}"\n\n\nif __name__ == "__main__": # pragma: no cover\n import sysconfig\n\n plat = sysconfig.get_platform()\n assert plat.startswith("linux-"), "not linux"\n\n print("plat:", plat)\n print("musl:", _get_musl_version(sys.executable))\n print("tags:", end=" ")\n for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):\n print(t, end="\n ")\n
.venv\Lib\site-packages\pip\_vendor\packaging\_musllinux.py
_musllinux.py
Python
2,694
0.95
0.223529
0
vue-tools
456
2024-09-03T13:48:56.970408
GPL-3.0
false
d0d487bb6b89df7d122f768d8f1f2f2d
"""Handwritten parser of dependency specifiers.\n\nThe docstring for each __parse_* function contains EBNF-inspired grammar representing\nthe implementation.\n"""\n\nfrom __future__ import annotations\n\nimport ast\nfrom typing import NamedTuple, Sequence, Tuple, Union\n\nfrom ._tokenizer import DEFAULT_RULES, Tokenizer\n\n\nclass Node:\n def __init__(self, value: str) -> None:\n self.value = value\n\n def __str__(self) -> str:\n return self.value\n\n def __repr__(self) -> str:\n return f"<{self.__class__.__name__}('{self}')>"\n\n def serialize(self) -> str:\n raise NotImplementedError\n\n\nclass Variable(Node):\n def serialize(self) -> str:\n return str(self)\n\n\nclass Value(Node):\n def serialize(self) -> str:\n return f'"{self}"'\n\n\nclass Op(Node):\n def serialize(self) -> str:\n return str(self)\n\n\nMarkerVar = Union[Variable, Value]\nMarkerItem = Tuple[MarkerVar, Op, MarkerVar]\nMarkerAtom = Union[MarkerItem, Sequence["MarkerAtom"]]\nMarkerList = Sequence[Union["MarkerList", MarkerAtom, str]]\n\n\nclass ParsedRequirement(NamedTuple):\n name: str\n url: str\n extras: list[str]\n specifier: str\n marker: MarkerList | None\n\n\n# --------------------------------------------------------------------------------------\n# Recursive descent parser for dependency specifier\n# --------------------------------------------------------------------------------------\ndef parse_requirement(source: str) -> ParsedRequirement:\n return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))\n\n\ndef _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:\n """\n requirement = WS? IDENTIFIER WS? extras WS? 
requirement_details\n """\n tokenizer.consume("WS")\n\n name_token = tokenizer.expect(\n "IDENTIFIER", expected="package name at the start of dependency specifier"\n )\n name = name_token.text\n tokenizer.consume("WS")\n\n extras = _parse_extras(tokenizer)\n tokenizer.consume("WS")\n\n url, specifier, marker = _parse_requirement_details(tokenizer)\n tokenizer.expect("END", expected="end of dependency specifier")\n\n return ParsedRequirement(name, url, extras, specifier, marker)\n\n\ndef _parse_requirement_details(\n tokenizer: Tokenizer,\n) -> tuple[str, str, MarkerList | None]:\n """\n requirement_details = AT URL (WS requirement_marker?)?\n | specifier WS? (requirement_marker)?\n """\n\n specifier = ""\n url = ""\n marker = None\n\n if tokenizer.check("AT"):\n tokenizer.read()\n tokenizer.consume("WS")\n\n url_start = tokenizer.position\n url = tokenizer.expect("URL", expected="URL after @").text\n if tokenizer.check("END", peek=True):\n return (url, specifier, marker)\n\n tokenizer.expect("WS", expected="whitespace after URL")\n\n # The input might end after whitespace.\n if tokenizer.check("END", peek=True):\n return (url, specifier, marker)\n\n marker = _parse_requirement_marker(\n tokenizer, span_start=url_start, after="URL and whitespace"\n )\n else:\n specifier_start = tokenizer.position\n specifier = _parse_specifier(tokenizer)\n tokenizer.consume("WS")\n\n if tokenizer.check("END", peek=True):\n return (url, specifier, marker)\n\n marker = _parse_requirement_marker(\n tokenizer,\n span_start=specifier_start,\n after=(\n "version specifier"\n if specifier\n else "name and no valid version specifier"\n ),\n )\n\n return (url, specifier, marker)\n\n\ndef _parse_requirement_marker(\n tokenizer: Tokenizer, *, span_start: int, after: str\n) -> MarkerList:\n """\n requirement_marker = SEMICOLON marker WS?\n """\n\n if not tokenizer.check("SEMICOLON"):\n tokenizer.raise_syntax_error(\n f"Expected end or semicolon (after {after})",\n span_start=span_start,\n )\n 
tokenizer.read()\n\n marker = _parse_marker(tokenizer)\n tokenizer.consume("WS")\n\n return marker\n\n\ndef _parse_extras(tokenizer: Tokenizer) -> list[str]:\n """\n extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?\n """\n if not tokenizer.check("LEFT_BRACKET", peek=True):\n return []\n\n with tokenizer.enclosing_tokens(\n "LEFT_BRACKET",\n "RIGHT_BRACKET",\n around="extras",\n ):\n tokenizer.consume("WS")\n extras = _parse_extras_list(tokenizer)\n tokenizer.consume("WS")\n\n return extras\n\n\ndef _parse_extras_list(tokenizer: Tokenizer) -> list[str]:\n """\n extras_list = identifier (wsp* ',' wsp* identifier)*\n """\n extras: list[str] = []\n\n if not tokenizer.check("IDENTIFIER"):\n return extras\n\n extras.append(tokenizer.read().text)\n\n while True:\n tokenizer.consume("WS")\n if tokenizer.check("IDENTIFIER", peek=True):\n tokenizer.raise_syntax_error("Expected comma between extra names")\n elif not tokenizer.check("COMMA"):\n break\n\n tokenizer.read()\n tokenizer.consume("WS")\n\n extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")\n extras.append(extra_token.text)\n\n return extras\n\n\ndef _parse_specifier(tokenizer: Tokenizer) -> str:\n """\n specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS\n | WS? version_many WS?\n """\n with tokenizer.enclosing_tokens(\n "LEFT_PARENTHESIS",\n "RIGHT_PARENTHESIS",\n around="version specifier",\n ):\n tokenizer.consume("WS")\n parsed_specifiers = _parse_version_many(tokenizer)\n tokenizer.consume("WS")\n\n return parsed_specifiers\n\n\ndef _parse_version_many(tokenizer: Tokenizer) -> str:\n """\n version_many = (SPECIFIER (WS? COMMA WS? 
SPECIFIER)*)?\n """\n parsed_specifiers = ""\n while tokenizer.check("SPECIFIER"):\n span_start = tokenizer.position\n parsed_specifiers += tokenizer.read().text\n if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):\n tokenizer.raise_syntax_error(\n ".* suffix can only be used with `==` or `!=` operators",\n span_start=span_start,\n span_end=tokenizer.position + 1,\n )\n if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):\n tokenizer.raise_syntax_error(\n "Local version label can only be used with `==` or `!=` operators",\n span_start=span_start,\n span_end=tokenizer.position,\n )\n tokenizer.consume("WS")\n if not tokenizer.check("COMMA"):\n break\n parsed_specifiers += tokenizer.read().text\n tokenizer.consume("WS")\n\n return parsed_specifiers\n\n\n# --------------------------------------------------------------------------------------\n# Recursive descent parser for marker expression\n# --------------------------------------------------------------------------------------\ndef parse_marker(source: str) -> MarkerList:\n return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES))\n\n\ndef _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:\n retval = _parse_marker(tokenizer)\n tokenizer.expect("END", expected="end of marker expression")\n return retval\n\n\ndef _parse_marker(tokenizer: Tokenizer) -> MarkerList:\n """\n marker = marker_atom (BOOLOP marker_atom)+\n """\n expression = [_parse_marker_atom(tokenizer)]\n while tokenizer.check("BOOLOP"):\n token = tokenizer.read()\n expr_right = _parse_marker_atom(tokenizer)\n expression.extend((token.text, expr_right))\n return expression\n\n\ndef _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:\n """\n marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?\n | WS? 
marker_item WS?\n """\n\n tokenizer.consume("WS")\n if tokenizer.check("LEFT_PARENTHESIS", peek=True):\n with tokenizer.enclosing_tokens(\n "LEFT_PARENTHESIS",\n "RIGHT_PARENTHESIS",\n around="marker expression",\n ):\n tokenizer.consume("WS")\n marker: MarkerAtom = _parse_marker(tokenizer)\n tokenizer.consume("WS")\n else:\n marker = _parse_marker_item(tokenizer)\n tokenizer.consume("WS")\n return marker\n\n\ndef _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:\n """\n marker_item = WS? marker_var WS? marker_op WS? marker_var WS?\n """\n tokenizer.consume("WS")\n marker_var_left = _parse_marker_var(tokenizer)\n tokenizer.consume("WS")\n marker_op = _parse_marker_op(tokenizer)\n tokenizer.consume("WS")\n marker_var_right = _parse_marker_var(tokenizer)\n tokenizer.consume("WS")\n return (marker_var_left, marker_op, marker_var_right)\n\n\ndef _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:\n """\n marker_var = VARIABLE | QUOTED_STRING\n """\n if tokenizer.check("VARIABLE"):\n return process_env_var(tokenizer.read().text.replace(".", "_"))\n elif tokenizer.check("QUOTED_STRING"):\n return process_python_str(tokenizer.read().text)\n else:\n tokenizer.raise_syntax_error(\n message="Expected a marker variable or quoted string"\n )\n\n\ndef process_env_var(env_var: str) -> Variable:\n if env_var in ("platform_python_implementation", "python_implementation"):\n return Variable("platform_python_implementation")\n else:\n return Variable(env_var)\n\n\ndef process_python_str(python_str: str) -> Value:\n value = ast.literal_eval(python_str)\n return Value(str(value))\n\n\ndef _parse_marker_op(tokenizer: Tokenizer) -> Op:\n """\n marker_op = IN | NOT IN | OP\n """\n if tokenizer.check("IN"):\n tokenizer.read()\n return Op("in")\n elif tokenizer.check("NOT"):\n tokenizer.read()\n tokenizer.expect("WS", expected="whitespace after 'not'")\n tokenizer.expect("IN", expected="'in' after 'not'")\n return Op("not in")\n elif tokenizer.check("OP"):\n return 
Op(tokenizer.read().text)\n else:\n return tokenizer.raise_syntax_error(\n "Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in"\n )\n
.venv\Lib\site-packages\pip\_vendor\packaging\_parser.py
_parser.py
Python
10,221
0.95
0.147309
0.025641
node-utils
788
2024-05-14T20:55:34.688149
MIT
false
b8877d075d76fdd0aee2efa2001819a7
# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nclass InfinityType:\n def __repr__(self) -> str:\n return "Infinity"\n\n def __hash__(self) -> int:\n return hash(repr(self))\n\n def __lt__(self, other: object) -> bool:\n return False\n\n def __le__(self, other: object) -> bool:\n return False\n\n def __eq__(self, other: object) -> bool:\n return isinstance(other, self.__class__)\n\n def __gt__(self, other: object) -> bool:\n return True\n\n def __ge__(self, other: object) -> bool:\n return True\n\n def __neg__(self: object) -> "NegativeInfinityType":\n return NegativeInfinity\n\n\nInfinity = InfinityType()\n\n\nclass NegativeInfinityType:\n def __repr__(self) -> str:\n return "-Infinity"\n\n def __hash__(self) -> int:\n return hash(repr(self))\n\n def __lt__(self, other: object) -> bool:\n return True\n\n def __le__(self, other: object) -> bool:\n return True\n\n def __eq__(self, other: object) -> bool:\n return isinstance(other, self.__class__)\n\n def __gt__(self, other: object) -> bool:\n return False\n\n def __ge__(self, other: object) -> bool:\n return False\n\n def __neg__(self: object) -> InfinityType:\n return Infinity\n\n\nNegativeInfinity = NegativeInfinityType()\n
.venv\Lib\site-packages\pip\_vendor\packaging\_structures.py
_structures.py
Python
1,431
0.95
0.311475
0.076923
react-lib
676
2024-02-12T05:33:13.810450
Apache-2.0
false
de664fedc083927d3d084f416190d876
from __future__ import annotations\n\nimport contextlib\nimport re\nfrom dataclasses import dataclass\nfrom typing import Iterator, NoReturn\n\nfrom .specifiers import Specifier\n\n\n@dataclass\nclass Token:\n name: str\n text: str\n position: int\n\n\nclass ParserSyntaxError(Exception):\n """The provided source text could not be parsed correctly."""\n\n def __init__(\n self,\n message: str,\n *,\n source: str,\n span: tuple[int, int],\n ) -> None:\n self.span = span\n self.message = message\n self.source = source\n\n super().__init__()\n\n def __str__(self) -> str:\n marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"\n return "\n ".join([self.message, self.source, marker])\n\n\nDEFAULT_RULES: dict[str, str | re.Pattern[str]] = {\n "LEFT_PARENTHESIS": r"\(",\n "RIGHT_PARENTHESIS": r"\)",\n "LEFT_BRACKET": r"\[",\n "RIGHT_BRACKET": r"\]",\n "SEMICOLON": r";",\n "COMMA": r",",\n "QUOTED_STRING": re.compile(\n r"""\n (\n ('[^']*')\n |\n ("[^"]*")\n )\n """,\n re.VERBOSE,\n ),\n "OP": r"(===|==|~=|!=|<=|>=|<|>)",\n "BOOLOP": r"\b(or|and)\b",\n "IN": r"\bin\b",\n "NOT": r"\bnot\b",\n "VARIABLE": re.compile(\n r"""\n \b(\n python_version\n |python_full_version\n |os[._]name\n |sys[._]platform\n |platform_(release|system)\n |platform[._](version|machine|python_implementation)\n |python_implementation\n |implementation_(name|version)\n |extras?\n |dependency_groups\n )\b\n """,\n re.VERBOSE,\n ),\n "SPECIFIER": re.compile(\n Specifier._operator_regex_str + Specifier._version_regex_str,\n re.VERBOSE | re.IGNORECASE,\n ),\n "AT": r"\@",\n "URL": r"[^ \t]+",\n "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",\n "VERSION_PREFIX_TRAIL": r"\.\*",\n "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",\n "WS": r"[ \t]+",\n "END": r"$",\n}\n\n\nclass Tokenizer:\n """Context-sensitive token parsing.\n\n Provides methods to examine the input stream to check whether the next token\n matches.\n """\n\n def __init__(\n self,\n source: str,\n *,\n rules: 
dict[str, str | re.Pattern[str]],\n ) -> None:\n self.source = source\n self.rules: dict[str, re.Pattern[str]] = {\n name: re.compile(pattern) for name, pattern in rules.items()\n }\n self.next_token: Token | None = None\n self.position = 0\n\n def consume(self, name: str) -> None:\n """Move beyond provided token name, if at current position."""\n if self.check(name):\n self.read()\n\n def check(self, name: str, *, peek: bool = False) -> bool:\n """Check whether the next token has the provided name.\n\n By default, if the check succeeds, the token *must* be read before\n another check. If `peek` is set to `True`, the token is not loaded and\n would need to be checked again.\n """\n assert self.next_token is None, (\n f"Cannot check for {name!r}, already have {self.next_token!r}"\n )\n assert name in self.rules, f"Unknown token name: {name!r}"\n\n expression = self.rules[name]\n\n match = expression.match(self.source, self.position)\n if match is None:\n return False\n if not peek:\n self.next_token = Token(name, match[0], self.position)\n return True\n\n def expect(self, name: str, *, expected: str) -> Token:\n """Expect a certain token name next, failing with a syntax error otherwise.\n\n The token is *not* read.\n """\n if not self.check(name):\n raise self.raise_syntax_error(f"Expected {expected}")\n return self.read()\n\n def read(self) -> Token:\n """Consume the next token and return it."""\n token = self.next_token\n assert token is not None\n\n self.position += len(token.text)\n self.next_token = None\n\n return token\n\n def raise_syntax_error(\n self,\n message: str,\n *,\n span_start: int | None = None,\n span_end: int | None = None,\n ) -> NoReturn:\n """Raise ParserSyntaxError at the given position."""\n span = (\n self.position if span_start is None else span_start,\n self.position if span_end is None else span_end,\n )\n raise ParserSyntaxError(\n message,\n source=self.source,\n span=span,\n )\n\n @contextlib.contextmanager\n def enclosing_tokens(\n 
self, open_token: str, close_token: str, *, around: str\n ) -> Iterator[None]:\n if self.check(open_token):\n open_position = self.position\n self.read()\n else:\n open_position = None\n\n yield\n\n if open_position is None:\n return\n\n if not self.check(close_token):\n self.raise_syntax_error(\n f"Expected matching {close_token} for {open_token}, after {around}",\n span_start=open_position,\n )\n\n self.read()\n
.venv\Lib\site-packages\pip\_vendor\packaging\_tokenizer.py
_tokenizer.py
Python
5,310
0.85
0.133333
0.018293
react-lib
203
2024-01-07T10:49:57.615736
MIT
false
58bff3ae79b26a93d63f3a9429d70860
# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n__title__ = "packaging"\n__summary__ = "Core utilities for Python packages"\n__uri__ = "https://github.com/pypa/packaging"\n\n__version__ = "25.0"\n\n__author__ = "Donald Stufft and individual contributors"\n__email__ = "donald@stufft.io"\n\n__license__ = "BSD-2-Clause or Apache-2.0"\n__copyright__ = f"2014 {__author__}"\n
.venv\Lib\site-packages\pip\_vendor\packaging\__init__.py
__init__.py
Python
494
0.8
0.133333
0.272727
react-lib
449
2024-04-21T00:04:56.968725
BSD-3-Clause
false
bb0d0797c99f155fda872b2ee324d93d
\nfrom __future__ import annotations\n\nfrom typing import TypedDict\n\nclass SPDXLicense(TypedDict):\n id: str\n deprecated: bool\n\nclass SPDXException(TypedDict):\n id: str\n deprecated: bool\n\n\nVERSION = '3.25.0'\n\nLICENSES: dict[str, SPDXLicense] = {\n '0bsd': {'id': '0BSD', 'deprecated': False},\n '3d-slicer-1.0': {'id': '3D-Slicer-1.0', 'deprecated': False},\n 'aal': {'id': 'AAL', 'deprecated': False},\n 'abstyles': {'id': 'Abstyles', 'deprecated': False},\n 'adacore-doc': {'id': 'AdaCore-doc', 'deprecated': False},\n 'adobe-2006': {'id': 'Adobe-2006', 'deprecated': False},\n 'adobe-display-postscript': {'id': 'Adobe-Display-PostScript', 'deprecated': False},\n 'adobe-glyph': {'id': 'Adobe-Glyph', 'deprecated': False},\n 'adobe-utopia': {'id': 'Adobe-Utopia', 'deprecated': False},\n 'adsl': {'id': 'ADSL', 'deprecated': False},\n 'afl-1.1': {'id': 'AFL-1.1', 'deprecated': False},\n 'afl-1.2': {'id': 'AFL-1.2', 'deprecated': False},\n 'afl-2.0': {'id': 'AFL-2.0', 'deprecated': False},\n 'afl-2.1': {'id': 'AFL-2.1', 'deprecated': False},\n 'afl-3.0': {'id': 'AFL-3.0', 'deprecated': False},\n 'afmparse': {'id': 'Afmparse', 'deprecated': False},\n 'agpl-1.0': {'id': 'AGPL-1.0', 'deprecated': True},\n 'agpl-1.0-only': {'id': 'AGPL-1.0-only', 'deprecated': False},\n 'agpl-1.0-or-later': {'id': 'AGPL-1.0-or-later', 'deprecated': False},\n 'agpl-3.0': {'id': 'AGPL-3.0', 'deprecated': True},\n 'agpl-3.0-only': {'id': 'AGPL-3.0-only', 'deprecated': False},\n 'agpl-3.0-or-later': {'id': 'AGPL-3.0-or-later', 'deprecated': False},\n 'aladdin': {'id': 'Aladdin', 'deprecated': False},\n 'amd-newlib': {'id': 'AMD-newlib', 'deprecated': False},\n 'amdplpa': {'id': 'AMDPLPA', 'deprecated': False},\n 'aml': {'id': 'AML', 'deprecated': False},\n 'aml-glslang': {'id': 'AML-glslang', 'deprecated': False},\n 'ampas': {'id': 'AMPAS', 'deprecated': False},\n 'antlr-pd': {'id': 'ANTLR-PD', 'deprecated': False},\n 'antlr-pd-fallback': {'id': 'ANTLR-PD-fallback', 'deprecated': 
False},\n 'any-osi': {'id': 'any-OSI', 'deprecated': False},\n 'apache-1.0': {'id': 'Apache-1.0', 'deprecated': False},\n 'apache-1.1': {'id': 'Apache-1.1', 'deprecated': False},\n 'apache-2.0': {'id': 'Apache-2.0', 'deprecated': False},\n 'apafml': {'id': 'APAFML', 'deprecated': False},\n 'apl-1.0': {'id': 'APL-1.0', 'deprecated': False},\n 'app-s2p': {'id': 'App-s2p', 'deprecated': False},\n 'apsl-1.0': {'id': 'APSL-1.0', 'deprecated': False},\n 'apsl-1.1': {'id': 'APSL-1.1', 'deprecated': False},\n 'apsl-1.2': {'id': 'APSL-1.2', 'deprecated': False},\n 'apsl-2.0': {'id': 'APSL-2.0', 'deprecated': False},\n 'arphic-1999': {'id': 'Arphic-1999', 'deprecated': False},\n 'artistic-1.0': {'id': 'Artistic-1.0', 'deprecated': False},\n 'artistic-1.0-cl8': {'id': 'Artistic-1.0-cl8', 'deprecated': False},\n 'artistic-1.0-perl': {'id': 'Artistic-1.0-Perl', 'deprecated': False},\n 'artistic-2.0': {'id': 'Artistic-2.0', 'deprecated': False},\n 'aswf-digital-assets-1.0': {'id': 'ASWF-Digital-Assets-1.0', 'deprecated': False},\n 'aswf-digital-assets-1.1': {'id': 'ASWF-Digital-Assets-1.1', 'deprecated': False},\n 'baekmuk': {'id': 'Baekmuk', 'deprecated': False},\n 'bahyph': {'id': 'Bahyph', 'deprecated': False},\n 'barr': {'id': 'Barr', 'deprecated': False},\n 'bcrypt-solar-designer': {'id': 'bcrypt-Solar-Designer', 'deprecated': False},\n 'beerware': {'id': 'Beerware', 'deprecated': False},\n 'bitstream-charter': {'id': 'Bitstream-Charter', 'deprecated': False},\n 'bitstream-vera': {'id': 'Bitstream-Vera', 'deprecated': False},\n 'bittorrent-1.0': {'id': 'BitTorrent-1.0', 'deprecated': False},\n 'bittorrent-1.1': {'id': 'BitTorrent-1.1', 'deprecated': False},\n 'blessing': {'id': 'blessing', 'deprecated': False},\n 'blueoak-1.0.0': {'id': 'BlueOak-1.0.0', 'deprecated': False},\n 'boehm-gc': {'id': 'Boehm-GC', 'deprecated': False},\n 'borceux': {'id': 'Borceux', 'deprecated': False},\n 'brian-gladman-2-clause': {'id': 'Brian-Gladman-2-Clause', 'deprecated': False},\n 
'brian-gladman-3-clause': {'id': 'Brian-Gladman-3-Clause', 'deprecated': False},\n 'bsd-1-clause': {'id': 'BSD-1-Clause', 'deprecated': False},\n 'bsd-2-clause': {'id': 'BSD-2-Clause', 'deprecated': False},\n 'bsd-2-clause-darwin': {'id': 'BSD-2-Clause-Darwin', 'deprecated': False},\n 'bsd-2-clause-first-lines': {'id': 'BSD-2-Clause-first-lines', 'deprecated': False},\n 'bsd-2-clause-freebsd': {'id': 'BSD-2-Clause-FreeBSD', 'deprecated': True},\n 'bsd-2-clause-netbsd': {'id': 'BSD-2-Clause-NetBSD', 'deprecated': True},\n 'bsd-2-clause-patent': {'id': 'BSD-2-Clause-Patent', 'deprecated': False},\n 'bsd-2-clause-views': {'id': 'BSD-2-Clause-Views', 'deprecated': False},\n 'bsd-3-clause': {'id': 'BSD-3-Clause', 'deprecated': False},\n 'bsd-3-clause-acpica': {'id': 'BSD-3-Clause-acpica', 'deprecated': False},\n 'bsd-3-clause-attribution': {'id': 'BSD-3-Clause-Attribution', 'deprecated': False},\n 'bsd-3-clause-clear': {'id': 'BSD-3-Clause-Clear', 'deprecated': False},\n 'bsd-3-clause-flex': {'id': 'BSD-3-Clause-flex', 'deprecated': False},\n 'bsd-3-clause-hp': {'id': 'BSD-3-Clause-HP', 'deprecated': False},\n 'bsd-3-clause-lbnl': {'id': 'BSD-3-Clause-LBNL', 'deprecated': False},\n 'bsd-3-clause-modification': {'id': 'BSD-3-Clause-Modification', 'deprecated': False},\n 'bsd-3-clause-no-military-license': {'id': 'BSD-3-Clause-No-Military-License', 'deprecated': False},\n 'bsd-3-clause-no-nuclear-license': {'id': 'BSD-3-Clause-No-Nuclear-License', 'deprecated': False},\n 'bsd-3-clause-no-nuclear-license-2014': {'id': 'BSD-3-Clause-No-Nuclear-License-2014', 'deprecated': False},\n 'bsd-3-clause-no-nuclear-warranty': {'id': 'BSD-3-Clause-No-Nuclear-Warranty', 'deprecated': False},\n 'bsd-3-clause-open-mpi': {'id': 'BSD-3-Clause-Open-MPI', 'deprecated': False},\n 'bsd-3-clause-sun': {'id': 'BSD-3-Clause-Sun', 'deprecated': False},\n 'bsd-4-clause': {'id': 'BSD-4-Clause', 'deprecated': False},\n 'bsd-4-clause-shortened': {'id': 'BSD-4-Clause-Shortened', 'deprecated': 
False},\n 'bsd-4-clause-uc': {'id': 'BSD-4-Clause-UC', 'deprecated': False},\n 'bsd-4.3reno': {'id': 'BSD-4.3RENO', 'deprecated': False},\n 'bsd-4.3tahoe': {'id': 'BSD-4.3TAHOE', 'deprecated': False},\n 'bsd-advertising-acknowledgement': {'id': 'BSD-Advertising-Acknowledgement', 'deprecated': False},\n 'bsd-attribution-hpnd-disclaimer': {'id': 'BSD-Attribution-HPND-disclaimer', 'deprecated': False},\n 'bsd-inferno-nettverk': {'id': 'BSD-Inferno-Nettverk', 'deprecated': False},\n 'bsd-protection': {'id': 'BSD-Protection', 'deprecated': False},\n 'bsd-source-beginning-file': {'id': 'BSD-Source-beginning-file', 'deprecated': False},\n 'bsd-source-code': {'id': 'BSD-Source-Code', 'deprecated': False},\n 'bsd-systemics': {'id': 'BSD-Systemics', 'deprecated': False},\n 'bsd-systemics-w3works': {'id': 'BSD-Systemics-W3Works', 'deprecated': False},\n 'bsl-1.0': {'id': 'BSL-1.0', 'deprecated': False},\n 'busl-1.1': {'id': 'BUSL-1.1', 'deprecated': False},\n 'bzip2-1.0.5': {'id': 'bzip2-1.0.5', 'deprecated': True},\n 'bzip2-1.0.6': {'id': 'bzip2-1.0.6', 'deprecated': False},\n 'c-uda-1.0': {'id': 'C-UDA-1.0', 'deprecated': False},\n 'cal-1.0': {'id': 'CAL-1.0', 'deprecated': False},\n 'cal-1.0-combined-work-exception': {'id': 'CAL-1.0-Combined-Work-Exception', 'deprecated': False},\n 'caldera': {'id': 'Caldera', 'deprecated': False},\n 'caldera-no-preamble': {'id': 'Caldera-no-preamble', 'deprecated': False},\n 'catharon': {'id': 'Catharon', 'deprecated': False},\n 'catosl-1.1': {'id': 'CATOSL-1.1', 'deprecated': False},\n 'cc-by-1.0': {'id': 'CC-BY-1.0', 'deprecated': False},\n 'cc-by-2.0': {'id': 'CC-BY-2.0', 'deprecated': False},\n 'cc-by-2.5': {'id': 'CC-BY-2.5', 'deprecated': False},\n 'cc-by-2.5-au': {'id': 'CC-BY-2.5-AU', 'deprecated': False},\n 'cc-by-3.0': {'id': 'CC-BY-3.0', 'deprecated': False},\n 'cc-by-3.0-at': {'id': 'CC-BY-3.0-AT', 'deprecated': False},\n 'cc-by-3.0-au': {'id': 'CC-BY-3.0-AU', 'deprecated': False},\n 'cc-by-3.0-de': {'id': 'CC-BY-3.0-DE', 
'deprecated': False},\n 'cc-by-3.0-igo': {'id': 'CC-BY-3.0-IGO', 'deprecated': False},\n 'cc-by-3.0-nl': {'id': 'CC-BY-3.0-NL', 'deprecated': False},\n 'cc-by-3.0-us': {'id': 'CC-BY-3.0-US', 'deprecated': False},\n 'cc-by-4.0': {'id': 'CC-BY-4.0', 'deprecated': False},\n 'cc-by-nc-1.0': {'id': 'CC-BY-NC-1.0', 'deprecated': False},\n 'cc-by-nc-2.0': {'id': 'CC-BY-NC-2.0', 'deprecated': False},\n 'cc-by-nc-2.5': {'id': 'CC-BY-NC-2.5', 'deprecated': False},\n 'cc-by-nc-3.0': {'id': 'CC-BY-NC-3.0', 'deprecated': False},\n 'cc-by-nc-3.0-de': {'id': 'CC-BY-NC-3.0-DE', 'deprecated': False},\n 'cc-by-nc-4.0': {'id': 'CC-BY-NC-4.0', 'deprecated': False},\n 'cc-by-nc-nd-1.0': {'id': 'CC-BY-NC-ND-1.0', 'deprecated': False},\n 'cc-by-nc-nd-2.0': {'id': 'CC-BY-NC-ND-2.0', 'deprecated': False},\n 'cc-by-nc-nd-2.5': {'id': 'CC-BY-NC-ND-2.5', 'deprecated': False},\n 'cc-by-nc-nd-3.0': {'id': 'CC-BY-NC-ND-3.0', 'deprecated': False},\n 'cc-by-nc-nd-3.0-de': {'id': 'CC-BY-NC-ND-3.0-DE', 'deprecated': False},\n 'cc-by-nc-nd-3.0-igo': {'id': 'CC-BY-NC-ND-3.0-IGO', 'deprecated': False},\n 'cc-by-nc-nd-4.0': {'id': 'CC-BY-NC-ND-4.0', 'deprecated': False},\n 'cc-by-nc-sa-1.0': {'id': 'CC-BY-NC-SA-1.0', 'deprecated': False},\n 'cc-by-nc-sa-2.0': {'id': 'CC-BY-NC-SA-2.0', 'deprecated': False},\n 'cc-by-nc-sa-2.0-de': {'id': 'CC-BY-NC-SA-2.0-DE', 'deprecated': False},\n 'cc-by-nc-sa-2.0-fr': {'id': 'CC-BY-NC-SA-2.0-FR', 'deprecated': False},\n 'cc-by-nc-sa-2.0-uk': {'id': 'CC-BY-NC-SA-2.0-UK', 'deprecated': False},\n 'cc-by-nc-sa-2.5': {'id': 'CC-BY-NC-SA-2.5', 'deprecated': False},\n 'cc-by-nc-sa-3.0': {'id': 'CC-BY-NC-SA-3.0', 'deprecated': False},\n 'cc-by-nc-sa-3.0-de': {'id': 'CC-BY-NC-SA-3.0-DE', 'deprecated': False},\n 'cc-by-nc-sa-3.0-igo': {'id': 'CC-BY-NC-SA-3.0-IGO', 'deprecated': False},\n 'cc-by-nc-sa-4.0': {'id': 'CC-BY-NC-SA-4.0', 'deprecated': False},\n 'cc-by-nd-1.0': {'id': 'CC-BY-ND-1.0', 'deprecated': False},\n 'cc-by-nd-2.0': {'id': 'CC-BY-ND-2.0', 'deprecated': 
False},\n 'cc-by-nd-2.5': {'id': 'CC-BY-ND-2.5', 'deprecated': False},\n 'cc-by-nd-3.0': {'id': 'CC-BY-ND-3.0', 'deprecated': False},\n 'cc-by-nd-3.0-de': {'id': 'CC-BY-ND-3.0-DE', 'deprecated': False},\n 'cc-by-nd-4.0': {'id': 'CC-BY-ND-4.0', 'deprecated': False},\n 'cc-by-sa-1.0': {'id': 'CC-BY-SA-1.0', 'deprecated': False},\n 'cc-by-sa-2.0': {'id': 'CC-BY-SA-2.0', 'deprecated': False},\n 'cc-by-sa-2.0-uk': {'id': 'CC-BY-SA-2.0-UK', 'deprecated': False},\n 'cc-by-sa-2.1-jp': {'id': 'CC-BY-SA-2.1-JP', 'deprecated': False},\n 'cc-by-sa-2.5': {'id': 'CC-BY-SA-2.5', 'deprecated': False},\n 'cc-by-sa-3.0': {'id': 'CC-BY-SA-3.0', 'deprecated': False},\n 'cc-by-sa-3.0-at': {'id': 'CC-BY-SA-3.0-AT', 'deprecated': False},\n 'cc-by-sa-3.0-de': {'id': 'CC-BY-SA-3.0-DE', 'deprecated': False},\n 'cc-by-sa-3.0-igo': {'id': 'CC-BY-SA-3.0-IGO', 'deprecated': False},\n 'cc-by-sa-4.0': {'id': 'CC-BY-SA-4.0', 'deprecated': False},\n 'cc-pddc': {'id': 'CC-PDDC', 'deprecated': False},\n 'cc0-1.0': {'id': 'CC0-1.0', 'deprecated': False},\n 'cddl-1.0': {'id': 'CDDL-1.0', 'deprecated': False},\n 'cddl-1.1': {'id': 'CDDL-1.1', 'deprecated': False},\n 'cdl-1.0': {'id': 'CDL-1.0', 'deprecated': False},\n 'cdla-permissive-1.0': {'id': 'CDLA-Permissive-1.0', 'deprecated': False},\n 'cdla-permissive-2.0': {'id': 'CDLA-Permissive-2.0', 'deprecated': False},\n 'cdla-sharing-1.0': {'id': 'CDLA-Sharing-1.0', 'deprecated': False},\n 'cecill-1.0': {'id': 'CECILL-1.0', 'deprecated': False},\n 'cecill-1.1': {'id': 'CECILL-1.1', 'deprecated': False},\n 'cecill-2.0': {'id': 'CECILL-2.0', 'deprecated': False},\n 'cecill-2.1': {'id': 'CECILL-2.1', 'deprecated': False},\n 'cecill-b': {'id': 'CECILL-B', 'deprecated': False},\n 'cecill-c': {'id': 'CECILL-C', 'deprecated': False},\n 'cern-ohl-1.1': {'id': 'CERN-OHL-1.1', 'deprecated': False},\n 'cern-ohl-1.2': {'id': 'CERN-OHL-1.2', 'deprecated': False},\n 'cern-ohl-p-2.0': {'id': 'CERN-OHL-P-2.0', 'deprecated': False},\n 'cern-ohl-s-2.0': {'id': 
'CERN-OHL-S-2.0', 'deprecated': False},\n 'cern-ohl-w-2.0': {'id': 'CERN-OHL-W-2.0', 'deprecated': False},\n 'cfitsio': {'id': 'CFITSIO', 'deprecated': False},\n 'check-cvs': {'id': 'check-cvs', 'deprecated': False},\n 'checkmk': {'id': 'checkmk', 'deprecated': False},\n 'clartistic': {'id': 'ClArtistic', 'deprecated': False},\n 'clips': {'id': 'Clips', 'deprecated': False},\n 'cmu-mach': {'id': 'CMU-Mach', 'deprecated': False},\n 'cmu-mach-nodoc': {'id': 'CMU-Mach-nodoc', 'deprecated': False},\n 'cnri-jython': {'id': 'CNRI-Jython', 'deprecated': False},\n 'cnri-python': {'id': 'CNRI-Python', 'deprecated': False},\n 'cnri-python-gpl-compatible': {'id': 'CNRI-Python-GPL-Compatible', 'deprecated': False},\n 'coil-1.0': {'id': 'COIL-1.0', 'deprecated': False},\n 'community-spec-1.0': {'id': 'Community-Spec-1.0', 'deprecated': False},\n 'condor-1.1': {'id': 'Condor-1.1', 'deprecated': False},\n 'copyleft-next-0.3.0': {'id': 'copyleft-next-0.3.0', 'deprecated': False},\n 'copyleft-next-0.3.1': {'id': 'copyleft-next-0.3.1', 'deprecated': False},\n 'cornell-lossless-jpeg': {'id': 'Cornell-Lossless-JPEG', 'deprecated': False},\n 'cpal-1.0': {'id': 'CPAL-1.0', 'deprecated': False},\n 'cpl-1.0': {'id': 'CPL-1.0', 'deprecated': False},\n 'cpol-1.02': {'id': 'CPOL-1.02', 'deprecated': False},\n 'cronyx': {'id': 'Cronyx', 'deprecated': False},\n 'crossword': {'id': 'Crossword', 'deprecated': False},\n 'crystalstacker': {'id': 'CrystalStacker', 'deprecated': False},\n 'cua-opl-1.0': {'id': 'CUA-OPL-1.0', 'deprecated': False},\n 'cube': {'id': 'Cube', 'deprecated': False},\n 'curl': {'id': 'curl', 'deprecated': False},\n 'cve-tou': {'id': 'cve-tou', 'deprecated': False},\n 'd-fsl-1.0': {'id': 'D-FSL-1.0', 'deprecated': False},\n 'dec-3-clause': {'id': 'DEC-3-Clause', 'deprecated': False},\n 'diffmark': {'id': 'diffmark', 'deprecated': False},\n 'dl-de-by-2.0': {'id': 'DL-DE-BY-2.0', 'deprecated': False},\n 'dl-de-zero-2.0': {'id': 'DL-DE-ZERO-2.0', 'deprecated': False},\n 'doc': 
{'id': 'DOC', 'deprecated': False},\n 'docbook-schema': {'id': 'DocBook-Schema', 'deprecated': False},\n 'docbook-xml': {'id': 'DocBook-XML', 'deprecated': False},\n 'dotseqn': {'id': 'Dotseqn', 'deprecated': False},\n 'drl-1.0': {'id': 'DRL-1.0', 'deprecated': False},\n 'drl-1.1': {'id': 'DRL-1.1', 'deprecated': False},\n 'dsdp': {'id': 'DSDP', 'deprecated': False},\n 'dtoa': {'id': 'dtoa', 'deprecated': False},\n 'dvipdfm': {'id': 'dvipdfm', 'deprecated': False},\n 'ecl-1.0': {'id': 'ECL-1.0', 'deprecated': False},\n 'ecl-2.0': {'id': 'ECL-2.0', 'deprecated': False},\n 'ecos-2.0': {'id': 'eCos-2.0', 'deprecated': True},\n 'efl-1.0': {'id': 'EFL-1.0', 'deprecated': False},\n 'efl-2.0': {'id': 'EFL-2.0', 'deprecated': False},\n 'egenix': {'id': 'eGenix', 'deprecated': False},\n 'elastic-2.0': {'id': 'Elastic-2.0', 'deprecated': False},\n 'entessa': {'id': 'Entessa', 'deprecated': False},\n 'epics': {'id': 'EPICS', 'deprecated': False},\n 'epl-1.0': {'id': 'EPL-1.0', 'deprecated': False},\n 'epl-2.0': {'id': 'EPL-2.0', 'deprecated': False},\n 'erlpl-1.1': {'id': 'ErlPL-1.1', 'deprecated': False},\n 'etalab-2.0': {'id': 'etalab-2.0', 'deprecated': False},\n 'eudatagrid': {'id': 'EUDatagrid', 'deprecated': False},\n 'eupl-1.0': {'id': 'EUPL-1.0', 'deprecated': False},\n 'eupl-1.1': {'id': 'EUPL-1.1', 'deprecated': False},\n 'eupl-1.2': {'id': 'EUPL-1.2', 'deprecated': False},\n 'eurosym': {'id': 'Eurosym', 'deprecated': False},\n 'fair': {'id': 'Fair', 'deprecated': False},\n 'fbm': {'id': 'FBM', 'deprecated': False},\n 'fdk-aac': {'id': 'FDK-AAC', 'deprecated': False},\n 'ferguson-twofish': {'id': 'Ferguson-Twofish', 'deprecated': False},\n 'frameworx-1.0': {'id': 'Frameworx-1.0', 'deprecated': False},\n 'freebsd-doc': {'id': 'FreeBSD-DOC', 'deprecated': False},\n 'freeimage': {'id': 'FreeImage', 'deprecated': False},\n 'fsfap': {'id': 'FSFAP', 'deprecated': False},\n 'fsfap-no-warranty-disclaimer': {'id': 'FSFAP-no-warranty-disclaimer', 'deprecated': False},\n 
'fsful': {'id': 'FSFUL', 'deprecated': False},\n 'fsfullr': {'id': 'FSFULLR', 'deprecated': False},\n 'fsfullrwd': {'id': 'FSFULLRWD', 'deprecated': False},\n 'ftl': {'id': 'FTL', 'deprecated': False},\n 'furuseth': {'id': 'Furuseth', 'deprecated': False},\n 'fwlw': {'id': 'fwlw', 'deprecated': False},\n 'gcr-docs': {'id': 'GCR-docs', 'deprecated': False},\n 'gd': {'id': 'GD', 'deprecated': False},\n 'gfdl-1.1': {'id': 'GFDL-1.1', 'deprecated': True},\n 'gfdl-1.1-invariants-only': {'id': 'GFDL-1.1-invariants-only', 'deprecated': False},\n 'gfdl-1.1-invariants-or-later': {'id': 'GFDL-1.1-invariants-or-later', 'deprecated': False},\n 'gfdl-1.1-no-invariants-only': {'id': 'GFDL-1.1-no-invariants-only', 'deprecated': False},\n 'gfdl-1.1-no-invariants-or-later': {'id': 'GFDL-1.1-no-invariants-or-later', 'deprecated': False},\n 'gfdl-1.1-only': {'id': 'GFDL-1.1-only', 'deprecated': False},\n 'gfdl-1.1-or-later': {'id': 'GFDL-1.1-or-later', 'deprecated': False},\n 'gfdl-1.2': {'id': 'GFDL-1.2', 'deprecated': True},\n 'gfdl-1.2-invariants-only': {'id': 'GFDL-1.2-invariants-only', 'deprecated': False},\n 'gfdl-1.2-invariants-or-later': {'id': 'GFDL-1.2-invariants-or-later', 'deprecated': False},\n 'gfdl-1.2-no-invariants-only': {'id': 'GFDL-1.2-no-invariants-only', 'deprecated': False},\n 'gfdl-1.2-no-invariants-or-later': {'id': 'GFDL-1.2-no-invariants-or-later', 'deprecated': False},\n 'gfdl-1.2-only': {'id': 'GFDL-1.2-only', 'deprecated': False},\n 'gfdl-1.2-or-later': {'id': 'GFDL-1.2-or-later', 'deprecated': False},\n 'gfdl-1.3': {'id': 'GFDL-1.3', 'deprecated': True},\n 'gfdl-1.3-invariants-only': {'id': 'GFDL-1.3-invariants-only', 'deprecated': False},\n 'gfdl-1.3-invariants-or-later': {'id': 'GFDL-1.3-invariants-or-later', 'deprecated': False},\n 'gfdl-1.3-no-invariants-only': {'id': 'GFDL-1.3-no-invariants-only', 'deprecated': False},\n 'gfdl-1.3-no-invariants-or-later': {'id': 'GFDL-1.3-no-invariants-or-later', 'deprecated': False},\n 'gfdl-1.3-only': {'id': 
'GFDL-1.3-only', 'deprecated': False},\n 'gfdl-1.3-or-later': {'id': 'GFDL-1.3-or-later', 'deprecated': False},\n 'giftware': {'id': 'Giftware', 'deprecated': False},\n 'gl2ps': {'id': 'GL2PS', 'deprecated': False},\n 'glide': {'id': 'Glide', 'deprecated': False},\n 'glulxe': {'id': 'Glulxe', 'deprecated': False},\n 'glwtpl': {'id': 'GLWTPL', 'deprecated': False},\n 'gnuplot': {'id': 'gnuplot', 'deprecated': False},\n 'gpl-1.0': {'id': 'GPL-1.0', 'deprecated': True},\n 'gpl-1.0+': {'id': 'GPL-1.0+', 'deprecated': True},\n 'gpl-1.0-only': {'id': 'GPL-1.0-only', 'deprecated': False},\n 'gpl-1.0-or-later': {'id': 'GPL-1.0-or-later', 'deprecated': False},\n 'gpl-2.0': {'id': 'GPL-2.0', 'deprecated': True},\n 'gpl-2.0+': {'id': 'GPL-2.0+', 'deprecated': True},\n 'gpl-2.0-only': {'id': 'GPL-2.0-only', 'deprecated': False},\n 'gpl-2.0-or-later': {'id': 'GPL-2.0-or-later', 'deprecated': False},\n 'gpl-2.0-with-autoconf-exception': {'id': 'GPL-2.0-with-autoconf-exception', 'deprecated': True},\n 'gpl-2.0-with-bison-exception': {'id': 'GPL-2.0-with-bison-exception', 'deprecated': True},\n 'gpl-2.0-with-classpath-exception': {'id': 'GPL-2.0-with-classpath-exception', 'deprecated': True},\n 'gpl-2.0-with-font-exception': {'id': 'GPL-2.0-with-font-exception', 'deprecated': True},\n 'gpl-2.0-with-gcc-exception': {'id': 'GPL-2.0-with-GCC-exception', 'deprecated': True},\n 'gpl-3.0': {'id': 'GPL-3.0', 'deprecated': True},\n 'gpl-3.0+': {'id': 'GPL-3.0+', 'deprecated': True},\n 'gpl-3.0-only': {'id': 'GPL-3.0-only', 'deprecated': False},\n 'gpl-3.0-or-later': {'id': 'GPL-3.0-or-later', 'deprecated': False},\n 'gpl-3.0-with-autoconf-exception': {'id': 'GPL-3.0-with-autoconf-exception', 'deprecated': True},\n 'gpl-3.0-with-gcc-exception': {'id': 'GPL-3.0-with-GCC-exception', 'deprecated': True},\n 'graphics-gems': {'id': 'Graphics-Gems', 'deprecated': False},\n 'gsoap-1.3b': {'id': 'gSOAP-1.3b', 'deprecated': False},\n 'gtkbook': {'id': 'gtkbook', 'deprecated': False},\n 'gutmann': 
{'id': 'Gutmann', 'deprecated': False},\n 'haskellreport': {'id': 'HaskellReport', 'deprecated': False},\n 'hdparm': {'id': 'hdparm', 'deprecated': False},\n 'hidapi': {'id': 'HIDAPI', 'deprecated': False},\n 'hippocratic-2.1': {'id': 'Hippocratic-2.1', 'deprecated': False},\n 'hp-1986': {'id': 'HP-1986', 'deprecated': False},\n 'hp-1989': {'id': 'HP-1989', 'deprecated': False},\n 'hpnd': {'id': 'HPND', 'deprecated': False},\n 'hpnd-dec': {'id': 'HPND-DEC', 'deprecated': False},\n 'hpnd-doc': {'id': 'HPND-doc', 'deprecated': False},\n 'hpnd-doc-sell': {'id': 'HPND-doc-sell', 'deprecated': False},\n 'hpnd-export-us': {'id': 'HPND-export-US', 'deprecated': False},\n 'hpnd-export-us-acknowledgement': {'id': 'HPND-export-US-acknowledgement', 'deprecated': False},\n 'hpnd-export-us-modify': {'id': 'HPND-export-US-modify', 'deprecated': False},\n 'hpnd-export2-us': {'id': 'HPND-export2-US', 'deprecated': False},\n 'hpnd-fenneberg-livingston': {'id': 'HPND-Fenneberg-Livingston', 'deprecated': False},\n 'hpnd-inria-imag': {'id': 'HPND-INRIA-IMAG', 'deprecated': False},\n 'hpnd-intel': {'id': 'HPND-Intel', 'deprecated': False},\n 'hpnd-kevlin-henney': {'id': 'HPND-Kevlin-Henney', 'deprecated': False},\n 'hpnd-markus-kuhn': {'id': 'HPND-Markus-Kuhn', 'deprecated': False},\n 'hpnd-merchantability-variant': {'id': 'HPND-merchantability-variant', 'deprecated': False},\n 'hpnd-mit-disclaimer': {'id': 'HPND-MIT-disclaimer', 'deprecated': False},\n 'hpnd-netrek': {'id': 'HPND-Netrek', 'deprecated': False},\n 'hpnd-pbmplus': {'id': 'HPND-Pbmplus', 'deprecated': False},\n 'hpnd-sell-mit-disclaimer-xserver': {'id': 'HPND-sell-MIT-disclaimer-xserver', 'deprecated': False},\n 'hpnd-sell-regexpr': {'id': 'HPND-sell-regexpr', 'deprecated': False},\n 'hpnd-sell-variant': {'id': 'HPND-sell-variant', 'deprecated': False},\n 'hpnd-sell-variant-mit-disclaimer': {'id': 'HPND-sell-variant-MIT-disclaimer', 'deprecated': False},\n 'hpnd-sell-variant-mit-disclaimer-rev': {'id': 
'HPND-sell-variant-MIT-disclaimer-rev', 'deprecated': False},\n 'hpnd-uc': {'id': 'HPND-UC', 'deprecated': False},\n 'hpnd-uc-export-us': {'id': 'HPND-UC-export-US', 'deprecated': False},\n 'htmltidy': {'id': 'HTMLTIDY', 'deprecated': False},\n 'ibm-pibs': {'id': 'IBM-pibs', 'deprecated': False},\n 'icu': {'id': 'ICU', 'deprecated': False},\n 'iec-code-components-eula': {'id': 'IEC-Code-Components-EULA', 'deprecated': False},\n 'ijg': {'id': 'IJG', 'deprecated': False},\n 'ijg-short': {'id': 'IJG-short', 'deprecated': False},\n 'imagemagick': {'id': 'ImageMagick', 'deprecated': False},\n 'imatix': {'id': 'iMatix', 'deprecated': False},\n 'imlib2': {'id': 'Imlib2', 'deprecated': False},\n 'info-zip': {'id': 'Info-ZIP', 'deprecated': False},\n 'inner-net-2.0': {'id': 'Inner-Net-2.0', 'deprecated': False},\n 'intel': {'id': 'Intel', 'deprecated': False},\n 'intel-acpi': {'id': 'Intel-ACPI', 'deprecated': False},\n 'interbase-1.0': {'id': 'Interbase-1.0', 'deprecated': False},\n 'ipa': {'id': 'IPA', 'deprecated': False},\n 'ipl-1.0': {'id': 'IPL-1.0', 'deprecated': False},\n 'isc': {'id': 'ISC', 'deprecated': False},\n 'isc-veillard': {'id': 'ISC-Veillard', 'deprecated': False},\n 'jam': {'id': 'Jam', 'deprecated': False},\n 'jasper-2.0': {'id': 'JasPer-2.0', 'deprecated': False},\n 'jpl-image': {'id': 'JPL-image', 'deprecated': False},\n 'jpnic': {'id': 'JPNIC', 'deprecated': False},\n 'json': {'id': 'JSON', 'deprecated': False},\n 'kastrup': {'id': 'Kastrup', 'deprecated': False},\n 'kazlib': {'id': 'Kazlib', 'deprecated': False},\n 'knuth-ctan': {'id': 'Knuth-CTAN', 'deprecated': False},\n 'lal-1.2': {'id': 'LAL-1.2', 'deprecated': False},\n 'lal-1.3': {'id': 'LAL-1.3', 'deprecated': False},\n 'latex2e': {'id': 'Latex2e', 'deprecated': False},\n 'latex2e-translated-notice': {'id': 'Latex2e-translated-notice', 'deprecated': False},\n 'leptonica': {'id': 'Leptonica', 'deprecated': False},\n 'lgpl-2.0': {'id': 'LGPL-2.0', 'deprecated': True},\n 'lgpl-2.0+': {'id': 
'LGPL-2.0+', 'deprecated': True},\n 'lgpl-2.0-only': {'id': 'LGPL-2.0-only', 'deprecated': False},\n 'lgpl-2.0-or-later': {'id': 'LGPL-2.0-or-later', 'deprecated': False},\n 'lgpl-2.1': {'id': 'LGPL-2.1', 'deprecated': True},\n 'lgpl-2.1+': {'id': 'LGPL-2.1+', 'deprecated': True},\n 'lgpl-2.1-only': {'id': 'LGPL-2.1-only', 'deprecated': False},\n 'lgpl-2.1-or-later': {'id': 'LGPL-2.1-or-later', 'deprecated': False},\n 'lgpl-3.0': {'id': 'LGPL-3.0', 'deprecated': True},\n 'lgpl-3.0+': {'id': 'LGPL-3.0+', 'deprecated': True},\n 'lgpl-3.0-only': {'id': 'LGPL-3.0-only', 'deprecated': False},\n 'lgpl-3.0-or-later': {'id': 'LGPL-3.0-or-later', 'deprecated': False},\n 'lgpllr': {'id': 'LGPLLR', 'deprecated': False},\n 'libpng': {'id': 'Libpng', 'deprecated': False},\n 'libpng-2.0': {'id': 'libpng-2.0', 'deprecated': False},\n 'libselinux-1.0': {'id': 'libselinux-1.0', 'deprecated': False},\n 'libtiff': {'id': 'libtiff', 'deprecated': False},\n 'libutil-david-nugent': {'id': 'libutil-David-Nugent', 'deprecated': False},\n 'liliq-p-1.1': {'id': 'LiLiQ-P-1.1', 'deprecated': False},\n 'liliq-r-1.1': {'id': 'LiLiQ-R-1.1', 'deprecated': False},\n 'liliq-rplus-1.1': {'id': 'LiLiQ-Rplus-1.1', 'deprecated': False},\n 'linux-man-pages-1-para': {'id': 'Linux-man-pages-1-para', 'deprecated': False},\n 'linux-man-pages-copyleft': {'id': 'Linux-man-pages-copyleft', 'deprecated': False},\n 'linux-man-pages-copyleft-2-para': {'id': 'Linux-man-pages-copyleft-2-para', 'deprecated': False},\n 'linux-man-pages-copyleft-var': {'id': 'Linux-man-pages-copyleft-var', 'deprecated': False},\n 'linux-openib': {'id': 'Linux-OpenIB', 'deprecated': False},\n 'loop': {'id': 'LOOP', 'deprecated': False},\n 'lpd-document': {'id': 'LPD-document', 'deprecated': False},\n 'lpl-1.0': {'id': 'LPL-1.0', 'deprecated': False},\n 'lpl-1.02': {'id': 'LPL-1.02', 'deprecated': False},\n 'lppl-1.0': {'id': 'LPPL-1.0', 'deprecated': False},\n 'lppl-1.1': {'id': 'LPPL-1.1', 'deprecated': False},\n 'lppl-1.2': {'id': 
'LPPL-1.2', 'deprecated': False},\n 'lppl-1.3a': {'id': 'LPPL-1.3a', 'deprecated': False},\n 'lppl-1.3c': {'id': 'LPPL-1.3c', 'deprecated': False},\n 'lsof': {'id': 'lsof', 'deprecated': False},\n 'lucida-bitmap-fonts': {'id': 'Lucida-Bitmap-Fonts', 'deprecated': False},\n 'lzma-sdk-9.11-to-9.20': {'id': 'LZMA-SDK-9.11-to-9.20', 'deprecated': False},\n 'lzma-sdk-9.22': {'id': 'LZMA-SDK-9.22', 'deprecated': False},\n 'mackerras-3-clause': {'id': 'Mackerras-3-Clause', 'deprecated': False},\n 'mackerras-3-clause-acknowledgment': {'id': 'Mackerras-3-Clause-acknowledgment', 'deprecated': False},\n 'magaz': {'id': 'magaz', 'deprecated': False},\n 'mailprio': {'id': 'mailprio', 'deprecated': False},\n 'makeindex': {'id': 'MakeIndex', 'deprecated': False},\n 'martin-birgmeier': {'id': 'Martin-Birgmeier', 'deprecated': False},\n 'mcphee-slideshow': {'id': 'McPhee-slideshow', 'deprecated': False},\n 'metamail': {'id': 'metamail', 'deprecated': False},\n 'minpack': {'id': 'Minpack', 'deprecated': False},\n 'miros': {'id': 'MirOS', 'deprecated': False},\n 'mit': {'id': 'MIT', 'deprecated': False},\n 'mit-0': {'id': 'MIT-0', 'deprecated': False},\n 'mit-advertising': {'id': 'MIT-advertising', 'deprecated': False},\n 'mit-cmu': {'id': 'MIT-CMU', 'deprecated': False},\n 'mit-enna': {'id': 'MIT-enna', 'deprecated': False},\n 'mit-feh': {'id': 'MIT-feh', 'deprecated': False},\n 'mit-festival': {'id': 'MIT-Festival', 'deprecated': False},\n 'mit-khronos-old': {'id': 'MIT-Khronos-old', 'deprecated': False},\n 'mit-modern-variant': {'id': 'MIT-Modern-Variant', 'deprecated': False},\n 'mit-open-group': {'id': 'MIT-open-group', 'deprecated': False},\n 'mit-testregex': {'id': 'MIT-testregex', 'deprecated': False},\n 'mit-wu': {'id': 'MIT-Wu', 'deprecated': False},\n 'mitnfa': {'id': 'MITNFA', 'deprecated': False},\n 'mmixware': {'id': 'MMIXware', 'deprecated': False},\n 'motosoto': {'id': 'Motosoto', 'deprecated': False},\n 'mpeg-ssg': {'id': 'MPEG-SSG', 'deprecated': False},\n 
'mpi-permissive': {'id': 'mpi-permissive', 'deprecated': False},\n 'mpich2': {'id': 'mpich2', 'deprecated': False},\n 'mpl-1.0': {'id': 'MPL-1.0', 'deprecated': False},\n 'mpl-1.1': {'id': 'MPL-1.1', 'deprecated': False},\n 'mpl-2.0': {'id': 'MPL-2.0', 'deprecated': False},\n 'mpl-2.0-no-copyleft-exception': {'id': 'MPL-2.0-no-copyleft-exception', 'deprecated': False},\n 'mplus': {'id': 'mplus', 'deprecated': False},\n 'ms-lpl': {'id': 'MS-LPL', 'deprecated': False},\n 'ms-pl': {'id': 'MS-PL', 'deprecated': False},\n 'ms-rl': {'id': 'MS-RL', 'deprecated': False},\n 'mtll': {'id': 'MTLL', 'deprecated': False},\n 'mulanpsl-1.0': {'id': 'MulanPSL-1.0', 'deprecated': False},\n 'mulanpsl-2.0': {'id': 'MulanPSL-2.0', 'deprecated': False},\n 'multics': {'id': 'Multics', 'deprecated': False},\n 'mup': {'id': 'Mup', 'deprecated': False},\n 'naist-2003': {'id': 'NAIST-2003', 'deprecated': False},\n 'nasa-1.3': {'id': 'NASA-1.3', 'deprecated': False},\n 'naumen': {'id': 'Naumen', 'deprecated': False},\n 'nbpl-1.0': {'id': 'NBPL-1.0', 'deprecated': False},\n 'ncbi-pd': {'id': 'NCBI-PD', 'deprecated': False},\n 'ncgl-uk-2.0': {'id': 'NCGL-UK-2.0', 'deprecated': False},\n 'ncl': {'id': 'NCL', 'deprecated': False},\n 'ncsa': {'id': 'NCSA', 'deprecated': False},\n 'net-snmp': {'id': 'Net-SNMP', 'deprecated': True},\n 'netcdf': {'id': 'NetCDF', 'deprecated': False},\n 'newsletr': {'id': 'Newsletr', 'deprecated': False},\n 'ngpl': {'id': 'NGPL', 'deprecated': False},\n 'nicta-1.0': {'id': 'NICTA-1.0', 'deprecated': False},\n 'nist-pd': {'id': 'NIST-PD', 'deprecated': False},\n 'nist-pd-fallback': {'id': 'NIST-PD-fallback', 'deprecated': False},\n 'nist-software': {'id': 'NIST-Software', 'deprecated': False},\n 'nlod-1.0': {'id': 'NLOD-1.0', 'deprecated': False},\n 'nlod-2.0': {'id': 'NLOD-2.0', 'deprecated': False},\n 'nlpl': {'id': 'NLPL', 'deprecated': False},\n 'nokia': {'id': 'Nokia', 'deprecated': False},\n 'nosl': {'id': 'NOSL', 'deprecated': False},\n 'noweb': {'id': 'Noweb', 
'deprecated': False},\n 'npl-1.0': {'id': 'NPL-1.0', 'deprecated': False},\n 'npl-1.1': {'id': 'NPL-1.1', 'deprecated': False},\n 'nposl-3.0': {'id': 'NPOSL-3.0', 'deprecated': False},\n 'nrl': {'id': 'NRL', 'deprecated': False},\n 'ntp': {'id': 'NTP', 'deprecated': False},\n 'ntp-0': {'id': 'NTP-0', 'deprecated': False},\n 'nunit': {'id': 'Nunit', 'deprecated': True},\n 'o-uda-1.0': {'id': 'O-UDA-1.0', 'deprecated': False},\n 'oar': {'id': 'OAR', 'deprecated': False},\n 'occt-pl': {'id': 'OCCT-PL', 'deprecated': False},\n 'oclc-2.0': {'id': 'OCLC-2.0', 'deprecated': False},\n 'odbl-1.0': {'id': 'ODbL-1.0', 'deprecated': False},\n 'odc-by-1.0': {'id': 'ODC-By-1.0', 'deprecated': False},\n 'offis': {'id': 'OFFIS', 'deprecated': False},\n 'ofl-1.0': {'id': 'OFL-1.0', 'deprecated': False},\n 'ofl-1.0-no-rfn': {'id': 'OFL-1.0-no-RFN', 'deprecated': False},\n 'ofl-1.0-rfn': {'id': 'OFL-1.0-RFN', 'deprecated': False},\n 'ofl-1.1': {'id': 'OFL-1.1', 'deprecated': False},\n 'ofl-1.1-no-rfn': {'id': 'OFL-1.1-no-RFN', 'deprecated': False},\n 'ofl-1.1-rfn': {'id': 'OFL-1.1-RFN', 'deprecated': False},\n 'ogc-1.0': {'id': 'OGC-1.0', 'deprecated': False},\n 'ogdl-taiwan-1.0': {'id': 'OGDL-Taiwan-1.0', 'deprecated': False},\n 'ogl-canada-2.0': {'id': 'OGL-Canada-2.0', 'deprecated': False},\n 'ogl-uk-1.0': {'id': 'OGL-UK-1.0', 'deprecated': False},\n 'ogl-uk-2.0': {'id': 'OGL-UK-2.0', 'deprecated': False},\n 'ogl-uk-3.0': {'id': 'OGL-UK-3.0', 'deprecated': False},\n 'ogtsl': {'id': 'OGTSL', 'deprecated': False},\n 'oldap-1.1': {'id': 'OLDAP-1.1', 'deprecated': False},\n 'oldap-1.2': {'id': 'OLDAP-1.2', 'deprecated': False},\n 'oldap-1.3': {'id': 'OLDAP-1.3', 'deprecated': False},\n 'oldap-1.4': {'id': 'OLDAP-1.4', 'deprecated': False},\n 'oldap-2.0': {'id': 'OLDAP-2.0', 'deprecated': False},\n 'oldap-2.0.1': {'id': 'OLDAP-2.0.1', 'deprecated': False},\n 'oldap-2.1': {'id': 'OLDAP-2.1', 'deprecated': False},\n 'oldap-2.2': {'id': 'OLDAP-2.2', 'deprecated': False},\n 'oldap-2.2.1': 
{'id': 'OLDAP-2.2.1', 'deprecated': False},\n 'oldap-2.2.2': {'id': 'OLDAP-2.2.2', 'deprecated': False},\n 'oldap-2.3': {'id': 'OLDAP-2.3', 'deprecated': False},\n 'oldap-2.4': {'id': 'OLDAP-2.4', 'deprecated': False},\n 'oldap-2.5': {'id': 'OLDAP-2.5', 'deprecated': False},\n 'oldap-2.6': {'id': 'OLDAP-2.6', 'deprecated': False},\n 'oldap-2.7': {'id': 'OLDAP-2.7', 'deprecated': False},\n 'oldap-2.8': {'id': 'OLDAP-2.8', 'deprecated': False},\n 'olfl-1.3': {'id': 'OLFL-1.3', 'deprecated': False},\n 'oml': {'id': 'OML', 'deprecated': False},\n 'openpbs-2.3': {'id': 'OpenPBS-2.3', 'deprecated': False},\n 'openssl': {'id': 'OpenSSL', 'deprecated': False},\n 'openssl-standalone': {'id': 'OpenSSL-standalone', 'deprecated': False},\n 'openvision': {'id': 'OpenVision', 'deprecated': False},\n 'opl-1.0': {'id': 'OPL-1.0', 'deprecated': False},\n 'opl-uk-3.0': {'id': 'OPL-UK-3.0', 'deprecated': False},\n 'opubl-1.0': {'id': 'OPUBL-1.0', 'deprecated': False},\n 'oset-pl-2.1': {'id': 'OSET-PL-2.1', 'deprecated': False},\n 'osl-1.0': {'id': 'OSL-1.0', 'deprecated': False},\n 'osl-1.1': {'id': 'OSL-1.1', 'deprecated': False},\n 'osl-2.0': {'id': 'OSL-2.0', 'deprecated': False},\n 'osl-2.1': {'id': 'OSL-2.1', 'deprecated': False},\n 'osl-3.0': {'id': 'OSL-3.0', 'deprecated': False},\n 'padl': {'id': 'PADL', 'deprecated': False},\n 'parity-6.0.0': {'id': 'Parity-6.0.0', 'deprecated': False},\n 'parity-7.0.0': {'id': 'Parity-7.0.0', 'deprecated': False},\n 'pddl-1.0': {'id': 'PDDL-1.0', 'deprecated': False},\n 'php-3.0': {'id': 'PHP-3.0', 'deprecated': False},\n 'php-3.01': {'id': 'PHP-3.01', 'deprecated': False},\n 'pixar': {'id': 'Pixar', 'deprecated': False},\n 'pkgconf': {'id': 'pkgconf', 'deprecated': False},\n 'plexus': {'id': 'Plexus', 'deprecated': False},\n 'pnmstitch': {'id': 'pnmstitch', 'deprecated': False},\n 'polyform-noncommercial-1.0.0': {'id': 'PolyForm-Noncommercial-1.0.0', 'deprecated': False},\n 'polyform-small-business-1.0.0': {'id': 
'PolyForm-Small-Business-1.0.0', 'deprecated': False},\n 'postgresql': {'id': 'PostgreSQL', 'deprecated': False},\n 'ppl': {'id': 'PPL', 'deprecated': False},\n 'psf-2.0': {'id': 'PSF-2.0', 'deprecated': False},\n 'psfrag': {'id': 'psfrag', 'deprecated': False},\n 'psutils': {'id': 'psutils', 'deprecated': False},\n 'python-2.0': {'id': 'Python-2.0', 'deprecated': False},\n 'python-2.0.1': {'id': 'Python-2.0.1', 'deprecated': False},\n 'python-ldap': {'id': 'python-ldap', 'deprecated': False},\n 'qhull': {'id': 'Qhull', 'deprecated': False},\n 'qpl-1.0': {'id': 'QPL-1.0', 'deprecated': False},\n 'qpl-1.0-inria-2004': {'id': 'QPL-1.0-INRIA-2004', 'deprecated': False},\n 'radvd': {'id': 'radvd', 'deprecated': False},\n 'rdisc': {'id': 'Rdisc', 'deprecated': False},\n 'rhecos-1.1': {'id': 'RHeCos-1.1', 'deprecated': False},\n 'rpl-1.1': {'id': 'RPL-1.1', 'deprecated': False},\n 'rpl-1.5': {'id': 'RPL-1.5', 'deprecated': False},\n 'rpsl-1.0': {'id': 'RPSL-1.0', 'deprecated': False},\n 'rsa-md': {'id': 'RSA-MD', 'deprecated': False},\n 'rscpl': {'id': 'RSCPL', 'deprecated': False},\n 'ruby': {'id': 'Ruby', 'deprecated': False},\n 'ruby-pty': {'id': 'Ruby-pty', 'deprecated': False},\n 'sax-pd': {'id': 'SAX-PD', 'deprecated': False},\n 'sax-pd-2.0': {'id': 'SAX-PD-2.0', 'deprecated': False},\n 'saxpath': {'id': 'Saxpath', 'deprecated': False},\n 'scea': {'id': 'SCEA', 'deprecated': False},\n 'schemereport': {'id': 'SchemeReport', 'deprecated': False},\n 'sendmail': {'id': 'Sendmail', 'deprecated': False},\n 'sendmail-8.23': {'id': 'Sendmail-8.23', 'deprecated': False},\n 'sgi-b-1.0': {'id': 'SGI-B-1.0', 'deprecated': False},\n 'sgi-b-1.1': {'id': 'SGI-B-1.1', 'deprecated': False},\n 'sgi-b-2.0': {'id': 'SGI-B-2.0', 'deprecated': False},\n 'sgi-opengl': {'id': 'SGI-OpenGL', 'deprecated': False},\n 'sgp4': {'id': 'SGP4', 'deprecated': False},\n 'shl-0.5': {'id': 'SHL-0.5', 'deprecated': False},\n 'shl-0.51': {'id': 'SHL-0.51', 'deprecated': False},\n 'simpl-2.0': {'id': 
'SimPL-2.0', 'deprecated': False},\n 'sissl': {'id': 'SISSL', 'deprecated': False},\n 'sissl-1.2': {'id': 'SISSL-1.2', 'deprecated': False},\n 'sl': {'id': 'SL', 'deprecated': False},\n 'sleepycat': {'id': 'Sleepycat', 'deprecated': False},\n 'smlnj': {'id': 'SMLNJ', 'deprecated': False},\n 'smppl': {'id': 'SMPPL', 'deprecated': False},\n 'snia': {'id': 'SNIA', 'deprecated': False},\n 'snprintf': {'id': 'snprintf', 'deprecated': False},\n 'softsurfer': {'id': 'softSurfer', 'deprecated': False},\n 'soundex': {'id': 'Soundex', 'deprecated': False},\n 'spencer-86': {'id': 'Spencer-86', 'deprecated': False},\n 'spencer-94': {'id': 'Spencer-94', 'deprecated': False},\n 'spencer-99': {'id': 'Spencer-99', 'deprecated': False},\n 'spl-1.0': {'id': 'SPL-1.0', 'deprecated': False},\n 'ssh-keyscan': {'id': 'ssh-keyscan', 'deprecated': False},\n 'ssh-openssh': {'id': 'SSH-OpenSSH', 'deprecated': False},\n 'ssh-short': {'id': 'SSH-short', 'deprecated': False},\n 'ssleay-standalone': {'id': 'SSLeay-standalone', 'deprecated': False},\n 'sspl-1.0': {'id': 'SSPL-1.0', 'deprecated': False},\n 'standardml-nj': {'id': 'StandardML-NJ', 'deprecated': True},\n 'sugarcrm-1.1.3': {'id': 'SugarCRM-1.1.3', 'deprecated': False},\n 'sun-ppp': {'id': 'Sun-PPP', 'deprecated': False},\n 'sun-ppp-2000': {'id': 'Sun-PPP-2000', 'deprecated': False},\n 'sunpro': {'id': 'SunPro', 'deprecated': False},\n 'swl': {'id': 'SWL', 'deprecated': False},\n 'swrule': {'id': 'swrule', 'deprecated': False},\n 'symlinks': {'id': 'Symlinks', 'deprecated': False},\n 'tapr-ohl-1.0': {'id': 'TAPR-OHL-1.0', 'deprecated': False},\n 'tcl': {'id': 'TCL', 'deprecated': False},\n 'tcp-wrappers': {'id': 'TCP-wrappers', 'deprecated': False},\n 'termreadkey': {'id': 'TermReadKey', 'deprecated': False},\n 'tgppl-1.0': {'id': 'TGPPL-1.0', 'deprecated': False},\n 'threeparttable': {'id': 'threeparttable', 'deprecated': False},\n 'tmate': {'id': 'TMate', 'deprecated': False},\n 'torque-1.1': {'id': 'TORQUE-1.1', 'deprecated': 
False},\n 'tosl': {'id': 'TOSL', 'deprecated': False},\n 'tpdl': {'id': 'TPDL', 'deprecated': False},\n 'tpl-1.0': {'id': 'TPL-1.0', 'deprecated': False},\n 'ttwl': {'id': 'TTWL', 'deprecated': False},\n 'ttyp0': {'id': 'TTYP0', 'deprecated': False},\n 'tu-berlin-1.0': {'id': 'TU-Berlin-1.0', 'deprecated': False},\n 'tu-berlin-2.0': {'id': 'TU-Berlin-2.0', 'deprecated': False},\n 'ubuntu-font-1.0': {'id': 'Ubuntu-font-1.0', 'deprecated': False},\n 'ucar': {'id': 'UCAR', 'deprecated': False},\n 'ucl-1.0': {'id': 'UCL-1.0', 'deprecated': False},\n 'ulem': {'id': 'ulem', 'deprecated': False},\n 'umich-merit': {'id': 'UMich-Merit', 'deprecated': False},\n 'unicode-3.0': {'id': 'Unicode-3.0', 'deprecated': False},\n 'unicode-dfs-2015': {'id': 'Unicode-DFS-2015', 'deprecated': False},\n 'unicode-dfs-2016': {'id': 'Unicode-DFS-2016', 'deprecated': False},\n 'unicode-tou': {'id': 'Unicode-TOU', 'deprecated': False},\n 'unixcrypt': {'id': 'UnixCrypt', 'deprecated': False},\n 'unlicense': {'id': 'Unlicense', 'deprecated': False},\n 'upl-1.0': {'id': 'UPL-1.0', 'deprecated': False},\n 'urt-rle': {'id': 'URT-RLE', 'deprecated': False},\n 'vim': {'id': 'Vim', 'deprecated': False},\n 'vostrom': {'id': 'VOSTROM', 'deprecated': False},\n 'vsl-1.0': {'id': 'VSL-1.0', 'deprecated': False},\n 'w3c': {'id': 'W3C', 'deprecated': False},\n 'w3c-19980720': {'id': 'W3C-19980720', 'deprecated': False},\n 'w3c-20150513': {'id': 'W3C-20150513', 'deprecated': False},\n 'w3m': {'id': 'w3m', 'deprecated': False},\n 'watcom-1.0': {'id': 'Watcom-1.0', 'deprecated': False},\n 'widget-workshop': {'id': 'Widget-Workshop', 'deprecated': False},\n 'wsuipa': {'id': 'Wsuipa', 'deprecated': False},\n 'wtfpl': {'id': 'WTFPL', 'deprecated': False},\n 'wxwindows': {'id': 'wxWindows', 'deprecated': True},\n 'x11': {'id': 'X11', 'deprecated': False},\n 'x11-distribute-modifications-variant': {'id': 'X11-distribute-modifications-variant', 'deprecated': False},\n 'x11-swapped': {'id': 'X11-swapped', 
'deprecated': False},\n 'xdebug-1.03': {'id': 'Xdebug-1.03', 'deprecated': False},\n 'xerox': {'id': 'Xerox', 'deprecated': False},\n 'xfig': {'id': 'Xfig', 'deprecated': False},\n 'xfree86-1.1': {'id': 'XFree86-1.1', 'deprecated': False},\n 'xinetd': {'id': 'xinetd', 'deprecated': False},\n 'xkeyboard-config-zinoviev': {'id': 'xkeyboard-config-Zinoviev', 'deprecated': False},\n 'xlock': {'id': 'xlock', 'deprecated': False},\n 'xnet': {'id': 'Xnet', 'deprecated': False},\n 'xpp': {'id': 'xpp', 'deprecated': False},\n 'xskat': {'id': 'XSkat', 'deprecated': False},\n 'xzoom': {'id': 'xzoom', 'deprecated': False},\n 'ypl-1.0': {'id': 'YPL-1.0', 'deprecated': False},\n 'ypl-1.1': {'id': 'YPL-1.1', 'deprecated': False},\n 'zed': {'id': 'Zed', 'deprecated': False},\n 'zeeff': {'id': 'Zeeff', 'deprecated': False},\n 'zend-2.0': {'id': 'Zend-2.0', 'deprecated': False},\n 'zimbra-1.3': {'id': 'Zimbra-1.3', 'deprecated': False},\n 'zimbra-1.4': {'id': 'Zimbra-1.4', 'deprecated': False},\n 'zlib': {'id': 'Zlib', 'deprecated': False},\n 'zlib-acknowledgement': {'id': 'zlib-acknowledgement', 'deprecated': False},\n 'zpl-1.1': {'id': 'ZPL-1.1', 'deprecated': False},\n 'zpl-2.0': {'id': 'ZPL-2.0', 'deprecated': False},\n 'zpl-2.1': {'id': 'ZPL-2.1', 'deprecated': False},\n}\n\nEXCEPTIONS: dict[str, SPDXException] = {\n '389-exception': {'id': '389-exception', 'deprecated': False},\n 'asterisk-exception': {'id': 'Asterisk-exception', 'deprecated': False},\n 'asterisk-linking-protocols-exception': {'id': 'Asterisk-linking-protocols-exception', 'deprecated': False},\n 'autoconf-exception-2.0': {'id': 'Autoconf-exception-2.0', 'deprecated': False},\n 'autoconf-exception-3.0': {'id': 'Autoconf-exception-3.0', 'deprecated': False},\n 'autoconf-exception-generic': {'id': 'Autoconf-exception-generic', 'deprecated': False},\n 'autoconf-exception-generic-3.0': {'id': 'Autoconf-exception-generic-3.0', 'deprecated': False},\n 'autoconf-exception-macro': {'id': 'Autoconf-exception-macro', 
'deprecated': False},\n 'bison-exception-1.24': {'id': 'Bison-exception-1.24', 'deprecated': False},\n 'bison-exception-2.2': {'id': 'Bison-exception-2.2', 'deprecated': False},\n 'bootloader-exception': {'id': 'Bootloader-exception', 'deprecated': False},\n 'classpath-exception-2.0': {'id': 'Classpath-exception-2.0', 'deprecated': False},\n 'clisp-exception-2.0': {'id': 'CLISP-exception-2.0', 'deprecated': False},\n 'cryptsetup-openssl-exception': {'id': 'cryptsetup-OpenSSL-exception', 'deprecated': False},\n 'digirule-foss-exception': {'id': 'DigiRule-FOSS-exception', 'deprecated': False},\n 'ecos-exception-2.0': {'id': 'eCos-exception-2.0', 'deprecated': False},\n 'erlang-otp-linking-exception': {'id': 'erlang-otp-linking-exception', 'deprecated': False},\n 'fawkes-runtime-exception': {'id': 'Fawkes-Runtime-exception', 'deprecated': False},\n 'fltk-exception': {'id': 'FLTK-exception', 'deprecated': False},\n 'fmt-exception': {'id': 'fmt-exception', 'deprecated': False},\n 'font-exception-2.0': {'id': 'Font-exception-2.0', 'deprecated': False},\n 'freertos-exception-2.0': {'id': 'freertos-exception-2.0', 'deprecated': False},\n 'gcc-exception-2.0': {'id': 'GCC-exception-2.0', 'deprecated': False},\n 'gcc-exception-2.0-note': {'id': 'GCC-exception-2.0-note', 'deprecated': False},\n 'gcc-exception-3.1': {'id': 'GCC-exception-3.1', 'deprecated': False},\n 'gmsh-exception': {'id': 'Gmsh-exception', 'deprecated': False},\n 'gnat-exception': {'id': 'GNAT-exception', 'deprecated': False},\n 'gnome-examples-exception': {'id': 'GNOME-examples-exception', 'deprecated': False},\n 'gnu-compiler-exception': {'id': 'GNU-compiler-exception', 'deprecated': False},\n 'gnu-javamail-exception': {'id': 'gnu-javamail-exception', 'deprecated': False},\n 'gpl-3.0-interface-exception': {'id': 'GPL-3.0-interface-exception', 'deprecated': False},\n 'gpl-3.0-linking-exception': {'id': 'GPL-3.0-linking-exception', 'deprecated': False},\n 'gpl-3.0-linking-source-exception': {'id': 
'GPL-3.0-linking-source-exception', 'deprecated': False},\n 'gpl-cc-1.0': {'id': 'GPL-CC-1.0', 'deprecated': False},\n 'gstreamer-exception-2005': {'id': 'GStreamer-exception-2005', 'deprecated': False},\n 'gstreamer-exception-2008': {'id': 'GStreamer-exception-2008', 'deprecated': False},\n 'i2p-gpl-java-exception': {'id': 'i2p-gpl-java-exception', 'deprecated': False},\n 'kicad-libraries-exception': {'id': 'KiCad-libraries-exception', 'deprecated': False},\n 'lgpl-3.0-linking-exception': {'id': 'LGPL-3.0-linking-exception', 'deprecated': False},\n 'libpri-openh323-exception': {'id': 'libpri-OpenH323-exception', 'deprecated': False},\n 'libtool-exception': {'id': 'Libtool-exception', 'deprecated': False},\n 'linux-syscall-note': {'id': 'Linux-syscall-note', 'deprecated': False},\n 'llgpl': {'id': 'LLGPL', 'deprecated': False},\n 'llvm-exception': {'id': 'LLVM-exception', 'deprecated': False},\n 'lzma-exception': {'id': 'LZMA-exception', 'deprecated': False},\n 'mif-exception': {'id': 'mif-exception', 'deprecated': False},\n 'nokia-qt-exception-1.1': {'id': 'Nokia-Qt-exception-1.1', 'deprecated': True},\n 'ocaml-lgpl-linking-exception': {'id': 'OCaml-LGPL-linking-exception', 'deprecated': False},\n 'occt-exception-1.0': {'id': 'OCCT-exception-1.0', 'deprecated': False},\n 'openjdk-assembly-exception-1.0': {'id': 'OpenJDK-assembly-exception-1.0', 'deprecated': False},\n 'openvpn-openssl-exception': {'id': 'openvpn-openssl-exception', 'deprecated': False},\n 'pcre2-exception': {'id': 'PCRE2-exception', 'deprecated': False},\n 'ps-or-pdf-font-exception-20170817': {'id': 'PS-or-PDF-font-exception-20170817', 'deprecated': False},\n 'qpl-1.0-inria-2004-exception': {'id': 'QPL-1.0-INRIA-2004-exception', 'deprecated': False},\n 'qt-gpl-exception-1.0': {'id': 'Qt-GPL-exception-1.0', 'deprecated': False},\n 'qt-lgpl-exception-1.1': {'id': 'Qt-LGPL-exception-1.1', 'deprecated': False},\n 'qwt-exception-1.0': {'id': 'Qwt-exception-1.0', 'deprecated': False},\n 
'romic-exception': {'id': 'romic-exception', 'deprecated': False},\n 'rrdtool-floss-exception-2.0': {'id': 'RRDtool-FLOSS-exception-2.0', 'deprecated': False},\n 'sane-exception': {'id': 'SANE-exception', 'deprecated': False},\n 'shl-2.0': {'id': 'SHL-2.0', 'deprecated': False},\n 'shl-2.1': {'id': 'SHL-2.1', 'deprecated': False},\n 'stunnel-exception': {'id': 'stunnel-exception', 'deprecated': False},\n 'swi-exception': {'id': 'SWI-exception', 'deprecated': False},\n 'swift-exception': {'id': 'Swift-exception', 'deprecated': False},\n 'texinfo-exception': {'id': 'Texinfo-exception', 'deprecated': False},\n 'u-boot-exception-2.0': {'id': 'u-boot-exception-2.0', 'deprecated': False},\n 'ubdl-exception': {'id': 'UBDL-exception', 'deprecated': False},\n 'universal-foss-exception-1.0': {'id': 'Universal-FOSS-exception-1.0', 'deprecated': False},\n 'vsftpd-openssl-exception': {'id': 'vsftpd-openssl-exception', 'deprecated': False},\n 'wxwindows-exception-3.1': {'id': 'WxWindows-exception-3.1', 'deprecated': False},\n 'x11vnc-openssl-exception': {'id': 'x11vnc-openssl-exception', 'deprecated': False},\n}\n
.venv\Lib\site-packages\pip\_vendor\packaging\licenses\_spdx.py
_spdx.py
Python
48,398
0.85
0.002635
0
python-kit
135
2025-05-21T08:37:36.474221
MIT
false
313a72cf4425cf31a445d4745d659eb3
#######################################################################################\n#\n# Adapted from:\n# https://github.com/pypa/hatch/blob/5352e44/backend/src/hatchling/licenses/parse.py\n#\n# MIT License\n#\n# Copyright (c) 2017-present Ofek Lev <oss@ofek.dev>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this\n# software and associated documentation files (the "Software"), to deal in the Software\n# without restriction, including without limitation the rights to use, copy, modify,\n# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies\n# or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n#\n# With additional allowance of arbitrary `LicenseRef-` identifiers, not just\n# `LicenseRef-Public-Domain` and `LicenseRef-Proprietary`.\n#\n#######################################################################################\nfrom __future__ import annotations\n\nimport re\nfrom typing import NewType, cast\n\nfrom pip._vendor.packaging.licenses._spdx import EXCEPTIONS, LICENSES\n\n__all__ = [\n "InvalidLicenseExpression",\n "NormalizedLicenseExpression",\n "canonicalize_license_expression",\n]\n\nlicense_ref_allowed = re.compile("^[A-Za-z0-9.-]*$")\n\nNormalizedLicenseExpression = NewType("NormalizedLicenseExpression", str)\n\n\nclass InvalidLicenseExpression(ValueError):\n """Raised when a license-expression string is invalid\n\n >>> canonicalize_license_expression("invalid")\n Traceback (most recent call last):\n ...\n packaging.licenses.InvalidLicenseExpression: Invalid license expression: 'invalid'\n """\n\n\ndef canonicalize_license_expression(\n raw_license_expression: str,\n) -> NormalizedLicenseExpression:\n if not raw_license_expression:\n message = f"Invalid license expression: {raw_license_expression!r}"\n raise InvalidLicenseExpression(message)\n\n # Pad any parentheses so tokenization can be achieved by merely splitting on\n # whitespace.\n license_expression = raw_license_expression.replace("(", " ( ").replace(")", " ) ")\n licenseref_prefix = "LicenseRef-"\n license_refs = {\n ref.lower(): "LicenseRef-" + ref[len(licenseref_prefix) :]\n for ref in license_expression.split()\n if ref.lower().startswith(licenseref_prefix.lower())\n }\n\n # Normalize to lower case so we can look up licenses/exceptions\n # and so boolean operators are Python-compatible.\n license_expression = 
license_expression.lower()\n\n tokens = license_expression.split()\n\n # Rather than implementing boolean logic, we create an expression that Python can\n # parse. Everything that is not involved with the grammar itself is treated as\n # `False` and the expression should evaluate as such.\n python_tokens = []\n for token in tokens:\n if token not in {"or", "and", "with", "(", ")"}:\n python_tokens.append("False")\n elif token == "with":\n python_tokens.append("or")\n elif token == "(" and python_tokens and python_tokens[-1] not in {"or", "and"}:\n message = f"Invalid license expression: {raw_license_expression!r}"\n raise InvalidLicenseExpression(message)\n else:\n python_tokens.append(token)\n\n python_expression = " ".join(python_tokens)\n try:\n invalid = eval(python_expression, globals(), locals())\n except Exception:\n invalid = True\n\n if invalid is not False:\n message = f"Invalid license expression: {raw_license_expression!r}"\n raise InvalidLicenseExpression(message) from None\n\n # Take a final pass to check for unknown licenses/exceptions.\n normalized_tokens = []\n for token in tokens:\n if token in {"or", "and", "with", "(", ")"}:\n normalized_tokens.append(token.upper())\n continue\n\n if normalized_tokens and normalized_tokens[-1] == "WITH":\n if token not in EXCEPTIONS:\n message = f"Unknown license exception: {token!r}"\n raise InvalidLicenseExpression(message)\n\n normalized_tokens.append(EXCEPTIONS[token]["id"])\n else:\n if token.endswith("+"):\n final_token = token[:-1]\n suffix = "+"\n else:\n final_token = token\n suffix = ""\n\n if final_token.startswith("licenseref-"):\n if not license_ref_allowed.match(final_token):\n message = f"Invalid licenseref: {final_token!r}"\n raise InvalidLicenseExpression(message)\n normalized_tokens.append(license_refs[final_token] + suffix)\n else:\n if final_token not in LICENSES:\n message = f"Unknown license: {final_token!r}"\n raise InvalidLicenseExpression(message)\n 
normalized_tokens.append(LICENSES[final_token]["id"] + suffix)\n\n normalized_expression = " ".join(normalized_tokens)\n\n return cast(\n NormalizedLicenseExpression,\n normalized_expression.replace("( ", "(").replace(" )", ")"),\n )\n
.venv\Lib\site-packages\pip\_vendor\packaging\licenses\__init__.py
__init__.py
Python
5,727
0.95
0.124138
0.317073
node-utils
125
2025-07-06T02:32:03.141777
GPL-3.0
false
e1e7defe941a92253fe300ed1458573b
\n\n
.venv\Lib\site-packages\pip\_vendor\packaging\licenses\__pycache__\_spdx.cpython-313.pyc
_spdx.cpython-313.pyc
Other
47,443
0.8
0
0
awesome-app
19
2024-12-06T15:52:22.442753
GPL-3.0
false
26db3a1041939d4a755972eb0bfd29ee
\n\n
.venv\Lib\site-packages\pip\_vendor\packaging\licenses\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
4,314
0.8
0
0
vue-tools
596
2023-12-03T09:40:23.981680
Apache-2.0
false
95a6faceed7a6611f14805b5954dca96
\n\n
.venv\Lib\site-packages\pip\_vendor\packaging\__pycache__\markers.cpython-313.pyc
markers.cpython-313.pyc
Other
13,103
0.95
0.027027
0
awesome-app
844
2023-09-18T21:56:00.825753
Apache-2.0
false
87f7ce90ab1862adb3e4b7dd821c4436
\n\n
.venv\Lib\site-packages\pip\_vendor\packaging\__pycache__\metadata.cpython-313.pyc
metadata.cpython-313.pyc
Other
27,372
0.95
0.061224
0.0131
react-lib
199
2023-09-28T16:10:14.125327
MIT
false
fdf0cf19e92f46c4e590d1d0b1731df9
\n\n
.venv\Lib\site-packages\pip\_vendor\packaging\__pycache__\requirements.cpython-313.pyc
requirements.cpython-313.pyc
Other
4,637
0.95
0
0
react-lib
84
2025-03-09T07:45:00.885939
BSD-3-Clause
false
ad0efda1a22505b3b234ac8cd562516a
\n\n
.venv\Lib\site-packages\pip\_vendor\packaging\__pycache__\specifiers.cpython-313.pyc
specifiers.cpython-313.pyc
Other
37,669
0.95
0.062615
0.055901
python-kit
771
2024-03-15T09:31:21.764143
BSD-3-Clause
false
3a1741aefda1ea540f3841c586600a7b
\n\n
.venv\Lib\site-packages\pip\_vendor\packaging\__pycache__\tags.cpython-313.pyc
tags.cpython-313.pyc
Other
24,977
0.95
0.064626
0
awesome-app
397
2025-03-13T09:41:17.207725
GPL-3.0
false
40c454a141e7dd4e2f6024db7da94714
\n\n
.venv\Lib\site-packages\pip\_vendor\packaging\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
6,761
0.8
0
0
node-utils
825
2023-11-07T15:32:07.511302
MIT
false
65b07fcfc7eb7397a2ef85e39bd87db7