Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__init__.py +28 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/_cmd.py +70 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/adapter.py +161 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/cache.py +74 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__init__.py +8 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py +181 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py +48 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/controller.py +494 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/filewrapper.py +119 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/heuristics.py +154 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/py.typed +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/serialize.py +206 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/wrapper.py +43 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/__init__.py +82 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/__main__.py +17 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/cmdline.py +668 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py +70 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/filter.py +71 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__init__.py +940 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py +124 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/bbcode.py +108 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py +170 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py +645 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/irc.py +154 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/latex.py +521 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py +83 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/rtf.py +146 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/svg.py +188 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py +127 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py +943 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__init__.py +362 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-311.pyc +0 -0
.gitattributes
CHANGED
|
@@ -335,3 +335,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
|
|
| 335 |
.venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust.abi3.so filter=lfs diff=lfs merge=lfs -text
|
| 336 |
.venv/lib/python3.11/site-packages/pip/_vendor/chardet/__pycache__/langrussianmodel.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 337 |
.venv/lib/python3.11/site-packages/pip/_vendor/__pycache__/typing_extensions.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 335 |
.venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust.abi3.so filter=lfs diff=lfs merge=lfs -text
|
| 336 |
.venv/lib/python3.11/site-packages/pip/_vendor/chardet/__pycache__/langrussianmodel.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 337 |
.venv/lib/python3.11/site-packages/pip/_vendor/__pycache__/typing_extensions.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 338 |
+
.venv/lib/python3.11/site-packages/pip/_vendor/rich/__pycache__/_emoji_codes.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
"""CacheControl import Interface.
|
| 6 |
+
|
| 7 |
+
Make it easy to import from cachecontrol without long namespaces.
|
| 8 |
+
"""
|
| 9 |
+
__author__ = "Eric Larson"
|
| 10 |
+
__email__ = "eric@ionrock.org"
|
| 11 |
+
__version__ = "0.13.1"
|
| 12 |
+
|
| 13 |
+
from pip._vendor.cachecontrol.adapter import CacheControlAdapter
|
| 14 |
+
from pip._vendor.cachecontrol.controller import CacheController
|
| 15 |
+
from pip._vendor.cachecontrol.wrapper import CacheControl
|
| 16 |
+
|
| 17 |
+
__all__ = [
|
| 18 |
+
"__author__",
|
| 19 |
+
"__email__",
|
| 20 |
+
"__version__",
|
| 21 |
+
"CacheControlAdapter",
|
| 22 |
+
"CacheController",
|
| 23 |
+
"CacheControl",
|
| 24 |
+
]
|
| 25 |
+
|
| 26 |
+
import logging
|
| 27 |
+
|
| 28 |
+
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (971 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-311.pyc
ADDED
|
Binary file (3.02 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-311.pyc
ADDED
|
Binary file (6.9 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-311.pyc
ADDED
|
Binary file (4.5 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-311.pyc
ADDED
|
Binary file (18.2 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-311.pyc
ADDED
|
Binary file (4.75 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-311.pyc
ADDED
|
Binary file (7.55 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-311.pyc
ADDED
|
Binary file (7.03 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-311.pyc
ADDED
|
Binary file (1.86 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/_cmd.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import logging
|
| 7 |
+
from argparse import ArgumentParser
|
| 8 |
+
from typing import TYPE_CHECKING
|
| 9 |
+
|
| 10 |
+
from pip._vendor import requests
|
| 11 |
+
|
| 12 |
+
from pip._vendor.cachecontrol.adapter import CacheControlAdapter
|
| 13 |
+
from pip._vendor.cachecontrol.cache import DictCache
|
| 14 |
+
from pip._vendor.cachecontrol.controller import logger
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from argparse import Namespace
|
| 18 |
+
|
| 19 |
+
from pip._vendor.cachecontrol.controller import CacheController
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def setup_logging() -> None:
|
| 23 |
+
logger.setLevel(logging.DEBUG)
|
| 24 |
+
handler = logging.StreamHandler()
|
| 25 |
+
logger.addHandler(handler)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_session() -> requests.Session:
|
| 29 |
+
adapter = CacheControlAdapter(
|
| 30 |
+
DictCache(), cache_etags=True, serializer=None, heuristic=None
|
| 31 |
+
)
|
| 32 |
+
sess = requests.Session()
|
| 33 |
+
sess.mount("http://", adapter)
|
| 34 |
+
sess.mount("https://", adapter)
|
| 35 |
+
|
| 36 |
+
sess.cache_controller = adapter.controller # type: ignore[attr-defined]
|
| 37 |
+
return sess
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def get_args() -> Namespace:
|
| 41 |
+
parser = ArgumentParser()
|
| 42 |
+
parser.add_argument("url", help="The URL to try and cache")
|
| 43 |
+
return parser.parse_args()
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def main() -> None:
|
| 47 |
+
args = get_args()
|
| 48 |
+
sess = get_session()
|
| 49 |
+
|
| 50 |
+
# Make a request to get a response
|
| 51 |
+
resp = sess.get(args.url)
|
| 52 |
+
|
| 53 |
+
# Turn on logging
|
| 54 |
+
setup_logging()
|
| 55 |
+
|
| 56 |
+
# try setting the cache
|
| 57 |
+
cache_controller: CacheController = (
|
| 58 |
+
sess.cache_controller # type: ignore[attr-defined]
|
| 59 |
+
)
|
| 60 |
+
cache_controller.cache_response(resp.request, resp.raw)
|
| 61 |
+
|
| 62 |
+
# Now try to get it
|
| 63 |
+
if cache_controller.cached_request(resp.request):
|
| 64 |
+
print("Cached!")
|
| 65 |
+
else:
|
| 66 |
+
print("Not cached :(")
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
if __name__ == "__main__":
|
| 70 |
+
main()
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/adapter.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import functools
|
| 7 |
+
import types
|
| 8 |
+
import zlib
|
| 9 |
+
from typing import TYPE_CHECKING, Any, Collection, Mapping
|
| 10 |
+
|
| 11 |
+
from pip._vendor.requests.adapters import HTTPAdapter
|
| 12 |
+
|
| 13 |
+
from pip._vendor.cachecontrol.cache import DictCache
|
| 14 |
+
from pip._vendor.cachecontrol.controller import PERMANENT_REDIRECT_STATUSES, CacheController
|
| 15 |
+
from pip._vendor.cachecontrol.filewrapper import CallbackFileWrapper
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from pip._vendor.requests import PreparedRequest, Response
|
| 19 |
+
from pip._vendor.urllib3 import HTTPResponse
|
| 20 |
+
|
| 21 |
+
from pip._vendor.cachecontrol.cache import BaseCache
|
| 22 |
+
from pip._vendor.cachecontrol.heuristics import BaseHeuristic
|
| 23 |
+
from pip._vendor.cachecontrol.serialize import Serializer
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class CacheControlAdapter(HTTPAdapter):
|
| 27 |
+
invalidating_methods = {"PUT", "PATCH", "DELETE"}
|
| 28 |
+
|
| 29 |
+
def __init__(
|
| 30 |
+
self,
|
| 31 |
+
cache: BaseCache | None = None,
|
| 32 |
+
cache_etags: bool = True,
|
| 33 |
+
controller_class: type[CacheController] | None = None,
|
| 34 |
+
serializer: Serializer | None = None,
|
| 35 |
+
heuristic: BaseHeuristic | None = None,
|
| 36 |
+
cacheable_methods: Collection[str] | None = None,
|
| 37 |
+
*args: Any,
|
| 38 |
+
**kw: Any,
|
| 39 |
+
) -> None:
|
| 40 |
+
super().__init__(*args, **kw)
|
| 41 |
+
self.cache = DictCache() if cache is None else cache
|
| 42 |
+
self.heuristic = heuristic
|
| 43 |
+
self.cacheable_methods = cacheable_methods or ("GET",)
|
| 44 |
+
|
| 45 |
+
controller_factory = controller_class or CacheController
|
| 46 |
+
self.controller = controller_factory(
|
| 47 |
+
self.cache, cache_etags=cache_etags, serializer=serializer
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
def send(
|
| 51 |
+
self,
|
| 52 |
+
request: PreparedRequest,
|
| 53 |
+
stream: bool = False,
|
| 54 |
+
timeout: None | float | tuple[float, float] | tuple[float, None] = None,
|
| 55 |
+
verify: bool | str = True,
|
| 56 |
+
cert: (None | bytes | str | tuple[bytes | str, bytes | str]) = None,
|
| 57 |
+
proxies: Mapping[str, str] | None = None,
|
| 58 |
+
cacheable_methods: Collection[str] | None = None,
|
| 59 |
+
) -> Response:
|
| 60 |
+
"""
|
| 61 |
+
Send a request. Use the request information to see if it
|
| 62 |
+
exists in the cache and cache the response if we need to and can.
|
| 63 |
+
"""
|
| 64 |
+
cacheable = cacheable_methods or self.cacheable_methods
|
| 65 |
+
if request.method in cacheable:
|
| 66 |
+
try:
|
| 67 |
+
cached_response = self.controller.cached_request(request)
|
| 68 |
+
except zlib.error:
|
| 69 |
+
cached_response = None
|
| 70 |
+
if cached_response:
|
| 71 |
+
return self.build_response(request, cached_response, from_cache=True)
|
| 72 |
+
|
| 73 |
+
# check for etags and add headers if appropriate
|
| 74 |
+
request.headers.update(self.controller.conditional_headers(request))
|
| 75 |
+
|
| 76 |
+
resp = super().send(request, stream, timeout, verify, cert, proxies)
|
| 77 |
+
|
| 78 |
+
return resp
|
| 79 |
+
|
| 80 |
+
def build_response(
|
| 81 |
+
self,
|
| 82 |
+
request: PreparedRequest,
|
| 83 |
+
response: HTTPResponse,
|
| 84 |
+
from_cache: bool = False,
|
| 85 |
+
cacheable_methods: Collection[str] | None = None,
|
| 86 |
+
) -> Response:
|
| 87 |
+
"""
|
| 88 |
+
Build a response by making a request or using the cache.
|
| 89 |
+
|
| 90 |
+
This will end up calling send and returning a potentially
|
| 91 |
+
cached response
|
| 92 |
+
"""
|
| 93 |
+
cacheable = cacheable_methods or self.cacheable_methods
|
| 94 |
+
if not from_cache and request.method in cacheable:
|
| 95 |
+
# Check for any heuristics that might update headers
|
| 96 |
+
# before trying to cache.
|
| 97 |
+
if self.heuristic:
|
| 98 |
+
response = self.heuristic.apply(response)
|
| 99 |
+
|
| 100 |
+
# apply any expiration heuristics
|
| 101 |
+
if response.status == 304:
|
| 102 |
+
# We must have sent an ETag request. This could mean
|
| 103 |
+
# that we've been expired already or that we simply
|
| 104 |
+
# have an etag. In either case, we want to try and
|
| 105 |
+
# update the cache if that is the case.
|
| 106 |
+
cached_response = self.controller.update_cached_response(
|
| 107 |
+
request, response
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
if cached_response is not response:
|
| 111 |
+
from_cache = True
|
| 112 |
+
|
| 113 |
+
# We are done with the server response, read a
|
| 114 |
+
# possible response body (compliant servers will
|
| 115 |
+
# not return one, but we cannot be 100% sure) and
|
| 116 |
+
# release the connection back to the pool.
|
| 117 |
+
response.read(decode_content=False)
|
| 118 |
+
response.release_conn()
|
| 119 |
+
|
| 120 |
+
response = cached_response
|
| 121 |
+
|
| 122 |
+
# We always cache the 301 responses
|
| 123 |
+
elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
|
| 124 |
+
self.controller.cache_response(request, response)
|
| 125 |
+
else:
|
| 126 |
+
# Wrap the response file with a wrapper that will cache the
|
| 127 |
+
# response when the stream has been consumed.
|
| 128 |
+
response._fp = CallbackFileWrapper( # type: ignore[attr-defined]
|
| 129 |
+
response._fp, # type: ignore[attr-defined]
|
| 130 |
+
functools.partial(
|
| 131 |
+
self.controller.cache_response, request, response
|
| 132 |
+
),
|
| 133 |
+
)
|
| 134 |
+
if response.chunked:
|
| 135 |
+
super_update_chunk_length = response._update_chunk_length # type: ignore[attr-defined]
|
| 136 |
+
|
| 137 |
+
def _update_chunk_length(self: HTTPResponse) -> None:
|
| 138 |
+
super_update_chunk_length()
|
| 139 |
+
if self.chunk_left == 0:
|
| 140 |
+
self._fp._close() # type: ignore[attr-defined]
|
| 141 |
+
|
| 142 |
+
response._update_chunk_length = types.MethodType( # type: ignore[attr-defined]
|
| 143 |
+
_update_chunk_length, response
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
resp: Response = super().build_response(request, response) # type: ignore[no-untyped-call]
|
| 147 |
+
|
| 148 |
+
# See if we should invalidate the cache.
|
| 149 |
+
if request.method in self.invalidating_methods and resp.ok:
|
| 150 |
+
assert request.url is not None
|
| 151 |
+
cache_url = self.controller.cache_url(request.url)
|
| 152 |
+
self.cache.delete(cache_url)
|
| 153 |
+
|
| 154 |
+
# Give the request a from_cache attr to let people use it
|
| 155 |
+
resp.from_cache = from_cache # type: ignore[attr-defined]
|
| 156 |
+
|
| 157 |
+
return resp
|
| 158 |
+
|
| 159 |
+
def close(self) -> None:
|
| 160 |
+
self.cache.close()
|
| 161 |
+
super().close() # type: ignore[no-untyped-call]
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/cache.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
The cache object API for implementing caches. The default is a thread
|
| 7 |
+
safe in-memory dictionary.
|
| 8 |
+
"""
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
from threading import Lock
|
| 12 |
+
from typing import IO, TYPE_CHECKING, MutableMapping
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from datetime import datetime
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class BaseCache:
|
| 19 |
+
def get(self, key: str) -> bytes | None:
|
| 20 |
+
raise NotImplementedError()
|
| 21 |
+
|
| 22 |
+
def set(
|
| 23 |
+
self, key: str, value: bytes, expires: int | datetime | None = None
|
| 24 |
+
) -> None:
|
| 25 |
+
raise NotImplementedError()
|
| 26 |
+
|
| 27 |
+
def delete(self, key: str) -> None:
|
| 28 |
+
raise NotImplementedError()
|
| 29 |
+
|
| 30 |
+
def close(self) -> None:
|
| 31 |
+
pass
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class DictCache(BaseCache):
|
| 35 |
+
def __init__(self, init_dict: MutableMapping[str, bytes] | None = None) -> None:
|
| 36 |
+
self.lock = Lock()
|
| 37 |
+
self.data = init_dict or {}
|
| 38 |
+
|
| 39 |
+
def get(self, key: str) -> bytes | None:
|
| 40 |
+
return self.data.get(key, None)
|
| 41 |
+
|
| 42 |
+
def set(
|
| 43 |
+
self, key: str, value: bytes, expires: int | datetime | None = None
|
| 44 |
+
) -> None:
|
| 45 |
+
with self.lock:
|
| 46 |
+
self.data.update({key: value})
|
| 47 |
+
|
| 48 |
+
def delete(self, key: str) -> None:
|
| 49 |
+
with self.lock:
|
| 50 |
+
if key in self.data:
|
| 51 |
+
self.data.pop(key)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class SeparateBodyBaseCache(BaseCache):
|
| 55 |
+
"""
|
| 56 |
+
In this variant, the body is not stored mixed in with the metadata, but is
|
| 57 |
+
passed in (as a bytes-like object) in a separate call to ``set_body()``.
|
| 58 |
+
|
| 59 |
+
That is, the expected interaction pattern is::
|
| 60 |
+
|
| 61 |
+
cache.set(key, serialized_metadata)
|
| 62 |
+
cache.set_body(key)
|
| 63 |
+
|
| 64 |
+
Similarly, the body should be loaded separately via ``get_body()``.
|
| 65 |
+
"""
|
| 66 |
+
|
| 67 |
+
def set_body(self, key: str, body: bytes) -> None:
|
| 68 |
+
raise NotImplementedError()
|
| 69 |
+
|
| 70 |
+
def get_body(self, key: str) -> IO[bytes] | None:
|
| 71 |
+
"""
|
| 72 |
+
Return the body as file-like object.
|
| 73 |
+
"""
|
| 74 |
+
raise NotImplementedError()
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
from pip._vendor.cachecontrol.caches.file_cache import FileCache, SeparateBodyFileCache
|
| 6 |
+
from pip._vendor.cachecontrol.caches.redis_cache import RedisCache
|
| 7 |
+
|
| 8 |
+
__all__ = ["FileCache", "SeparateBodyFileCache", "RedisCache"]
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (472 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-311.pyc
ADDED
|
Binary file (8.98 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-311.pyc
ADDED
|
Binary file (3.08 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import hashlib
|
| 7 |
+
import os
|
| 8 |
+
from textwrap import dedent
|
| 9 |
+
from typing import IO, TYPE_CHECKING
|
| 10 |
+
|
| 11 |
+
from pip._vendor.cachecontrol.cache import BaseCache, SeparateBodyBaseCache
|
| 12 |
+
from pip._vendor.cachecontrol.controller import CacheController
|
| 13 |
+
|
| 14 |
+
if TYPE_CHECKING:
|
| 15 |
+
from datetime import datetime
|
| 16 |
+
|
| 17 |
+
from filelock import BaseFileLock
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _secure_open_write(filename: str, fmode: int) -> IO[bytes]:
|
| 21 |
+
# We only want to write to this file, so open it in write only mode
|
| 22 |
+
flags = os.O_WRONLY
|
| 23 |
+
|
| 24 |
+
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
|
| 25 |
+
# will open *new* files.
|
| 26 |
+
# We specify this because we want to ensure that the mode we pass is the
|
| 27 |
+
# mode of the file.
|
| 28 |
+
flags |= os.O_CREAT | os.O_EXCL
|
| 29 |
+
|
| 30 |
+
# Do not follow symlinks to prevent someone from making a symlink that
|
| 31 |
+
# we follow and insecurely open a cache file.
|
| 32 |
+
if hasattr(os, "O_NOFOLLOW"):
|
| 33 |
+
flags |= os.O_NOFOLLOW
|
| 34 |
+
|
| 35 |
+
# On Windows we'll mark this file as binary
|
| 36 |
+
if hasattr(os, "O_BINARY"):
|
| 37 |
+
flags |= os.O_BINARY
|
| 38 |
+
|
| 39 |
+
# Before we open our file, we want to delete any existing file that is
|
| 40 |
+
# there
|
| 41 |
+
try:
|
| 42 |
+
os.remove(filename)
|
| 43 |
+
except OSError:
|
| 44 |
+
# The file must not exist already, so we can just skip ahead to opening
|
| 45 |
+
pass
|
| 46 |
+
|
| 47 |
+
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
|
| 48 |
+
# race condition happens between the os.remove and this line, that an
|
| 49 |
+
# error will be raised. Because we utilize a lockfile this should only
|
| 50 |
+
# happen if someone is attempting to attack us.
|
| 51 |
+
fd = os.open(filename, flags, fmode)
|
| 52 |
+
try:
|
| 53 |
+
return os.fdopen(fd, "wb")
|
| 54 |
+
|
| 55 |
+
except:
|
| 56 |
+
# An error occurred wrapping our FD in a file object
|
| 57 |
+
os.close(fd)
|
| 58 |
+
raise
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class _FileCacheMixin:
|
| 62 |
+
"""Shared implementation for both FileCache variants."""
|
| 63 |
+
|
| 64 |
+
def __init__(
|
| 65 |
+
self,
|
| 66 |
+
directory: str,
|
| 67 |
+
forever: bool = False,
|
| 68 |
+
filemode: int = 0o0600,
|
| 69 |
+
dirmode: int = 0o0700,
|
| 70 |
+
lock_class: type[BaseFileLock] | None = None,
|
| 71 |
+
) -> None:
|
| 72 |
+
try:
|
| 73 |
+
if lock_class is None:
|
| 74 |
+
from filelock import FileLock
|
| 75 |
+
|
| 76 |
+
lock_class = FileLock
|
| 77 |
+
except ImportError:
|
| 78 |
+
notice = dedent(
|
| 79 |
+
"""
|
| 80 |
+
NOTE: In order to use the FileCache you must have
|
| 81 |
+
filelock installed. You can install it via pip:
|
| 82 |
+
pip install filelock
|
| 83 |
+
"""
|
| 84 |
+
)
|
| 85 |
+
raise ImportError(notice)
|
| 86 |
+
|
| 87 |
+
self.directory = directory
|
| 88 |
+
self.forever = forever
|
| 89 |
+
self.filemode = filemode
|
| 90 |
+
self.dirmode = dirmode
|
| 91 |
+
self.lock_class = lock_class
|
| 92 |
+
|
| 93 |
+
@staticmethod
|
| 94 |
+
def encode(x: str) -> str:
|
| 95 |
+
return hashlib.sha224(x.encode()).hexdigest()
|
| 96 |
+
|
| 97 |
+
def _fn(self, name: str) -> str:
|
| 98 |
+
# NOTE: This method should not change as some may depend on it.
|
| 99 |
+
# See: https://github.com/ionrock/cachecontrol/issues/63
|
| 100 |
+
hashed = self.encode(name)
|
| 101 |
+
parts = list(hashed[:5]) + [hashed]
|
| 102 |
+
return os.path.join(self.directory, *parts)
|
| 103 |
+
|
| 104 |
+
def get(self, key: str) -> bytes | None:
|
| 105 |
+
name = self._fn(key)
|
| 106 |
+
try:
|
| 107 |
+
with open(name, "rb") as fh:
|
| 108 |
+
return fh.read()
|
| 109 |
+
|
| 110 |
+
except FileNotFoundError:
|
| 111 |
+
return None
|
| 112 |
+
|
| 113 |
+
def set(
|
| 114 |
+
self, key: str, value: bytes, expires: int | datetime | None = None
|
| 115 |
+
) -> None:
|
| 116 |
+
name = self._fn(key)
|
| 117 |
+
self._write(name, value)
|
| 118 |
+
|
| 119 |
+
def _write(self, path: str, data: bytes) -> None:
|
| 120 |
+
"""
|
| 121 |
+
Safely write the data to the given path.
|
| 122 |
+
"""
|
| 123 |
+
# Make sure the directory exists
|
| 124 |
+
try:
|
| 125 |
+
os.makedirs(os.path.dirname(path), self.dirmode)
|
| 126 |
+
except OSError:
|
| 127 |
+
pass
|
| 128 |
+
|
| 129 |
+
with self.lock_class(path + ".lock"):
|
| 130 |
+
# Write our actual file
|
| 131 |
+
with _secure_open_write(path, self.filemode) as fh:
|
| 132 |
+
fh.write(data)
|
| 133 |
+
|
| 134 |
+
def _delete(self, key: str, suffix: str) -> None:
|
| 135 |
+
name = self._fn(key) + suffix
|
| 136 |
+
if not self.forever:
|
| 137 |
+
try:
|
| 138 |
+
os.remove(name)
|
| 139 |
+
except FileNotFoundError:
|
| 140 |
+
pass
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class FileCache(_FileCacheMixin, BaseCache):
|
| 144 |
+
"""
|
| 145 |
+
Traditional FileCache: body is stored in memory, so not suitable for large
|
| 146 |
+
downloads.
|
| 147 |
+
"""
|
| 148 |
+
|
| 149 |
+
def delete(self, key: str) -> None:
|
| 150 |
+
self._delete(key, "")
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class SeparateBodyFileCache(_FileCacheMixin, SeparateBodyBaseCache):
|
| 154 |
+
"""
|
| 155 |
+
Memory-efficient FileCache: body is stored in a separate file, reducing
|
| 156 |
+
peak memory usage.
|
| 157 |
+
"""
|
| 158 |
+
|
| 159 |
+
def get_body(self, key: str) -> IO[bytes] | None:
|
| 160 |
+
name = self._fn(key) + ".body"
|
| 161 |
+
try:
|
| 162 |
+
return open(name, "rb")
|
| 163 |
+
except FileNotFoundError:
|
| 164 |
+
return None
|
| 165 |
+
|
| 166 |
+
def set_body(self, key: str, body: bytes) -> None:
|
| 167 |
+
name = self._fn(key) + ".body"
|
| 168 |
+
self._write(name, body)
|
| 169 |
+
|
| 170 |
+
def delete(self, key: str) -> None:
|
| 171 |
+
self._delete(key, "")
|
| 172 |
+
self._delete(key, ".body")
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def url_to_file_path(url: str, filecache: FileCache) -> str:
|
| 176 |
+
"""Return the file cache path based on the URL.
|
| 177 |
+
|
| 178 |
+
This does not ensure the file exists!
|
| 179 |
+
"""
|
| 180 |
+
key = CacheController.cache_url(url)
|
| 181 |
+
return filecache._fn(key)
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
from datetime import datetime, timezone
|
| 8 |
+
from typing import TYPE_CHECKING
|
| 9 |
+
|
| 10 |
+
from pip._vendor.cachecontrol.cache import BaseCache
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
from redis import Redis
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class RedisCache(BaseCache):
    """``BaseCache`` implementation backed by a Redis connection.

    Expiry is delegated to Redis itself: absolute ``datetime`` deadlines
    are converted to a relative TTL and stored with ``SETEX``.
    """

    def __init__(self, conn: Redis[bytes]) -> None:
        # The connection is shared; Redis handles pooling internally.
        self.conn = conn

    def get(self, key: str) -> bytes | None:
        """Return the cached bytes for *key*, or None if absent."""
        return self.conn.get(key)

    def set(
        self, key: str, value: bytes, expires: int | datetime | None = None
    ) -> None:
        """Store *value* under *key*, optionally expiring after *expires*.

        *expires* may be a TTL in seconds or an absolute ``datetime``.
        """
        if not expires:
            # No expiry requested: a plain SET.
            self.conn.set(key, value)
        elif isinstance(expires, datetime):
            # Turn the absolute deadline into a relative TTL for SETEX.
            reference = datetime.now(timezone.utc)
            if expires.tzinfo is None:
                # Compare naive against naive to avoid a TypeError.
                reference = reference.replace(tzinfo=None)
            ttl = expires - reference
            self.conn.setex(key, int(ttl.total_seconds()), value)
        else:
            self.conn.setex(key, expires, value)

    def delete(self, key: str) -> None:
        """Drop *key* from Redis."""
        self.conn.delete(key)

    def clear(self) -> None:
        """Helper for clearing all the keys in a database. Use with
        caution!"""
        for key in self.conn.keys():
            self.conn.delete(key)

    def close(self) -> None:
        """Redis uses connection pooling, no need to close the connection."""
        pass
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/controller.py
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
The httplib2 algorithms ported for use with requests.
|
| 7 |
+
"""
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import calendar
|
| 11 |
+
import logging
|
| 12 |
+
import re
|
| 13 |
+
import time
|
| 14 |
+
from email.utils import parsedate_tz
|
| 15 |
+
from typing import TYPE_CHECKING, Collection, Mapping
|
| 16 |
+
|
| 17 |
+
from pip._vendor.requests.structures import CaseInsensitiveDict
|
| 18 |
+
|
| 19 |
+
from pip._vendor.cachecontrol.cache import DictCache, SeparateBodyBaseCache
|
| 20 |
+
from pip._vendor.cachecontrol.serialize import Serializer
|
| 21 |
+
|
| 22 |
+
if TYPE_CHECKING:
|
| 23 |
+
from typing import Literal
|
| 24 |
+
|
| 25 |
+
from pip._vendor.requests import PreparedRequest
|
| 26 |
+
from pip._vendor.urllib3 import HTTPResponse
|
| 27 |
+
|
| 28 |
+
from pip._vendor.cachecontrol.cache import BaseCache
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger(__name__)

# Regex from Appendix B of RFC 3986: splits a URI into component groups.
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")

# Redirects that may be served from cache without revalidation.
PERMANENT_REDIRECT_STATUSES = (301, 308)


def parse_uri(uri: str) -> tuple[str, str, str, str, str]:
    """Split *uri* into ``(scheme, authority, path, query, fragment)``.

    Uses the regex given in Appendix B of RFC 3986; components that are
    absent come back as ``None``.
    """
    match = URI.match(uri)
    assert match is not None
    g = match.groups()
    return g[1], g[3], g[4], g[6], g[8]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class CacheController:
    """An interface to see if request should cached or not."""

    def __init__(
        self,
        cache: BaseCache | None = None,
        cache_etags: bool = True,
        serializer: Serializer | None = None,
        status_codes: Collection[int] | None = None,
    ):
        # Fall back to an in-memory dict cache when none is supplied.
        self.cache = DictCache() if cache is None else cache
        self.cache_etags = cache_etags
        self.serializer = serializer or Serializer()
        self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308)

    @classmethod
    def _urlnorm(cls, uri: str) -> str:
        """Normalize the URL to create a safe key for the cache"""
        (scheme, authority, path, query, fragment) = parse_uri(uri)
        if not scheme or not authority:
            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)

        scheme = scheme.lower()
        authority = authority.lower()

        if not path:
            path = "/"

        # Could do syntax based normalization of the URI before
        # computing the digest. See Section 6.2.2 of Std 66.
        request_uri = query and "?".join([path, query]) or path
        defrag_uri = scheme + "://" + authority + request_uri

        return defrag_uri

    @classmethod
    def cache_url(cls, uri: str) -> str:
        """Return the normalized cache key for *uri*."""
        return cls._urlnorm(uri)

    def parse_cache_control(self, headers: Mapping[str, str]) -> dict[str, int | None]:
        """Parse a Cache-Control header into a ``directive -> value`` dict.

        Unknown directives are ignored; valueless directives map to None.
        """
        known_directives = {
            # https://tools.ietf.org/html/rfc7234#section-5.2
            "max-age": (int, True),
            "max-stale": (int, False),
            "min-fresh": (int, True),
            "no-cache": (None, False),
            "no-store": (None, False),
            "no-transform": (None, False),
            "only-if-cached": (None, False),
            "must-revalidate": (None, False),
            "public": (None, False),
            "private": (None, False),
            "proxy-revalidate": (None, False),
            "s-maxage": (int, True),
        }

        cc_headers = headers.get("cache-control", headers.get("Cache-Control", ""))

        retval: dict[str, int | None] = {}

        for cc_directive in cc_headers.split(","):
            if not cc_directive.strip():
                continue

            parts = cc_directive.split("=", 1)
            directive = parts[0].strip()

            try:
                typ, required = known_directives[directive]
            except KeyError:
                logger.debug("Ignoring unknown cache-control directive: %s", directive)
                continue

            if not typ or not required:
                retval[directive] = None
            if typ:
                try:
                    retval[directive] = typ(parts[1].strip())
                except IndexError:
                    if required:
                        logger.debug(
                            "Missing value for cache-control " "directive: %s",
                            directive,
                        )
                except ValueError:
                    logger.debug(
                        "Invalid value for cache-control directive " "%s, must be %s",
                        directive,
                        typ.__name__,
                    )

        return retval

    def _load_from_cache(self, request: PreparedRequest) -> HTTPResponse | None:
        """
        Load a cached response, or return None if it's not available.
        """
        cache_url = request.url
        assert cache_url is not None
        cache_data = self.cache.get(cache_url)
        if cache_data is None:
            logger.debug("No cache entry available")
            return None

        # Separate-body caches store the payload out-of-band; fetch it so
        # the serializer can stitch the response back together.
        if isinstance(self.cache, SeparateBodyBaseCache):
            body_file = self.cache.get_body(cache_url)
        else:
            body_file = None

        result = self.serializer.loads(request, cache_data, body_file)
        if result is None:
            logger.warning("Cache entry deserialization failed, entry ignored")
        return result

    def cached_request(self, request: PreparedRequest) -> HTTPResponse | Literal[False]:
        """
        Return a cached response if it exists in the cache, otherwise
        return False.
        """
        assert request.url is not None
        cache_url = self.cache_url(request.url)
        logger.debug('Looking up "%s" in the cache', cache_url)
        cc = self.parse_cache_control(request.headers)

        # Bail out if the request insists on fresh data
        if "no-cache" in cc:
            logger.debug('Request header has "no-cache", cache bypassed')
            return False

        if "max-age" in cc and cc["max-age"] == 0:
            logger.debug('Request header has "max_age" as 0, cache bypassed')
            return False

        # Check whether we can load the response from the cache:
        resp = self._load_from_cache(request)
        if not resp:
            return False

        # If we have a cached permanent redirect, return it immediately. We
        # don't need to test our response for other headers b/c it is
        # intrinsically "cacheable" as it is Permanent.
        #
        # See:
        #   https://tools.ietf.org/html/rfc7231#section-6.4.2
        #
        # Client can try to refresh the value by repeating the request
        # with cache busting headers as usual (ie no-cache).
        if int(resp.status) in PERMANENT_REDIRECT_STATUSES:
            msg = (
                "Returning cached permanent redirect response "
                "(ignoring date and etag information)"
            )
            logger.debug(msg)
            return resp

        headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(resp.headers)
        if not headers or "date" not in headers:
            if "etag" not in headers:
                # Without date or etag, the cached response can never be used
                # and should be deleted.
                logger.debug("Purging cached response: no date or etag")
                self.cache.delete(cache_url)
            logger.debug("Ignoring cached response: no date")
            return False

        now = time.time()
        time_tuple = parsedate_tz(headers["date"])
        assert time_tuple is not None
        date = calendar.timegm(time_tuple[:6])
        current_age = max(0, now - date)
        logger.debug("Current age based on date: %i", current_age)

        # TODO: There is an assumption that the result will be a
        #       urllib3 response object. This may not be best since we
        #       could probably avoid instantiating or constructing the
        #       response until we know we need it.
        resp_cc = self.parse_cache_control(headers)

        # determine freshness
        freshness_lifetime = 0

        # Check the max-age pragma in the cache control header
        max_age = resp_cc.get("max-age")
        if max_age is not None:
            freshness_lifetime = max_age
            logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)

        # If there isn't a max-age, check for an expires header
        elif "expires" in headers:
            expires = parsedate_tz(headers["expires"])
            if expires is not None:
                expire_time = calendar.timegm(expires[:6]) - date
                freshness_lifetime = max(0, expire_time)
                logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)

        # Determine if we are setting freshness limit in the
        # request. Note, this overrides what was in the response.
        max_age = cc.get("max-age")
        if max_age is not None:
            freshness_lifetime = max_age
            logger.debug(
                "Freshness lifetime from request max-age: %i", freshness_lifetime
            )

        min_fresh = cc.get("min-fresh")
        if min_fresh is not None:
            # adjust our current age by our min fresh
            current_age += min_fresh
            logger.debug("Adjusted current age from min-fresh: %i", current_age)

        # Return entry if it is fresh enough
        if freshness_lifetime > current_age:
            logger.debug('The response is "fresh", returning cached response')
            logger.debug("%i > %i", freshness_lifetime, current_age)
            return resp

        # we're not fresh. If we don't have an Etag, clear it out
        if "etag" not in headers:
            logger.debug('The cached response is "stale" with no etag, purging')
            self.cache.delete(cache_url)

        # return the original handler
        return False

    def conditional_headers(self, request: PreparedRequest) -> dict[str, str]:
        """Build validation headers (If-None-Match / If-Modified-Since)
        from the cached response for *request*, if any."""
        resp = self._load_from_cache(request)
        new_headers = {}

        if resp:
            headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(resp.headers)

            if "etag" in headers:
                new_headers["If-None-Match"] = headers["ETag"]

            if "last-modified" in headers:
                new_headers["If-Modified-Since"] = headers["Last-Modified"]

        return new_headers

    def _cache_set(
        self,
        cache_url: str,
        request: PreparedRequest,
        response: HTTPResponse,
        body: bytes | None = None,
        expires_time: int | None = None,
    ) -> None:
        """
        Store the data in the cache.
        """
        if isinstance(self.cache, SeparateBodyBaseCache):
            # We pass in the body separately; just put a placeholder empty
            # string in the metadata.
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response, b""),
                expires=expires_time,
            )
            # body is None can happen when, for example, we're only updating
            # headers, as is the case in update_cached_response().
            if body is not None:
                self.cache.set_body(cache_url, body)
        else:
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response, body),
                expires=expires_time,
            )

    def cache_response(
        self,
        request: PreparedRequest,
        response: HTTPResponse,
        body: bytes | None = None,
        status_codes: Collection[int] | None = None,
    ) -> None:
        """
        Algorithm for caching requests.

        This assumes a requests Response object.
        """
        # From httplib2: Don't cache 206's since we aren't going to
        #                handle byte range requests
        cacheable_status_codes = status_codes or self.cacheable_status_codes
        if response.status not in cacheable_status_codes:
            logger.debug(
                "Status code %s not in %s", response.status, cacheable_status_codes
            )
            return

        response_headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(
            response.headers
        )

        if "date" in response_headers:
            time_tuple = parsedate_tz(response_headers["date"])
            assert time_tuple is not None
            date = calendar.timegm(time_tuple[:6])
        else:
            date = 0

        # If we've been given a body, our response has a Content-Length, that
        # Content-Length is valid then we can check to see if the body we've
        # been given matches the expected size, and if it doesn't we'll just
        # skip trying to cache it.
        if (
            body is not None
            and "content-length" in response_headers
            and response_headers["content-length"].isdigit()
            and int(response_headers["content-length"]) != len(body)
        ):
            return

        cc_req = self.parse_cache_control(request.headers)
        cc = self.parse_cache_control(response_headers)

        assert request.url is not None
        cache_url = self.cache_url(request.url)
        logger.debug('Updating cache with response from "%s"', cache_url)

        # Delete it from the cache if we happen to have it stored there
        no_store = False
        if "no-store" in cc:
            no_store = True
            logger.debug('Response header has "no-store"')
        if "no-store" in cc_req:
            no_store = True
            logger.debug('Request header has "no-store"')
        if no_store and self.cache.get(cache_url):
            logger.debug('Purging existing cache entry to honor "no-store"')
            self.cache.delete(cache_url)
        if no_store:
            return

        # https://tools.ietf.org/html/rfc7234#section-4.1:
        # A Vary header field-value of "*" always fails to match.
        # Storing such a response leads to a deserialization warning
        # during cache lookup and is not allowed to ever be served,
        # so storing it can be avoided.
        if "*" in response_headers.get("vary", ""):
            logger.debug('Response header has "Vary: *"')
            return

        # If we've been given an etag, then keep the response
        if self.cache_etags and "etag" in response_headers:
            expires_time = 0
            if response_headers.get("expires"):
                expires = parsedate_tz(response_headers["expires"])
                if expires is not None:
                    expires_time = calendar.timegm(expires[:6]) - date

            expires_time = max(expires_time, 14 * 86400)

            logger.debug(f"etag object cached for {expires_time} seconds")
            logger.debug("Caching due to etag")
            self._cache_set(cache_url, request, response, body, expires_time)

        # Add to the cache any permanent redirects. We do this before looking
        # that the Date headers.
        elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
            logger.debug("Caching permanent redirect")
            self._cache_set(cache_url, request, response, b"")

        # Add to the cache if the response headers demand it. If there
        # is no date header then we can't do anything about expiring
        # the cache.
        elif "date" in response_headers:
            time_tuple = parsedate_tz(response_headers["date"])
            assert time_tuple is not None
            date = calendar.timegm(time_tuple[:6])
            # cache when there is a max-age > 0
            max_age = cc.get("max-age")
            if max_age is not None and max_age > 0:
                logger.debug("Caching b/c date exists and max-age > 0")
                expires_time = max_age
                self._cache_set(
                    cache_url,
                    request,
                    response,
                    body,
                    expires_time,
                )

            # If the request can expire, it means we should cache it
            # in the meantime.
            elif "expires" in response_headers:
                if response_headers["expires"]:
                    expires = parsedate_tz(response_headers["expires"])
                    if expires is not None:
                        expires_time = calendar.timegm(expires[:6]) - date
                    else:
                        expires_time = None

                    logger.debug(
                        "Caching b/c of expires header. expires in {} seconds".format(
                            expires_time
                        )
                    )
                    self._cache_set(
                        cache_url,
                        request,
                        response,
                        body,
                        expires_time,
                    )

    def update_cached_response(
        self, request: PreparedRequest, response: HTTPResponse
    ) -> HTTPResponse:
        """On a 304 we will get a new set of headers that we want to
        update our cached value with, assuming we have one.

        This should only ever be called when we've sent an ETag and
        gotten a 304 as the response.
        """
        assert request.url is not None
        cache_url = self.cache_url(request.url)
        cached_response = self._load_from_cache(request)

        if not cached_response:
            # we didn't have a cached response
            return response

        # Lets update our headers with the headers from the new request:
        # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
        #
        # The server isn't supposed to send headers that would make
        # the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know that might be problmatic due to
        # typical assumptions.
        excluded_headers = ["content-length"]

        cached_response.headers.update(
            {
                k: v
                for k, v in response.headers.items()  # type: ignore[no-untyped-call]
                if k.lower() not in excluded_headers
            }
        )

        # we want a 200 b/c we have content via the cache
        cached_response.status = 200

        # update our cache
        self._cache_set(cache_url, request, cached_response)

        return cached_response
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/filewrapper.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import mmap
|
| 7 |
+
from tempfile import NamedTemporaryFile
|
| 8 |
+
from typing import TYPE_CHECKING, Any, Callable
|
| 9 |
+
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from http.client import HTTPResponse
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class CallbackFileWrapper:
    """
    Small wrapper around a fp object which will tee everything read into a
    buffer, and when that file is closed it will execute a callback with the
    contents of that buffer.

    All attributes are proxied to the underlying file object.

    This class uses members with a double underscore (__) leading prefix so as
    not to accidentally shadow an attribute.

    The data is stored in a temporary file until it is all available. As long
    as the temporary files directory is disk-based (sometimes it's a
    memory-backed-``tmpfs`` on Linux), data will be unloaded to disk if memory
    pressure is high. For small files the disk usually won't be used at all,
    it'll all be in the filesystem memory cache, so there should be no
    performance impact.
    """

    def __init__(
        self, fp: HTTPResponse, callback: Callable[[bytes], None] | None
    ) -> None:
        # Tee buffer on disk; auto-deleted when closed in _close().
        self.__buf = NamedTemporaryFile("rb+", delete=True)
        self.__fp = fp
        self.__callback = callback

    def __getattr__(self, name: str) -> Any:
        # The vaguaries of garbage collection means that self.__fp is
        # not always set. By using __getattribute__ and the private
        # name[0] allows looking up the attribute value and raising an
        # AttributeError when it doesn't exist. This stop thigns from
        # infinitely recursing calls to getattr in the case where
        # self.__fp hasn't been set.
        #
        # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
        fp = self.__getattribute__("_CallbackFileWrapper__fp")
        return getattr(fp, name)

    def __is_fp_closed(self) -> bool:
        # True once the wrapped response/file has been fully consumed.
        try:
            return self.__fp.fp is None

        except AttributeError:
            pass

        try:
            closed: bool = self.__fp.closed
            return closed

        except AttributeError:
            pass

        # We just don't cache it then.
        # TODO: Add some logging here...
        return False

    def _close(self) -> None:
        # Fire the callback with the teed data, then release the buffer.
        if self.__callback:
            if self.__buf.tell() == 0:
                # Empty file:
                result = b""
            else:
                # Return the data without actually loading it into memory,
                # relying on Python's buffer API and mmap(). mmap() just gives
                # a view directly into the filesystem's memory cache, so it
                # doesn't result in duplicate memory use.
                self.__buf.seek(0, 0)
                result = memoryview(
                    mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ)
                )
            self.__callback(result)

        # We assign this to None here, because otherwise we can get into
        # really tricky problems where the CPython interpreter dead locks
        # because the callback is holding a reference to something which
        # has a __del__ method. Setting this to None breaks the cycle
        # and allows the garbage collector to do it's thing normally.
        self.__callback = None

        # Closing the temporary file releases memory and frees disk space.
        # Important when caching big files.
        self.__buf.close()

    def read(self, amt: int | None = None) -> bytes:
        data: bytes = self.__fp.read(amt)
        if data:
            # We may be dealing with b'', a sign that things are over:
            # it's passed e.g. after we've already closed self.__buf.
            self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()

        return data

    def _safe_read(self, amt: int) -> bytes:
        data: bytes = self.__fp._safe_read(amt)  # type: ignore[attr-defined]
        if amt == 2 and data == b"\r\n":
            # urllib executes this read to toss the CRLF at the end
            # of the chunk.
            return data

        self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()

        return data
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/heuristics.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import calendar
|
| 7 |
+
import time
|
| 8 |
+
from datetime import datetime, timedelta, timezone
|
| 9 |
+
from email.utils import formatdate, parsedate, parsedate_tz
|
| 10 |
+
from typing import TYPE_CHECKING, Any, Mapping
|
| 11 |
+
|
| 12 |
+
if TYPE_CHECKING:
|
| 13 |
+
from pip._vendor.urllib3 import HTTPResponse
|
| 14 |
+
|
| 15 |
+
TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def expire_after(delta: timedelta, date: datetime | None = None) -> datetime:
    """Return *date* (defaulting to the current UTC time) shifted by *delta*."""
    base = date if date is not None else datetime.now(timezone.utc)
    return base + delta
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def datetime_to_header(dt: datetime) -> str:
    """Format *dt* as an RFC 2822/HTTP date header string."""
    epoch_seconds = calendar.timegm(dt.timetuple())
    return formatdate(epoch_seconds)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class BaseHeuristic:
    """Base class for cache freshness heuristics.

    Subclasses override ``update_headers`` (and optionally ``warning``);
    ``apply`` wires the two together onto a response.
    """

    def warning(self, response: HTTPResponse) -> str | None:
        """
        Return a valid 1xx warning header value describing the cache
        adjustments.

        The response is provided too allow warnings like 113
        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
        to explicitly say response is over 24 hours old.
        """
        return '110 - "Response is Stale"'

    def update_headers(self, response: HTTPResponse) -> dict[str, str]:
        """Update the response headers with any new headers.

        NOTE: This SHOULD always include some Warning header to
              signify that the response was cached by the client, not
              by way of the provided headers.
        """
        return {}

    def apply(self, response: HTTPResponse) -> HTTPResponse:
        """Apply this heuristic's header adjustments to *response* in place."""
        extra = self.update_headers(response)

        if extra:
            response.headers.update(extra)
            warning = self.warning(response)
            if warning is not None:
                response.headers.update({"Warning": warning})

        return response
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class OneDayCache(BaseHeuristic):
|
| 61 |
+
"""
|
| 62 |
+
Cache the response by providing an expires 1 day in the
|
| 63 |
+
future.
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
def update_headers(self, response: HTTPResponse) -> dict[str, str]:
|
| 67 |
+
headers = {}
|
| 68 |
+
|
| 69 |
+
if "expires" not in response.headers:
|
| 70 |
+
date = parsedate(response.headers["date"])
|
| 71 |
+
expires = expire_after(timedelta(days=1), date=datetime(*date[:6], tzinfo=timezone.utc)) # type: ignore[misc]
|
| 72 |
+
headers["expires"] = datetime_to_header(expires)
|
| 73 |
+
headers["cache-control"] = "public"
|
| 74 |
+
return headers
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class ExpiresAfter(BaseHeuristic):
|
| 78 |
+
"""
|
| 79 |
+
Cache **all** requests for a defined time period.
|
| 80 |
+
"""
|
| 81 |
+
|
| 82 |
+
def __init__(self, **kw: Any) -> None:
|
| 83 |
+
self.delta = timedelta(**kw)
|
| 84 |
+
|
| 85 |
+
def update_headers(self, response: HTTPResponse) -> dict[str, str]:
|
| 86 |
+
expires = expire_after(self.delta)
|
| 87 |
+
return {"expires": datetime_to_header(expires), "cache-control": "public"}
|
| 88 |
+
|
| 89 |
+
def warning(self, response: HTTPResponse) -> str | None:
|
| 90 |
+
tmpl = "110 - Automatically cached for %s. Response might be stale"
|
| 91 |
+
return tmpl % self.delta
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class LastModified(BaseHeuristic):
|
| 95 |
+
"""
|
| 96 |
+
If there is no Expires header already, fall back on Last-Modified
|
| 97 |
+
using the heuristic from
|
| 98 |
+
http://tools.ietf.org/html/rfc7234#section-4.2.2
|
| 99 |
+
to calculate a reasonable value.
|
| 100 |
+
|
| 101 |
+
Firefox also does something like this per
|
| 102 |
+
https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
|
| 103 |
+
http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
|
| 104 |
+
Unlike mozilla we limit this to 24-hr.
|
| 105 |
+
"""
|
| 106 |
+
|
| 107 |
+
cacheable_by_default_statuses = {
|
| 108 |
+
200,
|
| 109 |
+
203,
|
| 110 |
+
204,
|
| 111 |
+
206,
|
| 112 |
+
300,
|
| 113 |
+
301,
|
| 114 |
+
404,
|
| 115 |
+
405,
|
| 116 |
+
410,
|
| 117 |
+
414,
|
| 118 |
+
501,
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
def update_headers(self, resp: HTTPResponse) -> dict[str, str]:
|
| 122 |
+
headers: Mapping[str, str] = resp.headers
|
| 123 |
+
|
| 124 |
+
if "expires" in headers:
|
| 125 |
+
return {}
|
| 126 |
+
|
| 127 |
+
if "cache-control" in headers and headers["cache-control"] != "public":
|
| 128 |
+
return {}
|
| 129 |
+
|
| 130 |
+
if resp.status not in self.cacheable_by_default_statuses:
|
| 131 |
+
return {}
|
| 132 |
+
|
| 133 |
+
if "date" not in headers or "last-modified" not in headers:
|
| 134 |
+
return {}
|
| 135 |
+
|
| 136 |
+
time_tuple = parsedate_tz(headers["date"])
|
| 137 |
+
assert time_tuple is not None
|
| 138 |
+
date = calendar.timegm(time_tuple[:6])
|
| 139 |
+
last_modified = parsedate(headers["last-modified"])
|
| 140 |
+
if last_modified is None:
|
| 141 |
+
return {}
|
| 142 |
+
|
| 143 |
+
now = time.time()
|
| 144 |
+
current_age = max(0, now - date)
|
| 145 |
+
delta = date - calendar.timegm(last_modified)
|
| 146 |
+
freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
|
| 147 |
+
if freshness_lifetime <= current_age:
|
| 148 |
+
return {}
|
| 149 |
+
|
| 150 |
+
expires = date + freshness_lifetime
|
| 151 |
+
return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))}
|
| 152 |
+
|
| 153 |
+
def warning(self, resp: HTTPResponse) -> str | None:
|
| 154 |
+
return None
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/py.typed
ADDED
|
File without changes
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/serialize.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
import io
|
| 7 |
+
from typing import IO, TYPE_CHECKING, Any, Mapping, cast
|
| 8 |
+
|
| 9 |
+
from pip._vendor import msgpack
|
| 10 |
+
from pip._vendor.requests.structures import CaseInsensitiveDict
|
| 11 |
+
from pip._vendor.urllib3 import HTTPResponse
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from pip._vendor.requests import PreparedRequest
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class Serializer:
|
| 18 |
+
serde_version = "4"
|
| 19 |
+
|
| 20 |
+
def dumps(
|
| 21 |
+
self,
|
| 22 |
+
request: PreparedRequest,
|
| 23 |
+
response: HTTPResponse,
|
| 24 |
+
body: bytes | None = None,
|
| 25 |
+
) -> bytes:
|
| 26 |
+
response_headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(
|
| 27 |
+
response.headers
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
if body is None:
|
| 31 |
+
# When a body isn't passed in, we'll read the response. We
|
| 32 |
+
# also update the response with a new file handler to be
|
| 33 |
+
# sure it acts as though it was never read.
|
| 34 |
+
body = response.read(decode_content=False)
|
| 35 |
+
response._fp = io.BytesIO(body) # type: ignore[attr-defined]
|
| 36 |
+
response.length_remaining = len(body)
|
| 37 |
+
|
| 38 |
+
data = {
|
| 39 |
+
"response": {
|
| 40 |
+
"body": body, # Empty bytestring if body is stored separately
|
| 41 |
+
"headers": {str(k): str(v) for k, v in response.headers.items()}, # type: ignore[no-untyped-call]
|
| 42 |
+
"status": response.status,
|
| 43 |
+
"version": response.version,
|
| 44 |
+
"reason": str(response.reason),
|
| 45 |
+
"decode_content": response.decode_content,
|
| 46 |
+
}
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
# Construct our vary headers
|
| 50 |
+
data["vary"] = {}
|
| 51 |
+
if "vary" in response_headers:
|
| 52 |
+
varied_headers = response_headers["vary"].split(",")
|
| 53 |
+
for header in varied_headers:
|
| 54 |
+
header = str(header).strip()
|
| 55 |
+
header_value = request.headers.get(header, None)
|
| 56 |
+
if header_value is not None:
|
| 57 |
+
header_value = str(header_value)
|
| 58 |
+
data["vary"][header] = header_value
|
| 59 |
+
|
| 60 |
+
return b",".join([f"cc={self.serde_version}".encode(), self.serialize(data)])
|
| 61 |
+
|
| 62 |
+
def serialize(self, data: dict[str, Any]) -> bytes:
|
| 63 |
+
return cast(bytes, msgpack.dumps(data, use_bin_type=True))
|
| 64 |
+
|
| 65 |
+
def loads(
|
| 66 |
+
self,
|
| 67 |
+
request: PreparedRequest,
|
| 68 |
+
data: bytes,
|
| 69 |
+
body_file: IO[bytes] | None = None,
|
| 70 |
+
) -> HTTPResponse | None:
|
| 71 |
+
# Short circuit if we've been given an empty set of data
|
| 72 |
+
if not data:
|
| 73 |
+
return None
|
| 74 |
+
|
| 75 |
+
# Determine what version of the serializer the data was serialized
|
| 76 |
+
# with
|
| 77 |
+
try:
|
| 78 |
+
ver, data = data.split(b",", 1)
|
| 79 |
+
except ValueError:
|
| 80 |
+
ver = b"cc=0"
|
| 81 |
+
|
| 82 |
+
# Make sure that our "ver" is actually a version and isn't a false
|
| 83 |
+
# positive from a , being in the data stream.
|
| 84 |
+
if ver[:3] != b"cc=":
|
| 85 |
+
data = ver + data
|
| 86 |
+
ver = b"cc=0"
|
| 87 |
+
|
| 88 |
+
# Get the version number out of the cc=N
|
| 89 |
+
verstr = ver.split(b"=", 1)[-1].decode("ascii")
|
| 90 |
+
|
| 91 |
+
# Dispatch to the actual load method for the given version
|
| 92 |
+
try:
|
| 93 |
+
return getattr(self, f"_loads_v{verstr}")(request, data, body_file) # type: ignore[no-any-return]
|
| 94 |
+
|
| 95 |
+
except AttributeError:
|
| 96 |
+
# This is a version we don't have a loads function for, so we'll
|
| 97 |
+
# just treat it as a miss and return None
|
| 98 |
+
return None
|
| 99 |
+
|
| 100 |
+
def prepare_response(
|
| 101 |
+
self,
|
| 102 |
+
request: PreparedRequest,
|
| 103 |
+
cached: Mapping[str, Any],
|
| 104 |
+
body_file: IO[bytes] | None = None,
|
| 105 |
+
) -> HTTPResponse | None:
|
| 106 |
+
"""Verify our vary headers match and construct a real urllib3
|
| 107 |
+
HTTPResponse object.
|
| 108 |
+
"""
|
| 109 |
+
# Special case the '*' Vary value as it means we cannot actually
|
| 110 |
+
# determine if the cached response is suitable for this request.
|
| 111 |
+
# This case is also handled in the controller code when creating
|
| 112 |
+
# a cache entry, but is left here for backwards compatibility.
|
| 113 |
+
if "*" in cached.get("vary", {}):
|
| 114 |
+
return None
|
| 115 |
+
|
| 116 |
+
# Ensure that the Vary headers for the cached response match our
|
| 117 |
+
# request
|
| 118 |
+
for header, value in cached.get("vary", {}).items():
|
| 119 |
+
if request.headers.get(header, None) != value:
|
| 120 |
+
return None
|
| 121 |
+
|
| 122 |
+
body_raw = cached["response"].pop("body")
|
| 123 |
+
|
| 124 |
+
headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(
|
| 125 |
+
data=cached["response"]["headers"]
|
| 126 |
+
)
|
| 127 |
+
if headers.get("transfer-encoding", "") == "chunked":
|
| 128 |
+
headers.pop("transfer-encoding")
|
| 129 |
+
|
| 130 |
+
cached["response"]["headers"] = headers
|
| 131 |
+
|
| 132 |
+
try:
|
| 133 |
+
body: IO[bytes]
|
| 134 |
+
if body_file is None:
|
| 135 |
+
body = io.BytesIO(body_raw)
|
| 136 |
+
else:
|
| 137 |
+
body = body_file
|
| 138 |
+
except TypeError:
|
| 139 |
+
# This can happen if cachecontrol serialized to v1 format (pickle)
|
| 140 |
+
# using Python 2. A Python 2 str(byte string) will be unpickled as
|
| 141 |
+
# a Python 3 str (unicode string), which will cause the above to
|
| 142 |
+
# fail with:
|
| 143 |
+
#
|
| 144 |
+
# TypeError: 'str' does not support the buffer interface
|
| 145 |
+
body = io.BytesIO(body_raw.encode("utf8"))
|
| 146 |
+
|
| 147 |
+
# Discard any `strict` parameter serialized by older version of cachecontrol.
|
| 148 |
+
cached["response"].pop("strict", None)
|
| 149 |
+
|
| 150 |
+
return HTTPResponse(body=body, preload_content=False, **cached["response"])
|
| 151 |
+
|
| 152 |
+
def _loads_v0(
|
| 153 |
+
self,
|
| 154 |
+
request: PreparedRequest,
|
| 155 |
+
data: bytes,
|
| 156 |
+
body_file: IO[bytes] | None = None,
|
| 157 |
+
) -> None:
|
| 158 |
+
# The original legacy cache data. This doesn't contain enough
|
| 159 |
+
# information to construct everything we need, so we'll treat this as
|
| 160 |
+
# a miss.
|
| 161 |
+
return None
|
| 162 |
+
|
| 163 |
+
def _loads_v1(
|
| 164 |
+
self,
|
| 165 |
+
request: PreparedRequest,
|
| 166 |
+
data: bytes,
|
| 167 |
+
body_file: IO[bytes] | None = None,
|
| 168 |
+
) -> HTTPResponse | None:
|
| 169 |
+
# The "v1" pickled cache format. This is no longer supported
|
| 170 |
+
# for security reasons, so we treat it as a miss.
|
| 171 |
+
return None
|
| 172 |
+
|
| 173 |
+
def _loads_v2(
|
| 174 |
+
self,
|
| 175 |
+
request: PreparedRequest,
|
| 176 |
+
data: bytes,
|
| 177 |
+
body_file: IO[bytes] | None = None,
|
| 178 |
+
) -> HTTPResponse | None:
|
| 179 |
+
# The "v2" compressed base64 cache format.
|
| 180 |
+
# This has been removed due to age and poor size/performance
|
| 181 |
+
# characteristics, so we treat it as a miss.
|
| 182 |
+
return None
|
| 183 |
+
|
| 184 |
+
def _loads_v3(
|
| 185 |
+
self,
|
| 186 |
+
request: PreparedRequest,
|
| 187 |
+
data: bytes,
|
| 188 |
+
body_file: IO[bytes] | None = None,
|
| 189 |
+
) -> None:
|
| 190 |
+
# Due to Python 2 encoding issues, it's impossible to know for sure
|
| 191 |
+
# exactly how to load v3 entries, thus we'll treat these as a miss so
|
| 192 |
+
# that they get rewritten out as v4 entries.
|
| 193 |
+
return None
|
| 194 |
+
|
| 195 |
+
def _loads_v4(
|
| 196 |
+
self,
|
| 197 |
+
request: PreparedRequest,
|
| 198 |
+
data: bytes,
|
| 199 |
+
body_file: IO[bytes] | None = None,
|
| 200 |
+
) -> HTTPResponse | None:
|
| 201 |
+
try:
|
| 202 |
+
cached = msgpack.loads(data, raw=False)
|
| 203 |
+
except ValueError:
|
| 204 |
+
return None
|
| 205 |
+
|
| 206 |
+
return self.prepare_response(request, cached, body_file)
|
.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/wrapper.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-FileCopyrightText: 2015 Eric Larson
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
from __future__ import annotations
|
| 5 |
+
|
| 6 |
+
from typing import TYPE_CHECKING, Collection
|
| 7 |
+
|
| 8 |
+
from pip._vendor.cachecontrol.adapter import CacheControlAdapter
|
| 9 |
+
from pip._vendor.cachecontrol.cache import DictCache
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
from pip._vendor import requests
|
| 13 |
+
|
| 14 |
+
from pip._vendor.cachecontrol.cache import BaseCache
|
| 15 |
+
from pip._vendor.cachecontrol.controller import CacheController
|
| 16 |
+
from pip._vendor.cachecontrol.heuristics import BaseHeuristic
|
| 17 |
+
from pip._vendor.cachecontrol.serialize import Serializer
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def CacheControl(
|
| 21 |
+
sess: requests.Session,
|
| 22 |
+
cache: BaseCache | None = None,
|
| 23 |
+
cache_etags: bool = True,
|
| 24 |
+
serializer: Serializer | None = None,
|
| 25 |
+
heuristic: BaseHeuristic | None = None,
|
| 26 |
+
controller_class: type[CacheController] | None = None,
|
| 27 |
+
adapter_class: type[CacheControlAdapter] | None = None,
|
| 28 |
+
cacheable_methods: Collection[str] | None = None,
|
| 29 |
+
) -> requests.Session:
|
| 30 |
+
cache = DictCache() if cache is None else cache
|
| 31 |
+
adapter_class = adapter_class or CacheControlAdapter
|
| 32 |
+
adapter = adapter_class(
|
| 33 |
+
cache,
|
| 34 |
+
cache_etags=cache_etags,
|
| 35 |
+
serializer=serializer,
|
| 36 |
+
heuristic=heuristic,
|
| 37 |
+
controller_class=controller_class,
|
| 38 |
+
cacheable_methods=cacheable_methods,
|
| 39 |
+
)
|
| 40 |
+
sess.mount("http://", adapter)
|
| 41 |
+
sess.mount("https://", adapter)
|
| 42 |
+
|
| 43 |
+
return sess
|
.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (334 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-311.pyc
ADDED
|
Binary file (3.36 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/__init__.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Pygments
|
| 3 |
+
~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Pygments is a syntax highlighting package written in Python.
|
| 6 |
+
|
| 7 |
+
It is a generic syntax highlighter for general use in all kinds of software
|
| 8 |
+
such as forum systems, wikis or other applications that need to prettify
|
| 9 |
+
source code. Highlights are:
|
| 10 |
+
|
| 11 |
+
* a wide range of common languages and markup formats is supported
|
| 12 |
+
* special attention is paid to details, increasing quality by a fair amount
|
| 13 |
+
* support for new languages and formats are added easily
|
| 14 |
+
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
|
| 15 |
+
formats that PIL supports, and ANSI sequences
|
| 16 |
+
* it is usable as a command-line tool and as a library
|
| 17 |
+
* ... and it highlights even Brainfuck!
|
| 18 |
+
|
| 19 |
+
The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
|
| 20 |
+
|
| 21 |
+
.. _Pygments master branch:
|
| 22 |
+
https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
|
| 23 |
+
|
| 24 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 25 |
+
:license: BSD, see LICENSE for details.
|
| 26 |
+
"""
|
| 27 |
+
from io import StringIO, BytesIO
|
| 28 |
+
|
| 29 |
+
__version__ = '2.15.1'
|
| 30 |
+
__docformat__ = 'restructuredtext'
|
| 31 |
+
|
| 32 |
+
__all__ = ['lex', 'format', 'highlight']
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def lex(code, lexer):
|
| 36 |
+
"""
|
| 37 |
+
Lex `code` with the `lexer` (must be a `Lexer` instance)
|
| 38 |
+
and return an iterable of tokens. Currently, this only calls
|
| 39 |
+
`lexer.get_tokens()`.
|
| 40 |
+
"""
|
| 41 |
+
try:
|
| 42 |
+
return lexer.get_tokens(code)
|
| 43 |
+
except TypeError:
|
| 44 |
+
# Heuristic to catch a common mistake.
|
| 45 |
+
from pip._vendor.pygments.lexer import RegexLexer
|
| 46 |
+
if isinstance(lexer, type) and issubclass(lexer, RegexLexer):
|
| 47 |
+
raise TypeError('lex() argument must be a lexer instance, '
|
| 48 |
+
'not a class')
|
| 49 |
+
raise
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
|
| 53 |
+
"""
|
| 54 |
+
Format ``tokens`` (an iterable of tokens) with the formatter ``formatter``
|
| 55 |
+
(a `Formatter` instance).
|
| 56 |
+
|
| 57 |
+
If ``outfile`` is given and a valid file object (an object with a
|
| 58 |
+
``write`` method), the result will be written to it, otherwise it
|
| 59 |
+
is returned as a string.
|
| 60 |
+
"""
|
| 61 |
+
try:
|
| 62 |
+
if not outfile:
|
| 63 |
+
realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
|
| 64 |
+
formatter.format(tokens, realoutfile)
|
| 65 |
+
return realoutfile.getvalue()
|
| 66 |
+
else:
|
| 67 |
+
formatter.format(tokens, outfile)
|
| 68 |
+
except TypeError:
|
| 69 |
+
# Heuristic to catch a common mistake.
|
| 70 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 71 |
+
if isinstance(formatter, type) and issubclass(formatter, Formatter):
|
| 72 |
+
raise TypeError('format() argument must be a formatter instance, '
|
| 73 |
+
'not a class')
|
| 74 |
+
raise
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def highlight(code, lexer, formatter, outfile=None):
|
| 78 |
+
"""
|
| 79 |
+
This is the most high-level highlighting function. It combines `lex` and
|
| 80 |
+
`format` in one function.
|
| 81 |
+
"""
|
| 82 |
+
return format(lex(code, lexer), formatter, outfile)
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/__main__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.__main__
|
| 3 |
+
~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Main entry point for ``python -m pygments``.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import sys
|
| 12 |
+
from pip._vendor.pygments.cmdline import main
|
| 13 |
+
|
| 14 |
+
try:
|
| 15 |
+
sys.exit(main(sys.argv))
|
| 16 |
+
except KeyboardInterrupt:
|
| 17 |
+
sys.exit(1)
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/cmdline.py
ADDED
|
@@ -0,0 +1,668 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.cmdline
|
| 3 |
+
~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Command line interface.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import os
|
| 12 |
+
import sys
|
| 13 |
+
import shutil
|
| 14 |
+
import argparse
|
| 15 |
+
from textwrap import dedent
|
| 16 |
+
|
| 17 |
+
from pip._vendor.pygments import __version__, highlight
|
| 18 |
+
from pip._vendor.pygments.util import ClassNotFound, OptionError, docstring_headline, \
|
| 19 |
+
guess_decode, guess_decode_from_terminal, terminal_encoding, \
|
| 20 |
+
UnclosingTextIOWrapper
|
| 21 |
+
from pip._vendor.pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
|
| 22 |
+
load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
|
| 23 |
+
from pip._vendor.pygments.lexers.special import TextLexer
|
| 24 |
+
from pip._vendor.pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
|
| 25 |
+
from pip._vendor.pygments.formatters import get_all_formatters, get_formatter_by_name, \
|
| 26 |
+
load_formatter_from_file, get_formatter_for_filename, find_formatter_class
|
| 27 |
+
from pip._vendor.pygments.formatters.terminal import TerminalFormatter
|
| 28 |
+
from pip._vendor.pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter
|
| 29 |
+
from pip._vendor.pygments.filters import get_all_filters, find_filter_class
|
| 30 |
+
from pip._vendor.pygments.styles import get_all_styles, get_style_by_name
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _parse_options(o_strs):
|
| 34 |
+
opts = {}
|
| 35 |
+
if not o_strs:
|
| 36 |
+
return opts
|
| 37 |
+
for o_str in o_strs:
|
| 38 |
+
if not o_str.strip():
|
| 39 |
+
continue
|
| 40 |
+
o_args = o_str.split(',')
|
| 41 |
+
for o_arg in o_args:
|
| 42 |
+
o_arg = o_arg.strip()
|
| 43 |
+
try:
|
| 44 |
+
o_key, o_val = o_arg.split('=', 1)
|
| 45 |
+
o_key = o_key.strip()
|
| 46 |
+
o_val = o_val.strip()
|
| 47 |
+
except ValueError:
|
| 48 |
+
opts[o_arg] = True
|
| 49 |
+
else:
|
| 50 |
+
opts[o_key] = o_val
|
| 51 |
+
return opts
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _parse_filters(f_strs):
|
| 55 |
+
filters = []
|
| 56 |
+
if not f_strs:
|
| 57 |
+
return filters
|
| 58 |
+
for f_str in f_strs:
|
| 59 |
+
if ':' in f_str:
|
| 60 |
+
fname, fopts = f_str.split(':', 1)
|
| 61 |
+
filters.append((fname, _parse_options([fopts])))
|
| 62 |
+
else:
|
| 63 |
+
filters.append((f_str, {}))
|
| 64 |
+
return filters
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _print_help(what, name):
|
| 68 |
+
try:
|
| 69 |
+
if what == 'lexer':
|
| 70 |
+
cls = get_lexer_by_name(name)
|
| 71 |
+
print("Help on the %s lexer:" % cls.name)
|
| 72 |
+
print(dedent(cls.__doc__))
|
| 73 |
+
elif what == 'formatter':
|
| 74 |
+
cls = find_formatter_class(name)
|
| 75 |
+
print("Help on the %s formatter:" % cls.name)
|
| 76 |
+
print(dedent(cls.__doc__))
|
| 77 |
+
elif what == 'filter':
|
| 78 |
+
cls = find_filter_class(name)
|
| 79 |
+
print("Help on the %s filter:" % name)
|
| 80 |
+
print(dedent(cls.__doc__))
|
| 81 |
+
return 0
|
| 82 |
+
except (AttributeError, ValueError):
|
| 83 |
+
print("%s not found!" % what, file=sys.stderr)
|
| 84 |
+
return 1
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _print_list(what):
|
| 88 |
+
if what == 'lexer':
|
| 89 |
+
print()
|
| 90 |
+
print("Lexers:")
|
| 91 |
+
print("~~~~~~~")
|
| 92 |
+
|
| 93 |
+
info = []
|
| 94 |
+
for fullname, names, exts, _ in get_all_lexers():
|
| 95 |
+
tup = (', '.join(names)+':', fullname,
|
| 96 |
+
exts and '(filenames ' + ', '.join(exts) + ')' or '')
|
| 97 |
+
info.append(tup)
|
| 98 |
+
info.sort()
|
| 99 |
+
for i in info:
|
| 100 |
+
print(('* %s\n %s %s') % i)
|
| 101 |
+
|
| 102 |
+
elif what == 'formatter':
|
| 103 |
+
print()
|
| 104 |
+
print("Formatters:")
|
| 105 |
+
print("~~~~~~~~~~~")
|
| 106 |
+
|
| 107 |
+
info = []
|
| 108 |
+
for cls in get_all_formatters():
|
| 109 |
+
doc = docstring_headline(cls)
|
| 110 |
+
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
|
| 111 |
+
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
|
| 112 |
+
info.append(tup)
|
| 113 |
+
info.sort()
|
| 114 |
+
for i in info:
|
| 115 |
+
print(('* %s\n %s %s') % i)
|
| 116 |
+
|
| 117 |
+
elif what == 'filter':
|
| 118 |
+
print()
|
| 119 |
+
print("Filters:")
|
| 120 |
+
print("~~~~~~~~")
|
| 121 |
+
|
| 122 |
+
for name in get_all_filters():
|
| 123 |
+
cls = find_filter_class(name)
|
| 124 |
+
print("* " + name + ':')
|
| 125 |
+
print(" %s" % docstring_headline(cls))
|
| 126 |
+
|
| 127 |
+
elif what == 'style':
|
| 128 |
+
print()
|
| 129 |
+
print("Styles:")
|
| 130 |
+
print("~~~~~~~")
|
| 131 |
+
|
| 132 |
+
for name in get_all_styles():
|
| 133 |
+
cls = get_style_by_name(name)
|
| 134 |
+
print("* " + name + ':')
|
| 135 |
+
print(" %s" % docstring_headline(cls))
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def _print_list_as_json(requested_items):
|
| 139 |
+
import json
|
| 140 |
+
result = {}
|
| 141 |
+
if 'lexer' in requested_items:
|
| 142 |
+
info = {}
|
| 143 |
+
for fullname, names, filenames, mimetypes in get_all_lexers():
|
| 144 |
+
info[fullname] = {
|
| 145 |
+
'aliases': names,
|
| 146 |
+
'filenames': filenames,
|
| 147 |
+
'mimetypes': mimetypes
|
| 148 |
+
}
|
| 149 |
+
result['lexers'] = info
|
| 150 |
+
|
| 151 |
+
if 'formatter' in requested_items:
|
| 152 |
+
info = {}
|
| 153 |
+
for cls in get_all_formatters():
|
| 154 |
+
doc = docstring_headline(cls)
|
| 155 |
+
info[cls.name] = {
|
| 156 |
+
'aliases': cls.aliases,
|
| 157 |
+
'filenames': cls.filenames,
|
| 158 |
+
'doc': doc
|
| 159 |
+
}
|
| 160 |
+
result['formatters'] = info
|
| 161 |
+
|
| 162 |
+
if 'filter' in requested_items:
|
| 163 |
+
info = {}
|
| 164 |
+
for name in get_all_filters():
|
| 165 |
+
cls = find_filter_class(name)
|
| 166 |
+
info[name] = {
|
| 167 |
+
'doc': docstring_headline(cls)
|
| 168 |
+
}
|
| 169 |
+
result['filters'] = info
|
| 170 |
+
|
| 171 |
+
if 'style' in requested_items:
|
| 172 |
+
info = {}
|
| 173 |
+
for name in get_all_styles():
|
| 174 |
+
cls = get_style_by_name(name)
|
| 175 |
+
info[name] = {
|
| 176 |
+
'doc': docstring_headline(cls)
|
| 177 |
+
}
|
| 178 |
+
result['styles'] = info
|
| 179 |
+
|
| 180 |
+
json.dump(result, sys.stdout)
|
| 181 |
+
|
| 182 |
+
def main_inner(parser, argns):
|
| 183 |
+
if argns.help:
|
| 184 |
+
parser.print_help()
|
| 185 |
+
return 0
|
| 186 |
+
|
| 187 |
+
if argns.V:
|
| 188 |
+
print('Pygments version %s, (c) 2006-2023 by Georg Brandl, Matthäus '
|
| 189 |
+
'Chajdas and contributors.' % __version__)
|
| 190 |
+
return 0
|
| 191 |
+
|
| 192 |
+
def is_only_option(opt):
|
| 193 |
+
return not any(v for (k, v) in vars(argns).items() if k != opt)
|
| 194 |
+
|
| 195 |
+
# handle ``pygmentize -L``
|
| 196 |
+
if argns.L is not None:
|
| 197 |
+
arg_set = set()
|
| 198 |
+
for k, v in vars(argns).items():
|
| 199 |
+
if v:
|
| 200 |
+
arg_set.add(k)
|
| 201 |
+
|
| 202 |
+
arg_set.discard('L')
|
| 203 |
+
arg_set.discard('json')
|
| 204 |
+
|
| 205 |
+
if arg_set:
|
| 206 |
+
parser.print_help(sys.stderr)
|
| 207 |
+
return 2
|
| 208 |
+
|
| 209 |
+
# print version
|
| 210 |
+
if not argns.json:
|
| 211 |
+
main(['', '-V'])
|
| 212 |
+
allowed_types = {'lexer', 'formatter', 'filter', 'style'}
|
| 213 |
+
largs = [arg.rstrip('s') for arg in argns.L]
|
| 214 |
+
if any(arg not in allowed_types for arg in largs):
|
| 215 |
+
parser.print_help(sys.stderr)
|
| 216 |
+
return 0
|
| 217 |
+
if not largs:
|
| 218 |
+
largs = allowed_types
|
| 219 |
+
if not argns.json:
|
| 220 |
+
for arg in largs:
|
| 221 |
+
_print_list(arg)
|
| 222 |
+
else:
|
| 223 |
+
_print_list_as_json(largs)
|
| 224 |
+
return 0
|
| 225 |
+
|
| 226 |
+
# handle ``pygmentize -H``
|
| 227 |
+
if argns.H:
|
| 228 |
+
if not is_only_option('H'):
|
| 229 |
+
parser.print_help(sys.stderr)
|
| 230 |
+
return 2
|
| 231 |
+
what, name = argns.H
|
| 232 |
+
if what not in ('lexer', 'formatter', 'filter'):
|
| 233 |
+
parser.print_help(sys.stderr)
|
| 234 |
+
return 2
|
| 235 |
+
return _print_help(what, name)
|
| 236 |
+
|
| 237 |
+
# parse -O options
|
| 238 |
+
parsed_opts = _parse_options(argns.O or [])
|
| 239 |
+
|
| 240 |
+
# parse -P options
|
| 241 |
+
for p_opt in argns.P or []:
|
| 242 |
+
try:
|
| 243 |
+
name, value = p_opt.split('=', 1)
|
| 244 |
+
except ValueError:
|
| 245 |
+
parsed_opts[p_opt] = True
|
| 246 |
+
else:
|
| 247 |
+
parsed_opts[name] = value
|
| 248 |
+
|
| 249 |
+
# encodings
|
| 250 |
+
inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
|
| 251 |
+
outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
|
| 252 |
+
|
| 253 |
+
# handle ``pygmentize -N``
|
| 254 |
+
if argns.N:
|
| 255 |
+
lexer = find_lexer_class_for_filename(argns.N)
|
| 256 |
+
if lexer is None:
|
| 257 |
+
lexer = TextLexer
|
| 258 |
+
|
| 259 |
+
print(lexer.aliases[0])
|
| 260 |
+
return 0
|
| 261 |
+
|
| 262 |
+
# handle ``pygmentize -C``
|
| 263 |
+
if argns.C:
|
| 264 |
+
inp = sys.stdin.buffer.read()
|
| 265 |
+
try:
|
| 266 |
+
lexer = guess_lexer(inp, inencoding=inencoding)
|
| 267 |
+
except ClassNotFound:
|
| 268 |
+
lexer = TextLexer
|
| 269 |
+
|
| 270 |
+
print(lexer.aliases[0])
|
| 271 |
+
return 0
|
| 272 |
+
|
| 273 |
+
# handle ``pygmentize -S``
|
| 274 |
+
S_opt = argns.S
|
| 275 |
+
a_opt = argns.a
|
| 276 |
+
if S_opt is not None:
|
| 277 |
+
f_opt = argns.f
|
| 278 |
+
if not f_opt:
|
| 279 |
+
parser.print_help(sys.stderr)
|
| 280 |
+
return 2
|
| 281 |
+
if argns.l or argns.INPUTFILE:
|
| 282 |
+
parser.print_help(sys.stderr)
|
| 283 |
+
return 2
|
| 284 |
+
|
| 285 |
+
try:
|
| 286 |
+
parsed_opts['style'] = S_opt
|
| 287 |
+
fmter = get_formatter_by_name(f_opt, **parsed_opts)
|
| 288 |
+
except ClassNotFound as err:
|
| 289 |
+
print(err, file=sys.stderr)
|
| 290 |
+
return 1
|
| 291 |
+
|
| 292 |
+
print(fmter.get_style_defs(a_opt or ''))
|
| 293 |
+
return 0
|
| 294 |
+
|
| 295 |
+
# if no -S is given, -a is not allowed
|
| 296 |
+
if argns.a is not None:
|
| 297 |
+
parser.print_help(sys.stderr)
|
| 298 |
+
return 2
|
| 299 |
+
|
| 300 |
+
# parse -F options
|
| 301 |
+
F_opts = _parse_filters(argns.F or [])
|
| 302 |
+
|
| 303 |
+
# -x: allow custom (eXternal) lexers and formatters
|
| 304 |
+
allow_custom_lexer_formatter = bool(argns.x)
|
| 305 |
+
|
| 306 |
+
# select lexer
|
| 307 |
+
lexer = None
|
| 308 |
+
|
| 309 |
+
# given by name?
|
| 310 |
+
lexername = argns.l
|
| 311 |
+
if lexername:
|
| 312 |
+
# custom lexer, located relative to user's cwd
|
| 313 |
+
if allow_custom_lexer_formatter and '.py' in lexername:
|
| 314 |
+
try:
|
| 315 |
+
filename = None
|
| 316 |
+
name = None
|
| 317 |
+
if ':' in lexername:
|
| 318 |
+
filename, name = lexername.rsplit(':', 1)
|
| 319 |
+
|
| 320 |
+
if '.py' in name:
|
| 321 |
+
# This can happen on Windows: If the lexername is
|
| 322 |
+
# C:\lexer.py -- return to normal load path in that case
|
| 323 |
+
name = None
|
| 324 |
+
|
| 325 |
+
if filename and name:
|
| 326 |
+
lexer = load_lexer_from_file(filename, name,
|
| 327 |
+
**parsed_opts)
|
| 328 |
+
else:
|
| 329 |
+
lexer = load_lexer_from_file(lexername, **parsed_opts)
|
| 330 |
+
except ClassNotFound as err:
|
| 331 |
+
print('Error:', err, file=sys.stderr)
|
| 332 |
+
return 1
|
| 333 |
+
else:
|
| 334 |
+
try:
|
| 335 |
+
lexer = get_lexer_by_name(lexername, **parsed_opts)
|
| 336 |
+
except (OptionError, ClassNotFound) as err:
|
| 337 |
+
print('Error:', err, file=sys.stderr)
|
| 338 |
+
return 1
|
| 339 |
+
|
| 340 |
+
# read input code
|
| 341 |
+
code = None
|
| 342 |
+
|
| 343 |
+
if argns.INPUTFILE:
|
| 344 |
+
if argns.s:
|
| 345 |
+
print('Error: -s option not usable when input file specified',
|
| 346 |
+
file=sys.stderr)
|
| 347 |
+
return 2
|
| 348 |
+
|
| 349 |
+
infn = argns.INPUTFILE
|
| 350 |
+
try:
|
| 351 |
+
with open(infn, 'rb') as infp:
|
| 352 |
+
code = infp.read()
|
| 353 |
+
except Exception as err:
|
| 354 |
+
print('Error: cannot read infile:', err, file=sys.stderr)
|
| 355 |
+
return 1
|
| 356 |
+
if not inencoding:
|
| 357 |
+
code, inencoding = guess_decode(code)
|
| 358 |
+
|
| 359 |
+
# do we have to guess the lexer?
|
| 360 |
+
if not lexer:
|
| 361 |
+
try:
|
| 362 |
+
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
|
| 363 |
+
except ClassNotFound as err:
|
| 364 |
+
if argns.g:
|
| 365 |
+
try:
|
| 366 |
+
lexer = guess_lexer(code, **parsed_opts)
|
| 367 |
+
except ClassNotFound:
|
| 368 |
+
lexer = TextLexer(**parsed_opts)
|
| 369 |
+
else:
|
| 370 |
+
print('Error:', err, file=sys.stderr)
|
| 371 |
+
return 1
|
| 372 |
+
except OptionError as err:
|
| 373 |
+
print('Error:', err, file=sys.stderr)
|
| 374 |
+
return 1
|
| 375 |
+
|
| 376 |
+
elif not argns.s: # treat stdin as full file (-s support is later)
|
| 377 |
+
# read code from terminal, always in binary mode since we want to
|
| 378 |
+
# decode ourselves and be tolerant with it
|
| 379 |
+
code = sys.stdin.buffer.read() # use .buffer to get a binary stream
|
| 380 |
+
if not inencoding:
|
| 381 |
+
code, inencoding = guess_decode_from_terminal(code, sys.stdin)
|
| 382 |
+
# else the lexer will do the decoding
|
| 383 |
+
if not lexer:
|
| 384 |
+
try:
|
| 385 |
+
lexer = guess_lexer(code, **parsed_opts)
|
| 386 |
+
except ClassNotFound:
|
| 387 |
+
lexer = TextLexer(**parsed_opts)
|
| 388 |
+
|
| 389 |
+
else: # -s option needs a lexer with -l
|
| 390 |
+
if not lexer:
|
| 391 |
+
print('Error: when using -s a lexer has to be selected with -l',
|
| 392 |
+
file=sys.stderr)
|
| 393 |
+
return 2
|
| 394 |
+
|
| 395 |
+
# process filters
|
| 396 |
+
for fname, fopts in F_opts:
|
| 397 |
+
try:
|
| 398 |
+
lexer.add_filter(fname, **fopts)
|
| 399 |
+
except ClassNotFound as err:
|
| 400 |
+
print('Error:', err, file=sys.stderr)
|
| 401 |
+
return 1
|
| 402 |
+
|
| 403 |
+
# select formatter
|
| 404 |
+
outfn = argns.o
|
| 405 |
+
fmter = argns.f
|
| 406 |
+
if fmter:
|
| 407 |
+
# custom formatter, located relative to user's cwd
|
| 408 |
+
if allow_custom_lexer_formatter and '.py' in fmter:
|
| 409 |
+
try:
|
| 410 |
+
filename = None
|
| 411 |
+
name = None
|
| 412 |
+
if ':' in fmter:
|
| 413 |
+
# Same logic as above for custom lexer
|
| 414 |
+
filename, name = fmter.rsplit(':', 1)
|
| 415 |
+
|
| 416 |
+
if '.py' in name:
|
| 417 |
+
name = None
|
| 418 |
+
|
| 419 |
+
if filename and name:
|
| 420 |
+
fmter = load_formatter_from_file(filename, name,
|
| 421 |
+
**parsed_opts)
|
| 422 |
+
else:
|
| 423 |
+
fmter = load_formatter_from_file(fmter, **parsed_opts)
|
| 424 |
+
except ClassNotFound as err:
|
| 425 |
+
print('Error:', err, file=sys.stderr)
|
| 426 |
+
return 1
|
| 427 |
+
else:
|
| 428 |
+
try:
|
| 429 |
+
fmter = get_formatter_by_name(fmter, **parsed_opts)
|
| 430 |
+
except (OptionError, ClassNotFound) as err:
|
| 431 |
+
print('Error:', err, file=sys.stderr)
|
| 432 |
+
return 1
|
| 433 |
+
|
| 434 |
+
if outfn:
|
| 435 |
+
if not fmter:
|
| 436 |
+
try:
|
| 437 |
+
fmter = get_formatter_for_filename(outfn, **parsed_opts)
|
| 438 |
+
except (OptionError, ClassNotFound) as err:
|
| 439 |
+
print('Error:', err, file=sys.stderr)
|
| 440 |
+
return 1
|
| 441 |
+
try:
|
| 442 |
+
outfile = open(outfn, 'wb')
|
| 443 |
+
except Exception as err:
|
| 444 |
+
print('Error: cannot open outfile:', err, file=sys.stderr)
|
| 445 |
+
return 1
|
| 446 |
+
else:
|
| 447 |
+
if not fmter:
|
| 448 |
+
if os.environ.get('COLORTERM','') in ('truecolor', '24bit'):
|
| 449 |
+
fmter = TerminalTrueColorFormatter(**parsed_opts)
|
| 450 |
+
elif '256' in os.environ.get('TERM', ''):
|
| 451 |
+
fmter = Terminal256Formatter(**parsed_opts)
|
| 452 |
+
else:
|
| 453 |
+
fmter = TerminalFormatter(**parsed_opts)
|
| 454 |
+
outfile = sys.stdout.buffer
|
| 455 |
+
|
| 456 |
+
# determine output encoding if not explicitly selected
|
| 457 |
+
if not outencoding:
|
| 458 |
+
if outfn:
|
| 459 |
+
# output file? use lexer encoding for now (can still be None)
|
| 460 |
+
fmter.encoding = inencoding
|
| 461 |
+
else:
|
| 462 |
+
# else use terminal encoding
|
| 463 |
+
fmter.encoding = terminal_encoding(sys.stdout)
|
| 464 |
+
|
| 465 |
+
# provide coloring under Windows, if possible
|
| 466 |
+
if not outfn and sys.platform in ('win32', 'cygwin') and \
|
| 467 |
+
fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
|
| 468 |
+
# unfortunately colorama doesn't support binary streams on Py3
|
| 469 |
+
outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
|
| 470 |
+
fmter.encoding = None
|
| 471 |
+
try:
|
| 472 |
+
import pip._vendor.colorama.initialise as colorama_initialise
|
| 473 |
+
except ImportError:
|
| 474 |
+
pass
|
| 475 |
+
else:
|
| 476 |
+
outfile = colorama_initialise.wrap_stream(
|
| 477 |
+
outfile, convert=None, strip=None, autoreset=False, wrap=True)
|
| 478 |
+
|
| 479 |
+
# When using the LaTeX formatter and the option `escapeinside` is
|
| 480 |
+
# specified, we need a special lexer which collects escaped text
|
| 481 |
+
# before running the chosen language lexer.
|
| 482 |
+
escapeinside = parsed_opts.get('escapeinside', '')
|
| 483 |
+
if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
|
| 484 |
+
left = escapeinside[0]
|
| 485 |
+
right = escapeinside[1]
|
| 486 |
+
lexer = LatexEmbeddedLexer(left, right, lexer)
|
| 487 |
+
|
| 488 |
+
# ... and do it!
|
| 489 |
+
if not argns.s:
|
| 490 |
+
# process whole input as per normal...
|
| 491 |
+
try:
|
| 492 |
+
highlight(code, lexer, fmter, outfile)
|
| 493 |
+
finally:
|
| 494 |
+
if outfn:
|
| 495 |
+
outfile.close()
|
| 496 |
+
return 0
|
| 497 |
+
else:
|
| 498 |
+
# line by line processing of stdin (eg: for 'tail -f')...
|
| 499 |
+
try:
|
| 500 |
+
while 1:
|
| 501 |
+
line = sys.stdin.buffer.readline()
|
| 502 |
+
if not line:
|
| 503 |
+
break
|
| 504 |
+
if not inencoding:
|
| 505 |
+
line = guess_decode_from_terminal(line, sys.stdin)[0]
|
| 506 |
+
highlight(line, lexer, fmter, outfile)
|
| 507 |
+
if hasattr(outfile, 'flush'):
|
| 508 |
+
outfile.flush()
|
| 509 |
+
return 0
|
| 510 |
+
except KeyboardInterrupt: # pragma: no cover
|
| 511 |
+
return 0
|
| 512 |
+
finally:
|
| 513 |
+
if outfn:
|
| 514 |
+
outfile.close()
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
class HelpFormatter(argparse.HelpFormatter):
|
| 518 |
+
def __init__(self, prog, indent_increment=2, max_help_position=16, width=None):
|
| 519 |
+
if width is None:
|
| 520 |
+
try:
|
| 521 |
+
width = shutil.get_terminal_size().columns - 2
|
| 522 |
+
except Exception:
|
| 523 |
+
pass
|
| 524 |
+
argparse.HelpFormatter.__init__(self, prog, indent_increment,
|
| 525 |
+
max_help_position, width)
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def main(args=sys.argv):
|
| 529 |
+
"""
|
| 530 |
+
Main command line entry point.
|
| 531 |
+
"""
|
| 532 |
+
desc = "Highlight an input file and write the result to an output file."
|
| 533 |
+
parser = argparse.ArgumentParser(description=desc, add_help=False,
|
| 534 |
+
formatter_class=HelpFormatter)
|
| 535 |
+
|
| 536 |
+
operation = parser.add_argument_group('Main operation')
|
| 537 |
+
lexersel = operation.add_mutually_exclusive_group()
|
| 538 |
+
lexersel.add_argument(
|
| 539 |
+
'-l', metavar='LEXER',
|
| 540 |
+
help='Specify the lexer to use. (Query names with -L.) If not '
|
| 541 |
+
'given and -g is not present, the lexer is guessed from the filename.')
|
| 542 |
+
lexersel.add_argument(
|
| 543 |
+
'-g', action='store_true',
|
| 544 |
+
help='Guess the lexer from the file contents, or pass through '
|
| 545 |
+
'as plain text if nothing can be guessed.')
|
| 546 |
+
operation.add_argument(
|
| 547 |
+
'-F', metavar='FILTER[:options]', action='append',
|
| 548 |
+
help='Add a filter to the token stream. (Query names with -L.) '
|
| 549 |
+
'Filter options are given after a colon if necessary.')
|
| 550 |
+
operation.add_argument(
|
| 551 |
+
'-f', metavar='FORMATTER',
|
| 552 |
+
help='Specify the formatter to use. (Query names with -L.) '
|
| 553 |
+
'If not given, the formatter is guessed from the output filename, '
|
| 554 |
+
'and defaults to the terminal formatter if the output is to the '
|
| 555 |
+
'terminal or an unknown file extension.')
|
| 556 |
+
operation.add_argument(
|
| 557 |
+
'-O', metavar='OPTION=value[,OPTION=value,...]', action='append',
|
| 558 |
+
help='Give options to the lexer and formatter as a comma-separated '
|
| 559 |
+
'list of key-value pairs. '
|
| 560 |
+
'Example: `-O bg=light,python=cool`.')
|
| 561 |
+
operation.add_argument(
|
| 562 |
+
'-P', metavar='OPTION=value', action='append',
|
| 563 |
+
help='Give a single option to the lexer and formatter - with this '
|
| 564 |
+
'you can pass options whose value contains commas and equal signs. '
|
| 565 |
+
'Example: `-P "heading=Pygments, the Python highlighter"`.')
|
| 566 |
+
operation.add_argument(
|
| 567 |
+
'-o', metavar='OUTPUTFILE',
|
| 568 |
+
help='Where to write the output. Defaults to standard output.')
|
| 569 |
+
|
| 570 |
+
operation.add_argument(
|
| 571 |
+
'INPUTFILE', nargs='?',
|
| 572 |
+
help='Where to read the input. Defaults to standard input.')
|
| 573 |
+
|
| 574 |
+
flags = parser.add_argument_group('Operation flags')
|
| 575 |
+
flags.add_argument(
|
| 576 |
+
'-v', action='store_true',
|
| 577 |
+
help='Print a detailed traceback on unhandled exceptions, which '
|
| 578 |
+
'is useful for debugging and bug reports.')
|
| 579 |
+
flags.add_argument(
|
| 580 |
+
'-s', action='store_true',
|
| 581 |
+
help='Process lines one at a time until EOF, rather than waiting to '
|
| 582 |
+
'process the entire file. This only works for stdin, only for lexers '
|
| 583 |
+
'with no line-spanning constructs, and is intended for streaming '
|
| 584 |
+
'input such as you get from `tail -f`. '
|
| 585 |
+
'Example usage: `tail -f sql.log | pygmentize -s -l sql`.')
|
| 586 |
+
flags.add_argument(
|
| 587 |
+
'-x', action='store_true',
|
| 588 |
+
help='Allow custom lexers and formatters to be loaded from a .py file '
|
| 589 |
+
'relative to the current working directory. For example, '
|
| 590 |
+
'`-l ./customlexer.py -x`. By default, this option expects a file '
|
| 591 |
+
'with a class named CustomLexer or CustomFormatter; you can also '
|
| 592 |
+
'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). '
|
| 593 |
+
'Users should be very careful not to use this option with untrusted '
|
| 594 |
+
'files, because it will import and run them.')
|
| 595 |
+
flags.add_argument('--json', help='Output as JSON. This can '
|
| 596 |
+
'be only used in conjunction with -L.',
|
| 597 |
+
default=False,
|
| 598 |
+
action='store_true')
|
| 599 |
+
|
| 600 |
+
special_modes_group = parser.add_argument_group(
|
| 601 |
+
'Special modes - do not do any highlighting')
|
| 602 |
+
special_modes = special_modes_group.add_mutually_exclusive_group()
|
| 603 |
+
special_modes.add_argument(
|
| 604 |
+
'-S', metavar='STYLE -f formatter',
|
| 605 |
+
help='Print style definitions for STYLE for a formatter '
|
| 606 |
+
'given with -f. The argument given by -a is formatter '
|
| 607 |
+
'dependent.')
|
| 608 |
+
special_modes.add_argument(
|
| 609 |
+
'-L', nargs='*', metavar='WHAT',
|
| 610 |
+
help='List lexers, formatters, styles or filters -- '
|
| 611 |
+
'give additional arguments for the thing(s) you want to list '
|
| 612 |
+
'(e.g. "styles"), or omit them to list everything.')
|
| 613 |
+
special_modes.add_argument(
|
| 614 |
+
'-N', metavar='FILENAME',
|
| 615 |
+
help='Guess and print out a lexer name based solely on the given '
|
| 616 |
+
'filename. Does not take input or highlight anything. If no specific '
|
| 617 |
+
'lexer can be determined, "text" is printed.')
|
| 618 |
+
special_modes.add_argument(
|
| 619 |
+
'-C', action='store_true',
|
| 620 |
+
help='Like -N, but print out a lexer name based solely on '
|
| 621 |
+
'a given content from standard input.')
|
| 622 |
+
special_modes.add_argument(
|
| 623 |
+
'-H', action='store', nargs=2, metavar=('NAME', 'TYPE'),
|
| 624 |
+
help='Print detailed help for the object <name> of type <type>, '
|
| 625 |
+
'where <type> is one of "lexer", "formatter" or "filter".')
|
| 626 |
+
special_modes.add_argument(
|
| 627 |
+
'-V', action='store_true',
|
| 628 |
+
help='Print the package version.')
|
| 629 |
+
special_modes.add_argument(
|
| 630 |
+
'-h', '--help', action='store_true',
|
| 631 |
+
help='Print this help.')
|
| 632 |
+
special_modes_group.add_argument(
|
| 633 |
+
'-a', metavar='ARG',
|
| 634 |
+
help='Formatter-specific additional argument for the -S (print '
|
| 635 |
+
'style sheet) mode.')
|
| 636 |
+
|
| 637 |
+
argns = parser.parse_args(args[1:])
|
| 638 |
+
|
| 639 |
+
try:
|
| 640 |
+
return main_inner(parser, argns)
|
| 641 |
+
except BrokenPipeError:
|
| 642 |
+
# someone closed our stdout, e.g. by quitting a pager.
|
| 643 |
+
return 0
|
| 644 |
+
except Exception:
|
| 645 |
+
if argns.v:
|
| 646 |
+
print(file=sys.stderr)
|
| 647 |
+
print('*' * 65, file=sys.stderr)
|
| 648 |
+
print('An unhandled exception occurred while highlighting.',
|
| 649 |
+
file=sys.stderr)
|
| 650 |
+
print('Please report the whole traceback to the issue tracker at',
|
| 651 |
+
file=sys.stderr)
|
| 652 |
+
print('<https://github.com/pygments/pygments/issues>.',
|
| 653 |
+
file=sys.stderr)
|
| 654 |
+
print('*' * 65, file=sys.stderr)
|
| 655 |
+
print(file=sys.stderr)
|
| 656 |
+
raise
|
| 657 |
+
import traceback
|
| 658 |
+
info = traceback.format_exception(*sys.exc_info())
|
| 659 |
+
msg = info[-1].strip()
|
| 660 |
+
if len(info) >= 3:
|
| 661 |
+
# extract relevant file and position info
|
| 662 |
+
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
|
| 663 |
+
print(file=sys.stderr)
|
| 664 |
+
print('*** Error while highlighting:', file=sys.stderr)
|
| 665 |
+
print(msg, file=sys.stderr)
|
| 666 |
+
print('*** If this is a bug you want to report, please rerun with -v.',
|
| 667 |
+
file=sys.stderr)
|
| 668 |
+
return 1
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.console
|
| 3 |
+
~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Format colored console output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
esc = "\x1b["
|
| 12 |
+
|
| 13 |
+
codes = {}
|
| 14 |
+
codes[""] = ""
|
| 15 |
+
codes["reset"] = esc + "39;49;00m"
|
| 16 |
+
|
| 17 |
+
codes["bold"] = esc + "01m"
|
| 18 |
+
codes["faint"] = esc + "02m"
|
| 19 |
+
codes["standout"] = esc + "03m"
|
| 20 |
+
codes["underline"] = esc + "04m"
|
| 21 |
+
codes["blink"] = esc + "05m"
|
| 22 |
+
codes["overline"] = esc + "06m"
|
| 23 |
+
|
| 24 |
+
dark_colors = ["black", "red", "green", "yellow", "blue",
|
| 25 |
+
"magenta", "cyan", "gray"]
|
| 26 |
+
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
|
| 27 |
+
"brightmagenta", "brightcyan", "white"]
|
| 28 |
+
|
| 29 |
+
x = 30
|
| 30 |
+
for d, l in zip(dark_colors, light_colors):
|
| 31 |
+
codes[d] = esc + "%im" % x
|
| 32 |
+
codes[l] = esc + "%im" % (60 + x)
|
| 33 |
+
x += 1
|
| 34 |
+
|
| 35 |
+
del d, l, x
|
| 36 |
+
|
| 37 |
+
codes["white"] = codes["bold"]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def reset_color():
|
| 41 |
+
return codes["reset"]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def colorize(color_key, text):
|
| 45 |
+
return codes[color_key] + text + codes["reset"]
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def ansiformat(attr, text):
|
| 49 |
+
"""
|
| 50 |
+
Format ``text`` with a color and/or some attributes::
|
| 51 |
+
|
| 52 |
+
color normal color
|
| 53 |
+
*color* bold color
|
| 54 |
+
_color_ underlined color
|
| 55 |
+
+color+ blinking color
|
| 56 |
+
"""
|
| 57 |
+
result = []
|
| 58 |
+
if attr[:1] == attr[-1:] == '+':
|
| 59 |
+
result.append(codes['blink'])
|
| 60 |
+
attr = attr[1:-1]
|
| 61 |
+
if attr[:1] == attr[-1:] == '*':
|
| 62 |
+
result.append(codes['bold'])
|
| 63 |
+
attr = attr[1:-1]
|
| 64 |
+
if attr[:1] == attr[-1:] == '_':
|
| 65 |
+
result.append(codes['underline'])
|
| 66 |
+
attr = attr[1:-1]
|
| 67 |
+
result.append(codes[attr])
|
| 68 |
+
result.append(text)
|
| 69 |
+
result.append(codes['reset'])
|
| 70 |
+
return ''.join(result)
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/filter.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.filter
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Module that implements the default filter.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def apply_filters(stream, filters, lexer=None):
|
| 13 |
+
"""
|
| 14 |
+
Use this method to apply an iterable of filters to
|
| 15 |
+
a stream. If lexer is given it's forwarded to the
|
| 16 |
+
filter, otherwise the filter receives `None`.
|
| 17 |
+
"""
|
| 18 |
+
def _apply(filter_, stream):
|
| 19 |
+
yield from filter_.filter(lexer, stream)
|
| 20 |
+
for filter_ in filters:
|
| 21 |
+
stream = _apply(filter_, stream)
|
| 22 |
+
return stream
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def simplefilter(f):
|
| 26 |
+
"""
|
| 27 |
+
Decorator that converts a function into a filter::
|
| 28 |
+
|
| 29 |
+
@simplefilter
|
| 30 |
+
def lowercase(self, lexer, stream, options):
|
| 31 |
+
for ttype, value in stream:
|
| 32 |
+
yield ttype, value.lower()
|
| 33 |
+
"""
|
| 34 |
+
return type(f.__name__, (FunctionFilter,), {
|
| 35 |
+
'__module__': getattr(f, '__module__'),
|
| 36 |
+
'__doc__': f.__doc__,
|
| 37 |
+
'function': f,
|
| 38 |
+
})
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Filter:
|
| 42 |
+
"""
|
| 43 |
+
Default filter. Subclass this class or use the `simplefilter`
|
| 44 |
+
decorator to create own filters.
|
| 45 |
+
"""
|
| 46 |
+
|
| 47 |
+
def __init__(self, **options):
|
| 48 |
+
self.options = options
|
| 49 |
+
|
| 50 |
+
def filter(self, lexer, stream):
|
| 51 |
+
raise NotImplementedError()
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class FunctionFilter(Filter):
|
| 55 |
+
"""
|
| 56 |
+
Abstract class used by `simplefilter` to create simple
|
| 57 |
+
function filters on the fly. The `simplefilter` decorator
|
| 58 |
+
automatically creates subclasses of this class for
|
| 59 |
+
functions passed to it.
|
| 60 |
+
"""
|
| 61 |
+
function = None
|
| 62 |
+
|
| 63 |
+
def __init__(self, **options):
|
| 64 |
+
if not hasattr(self, 'function'):
|
| 65 |
+
raise TypeError('%r used without bound function' %
|
| 66 |
+
self.__class__.__name__)
|
| 67 |
+
Filter.__init__(self, **options)
|
| 68 |
+
|
| 69 |
+
def filter(self, lexer, stream):
|
| 70 |
+
# pylint: disable=not-callable
|
| 71 |
+
yield from self.function(lexer, stream, self.options)
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__init__.py
ADDED
|
@@ -0,0 +1,940 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.filters
|
| 3 |
+
~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Module containing filter lookup functions and default
|
| 6 |
+
filters.
|
| 7 |
+
|
| 8 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 9 |
+
:license: BSD, see LICENSE for details.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import re
|
| 13 |
+
|
| 14 |
+
from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
|
| 15 |
+
string_to_tokentype
|
| 16 |
+
from pip._vendor.pygments.filter import Filter
|
| 17 |
+
from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
|
| 18 |
+
get_choice_opt, ClassNotFound, OptionError
|
| 19 |
+
from pip._vendor.pygments.plugin import find_plugin_filters
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def find_filter_class(filtername):
|
| 23 |
+
"""Lookup a filter by name. Return None if not found."""
|
| 24 |
+
if filtername in FILTERS:
|
| 25 |
+
return FILTERS[filtername]
|
| 26 |
+
for name, cls in find_plugin_filters():
|
| 27 |
+
if name == filtername:
|
| 28 |
+
return cls
|
| 29 |
+
return None
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def get_filter_by_name(filtername, **options):
    """Return an instantiated filter.

    Options are passed to the filter initializer if wanted.
    Raise a ClassNotFound if not found.
    """
    cls = find_filter_class(filtername)
    if cls is None:
        raise ClassNotFound('filter %r not found' % filtername)
    return cls(**options)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def get_all_filters():
    """Return a generator of all filter names, builtins first, then plugins."""
    for builtin_name in FILTERS:
        yield builtin_name
    for plugin_name, _cls in find_plugin_filters():
        yield plugin_name
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _replace_special(ttype, value, regex, specialttype,
|
| 53 |
+
replacefunc=lambda x: x):
|
| 54 |
+
last = 0
|
| 55 |
+
for match in regex.finditer(value):
|
| 56 |
+
start, end = match.start(), match.end()
|
| 57 |
+
if start != last:
|
| 58 |
+
yield ttype, value[last:start]
|
| 59 |
+
yield specialttype, replacefunc(value[start:end])
|
| 60 |
+
last = end
|
| 61 |
+
if last != len(value):
|
| 62 |
+
yield ttype, value[last:]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class CodeTagFilter(Filter):
    """Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
        A list of strings that are flagged as code tags.  The default is to
        highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.

    .. versionchanged:: 2.13
       Now recognizes ``FIXME`` by default.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        codetags = get_list_opt(options, 'codetags',
                                ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
        # Empty strings would produce a pattern matching everywhere, so
        # they are filtered out before building the alternation.
        alternation = '|'.join(re.escape(tag) for tag in codetags if tag)
        self.tag_re = re.compile(r'\b(%s)\b' % alternation)

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            in_docstring = ttype in String.Doc
            in_plain_comment = (ttype in Comment
                                and ttype not in Comment.Preproc)
            if in_docstring or in_plain_comment:
                # Re-tokenize the text so each tag gets Comment.Special.
                yield from _replace_special(ttype, value, regex,
                                            Comment.Special)
            else:
                yield ttype, value
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class SymbolFilter(Filter):
    """Convert mathematical symbols such as \\<longrightarrow> in Isabelle
    or \\longrightarrow in LaTeX into Unicode characters.

    This is mostly useful for HTML or console output when you want to
    approximate the source rendering you'd see in an IDE.

    Options accepted:

    `lang` : string
        The symbol language. Must be one of ``'isabelle'`` or
        ``'latex'``.  The default is ``'isabelle'``.
    """

    # LaTeX command (as a literal backslash-prefixed token value) mapped to
    # the Unicode character it renders as.
    latex_symbols = {
        '\\alpha' : '\U000003b1',
        '\\beta' : '\U000003b2',
        '\\gamma' : '\U000003b3',
        '\\delta' : '\U000003b4',
        '\\varepsilon' : '\U000003b5',
        '\\zeta' : '\U000003b6',
        '\\eta' : '\U000003b7',
        '\\vartheta' : '\U000003b8',
        '\\iota' : '\U000003b9',
        '\\kappa' : '\U000003ba',
        '\\lambda' : '\U000003bb',
        '\\mu' : '\U000003bc',
        '\\nu' : '\U000003bd',
        '\\xi' : '\U000003be',
        '\\pi' : '\U000003c0',
        '\\varrho' : '\U000003c1',
        '\\sigma' : '\U000003c3',
        '\\tau' : '\U000003c4',
        '\\upsilon' : '\U000003c5',
        '\\varphi' : '\U000003c6',
        '\\chi' : '\U000003c7',
        '\\psi' : '\U000003c8',
        '\\omega' : '\U000003c9',
        '\\Gamma' : '\U00000393',
        '\\Delta' : '\U00000394',
        '\\Theta' : '\U00000398',
        '\\Lambda' : '\U0000039b',
        '\\Xi' : '\U0000039e',
        '\\Pi' : '\U000003a0',
        '\\Sigma' : '\U000003a3',
        '\\Upsilon' : '\U000003a5',
        '\\Phi' : '\U000003a6',
        '\\Psi' : '\U000003a8',
        '\\Omega' : '\U000003a9',
        '\\leftarrow' : '\U00002190',
        '\\longleftarrow' : '\U000027f5',
        '\\rightarrow' : '\U00002192',
        '\\longrightarrow' : '\U000027f6',
        '\\Leftarrow' : '\U000021d0',
        '\\Longleftarrow' : '\U000027f8',
        '\\Rightarrow' : '\U000021d2',
        '\\Longrightarrow' : '\U000027f9',
        '\\leftrightarrow' : '\U00002194',
        '\\longleftrightarrow' : '\U000027f7',
        '\\Leftrightarrow' : '\U000021d4',
        '\\Longleftrightarrow' : '\U000027fa',
        '\\mapsto' : '\U000021a6',
        '\\longmapsto' : '\U000027fc',
        '\\relbar' : '\U00002500',
        '\\Relbar' : '\U00002550',
        '\\hookleftarrow' : '\U000021a9',
        '\\hookrightarrow' : '\U000021aa',
        '\\leftharpoondown' : '\U000021bd',
        '\\rightharpoondown' : '\U000021c1',
        '\\leftharpoonup' : '\U000021bc',
        '\\rightharpoonup' : '\U000021c0',
        '\\rightleftharpoons' : '\U000021cc',
        '\\leadsto' : '\U0000219d',
        '\\downharpoonleft' : '\U000021c3',
        '\\downharpoonright' : '\U000021c2',
        '\\upharpoonleft' : '\U000021bf',
        '\\upharpoonright' : '\U000021be',
        '\\restriction' : '\U000021be',
        '\\uparrow' : '\U00002191',
        '\\Uparrow' : '\U000021d1',
        '\\downarrow' : '\U00002193',
        '\\Downarrow' : '\U000021d3',
        '\\updownarrow' : '\U00002195',
        '\\Updownarrow' : '\U000021d5',
        '\\langle' : '\U000027e8',
        '\\rangle' : '\U000027e9',
        '\\lceil' : '\U00002308',
        '\\rceil' : '\U00002309',
        '\\lfloor' : '\U0000230a',
        '\\rfloor' : '\U0000230b',
        '\\flqq' : '\U000000ab',
        '\\frqq' : '\U000000bb',
        '\\bot' : '\U000022a5',
        '\\top' : '\U000022a4',
        '\\wedge' : '\U00002227',
        '\\bigwedge' : '\U000022c0',
        '\\vee' : '\U00002228',
        '\\bigvee' : '\U000022c1',
        '\\forall' : '\U00002200',
        '\\exists' : '\U00002203',
        '\\nexists' : '\U00002204',
        '\\neg' : '\U000000ac',
        '\\Box' : '\U000025a1',
        '\\Diamond' : '\U000025c7',
        '\\vdash' : '\U000022a2',
        '\\models' : '\U000022a8',
        '\\dashv' : '\U000022a3',
        '\\surd' : '\U0000221a',
        '\\le' : '\U00002264',
        '\\ge' : '\U00002265',
        '\\ll' : '\U0000226a',
        '\\gg' : '\U0000226b',
        '\\lesssim' : '\U00002272',
        '\\gtrsim' : '\U00002273',
        '\\lessapprox' : '\U00002a85',
        '\\gtrapprox' : '\U00002a86',
        '\\in' : '\U00002208',
        '\\notin' : '\U00002209',
        '\\subset' : '\U00002282',
        '\\supset' : '\U00002283',
        '\\subseteq' : '\U00002286',
        '\\supseteq' : '\U00002287',
        '\\sqsubset' : '\U0000228f',
        '\\sqsupset' : '\U00002290',
        '\\sqsubseteq' : '\U00002291',
        '\\sqsupseteq' : '\U00002292',
        '\\cap' : '\U00002229',
        '\\bigcap' : '\U000022c2',
        '\\cup' : '\U0000222a',
        '\\bigcup' : '\U000022c3',
        '\\sqcup' : '\U00002294',
        '\\bigsqcup' : '\U00002a06',
        '\\sqcap' : '\U00002293',
        '\\Bigsqcap' : '\U00002a05',
        '\\setminus' : '\U00002216',
        '\\propto' : '\U0000221d',
        '\\uplus' : '\U0000228e',
        '\\bigplus' : '\U00002a04',
        '\\sim' : '\U0000223c',
        '\\doteq' : '\U00002250',
        '\\simeq' : '\U00002243',
        '\\approx' : '\U00002248',
        '\\asymp' : '\U0000224d',
        '\\cong' : '\U00002245',
        '\\equiv' : '\U00002261',
        '\\Join' : '\U000022c8',
        '\\bowtie' : '\U00002a1d',
        '\\prec' : '\U0000227a',
        '\\succ' : '\U0000227b',
        '\\preceq' : '\U0000227c',
        '\\succeq' : '\U0000227d',
        '\\parallel' : '\U00002225',
        '\\mid' : '\U000000a6',
        '\\pm' : '\U000000b1',
        '\\mp' : '\U00002213',
        '\\times' : '\U000000d7',
        '\\div' : '\U000000f7',
        '\\cdot' : '\U000022c5',
        '\\star' : '\U000022c6',
        '\\circ' : '\U00002218',
        '\\dagger' : '\U00002020',
        '\\ddagger' : '\U00002021',
        '\\lhd' : '\U000022b2',
        '\\rhd' : '\U000022b3',
        '\\unlhd' : '\U000022b4',
        '\\unrhd' : '\U000022b5',
        '\\triangleleft' : '\U000025c3',
        '\\triangleright' : '\U000025b9',
        '\\triangle' : '\U000025b3',
        '\\triangleq' : '\U0000225c',
        '\\oplus' : '\U00002295',
        '\\bigoplus' : '\U00002a01',
        '\\otimes' : '\U00002297',
        '\\bigotimes' : '\U00002a02',
        '\\odot' : '\U00002299',
        '\\bigodot' : '\U00002a00',
        '\\ominus' : '\U00002296',
        '\\oslash' : '\U00002298',
        '\\dots' : '\U00002026',
        '\\cdots' : '\U000022ef',
        '\\sum' : '\U00002211',
        '\\prod' : '\U0000220f',
        '\\coprod' : '\U00002210',
        '\\infty' : '\U0000221e',
        '\\int' : '\U0000222b',
        '\\oint' : '\U0000222e',
        '\\clubsuit' : '\U00002663',
        '\\diamondsuit' : '\U00002662',
        '\\heartsuit' : '\U00002661',
        '\\spadesuit' : '\U00002660',
        '\\aleph' : '\U00002135',
        '\\emptyset' : '\U00002205',
        '\\nabla' : '\U00002207',
        '\\partial' : '\U00002202',
        '\\flat' : '\U0000266d',
        '\\natural' : '\U0000266e',
        '\\sharp' : '\U0000266f',
        '\\angle' : '\U00002220',
        '\\copyright' : '\U000000a9',
        '\\textregistered' : '\U000000ae',
        '\\textonequarter' : '\U000000bc',
        '\\textonehalf' : '\U000000bd',
        '\\textthreequarters' : '\U000000be',
        '\\textordfeminine' : '\U000000aa',
        '\\textordmasculine' : '\U000000ba',
        '\\euro' : '\U000020ac',
        '\\pounds' : '\U000000a3',
        '\\yen' : '\U000000a5',
        '\\textcent' : '\U000000a2',
        '\\textcurrency' : '\U000000a4',
        '\\textdegree' : '\U000000b0',
    }

    # Isabelle symbol notation (``\<name>`` token value) mapped to the
    # Unicode character it renders as.
    isabelle_symbols = {
        '\\<zero>' : '\U0001d7ec',
        '\\<one>' : '\U0001d7ed',
        '\\<two>' : '\U0001d7ee',
        '\\<three>' : '\U0001d7ef',
        '\\<four>' : '\U0001d7f0',
        '\\<five>' : '\U0001d7f1',
        '\\<six>' : '\U0001d7f2',
        '\\<seven>' : '\U0001d7f3',
        '\\<eight>' : '\U0001d7f4',
        '\\<nine>' : '\U0001d7f5',
        '\\<A>' : '\U0001d49c',
        '\\<B>' : '\U0000212c',
        '\\<C>' : '\U0001d49e',
        '\\<D>' : '\U0001d49f',
        '\\<E>' : '\U00002130',
        '\\<F>' : '\U00002131',
        '\\<G>' : '\U0001d4a2',
        '\\<H>' : '\U0000210b',
        '\\<I>' : '\U00002110',
        '\\<J>' : '\U0001d4a5',
        '\\<K>' : '\U0001d4a6',
        '\\<L>' : '\U00002112',
        '\\<M>' : '\U00002133',
        '\\<N>' : '\U0001d4a9',
        '\\<O>' : '\U0001d4aa',
        '\\<P>' : '\U0001d4ab',
        '\\<Q>' : '\U0001d4ac',
        '\\<R>' : '\U0000211b',
        '\\<S>' : '\U0001d4ae',
        '\\<T>' : '\U0001d4af',
        '\\<U>' : '\U0001d4b0',
        '\\<V>' : '\U0001d4b1',
        '\\<W>' : '\U0001d4b2',
        '\\<X>' : '\U0001d4b3',
        '\\<Y>' : '\U0001d4b4',
        '\\<Z>' : '\U0001d4b5',
        '\\<a>' : '\U0001d5ba',
        '\\<b>' : '\U0001d5bb',
        '\\<c>' : '\U0001d5bc',
        '\\<d>' : '\U0001d5bd',
        '\\<e>' : '\U0001d5be',
        '\\<f>' : '\U0001d5bf',
        '\\<g>' : '\U0001d5c0',
        '\\<h>' : '\U0001d5c1',
        '\\<i>' : '\U0001d5c2',
        '\\<j>' : '\U0001d5c3',
        '\\<k>' : '\U0001d5c4',
        '\\<l>' : '\U0001d5c5',
        '\\<m>' : '\U0001d5c6',
        '\\<n>' : '\U0001d5c7',
        '\\<o>' : '\U0001d5c8',
        '\\<p>' : '\U0001d5c9',
        '\\<q>' : '\U0001d5ca',
        '\\<r>' : '\U0001d5cb',
        '\\<s>' : '\U0001d5cc',
        '\\<t>' : '\U0001d5cd',
        '\\<u>' : '\U0001d5ce',
        '\\<v>' : '\U0001d5cf',
        '\\<w>' : '\U0001d5d0',
        '\\<x>' : '\U0001d5d1',
        '\\<y>' : '\U0001d5d2',
        '\\<z>' : '\U0001d5d3',
        '\\<AA>' : '\U0001d504',
        '\\<BB>' : '\U0001d505',
        '\\<CC>' : '\U0000212d',
        '\\<DD>' : '\U0001d507',
        '\\<EE>' : '\U0001d508',
        '\\<FF>' : '\U0001d509',
        '\\<GG>' : '\U0001d50a',
        '\\<HH>' : '\U0000210c',
        '\\<II>' : '\U00002111',
        '\\<JJ>' : '\U0001d50d',
        '\\<KK>' : '\U0001d50e',
        '\\<LL>' : '\U0001d50f',
        '\\<MM>' : '\U0001d510',
        '\\<NN>' : '\U0001d511',
        '\\<OO>' : '\U0001d512',
        '\\<PP>' : '\U0001d513',
        '\\<QQ>' : '\U0001d514',
        '\\<RR>' : '\U0000211c',
        '\\<SS>' : '\U0001d516',
        '\\<TT>' : '\U0001d517',
        '\\<UU>' : '\U0001d518',
        '\\<VV>' : '\U0001d519',
        '\\<WW>' : '\U0001d51a',
        '\\<XX>' : '\U0001d51b',
        '\\<YY>' : '\U0001d51c',
        '\\<ZZ>' : '\U00002128',
        '\\<aa>' : '\U0001d51e',
        '\\<bb>' : '\U0001d51f',
        '\\<cc>' : '\U0001d520',
        '\\<dd>' : '\U0001d521',
        '\\<ee>' : '\U0001d522',
        '\\<ff>' : '\U0001d523',
        '\\<gg>' : '\U0001d524',
        '\\<hh>' : '\U0001d525',
        '\\<ii>' : '\U0001d526',
        '\\<jj>' : '\U0001d527',
        '\\<kk>' : '\U0001d528',
        '\\<ll>' : '\U0001d529',
        '\\<mm>' : '\U0001d52a',
        '\\<nn>' : '\U0001d52b',
        '\\<oo>' : '\U0001d52c',
        '\\<pp>' : '\U0001d52d',
        '\\<qq>' : '\U0001d52e',
        '\\<rr>' : '\U0001d52f',
        '\\<ss>' : '\U0001d530',
        '\\<tt>' : '\U0001d531',
        '\\<uu>' : '\U0001d532',
        '\\<vv>' : '\U0001d533',
        '\\<ww>' : '\U0001d534',
        '\\<xx>' : '\U0001d535',
        '\\<yy>' : '\U0001d536',
        '\\<zz>' : '\U0001d537',
        '\\<alpha>' : '\U000003b1',
        '\\<beta>' : '\U000003b2',
        '\\<gamma>' : '\U000003b3',
        '\\<delta>' : '\U000003b4',
        '\\<epsilon>' : '\U000003b5',
        '\\<zeta>' : '\U000003b6',
        '\\<eta>' : '\U000003b7',
        '\\<theta>' : '\U000003b8',
        '\\<iota>' : '\U000003b9',
        '\\<kappa>' : '\U000003ba',
        '\\<lambda>' : '\U000003bb',
        '\\<mu>' : '\U000003bc',
        '\\<nu>' : '\U000003bd',
        '\\<xi>' : '\U000003be',
        '\\<pi>' : '\U000003c0',
        '\\<rho>' : '\U000003c1',
        '\\<sigma>' : '\U000003c3',
        '\\<tau>' : '\U000003c4',
        '\\<upsilon>' : '\U000003c5',
        '\\<phi>' : '\U000003c6',
        '\\<chi>' : '\U000003c7',
        '\\<psi>' : '\U000003c8',
        '\\<omega>' : '\U000003c9',
        '\\<Gamma>' : '\U00000393',
        '\\<Delta>' : '\U00000394',
        '\\<Theta>' : '\U00000398',
        '\\<Lambda>' : '\U0000039b',
        '\\<Xi>' : '\U0000039e',
        '\\<Pi>' : '\U000003a0',
        '\\<Sigma>' : '\U000003a3',
        '\\<Upsilon>' : '\U000003a5',
        '\\<Phi>' : '\U000003a6',
        '\\<Psi>' : '\U000003a8',
        '\\<Omega>' : '\U000003a9',
        '\\<bool>' : '\U0001d539',
        '\\<complex>' : '\U00002102',
        '\\<nat>' : '\U00002115',
        '\\<rat>' : '\U0000211a',
        '\\<real>' : '\U0000211d',
        '\\<int>' : '\U00002124',
        '\\<leftarrow>' : '\U00002190',
        '\\<longleftarrow>' : '\U000027f5',
        '\\<rightarrow>' : '\U00002192',
        '\\<longrightarrow>' : '\U000027f6',
        '\\<Leftarrow>' : '\U000021d0',
        '\\<Longleftarrow>' : '\U000027f8',
        '\\<Rightarrow>' : '\U000021d2',
        '\\<Longrightarrow>' : '\U000027f9',
        '\\<leftrightarrow>' : '\U00002194',
        '\\<longleftrightarrow>' : '\U000027f7',
        '\\<Leftrightarrow>' : '\U000021d4',
        '\\<Longleftrightarrow>' : '\U000027fa',
        '\\<mapsto>' : '\U000021a6',
        '\\<longmapsto>' : '\U000027fc',
        '\\<midarrow>' : '\U00002500',
        '\\<Midarrow>' : '\U00002550',
        '\\<hookleftarrow>' : '\U000021a9',
        '\\<hookrightarrow>' : '\U000021aa',
        '\\<leftharpoondown>' : '\U000021bd',
        '\\<rightharpoondown>' : '\U000021c1',
        '\\<leftharpoonup>' : '\U000021bc',
        '\\<rightharpoonup>' : '\U000021c0',
        '\\<rightleftharpoons>' : '\U000021cc',
        '\\<leadsto>' : '\U0000219d',
        '\\<downharpoonleft>' : '\U000021c3',
        '\\<downharpoonright>' : '\U000021c2',
        '\\<upharpoonleft>' : '\U000021bf',
        '\\<upharpoonright>' : '\U000021be',
        '\\<restriction>' : '\U000021be',
        '\\<Colon>' : '\U00002237',
        '\\<up>' : '\U00002191',
        '\\<Up>' : '\U000021d1',
        '\\<down>' : '\U00002193',
        '\\<Down>' : '\U000021d3',
        '\\<updown>' : '\U00002195',
        '\\<Updown>' : '\U000021d5',
        '\\<langle>' : '\U000027e8',
        '\\<rangle>' : '\U000027e9',
        '\\<lceil>' : '\U00002308',
        '\\<rceil>' : '\U00002309',
        '\\<lfloor>' : '\U0000230a',
        '\\<rfloor>' : '\U0000230b',
        '\\<lparr>' : '\U00002987',
        '\\<rparr>' : '\U00002988',
        '\\<lbrakk>' : '\U000027e6',
        '\\<rbrakk>' : '\U000027e7',
        '\\<lbrace>' : '\U00002983',
        '\\<rbrace>' : '\U00002984',
        '\\<guillemotleft>' : '\U000000ab',
        '\\<guillemotright>' : '\U000000bb',
        '\\<bottom>' : '\U000022a5',
        '\\<top>' : '\U000022a4',
        '\\<and>' : '\U00002227',
        '\\<And>' : '\U000022c0',
        '\\<or>' : '\U00002228',
        '\\<Or>' : '\U000022c1',
        '\\<forall>' : '\U00002200',
        '\\<exists>' : '\U00002203',
        '\\<nexists>' : '\U00002204',
        '\\<not>' : '\U000000ac',
        '\\<box>' : '\U000025a1',
        '\\<diamond>' : '\U000025c7',
        '\\<turnstile>' : '\U000022a2',
        '\\<Turnstile>' : '\U000022a8',
        '\\<tturnstile>' : '\U000022a9',
        '\\<TTurnstile>' : '\U000022ab',
        '\\<stileturn>' : '\U000022a3',
        '\\<surd>' : '\U0000221a',
        '\\<le>' : '\U00002264',
        '\\<ge>' : '\U00002265',
        '\\<lless>' : '\U0000226a',
        '\\<ggreater>' : '\U0000226b',
        '\\<lesssim>' : '\U00002272',
        '\\<greatersim>' : '\U00002273',
        '\\<lessapprox>' : '\U00002a85',
        '\\<greaterapprox>' : '\U00002a86',
        '\\<in>' : '\U00002208',
        '\\<notin>' : '\U00002209',
        '\\<subset>' : '\U00002282',
        '\\<supset>' : '\U00002283',
        '\\<subseteq>' : '\U00002286',
        '\\<supseteq>' : '\U00002287',
        '\\<sqsubset>' : '\U0000228f',
        '\\<sqsupset>' : '\U00002290',
        '\\<sqsubseteq>' : '\U00002291',
        '\\<sqsupseteq>' : '\U00002292',
        '\\<inter>' : '\U00002229',
        '\\<Inter>' : '\U000022c2',
        '\\<union>' : '\U0000222a',
        '\\<Union>' : '\U000022c3',
        '\\<squnion>' : '\U00002294',
        '\\<Squnion>' : '\U00002a06',
        '\\<sqinter>' : '\U00002293',
        '\\<Sqinter>' : '\U00002a05',
        '\\<setminus>' : '\U00002216',
        '\\<propto>' : '\U0000221d',
        '\\<uplus>' : '\U0000228e',
        '\\<Uplus>' : '\U00002a04',
        '\\<noteq>' : '\U00002260',
        '\\<sim>' : '\U0000223c',
        '\\<doteq>' : '\U00002250',
        '\\<simeq>' : '\U00002243',
        '\\<approx>' : '\U00002248',
        '\\<asymp>' : '\U0000224d',
        '\\<cong>' : '\U00002245',
        '\\<smile>' : '\U00002323',
        '\\<equiv>' : '\U00002261',
        '\\<frown>' : '\U00002322',
        '\\<Join>' : '\U000022c8',
        '\\<bowtie>' : '\U00002a1d',
        '\\<prec>' : '\U0000227a',
        '\\<succ>' : '\U0000227b',
        '\\<preceq>' : '\U0000227c',
        '\\<succeq>' : '\U0000227d',
        '\\<parallel>' : '\U00002225',
        '\\<bar>' : '\U000000a6',
        '\\<plusminus>' : '\U000000b1',
        '\\<minusplus>' : '\U00002213',
        '\\<times>' : '\U000000d7',
        '\\<div>' : '\U000000f7',
        '\\<cdot>' : '\U000022c5',
        '\\<star>' : '\U000022c6',
        '\\<bullet>' : '\U00002219',
        '\\<circ>' : '\U00002218',
        '\\<dagger>' : '\U00002020',
        '\\<ddagger>' : '\U00002021',
        '\\<lhd>' : '\U000022b2',
        '\\<rhd>' : '\U000022b3',
        '\\<unlhd>' : '\U000022b4',
        '\\<unrhd>' : '\U000022b5',
        '\\<triangleleft>' : '\U000025c3',
        '\\<triangleright>' : '\U000025b9',
        '\\<triangle>' : '\U000025b3',
        '\\<triangleq>' : '\U0000225c',
        '\\<oplus>' : '\U00002295',
        '\\<Oplus>' : '\U00002a01',
        '\\<otimes>' : '\U00002297',
        '\\<Otimes>' : '\U00002a02',
        '\\<odot>' : '\U00002299',
        '\\<Odot>' : '\U00002a00',
        '\\<ominus>' : '\U00002296',
        '\\<oslash>' : '\U00002298',
        '\\<dots>' : '\U00002026',
        '\\<cdots>' : '\U000022ef',
        '\\<Sum>' : '\U00002211',
        '\\<Prod>' : '\U0000220f',
        '\\<Coprod>' : '\U00002210',
        '\\<infinity>' : '\U0000221e',
        '\\<integral>' : '\U0000222b',
        '\\<ointegral>' : '\U0000222e',
        '\\<clubsuit>' : '\U00002663',
        '\\<diamondsuit>' : '\U00002662',
        '\\<heartsuit>' : '\U00002661',
        '\\<spadesuit>' : '\U00002660',
        '\\<aleph>' : '\U00002135',
        '\\<emptyset>' : '\U00002205',
        '\\<nabla>' : '\U00002207',
        '\\<partial>' : '\U00002202',
        '\\<flat>' : '\U0000266d',
        '\\<natural>' : '\U0000266e',
        '\\<sharp>' : '\U0000266f',
        '\\<angle>' : '\U00002220',
        '\\<copyright>' : '\U000000a9',
        '\\<registered>' : '\U000000ae',
        '\\<hyphen>' : '\U000000ad',
        '\\<inverse>' : '\U000000af',
        '\\<onequarter>' : '\U000000bc',
        '\\<onehalf>' : '\U000000bd',
        '\\<threequarters>' : '\U000000be',
        '\\<ordfeminine>' : '\U000000aa',
        '\\<ordmasculine>' : '\U000000ba',
        '\\<section>' : '\U000000a7',
        '\\<paragraph>' : '\U000000b6',
        '\\<exclamdown>' : '\U000000a1',
        '\\<questiondown>' : '\U000000bf',
        '\\<euro>' : '\U000020ac',
        '\\<pounds>' : '\U000000a3',
        '\\<yen>' : '\U000000a5',
        '\\<cent>' : '\U000000a2',
        '\\<currency>' : '\U000000a4',
        '\\<degree>' : '\U000000b0',
        '\\<amalg>' : '\U00002a3f',
        '\\<mho>' : '\U00002127',
        '\\<lozenge>' : '\U000025ca',
        '\\<wp>' : '\U00002118',
        '\\<wrong>' : '\U00002240',
        '\\<struct>' : '\U000022c4',
        '\\<acute>' : '\U000000b4',
        '\\<index>' : '\U00000131',
        '\\<dieresis>' : '\U000000a8',
        '\\<cedilla>' : '\U000000b8',
        '\\<hungarumlaut>' : '\U000002dd',
        '\\<some>' : '\U000003f5',
        '\\<newline>' : '\U000023ce',
        '\\<open>' : '\U00002039',
        '\\<close>' : '\U0000203a',
        '\\<here>' : '\U00002302',
        '\\<^sub>' : '\U000021e9',
        '\\<^sup>' : '\U000021e7',
        '\\<^bold>' : '\U00002759',
        '\\<^bsub>' : '\U000021d8',
        '\\<^esub>' : '\U000021d9',
        '\\<^bsup>' : '\U000021d7',
        '\\<^esup>' : '\U000021d6',
    }

    # Maps the `lang` option value to the table used at filter time.
    lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}

    def __init__(self, **options):
        Filter.__init__(self, **options)
        # `lang` selects which symbol table to apply; invalid choices are
        # rejected by get_choice_opt.
        lang = get_choice_opt(options, 'lang',
                              ['isabelle', 'latex'], 'isabelle')
        self.symbols = self.lang_map[lang]

    def filter(self, lexer, stream):
        # Replacement happens only when an entire token value matches a
        # symbol name exactly; token types are preserved.
        for ttype, value in stream:
            if value in self.symbols:
                yield ttype, self.symbols[value]
            else:
                yield ttype, value
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
class KeywordCaseFilter(Filter):
    """Convert keywords to lowercase or uppercase or capitalize them, which
    means first letter uppercase, rest lowercase.

    This can be useful e.g. if you highlight Pascal code and want to adapt the
    code to your styleguide.

    Options accepted:

    `case` : string
        The casing to convert keywords to. Must be one of ``'lower'``,
        ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        chosen = get_choice_opt(options, 'case',
                                ['lower', 'upper', 'capitalize'], 'lower')
        # The option names deliberately mirror str method names, so the
        # converter is just the bound str method itself.
        self.convert = getattr(str, chosen)

    def filter(self, lexer, stream):
        convert = self.convert
        for ttype, value in stream:
            yield (ttype, convert(value) if ttype in Keyword else value)
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
class NameHighlightFilter(Filter):
    """Highlight a normal Name (and Name.*) token with a different token type.

    Example::

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    This would highlight the names "foo", "bar" and "baz"
    as functions. `Name.Function` is the default token type.

    Options accepted:

    `names` : list of strings
        A list of names that should be given the different token type.
        There is no default.
    `tokentype` : TokenType or string
        A token type or a string containing a token type name that is
        used for highlighting the strings in `names`.  The default is
        `Name.Function`.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.names = set(get_list_opt(options, 'names', []))
        requested = options.get('tokentype')
        # A string like 'Name.Class' is resolved to the real token type.
        self.tokentype = (string_to_tokentype(requested) if requested
                          else Name.Function)

    def filter(self, lexer, stream):
        highlight = self.tokentype
        names = self.names
        for ttype, value in stream:
            if value in names and ttype in Name:
                yield highlight, value
            else:
                yield ttype, value
|
| 754 |
+
|
| 755 |
+
|
| 756 |
+
class ErrorToken(Exception):
    """Default exception raised by RaiseOnErrorTokenFilter when the lexer
    produces an ``Error`` token; carries the offending token value."""
    pass
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
class RaiseOnErrorTokenFilter(Filter):
    """Raise an exception when the lexer generates an error token.

    Options accepted:

    `excclass` : Exception class
        The exception class to raise.
        The default is `pygments.filters.ErrorToken`.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        # issubclass() raises TypeError when given a non-class, so both a
        # non-class and a non-Exception class end up rejected here.
        try:
            valid = issubclass(self.exception, Exception)
        except TypeError:
            valid = False
        if not valid:
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Error:
                raise self.exception(value)
            yield ttype, value
|
| 787 |
+
|
| 788 |
+
|
| 789 |
+
class VisibleWhitespaceFilter(Filter):
    """Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    `spaces` : string or bool
        If this is a one-character string, spaces will be replaces by this string.
        If it is another true value, spaces will be replaced by ``·`` (unicode
        MIDDLE DOT). If it is a false value, spaces will not be replaced. The
        default is ``False``.
    `tabs` : string or bool
        The same as for `spaces`, but the default replacement character is ``»``
        (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
        is ``False``. Note: this will not work if the `tabsize` option for the
        lexer is nonzero, as tabs will already have been expanded then.
    `tabsize` : int
        If tabs are to be replaced by this filter (see the `tabs` option), this
        is the total number of characters that a tab should be expanded to.
        The default is ``8``.
    `newlines` : string or bool
        The same as for `spaces`, but the default replacement character is ``¶``
        (unicode PILCROW SIGN). The default value is ``False``.
    `wstokentype` : bool
        If true, give whitespace the special `Whitespace` token type. This allows
        styling the visible whitespace differently (e.g. greyed out), but it can
        disrupt background colors. The default is ``True``.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        # Each option is either a single replacement character, any other
        # truthy value (-> use the default marker) or falsy (-> disabled,
        # stored as the empty string).
        for attr, marker in (('spaces', '·'),
                             ('tabs', '»'),
                             ('newlines', '¶')):
            chosen = options.get(attr, False)
            if isinstance(chosen, str) and len(chosen) == 1:
                setattr(self, attr, chosen)
            else:
                setattr(self, attr, marker if chosen else '')
        tabsize = get_int_opt(options, 'tabsize', 8)
        # Pad the tab marker so a tab still occupies `tabsize` columns.
        if self.tabs:
            self.tabs += ' ' * (tabsize - 1)
        if self.newlines:
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            # Re-tokenize all whitespace with the Whitespace token type,
            # substituting markers where one was configured.
            visible = {
                ' ': self.spaces or ' ',
                '\t': self.tabs or '\t',
                '\n': self.newlines or '\n',
            }
            regex = re.compile(r'\s')

            def replacefunc(wschar):
                # Other whitespace (e.g. '\r') passes through unchanged but
                # still gets the Whitespace token type.
                return visible.get(wschar, wschar)

            for ttype, value in stream:
                yield from _replace_special(ttype, value, regex, Whitespace,
                                            replacefunc)
        else:
            # Simpler processing: plain in-place substitution, token types
            # untouched.
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value
|
| 866 |
+
|
| 867 |
+
|
| 868 |
+
class GobbleFilter(Filter):
    """Gobbles source code lines (eats initial characters).

    This filter drops the first ``n`` characters off every line of code. This
    may be useful when the source code fed to the lexer is indented by a fixed
    amount of space that isn't desired in the output.

    Options accepted:

    `n` : int
        The number of characters to gobble.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        # Eat up to ``left`` characters from ``value``; return the rest of
        # the string and how many characters remain to be eaten.
        available = len(value)
        if left >= available:
            return '', left - available
        return value[left:], 0

    def filter(self, lexer, stream):
        budget = self.n
        carry = budget  # characters still to remove from the current line
        for ttype, value in stream:
            # The first piece continues the previous token's physical line,
            # so it consumes the leftover budget; every later piece starts
            # a fresh line with the full budget.
            pieces = value.split('\n')
            pieces[0], carry = self.gobble(pieces[0], carry)
            for idx in range(1, len(pieces)):
                pieces[idx], carry = self.gobble(pieces[idx], budget)
            joined = '\n'.join(pieces)
            if joined:
                yield ttype, joined
|
| 905 |
+
|
| 906 |
+
|
| 907 |
+
class TokenMergeFilter(Filter):
    """Merges consecutive tokens with the same token type in the output
    stream of a lexer.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        pending_type = None
        pending_text = None
        for ttype, value in stream:
            if ttype is pending_type:
                # Same type as the run in progress: extend it.
                pending_text += value
            else:
                # Flush the finished run (if any), then start a new one.
                if pending_type is not None:
                    yield pending_type, pending_text
                pending_type = ttype
                pending_text = value
        # Emit the final run.
        if pending_type is not None:
            yield pending_type, pending_text
|
| 929 |
+
|
| 930 |
+
|
| 931 |
+
# Mapping of public filter names to their implementing classes.
FILTERS = {
    'codetagify': CodeTagFilter,
    'keywordcase': KeywordCaseFilter,
    'highlight': NameHighlightFilter,
    'raiseonerror': RaiseOnErrorTokenFilter,
    'whitespace': VisibleWhitespaceFilter,
    'gobble': GobbleFilter,
    'tokenmerge': TokenMergeFilter,
    'symbols': SymbolFilter,
}
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (40.1 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatter
|
| 3 |
+
~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Base formatter class.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import codecs
|
| 12 |
+
|
| 13 |
+
from pip._vendor.pygments.util import get_bool_opt
|
| 14 |
+
from pip._vendor.pygments.styles import get_style_by_name
|
| 15 |
+
|
| 16 |
+
__all__ = ['Formatter']
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _lookup_style(style):
|
| 20 |
+
if isinstance(style, str):
|
| 21 |
+
return get_style_by_name(style)
|
| 22 |
+
return style
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class Formatter:
    """
    Converts a token stream to text.

    Formatters should have attributes to help selecting them. These
    are similar to the corresponding :class:`~pygments.lexer.Lexer`
    attributes.

    .. autoattribute:: name
       :no-value:

    .. autoattribute:: aliases
       :no-value:

    .. autoattribute:: filenames
       :no-value:

    You can pass options as keyword arguments to the constructor.
    All formatters accept these basic options:

    ``style``
        The style to use, can be a string or a Style subclass
        (default: "default"). Not used by e.g. the
        TerminalFormatter.
    ``full``
        Tells the formatter to output a "full" document, i.e.
        a complete self-contained document. This doesn't have
        any effect for some formatters (default: false).
    ``title``
        If ``full`` is true, the title that should be used to
        caption the document (default: '').
    ``encoding``
        If given, must be an encoding name. This will be used to
        convert the Unicode token strings to byte strings in the
        output. If it is "" or None, Unicode strings will be written
        to the output file, which most file-like objects do not
        support (default: None).
    ``outencoding``
        Overrides ``encoding`` if given.

    """

    #: Full name for the formatter, in human-readable form.
    name = None

    #: A list of short, unique identifiers that can be used to lookup
    #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
    aliases = []

    #: A list of fnmatch patterns that match filenames for which this
    #: formatter can produce output. The patterns in this list should be unique
    #: among all formatters.
    filenames = []

    #: If True, this formatter outputs Unicode strings when no encoding
    #: option is given.
    unicodeoutput = True

    def __init__(self, **options):
        """
        As with lexers, this constructor takes arbitrary optional arguments,
        and if you override it, you should first process your own options, then
        call the base class implementation.
        """
        self.style = _lookup_style(options.get('style', 'default'))
        self.full = get_bool_opt(options, 'full', False)
        self.title = options.get('title', '')
        # Normalize '' to None so "no encoding" is always represented as None.
        self.encoding = options.get('encoding', None) or None
        if self.encoding in ('guess', 'chardet'):
            # can happen for e.g. pygmentize -O encoding=guess
            self.encoding = 'utf-8'
        # ``outencoding``, when given, overrides ``encoding``.
        self.encoding = options.get('outencoding') or self.encoding
        self.options = options

    def get_style_defs(self, arg=''):
        """
        This method must return statements or declarations suitable to define
        the current style for subsequent highlighted text (e.g. CSS classes
        in the `HTMLFormatter`).

        The optional argument `arg` can be used to modify the generation and
        is formatter dependent (it is standardized because it can be given on
        the command line).

        This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
        the `arg` is then given by the ``-a`` option.
        """
        return ''

    def format(self, tokensource, outfile):
        """
        This method must format the tokens from the `tokensource` iterable and
        write the formatted version to the file object `outfile`.

        Formatter options can control how exactly the tokens are converted.
        """
        if self.encoding:
            # wrap the outfile in a StreamWriter
            # (codecs.lookup(...)[3] is the codec's StreamWriter factory)
            outfile = codecs.lookup(self.encoding)[3](outfile)
        return self.format_unencoded(tokensource, outfile)
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/bbcode.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.bbcode
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
BBcode formatter.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 13 |
+
from pip._vendor.pygments.util import get_bool_opt
|
| 14 |
+
|
| 15 |
+
__all__ = ['BBCodeFormatter']
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class BBCodeFormatter(Formatter):
    """
    Format tokens with BBcodes. These formatting codes are used by many
    bulletin boards, so you can highlight your sourcecode with pygments before
    posting it there.

    This formatter has no support for background colors and borders, as there
    are no common BBcode tags for that.

    Some board systems (e.g. phpBB) don't support colors in their [code] tag,
    so you can't use the highlighting together with that tag.
    Text in a [code] tag usually is shown with a monospace font (which this
    formatter can do with the ``monofont`` option) and no spaces (which you
    need for indentation) are removed.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `codetag`
        If set to true, put the output into ``[code]`` tags (default:
        ``false``)

    `monofont`
        If set to true, add a tag to show the code with a monospace font
        (default: ``false``).
    """
    name = 'BBCode'
    aliases = ['bbcode', 'bb']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self._code = get_bool_opt(options, 'codetag', False)
        self._mono = get_bool_opt(options, 'monofont', False)

        self.styles = {}
        self._make_styles()

    def _make_styles(self):
        # Precompute a (start, end) BBcode tag pair for every token type in
        # the style. End tags are prepended so they close in reverse order
        # of opening.
        for ttype, ndef in self.style:
            start = end = ''
            if ndef['color']:
                start += '[color=#%s]' % ndef['color']
                end = '[/color]' + end
            if ndef['bold']:
                start += '[b]'
                end = '[/b]' + end
            if ndef['italic']:
                start += '[i]'
                end = '[/i]' + end
            if ndef['underline']:
                start += '[u]'
                end = '[/u]' + end
            # there are no common BBcodes for background-color and border

            self.styles[ttype] = start, end

    def format_unencoded(self, tokensource, outfile):
        if self._code:
            outfile.write('[code]')
        if self._mono:
            outfile.write('[font=monospace]')

        # Buffer runs of equal token types so each run is wrapped in a
        # single pair of tags.
        lastval = ''
        lasttype = None

        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled type is found.
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == lasttype:
                lastval += value
            else:
                if lastval:
                    start, end = self.styles[lasttype]
                    outfile.write(''.join((start, lastval, end)))
                lastval = value
                lasttype = ttype

        # Flush the final buffered run.
        if lastval:
            start, end = self.styles[lasttype]
            outfile.write(''.join((start, lastval, end)))

        if self._mono:
            outfile.write('[/font]')
        if self._code:
            outfile.write('[/code]')
        if self._code or self._mono:
            outfile.write('\n')
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.groff
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for groff output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import math
|
| 12 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 13 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
|
| 14 |
+
|
| 15 |
+
__all__ = ['GroffFormatter']
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class GroffFormatter(Formatter):
    """
    Format tokens with groff escapes to change their color and font style.

    .. versionadded:: 2.11

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `monospaced`
        If set to true, monospace font will be used (default: ``true``).

    `linenos`
        If set to true, print the line numbers (default: ``false``).

    `wrap`
        Wrap lines to the specified number of characters. Disabled if set to 0
        (default: ``0``).
    """

    name = 'groff'
    aliases = ['groff','troff','roff']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        self.monospaced = get_bool_opt(options, 'monospaced', True)
        self.linenos = get_bool_opt(options, 'linenos', False)
        self._lineno = 0  # last line number written
        self.wrap = get_int_opt(options, 'wrap', 0)
        self._linelen = 0  # length of the current (partial) output line

        self.styles = {}
        self._make_styles()


    def _make_styles(self):
        # \f[..] is a groff font-change escape; the C* variants select the
        # monospaced (Courier) family.
        regular = '\\f[CR]' if self.monospaced else '\\f[R]'
        bold = '\\f[CB]' if self.monospaced else '\\f[B]'
        italic = '\\f[CI]' if self.monospaced else '\\f[I]'

        # Precompute a (start, end) escape pair per token type.
        for ttype, ndef in self.style:
            start = end = ''
            if ndef['color']:
                start += '\\m[%s]' % ndef['color']
                end = '\\m[]' + end
            if ndef['bold']:
                start += bold
                end = regular + end
            if ndef['italic']:
                start += italic
                end = regular + end
            if ndef['bgcolor']:
                start += '\\M[%s]' % ndef['bgcolor']
                end = '\\M[]' + end

            self.styles[ttype] = start, end


    def _define_colors(self, outfile):
        # Emit a .defcolor request for every foreground color used by the
        # style so the \m[...] escapes can reference them by name.
        colors = set()
        for _, ndef in self.style:
            if ndef['color'] is not None:
                colors.add(ndef['color'])

        for color in sorted(colors):
            outfile.write('.defcolor ' + color + ' rgb #' + color + '\n')


    def _write_lineno(self, outfile):
        self._lineno += 1
        # Every line number except the first is preceded by a newline.
        outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno))


    def _wrap_line(self, line):
        # Hard-wrap ``line`` at self.wrap columns. self._linelen tracks how
        # much of the current output line is already occupied by earlier
        # tokens on the same physical line.
        length = len(line.rstrip('\n'))
        space = ' ' if self.linenos else ''
        newline = ''

        if length > self.wrap:
            for i in range(0, math.floor(length / self.wrap)):
                chunk = line[i*self.wrap:i*self.wrap+self.wrap]
                newline += (chunk + '\n' + space)
            remainder = length % self.wrap
            if remainder > 0:
                # NOTE(review): slices one character more than ``remainder``
                # from the end — correct when the line carries a trailing
                # '\n', but takes an extra character otherwise. Confirm
                # against upstream pygments.
                newline += line[-remainder-1:]
                self._linelen = remainder
        elif self._linelen + length > self.wrap:
            # Doesn't fit in what's left of the current output line.
            newline = ('\n' + space) + line
            self._linelen = length
        else:
            newline = line
            self._linelen += length

        return newline


    def _escape_chars(self, text):
        # Escape groff control characters first...
        text = text.replace('\\', '\\[u005C]'). \
               replace('.', '\\[char46]'). \
               replace('\'', '\\[u0027]'). \
               replace('`', '\\[u0060]'). \
               replace('~', '\\[u007E]')
        copy = text

        # ...then replace non-ASCII characters (detected by their UTF-8
        # byte length differing from 1) with \[uXXXX] escapes.
        for char in copy:
            if len(char) != len(char.encode()):
                uni = char.encode('unicode_escape') \
                    .decode()[1:] \
                    .replace('x', 'u00') \
                    .upper()
                text = text.replace(char, '\\[u' + uni[1:] + ']')

        return text


    def format_unencoded(self, tokensource, outfile):
        self._define_colors(outfile)

        # .nf disables line filling; start in the regular Courier font.
        outfile.write('.nf\n\\f[CR]\n')

        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled type is found.
            while ttype not in self.styles:
                ttype = ttype.parent
            start, end = self.styles[ttype]

            for line in value.splitlines(True):
                if self.wrap > 0:
                    line = self._wrap_line(line)

                if start and end:
                    text = self._escape_chars(line.rstrip('\n'))
                    if text != '':
                        outfile.write(''.join((start, text, end)))
                else:
                    outfile.write(self._escape_chars(line.rstrip('\n')))

                if line.endswith('\n'):
                    if self.linenos:
                        self._write_lineno(outfile)
                        self._linelen = 0
                    else:
                        outfile.write('\n')
                        self._linelen = 0

        outfile.write('\n.fi')
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py
ADDED
|
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.img
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for Pixmap output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import os
|
| 12 |
+
import sys
|
| 13 |
+
|
| 14 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 15 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
|
| 16 |
+
get_choice_opt
|
| 17 |
+
|
| 18 |
+
import subprocess
|
| 19 |
+
|
| 20 |
+
# Import this carefully
|
| 21 |
+
try:
|
| 22 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 23 |
+
pil_available = True
|
| 24 |
+
except ImportError:
|
| 25 |
+
pil_available = False
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import _winreg
|
| 29 |
+
except ImportError:
|
| 30 |
+
try:
|
| 31 |
+
import winreg as _winreg
|
| 32 |
+
except ImportError:
|
| 33 |
+
_winreg = None
|
| 34 |
+
|
| 35 |
+
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
|
| 36 |
+
'BmpImageFormatter']
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# For some unknown reason every font calls it something different.
# Candidate style-name suffixes to try, per weight/slant combination.
STYLES = {
    'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
    'ITALIC': ['Oblique', 'Italic'],
    'BOLD': ['Bold'],
    'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}

# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
DEFAULT_FONT_NAME_MAC = 'Menlo'
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class PilNotAvailable(ImportError):
    """Raised when the Python imaging library (PIL/Pillow) is not available."""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class FontNotFound(Exception):
    """Raised when no usable fonts matching the requested name are found."""
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class FontManager:
    """
    Manages a set of fonts: normal, italic, bold, etc...

    Fonts are located with a platform-specific strategy: fontconfig
    (``fc-list``) on \\*nix, font directories on macOS, and the registry
    on Windows.
    """

    def __init__(self, font_name, font_size=14):
        self.font_name = font_name
        self.font_size = font_size
        self.fonts = {}  # style key ('NORMAL', 'BOLD', ...) -> ImageFont
        self.encoding = None
        # Pick the platform default font if none was given, then populate
        # self.fonts with the platform-specific lookup.
        if sys.platform.startswith('win'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_WIN
            self._create_win()
        elif sys.platform.startswith('darwin'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_MAC
            self._create_mac()
        else:
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_NIX
            self._create_nix()

    def _get_nix_font_path(self, name, style):
        # Ask fontconfig for the file path of the font matching name/style;
        # returns None when nothing usable is listed.
        proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
                                stdout=subprocess.PIPE, stderr=None)
        stdout, _ = proc.communicate()
        if proc.returncode == 0:
            lines = stdout.splitlines()
            for line in lines:
                if line.startswith(b'Fontconfig warning:'):
                    continue
                path = line.decode().strip().strip(':')
                if path:
                    return path
            return None

    def _create_nix(self):
        # Locate the regular face first; it is mandatory.
        for name in STYLES['NORMAL']:
            path = self._get_nix_font_path(self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_nix_font_path(self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                # Fall back to a cruder face when a style is missing.
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']

    def _get_mac_font_path(self, font_map, name, style):
        # Look up "<name> <style>" (lowercased) in the prebuilt map.
        return font_map.get((name + ' ' + style).strip().lower())

    def _create_mac(self):
        # Build a map of lowercased font-file basename -> full path from the
        # standard macOS font directories.
        font_map = {}
        for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
                         '/Library/Fonts/', '/System/Library/Fonts/'):
            font_map.update(
                (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
                for f in os.listdir(font_dir)
                if f.lower().endswith(('ttf', 'ttc')))

        for name in STYLES['NORMAL']:
            path = self._get_mac_font_path(font_map, self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_mac_font_path(font_map, self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                # Fall back to a cruder face when a style is missing.
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']

    def _lookup_win(self, key, basename, styles, fail=False):
        # Probe the registry key for "<basename> <style>[ (TrueType)]"
        # values; return the first match, or None / raise depending on
        # ``fail``.
        for suffix in ('', ' (TrueType)'):
            for style in styles:
                try:
                    valname = '%s%s%s' % (basename, style and ' '+style, suffix)
                    val, _ = _winreg.QueryValueEx(key, valname)
                    return val
                except OSError:
                    continue
        else:
            if fail:
                raise FontNotFound('Font %s (%s) not found in registry' %
                                   (basename, styles[0]))
            return None

    def _create_win(self):
        lookuperror = None
        keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
                     (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
                     (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
                     (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
        for keyname in keynames:
            try:
                key = _winreg.OpenKey(*keyname)
                try:
                    path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
                    self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                    for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
                        path = self._lookup_win(key, self.font_name, STYLES[style])
                        if path:
                            self.fonts[style] = ImageFont.truetype(path, self.font_size)
                        else:
                            if style == 'BOLDITALIC':
                                self.fonts[style] = self.fonts['BOLD']
                            else:
                                self.fonts[style] = self.fonts['NORMAL']
                    return
                except FontNotFound as err:
                    lookuperror = err
                finally:
                    _winreg.CloseKey(key)
            except OSError:
                pass
        else:
            # If we get here, we checked all registry keys and had no luck
            # We can be in one of two situations now:
            # * All key lookups failed. In this case lookuperror is None and we
            #   will raise a generic error
            # * At least one lookup failed with a FontNotFound error. In this
            #   case, we will raise that as a more specific error
            if lookuperror:
                raise lookuperror
            raise FontNotFound('Can\'t open Windows font registry key')

    def get_char_size(self):
        """
        Get the character size.
        """
        return self.get_text_size('M')

    def get_text_size(self, text):
        """
        Get the text size (width, height).
        """
        font = self.fonts['NORMAL']
        if hasattr(font, 'getbbox'):  # Pillow >= 9.2.0
            return font.getbbox(text)[2:4]
        else:
            return font.getsize(text)

    def get_font(self, bold, oblique):
        """
        Get the font based on bold and italic flags.
        """
        if bold and oblique:
            return self.fonts['BOLDITALIC']
        elif bold:
            return self.fonts['BOLD']
        elif oblique:
            return self.fonts['ITALIC']
        else:
            return self.fonts['NORMAL']
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
class ImageFormatter(Formatter):
|
| 236 |
+
"""
|
| 237 |
+
Create a PNG image from source code. This uses the Python Imaging Library to
|
| 238 |
+
generate a pixmap from the source code.
|
| 239 |
+
|
| 240 |
+
.. versionadded:: 0.10
|
| 241 |
+
|
| 242 |
+
Additional options accepted:
|
| 243 |
+
|
| 244 |
+
`image_format`
|
| 245 |
+
An image format to output to that is recognised by PIL, these include:
|
| 246 |
+
|
| 247 |
+
* "PNG" (default)
|
| 248 |
+
* "JPEG"
|
| 249 |
+
* "BMP"
|
| 250 |
+
* "GIF"
|
| 251 |
+
|
| 252 |
+
`line_pad`
|
| 253 |
+
The extra spacing (in pixels) between each line of text.
|
| 254 |
+
|
| 255 |
+
Default: 2
|
| 256 |
+
|
| 257 |
+
`font_name`
|
| 258 |
+
The font name to be used as the base font from which others, such as
|
| 259 |
+
bold and italic fonts will be generated. This really should be a
|
| 260 |
+
monospace font to look sane.
|
| 261 |
+
|
| 262 |
+
Default: "Courier New" on Windows, "Menlo" on Mac OS, and
|
| 263 |
+
"DejaVu Sans Mono" on \\*nix
|
| 264 |
+
|
| 265 |
+
`font_size`
|
| 266 |
+
The font size in points to be used.
|
| 267 |
+
|
| 268 |
+
Default: 14
|
| 269 |
+
|
| 270 |
+
`image_pad`
|
| 271 |
+
The padding, in pixels to be used at each edge of the resulting image.
|
| 272 |
+
|
| 273 |
+
Default: 10
|
| 274 |
+
|
| 275 |
+
`line_numbers`
|
| 276 |
+
Whether line numbers should be shown: True/False
|
| 277 |
+
|
| 278 |
+
Default: True
|
| 279 |
+
|
| 280 |
+
`line_number_start`
|
| 281 |
+
The line number of the first line.
|
| 282 |
+
|
| 283 |
+
Default: 1
|
| 284 |
+
|
| 285 |
+
`line_number_step`
|
| 286 |
+
The step used when printing line numbers.
|
| 287 |
+
|
| 288 |
+
Default: 1
|
| 289 |
+
|
| 290 |
+
`line_number_bg`
|
| 291 |
+
The background colour (in "#123456" format) of the line number bar, or
|
| 292 |
+
None to use the style background color.
|
| 293 |
+
|
| 294 |
+
Default: "#eed"
|
| 295 |
+
|
| 296 |
+
`line_number_fg`
|
| 297 |
+
The text color of the line numbers (in "#123456"-like format).
|
| 298 |
+
|
| 299 |
+
Default: "#886"
|
| 300 |
+
|
| 301 |
+
`line_number_chars`
|
| 302 |
+
The number of columns of line numbers allowable in the line number
|
| 303 |
+
margin.
|
| 304 |
+
|
| 305 |
+
Default: 2
|
| 306 |
+
|
| 307 |
+
`line_number_bold`
|
| 308 |
+
Whether line numbers will be bold: True/False
|
| 309 |
+
|
| 310 |
+
Default: False
|
| 311 |
+
|
| 312 |
+
`line_number_italic`
|
| 313 |
+
Whether line numbers will be italicized: True/False
|
| 314 |
+
|
| 315 |
+
Default: False
|
| 316 |
+
|
| 317 |
+
`line_number_separator`
|
| 318 |
+
Whether a line will be drawn between the line number area and the
|
| 319 |
+
source code area: True/False
|
| 320 |
+
|
| 321 |
+
Default: True
|
| 322 |
+
|
| 323 |
+
`line_number_pad`
|
| 324 |
+
The horizontal padding (in pixels) between the line number margin, and
|
| 325 |
+
the source code area.
|
| 326 |
+
|
| 327 |
+
Default: 6
|
| 328 |
+
|
| 329 |
+
`hl_lines`
|
| 330 |
+
Specify a list of lines to be highlighted.
|
| 331 |
+
|
| 332 |
+
.. versionadded:: 1.2
|
| 333 |
+
|
| 334 |
+
Default: empty list
|
| 335 |
+
|
| 336 |
+
`hl_color`
|
| 337 |
+
Specify the color for highlighting lines.
|
| 338 |
+
|
| 339 |
+
.. versionadded:: 1.2
|
| 340 |
+
|
| 341 |
+
Default: highlight color of the selected style
|
| 342 |
+
"""
|
| 343 |
+
|
| 344 |
+
# Required by the pygments mapper
|
| 345 |
+
name = 'img'
|
| 346 |
+
aliases = ['img', 'IMG', 'png']
|
| 347 |
+
filenames = ['*.png']
|
| 348 |
+
|
| 349 |
+
unicodeoutput = False
|
| 350 |
+
|
| 351 |
+
default_image_format = 'png'
|
| 352 |
+
|
| 353 |
+
    def __init__(self, **options):
        """
        See the class docstring for explanation of options.

        Raises PilNotAvailable if the Python Imaging Library (Pillow) is
        not installed.
        """
        if not pil_available:
            raise PilNotAvailable(
                'Python Imaging Library is required for this formatter')
        Formatter.__init__(self, **options)
        self.encoding = 'latin1'  # let pygments.format() do the right thing
        # Read the style
        self.styles = dict(self.style)
        if self.style.background_color is None:
            self.background_color = '#fff'
        else:
            self.background_color = self.style.background_color
        # Image options
        self.image_format = get_choice_opt(
            options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
            self.default_image_format, normcase=True)
        self.image_pad = get_int_opt(options, 'image_pad', 10)
        self.line_pad = get_int_opt(options, 'line_pad', 2)
        # The fonts: an empty font_name lets FontManager pick the
        # platform default.
        fontsize = get_int_opt(options, 'font_size', 14)
        self.fonts = FontManager(options.get('font_name', ''), fontsize)
        # Character cell size, cached for all layout computations below.
        self.fontw, self.fonth = self.fonts.get_char_size()
        # Line number options
        self.line_number_fg = options.get('line_number_fg', '#886')
        self.line_number_bg = options.get('line_number_bg', '#eed')
        self.line_number_chars = get_int_opt(options,
                                             'line_number_chars', 2)
        self.line_number_bold = get_bool_opt(options,
                                             'line_number_bold', False)
        self.line_number_italic = get_bool_opt(options,
                                               'line_number_italic', False)
        self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
        self.line_numbers = get_bool_opt(options, 'line_numbers', True)
        self.line_number_separator = get_bool_opt(options,
                                                  'line_number_separator', True)
        self.line_number_step = get_int_opt(options, 'line_number_step', 1)
        self.line_number_start = get_int_opt(options, 'line_number_start', 1)
        # Width (in pixels) reserved for the line number margin; zero when
        # line numbers are disabled.
        if self.line_numbers:
            self.line_number_width = (self.fontw * self.line_number_chars +
                                      self.line_number_pad * 2)
        else:
            self.line_number_width = 0
        # hl_lines may arrive as a list of strings; non-numeric entries are
        # silently dropped.
        self.hl_lines = []
        hl_lines_str = get_list_opt(options, 'hl_lines', [])
        for line in hl_lines_str:
            try:
                self.hl_lines.append(int(line))
            except ValueError:
                pass
        self.hl_color = options.get('hl_color',
                                    self.style.highlight_color) or '#f90'
        # Drawables are collected first and painted later in format().
        self.drawables = []
|
| 408 |
+
|
| 409 |
+
def get_style_defs(self, arg=''):
|
| 410 |
+
raise NotImplementedError('The -S option is meaningless for the image '
|
| 411 |
+
'formatter. Use -O style=<stylename> instead.')
|
| 412 |
+
|
| 413 |
+
def _get_line_height(self):
|
| 414 |
+
"""
|
| 415 |
+
Get the height of a line.
|
| 416 |
+
"""
|
| 417 |
+
return self.fonth + self.line_pad
|
| 418 |
+
|
| 419 |
+
def _get_line_y(self, lineno):
|
| 420 |
+
"""
|
| 421 |
+
Get the Y coordinate of a line number.
|
| 422 |
+
"""
|
| 423 |
+
return lineno * self._get_line_height() + self.image_pad
|
| 424 |
+
|
| 425 |
+
    def _get_char_width(self):
        """
        Get the width of a character.

        Returns the cached character-cell width measured in __init__.
        """
        return self.fontw
|
| 430 |
+
|
| 431 |
+
def _get_char_x(self, linelength):
|
| 432 |
+
"""
|
| 433 |
+
Get the X coordinate of a character position.
|
| 434 |
+
"""
|
| 435 |
+
return linelength + self.image_pad + self.line_number_width
|
| 436 |
+
|
| 437 |
+
def _get_text_pos(self, linelength, lineno):
|
| 438 |
+
"""
|
| 439 |
+
Get the actual position for a character and line position.
|
| 440 |
+
"""
|
| 441 |
+
return self._get_char_x(linelength), self._get_line_y(lineno)
|
| 442 |
+
|
| 443 |
+
def _get_linenumber_pos(self, lineno):
|
| 444 |
+
"""
|
| 445 |
+
Get the actual position for the start of a line number.
|
| 446 |
+
"""
|
| 447 |
+
return (self.image_pad, self._get_line_y(lineno))
|
| 448 |
+
|
| 449 |
+
def _get_text_color(self, style):
|
| 450 |
+
"""
|
| 451 |
+
Get the correct color for the token from the style.
|
| 452 |
+
"""
|
| 453 |
+
if style['color'] is not None:
|
| 454 |
+
fill = '#' + style['color']
|
| 455 |
+
else:
|
| 456 |
+
fill = '#000'
|
| 457 |
+
return fill
|
| 458 |
+
|
| 459 |
+
def _get_text_bg_color(self, style):
|
| 460 |
+
"""
|
| 461 |
+
Get the correct background color for the token from the style.
|
| 462 |
+
"""
|
| 463 |
+
if style['bgcolor'] is not None:
|
| 464 |
+
bg_color = '#' + style['bgcolor']
|
| 465 |
+
else:
|
| 466 |
+
bg_color = None
|
| 467 |
+
return bg_color
|
| 468 |
+
|
| 469 |
+
def _get_style_font(self, style):
|
| 470 |
+
"""
|
| 471 |
+
Get the correct font for the style.
|
| 472 |
+
"""
|
| 473 |
+
return self.fonts.get_font(style['bold'], style['italic'])
|
| 474 |
+
|
| 475 |
+
def _get_image_size(self, maxlinelength, maxlineno):
|
| 476 |
+
"""
|
| 477 |
+
Get the required image size.
|
| 478 |
+
"""
|
| 479 |
+
return (self._get_char_x(maxlinelength) + self.image_pad,
|
| 480 |
+
self._get_line_y(maxlineno + 0) + self.image_pad)
|
| 481 |
+
|
| 482 |
+
    def _draw_linenumber(self, posno, lineno):
        """
        Remember a line number drawable to paint later.

        *posno* is the 0-based row position on the image; *lineno* is the
        (possibly offset/stepped) number actually printed.  The number is
        right-justified to ``line_number_chars`` columns.
        """
        self._draw_text(
            self._get_linenumber_pos(posno),
            str(lineno).rjust(self.line_number_chars),
            font=self.fonts.get_font(self.line_number_bold,
                                     self.line_number_italic),
            text_fg=self.line_number_fg,
            text_bg=None,
        )
|
| 494 |
+
|
| 495 |
+
def _draw_text(self, pos, text, font, text_fg, text_bg):
|
| 496 |
+
"""
|
| 497 |
+
Remember a single drawable tuple to paint later.
|
| 498 |
+
"""
|
| 499 |
+
self.drawables.append((pos, text, font, text_fg, text_bg))
|
| 500 |
+
|
| 501 |
+
    def _create_drawables(self, tokensource):
        """
        Create drawables for the token content.

        Walks the token stream, laying tokens out left-to-right and
        top-to-bottom, and records the extents in ``self.maxlinelength``
        (pixels), ``self.maxcharno`` (characters) and ``self.maxlineno``
        (rows) for later image sizing.
        """
        lineno = charno = maxcharno = 0
        maxlinelength = linelength = 0
        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled ancestor is found.
            while ttype not in self.styles:
                ttype = ttype.parent
            style = self.styles[ttype]
            # TODO: make sure tab expansion happens earlier in the chain.  It
            # really ought to be done on the input, as to do it right here is
            # quite complex.
            value = value.expandtabs(4)
            # keepends=True so trailing '\n' marks where a new row starts.
            lines = value.splitlines(True)
            for i, line in enumerate(lines):
                temp = line.rstrip('\n')
                if temp:
                    self._draw_text(
                        self._get_text_pos(linelength, lineno),
                        temp,
                        font = self._get_style_font(style),
                        text_fg = self._get_text_color(style),
                        text_bg = self._get_text_bg_color(style),
                    )
                    # Advance the pixel cursor by the rendered width, and
                    # track the widest line seen so far.
                    temp_width, _ = self.fonts.get_text_size(temp)
                    linelength += temp_width
                    maxlinelength = max(maxlinelength, linelength)
                    charno += len(temp)
                    maxcharno = max(maxcharno, charno)
                if line.endswith('\n'):
                    # add a line for each extra line in the value
                    linelength = 0
                    charno = 0
                    lineno += 1
        self.maxlinelength = maxlinelength
        self.maxcharno = maxcharno
        self.maxlineno = lineno
|
| 540 |
+
|
| 541 |
+
def _draw_line_numbers(self):
|
| 542 |
+
"""
|
| 543 |
+
Create drawables for the line numbers.
|
| 544 |
+
"""
|
| 545 |
+
if not self.line_numbers:
|
| 546 |
+
return
|
| 547 |
+
for p in range(self.maxlineno):
|
| 548 |
+
n = p + self.line_number_start
|
| 549 |
+
if (n % self.line_number_step) == 0:
|
| 550 |
+
self._draw_linenumber(p, n)
|
| 551 |
+
|
| 552 |
+
    def _paint_line_number_bg(self, im):
        """
        Paint the line number background on the image.

        Fills the left margin with ``line_number_bg`` and optionally draws a
        vertical separator line in ``line_number_fg``.  A no-op when line
        numbers are disabled or the foreground color is None.
        """
        if not self.line_numbers:
            return
        if self.line_number_fg is None:
            return
        draw = ImageDraw.Draw(im)
        # Margin rectangle spans the full image height.
        recth = im.size[-1]
        rectw = self.image_pad + self.line_number_width - self.line_number_pad
        draw.rectangle([(0, 0), (rectw, recth)],
                       fill=self.line_number_bg)
        if self.line_number_separator:
            draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
        del draw
|
| 568 |
+
|
| 569 |
+
def format(self, tokensource, outfile):
|
| 570 |
+
"""
|
| 571 |
+
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
|
| 572 |
+
tuples and write it into ``outfile``.
|
| 573 |
+
|
| 574 |
+
This implementation calculates where it should draw each token on the
|
| 575 |
+
pixmap, then calculates the required pixmap size and draws the items.
|
| 576 |
+
"""
|
| 577 |
+
self._create_drawables(tokensource)
|
| 578 |
+
self._draw_line_numbers()
|
| 579 |
+
im = Image.new(
|
| 580 |
+
'RGB',
|
| 581 |
+
self._get_image_size(self.maxlinelength, self.maxlineno),
|
| 582 |
+
self.background_color
|
| 583 |
+
)
|
| 584 |
+
self._paint_line_number_bg(im)
|
| 585 |
+
draw = ImageDraw.Draw(im)
|
| 586 |
+
# Highlight
|
| 587 |
+
if self.hl_lines:
|
| 588 |
+
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
|
| 589 |
+
recth = self._get_line_height()
|
| 590 |
+
rectw = im.size[0] - x
|
| 591 |
+
for linenumber in self.hl_lines:
|
| 592 |
+
y = self._get_line_y(linenumber - 1)
|
| 593 |
+
draw.rectangle([(x, y), (x + rectw, y + recth)],
|
| 594 |
+
fill=self.hl_color)
|
| 595 |
+
for pos, value, font, text_fg, text_bg in self.drawables:
|
| 596 |
+
if text_bg:
|
| 597 |
+
text_size = draw.textsize(text=value, font=font)
|
| 598 |
+
draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
|
| 599 |
+
draw.text(pos, value, font=font, fill=text_fg)
|
| 600 |
+
im.save(outfile, self.image_format.upper())
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
# Add one formatter per format, so that the "-f gif" option gives the correct result
|
| 604 |
+
# when used in pygmentize.
|
| 605 |
+
|
| 606 |
+
class GifImageFormatter(ImageFormatter):
    """
    Create a GIF image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    .. versionadded:: 1.0
    """

    # Registration metadata only; all behavior is inherited unchanged
    # from ImageFormatter.
    name = 'img_gif'
    aliases = ['gif']
    filenames = ['*.gif']
    default_image_format = 'gif'
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
class JpgImageFormatter(ImageFormatter):
    """
    Create a JPEG image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    .. versionadded:: 1.0
    """

    # Registration metadata only; all behavior is inherited unchanged
    # from ImageFormatter.
    name = 'img_jpg'
    aliases = ['jpg', 'jpeg']
    filenames = ['*.jpg']
    default_image_format = 'jpeg'
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
class BmpImageFormatter(ImageFormatter):
    """
    Create a bitmap image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    .. versionadded:: 1.0
    """

    # Registration metadata only; all behavior is inherited unchanged
    # from ImageFormatter.
    name = 'img_bmp'
    aliases = ['bmp', 'bitmap']
    filenames = ['*.bmp']
    default_image_format = 'bmp'
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/irc.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.irc
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for IRC output
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
|
| 13 |
+
Number, Operator, Generic, Token, Whitespace
|
| 14 |
+
from pip._vendor.pygments.util import get_choice_opt
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
__all__ = ['IRCFormatter']
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#: Map token types to a tuple of color values for light and dark
|
| 21 |
+
#: backgrounds.
|
| 22 |
+
# Each entry maps a token type to a (light-background, dark-background)
# color-name pair; names may carry '_..._' (italic) or '*...*' (bold)
# markers understood by ircformat().
IRC_COLORS = {
    Token:              ('',            ''),

    # Comments and whitespace
    Whitespace:         ('gray',   'brightblack'),
    Comment:            ('gray',   'brightblack'),
    Comment.Preproc:    ('cyan',        'brightcyan'),
    # Keywords and operators
    Keyword:            ('blue',    'brightblue'),
    Keyword.Type:       ('cyan',        'brightcyan'),
    Operator.Word:      ('magenta',      'brightcyan'),
    # Names
    Name.Builtin:       ('cyan',        'brightcyan'),
    Name.Function:      ('green',   'brightgreen'),
    Name.Namespace:     ('_cyan_',      '_brightcyan_'),
    Name.Class:         ('_green_', '_brightgreen_'),
    Name.Exception:     ('cyan',        'brightcyan'),
    Name.Decorator:     ('brightblack',    'gray'),
    Name.Variable:      ('red',     'brightred'),
    Name.Constant:      ('red',     'brightred'),
    Name.Attribute:     ('cyan',        'brightcyan'),
    Name.Tag:           ('brightblue',        'brightblue'),
    # Literals
    String:             ('yellow',       'yellow'),
    Number:             ('blue',    'brightblue'),

    # Generic (diff output etc.)
    Generic.Deleted:    ('brightred',        'brightred'),
    Generic.Inserted:   ('green',  'brightgreen'),
    Generic.Heading:    ('**',         '**'),
    Generic.Subheading: ('*magenta*',   '*brightmagenta*'),
    Generic.Error:      ('brightred',        'brightred'),

    Error:              ('_brightred_',      '_brightred_'),
}
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# mIRC color codes (0-15).  Some entries are aliased for compatibility with
# the ANSI color names used elsewhere in pygments.
IRC_COLOR_MAP = {
    'white': 0,
    'black': 1,
    'blue': 2,
    'brightgreen': 3,
    'brightred': 4,
    'yellow': 5,
    'magenta': 6,
    'orange': 7,
    'green': 7, #compat w/ ansi
    'brightyellow': 8,
    'lightgreen': 9,
    'brightcyan': 9, # compat w/ ansi
    'cyan': 10,
    'lightblue': 11,
    'red': 11, # compat w/ ansi
    'brightblue': 12,
    'brightmagenta': 13,
    'brightblack': 14,
    'gray': 15,
}

def ircformat(color, text):
    """Wrap *text* in mIRC control codes for the given color spec.

    *color* may carry '_' (italic) and '*' (bold) markers around an
    optional color name from IRC_COLOR_MAP.  An empty spec returns the
    text unchanged.
    """
    if not color:
        return text
    prefix = ''
    suffix = ''
    if '_' in color:  # italic
        prefix += '\x1D'
        suffix = '\x1D' + suffix
        color = color.strip('_')
    if '*' in color:  # bold
        prefix += '\x02'
        suffix = '\x02' + suffix
        color = color.strip('*')
    # underline (\x1F) not supported
    # backgrounds (\x03FF,BB) not supported
    if color:  # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
        prefix += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
        suffix = '\x03' + suffix
        return prefix + text + suffix
    # formatting markers only, no color name remained after stripping
    return '<' + prefix + '>' + text + '</' + suffix + '>'
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class IRCFormatter(Formatter):
    r"""
    Format tokens with IRC color sequences

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers in the output as well
        (default: ``False`` = no line numbers).
    """
    name = 'IRC'
    aliases = ['irc', 'IRC']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # darkbg selects the second element of each colorscheme pair.
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def _write_lineno(self, outfile):
        # Emit a zero-padded line-number prefix; no-op unless linenos is on.
        if self.linenos:
            self._lineno += 1
            outfile.write("%04d: " % self._lineno)

    def format_unencoded(self, tokensource, outfile):
        self._write_lineno(outfile)

        for ttype, value in tokensource:
            color = self.colorscheme.get(ttype)
            # Fall back through the token type's ancestors until a mapped
            # type is found (Token itself is always mapped, so this ends).
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                # Pick the light- or dark-background variant.
                color = color[self.darkbg]
                spl = value.split('\n')
                # Color each line separately so line-number prefixes stay
                # outside the color codes.
                for line in spl[:-1]:
                    if line:
                        outfile.write(ircformat(color, line))
                    outfile.write('\n')
                    self._write_lineno(outfile)
                if spl[-1]:
                    outfile.write(ircformat(color, spl[-1]))
            else:
                # Empty color spec (e.g. plain Token): write verbatim.
                outfile.write(value)
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/latex.py
ADDED
|
@@ -0,0 +1,521 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.latex
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for LaTeX fancyvrb output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from io import StringIO
|
| 12 |
+
|
| 13 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 14 |
+
from pip._vendor.pygments.lexer import Lexer, do_insertions
|
| 15 |
+
from pip._vendor.pygments.token import Token, STANDARD_TYPES
|
| 16 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
__all__ = ['LatexFormatter']
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def escape_tex(text, commandprefix):
    """Escape LaTeX-special characters in *text* using ``\\<prefix>Zxx{}``
    commands (defined by the formatter's style preamble)."""
    # Phase 1: move backslash and braces out of the way first, since the
    # replacement commands themselves contain those characters.
    text = text.replace('\\', '\x00').replace('{', '\x01').replace('}', '\x02')
    # Phase 2: expand the placeholders and every other special character,
    # in a fixed order.
    replacements = (
        ('\x00', 'Zbs'),  # backslash
        ('\x01', 'Zob'),  # open brace
        ('\x02', 'Zcb'),  # close brace
        ('^', 'Zca'),
        ('_', 'Zus'),
        ('&', 'Zam'),
        ('<', 'Zlt'),
        ('>', 'Zgt'),
        ('#', 'Zsh'),
        ('%', 'Zpc'),
        ('$', 'Zdl'),
        ('-', 'Zhy'),
        ("'", 'Zsq'),
        ('"', 'Zdq'),
        ('~', 'Zti'),
    )
    for char, command in replacements:
        text = text.replace(char, '\\%s%s{}' % (commandprefix, command))
    return text
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
DOC_TEMPLATE = r'''
|
| 44 |
+
\documentclass{%(docclass)s}
|
| 45 |
+
\usepackage{fancyvrb}
|
| 46 |
+
\usepackage{color}
|
| 47 |
+
\usepackage[%(encoding)s]{inputenc}
|
| 48 |
+
%(preamble)s
|
| 49 |
+
|
| 50 |
+
%(styledefs)s
|
| 51 |
+
|
| 52 |
+
\begin{document}
|
| 53 |
+
|
| 54 |
+
\section*{%(title)s}
|
| 55 |
+
|
| 56 |
+
%(code)s
|
| 57 |
+
\end{document}
|
| 58 |
+
'''
|
| 59 |
+
|
| 60 |
+
## Small explanation of the mess below :)
|
| 61 |
+
#
|
| 62 |
+
# The previous version of the LaTeX formatter just assigned a command to
|
| 63 |
+
# each token type defined in the current style. That obviously is
|
| 64 |
+
# problematic if the highlighted code is produced for a different style
|
| 65 |
+
# than the style commands themselves.
|
| 66 |
+
#
|
| 67 |
+
# This version works much like the HTML formatter which assigns multiple
|
| 68 |
+
# CSS classes to each <span> tag, from the most specific to the least
|
| 69 |
+
# specific token type, thus falling back to the parent token type if one
|
| 70 |
+
# is not defined. Here, the classes are there too and use the same short
|
| 71 |
+
# forms given in token.STANDARD_TYPES.
|
| 72 |
+
#
|
| 73 |
+
# Highlighted code now only uses one custom command, which by default is
|
| 74 |
+
# \PY and selectable by the commandprefix option (and in addition the
|
| 75 |
+
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
|
| 76 |
+
# backwards compatibility purposes).
|
| 77 |
+
#
|
| 78 |
+
# \PY has two arguments: the classes, separated by +, and the text to
|
| 79 |
+
# render in that style. The classes are resolved into the respective
|
| 80 |
+
# style commands by magic, which serves to ignore unknown classes.
|
| 81 |
+
#
|
| 82 |
+
# The magic macros are:
|
| 83 |
+
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
|
| 84 |
+
# to render in \PY@do. Their definition determines the style.
|
| 85 |
+
# * \PY@reset resets \PY@it etc. to do nothing.
|
| 86 |
+
# * \PY@toks parses the list of classes, using magic inspired by the
|
| 87 |
+
# keyval package (but modified to use plusses instead of commas
|
| 88 |
+
# because fancyvrb redefines commas inside its environments).
|
| 89 |
+
# * \PY@tok processes one class, calling the \PY@tok@classname command
|
| 90 |
+
# if it exists.
|
| 91 |
+
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
|
| 92 |
+
# for its class.
|
| 93 |
+
# * \PY resets the style, parses the classnames and then calls \PY@do.
|
| 94 |
+
#
|
| 95 |
+
# Tip: to read this code, print it out in substituted form using e.g.
|
| 96 |
+
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
|
| 97 |
+
|
| 98 |
+
STYLE_TEMPLATE = r'''
|
| 99 |
+
\makeatletter
|
| 100 |
+
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
|
| 101 |
+
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
|
| 102 |
+
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
|
| 103 |
+
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
|
| 104 |
+
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
|
| 105 |
+
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
|
| 106 |
+
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
|
| 107 |
+
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
|
| 108 |
+
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
|
| 109 |
+
|
| 110 |
+
%(styles)s
|
| 111 |
+
|
| 112 |
+
\def\%(cp)sZbs{\char`\\}
|
| 113 |
+
\def\%(cp)sZus{\char`\_}
|
| 114 |
+
\def\%(cp)sZob{\char`\{}
|
| 115 |
+
\def\%(cp)sZcb{\char`\}}
|
| 116 |
+
\def\%(cp)sZca{\char`\^}
|
| 117 |
+
\def\%(cp)sZam{\char`\&}
|
| 118 |
+
\def\%(cp)sZlt{\char`\<}
|
| 119 |
+
\def\%(cp)sZgt{\char`\>}
|
| 120 |
+
\def\%(cp)sZsh{\char`\#}
|
| 121 |
+
\def\%(cp)sZpc{\char`\%%}
|
| 122 |
+
\def\%(cp)sZdl{\char`\$}
|
| 123 |
+
\def\%(cp)sZhy{\char`\-}
|
| 124 |
+
\def\%(cp)sZsq{\char`\'}
|
| 125 |
+
\def\%(cp)sZdq{\char`\"}
|
| 126 |
+
\def\%(cp)sZti{\char`\~}
|
| 127 |
+
%% for compatibility with earlier versions
|
| 128 |
+
\def\%(cp)sZat{@}
|
| 129 |
+
\def\%(cp)sZlb{[}
|
| 130 |
+
\def\%(cp)sZrb{]}
|
| 131 |
+
\makeatother
|
| 132 |
+
'''
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _get_ttype_name(ttype):
|
| 136 |
+
fname = STANDARD_TYPES.get(ttype)
|
| 137 |
+
if fname:
|
| 138 |
+
return fname
|
| 139 |
+
aname = ''
|
| 140 |
+
while fname is None:
|
| 141 |
+
aname = ttype[-1] + aname
|
| 142 |
+
ttype = ttype.parent
|
| 143 |
+
fname = STANDARD_TYPES.get(ttype)
|
| 144 |
+
return fname + aname
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class LatexFormatter(Formatter):
|
| 148 |
+
r"""
|
| 149 |
+
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
|
| 150 |
+
standard packages.
|
| 151 |
+
|
| 152 |
+
Without the `full` option, code is formatted as one ``Verbatim``
|
| 153 |
+
environment, like this:
|
| 154 |
+
|
| 155 |
+
.. sourcecode:: latex
|
| 156 |
+
|
| 157 |
+
\begin{Verbatim}[commandchars=\\\{\}]
|
| 158 |
+
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
|
| 159 |
+
\PY{k}{pass}
|
| 160 |
+
\end{Verbatim}
|
| 161 |
+
|
| 162 |
+
Wrapping can be disabled using the `nowrap` option.
|
| 163 |
+
|
| 164 |
+
The special command used here (``\PY``) and all the other macros it needs
|
| 165 |
+
are output by the `get_style_defs` method.
|
| 166 |
+
|
| 167 |
+
With the `full` option, a complete LaTeX document is output, including
|
| 168 |
+
the command definitions in the preamble.
|
| 169 |
+
|
| 170 |
+
The `get_style_defs()` method of a `LatexFormatter` returns a string
|
| 171 |
+
containing ``\def`` commands defining the macros needed inside the
|
| 172 |
+
``Verbatim`` environments.
|
| 173 |
+
|
| 174 |
+
Additional options accepted:
|
| 175 |
+
|
| 176 |
+
`nowrap`
|
| 177 |
+
If set to ``True``, don't wrap the tokens at all, not even inside a
|
| 178 |
+
``\begin{Verbatim}`` environment. This disables most other options
|
| 179 |
+
(default: ``False``).
|
| 180 |
+
|
| 181 |
+
`style`
|
| 182 |
+
The style to use, can be a string or a Style subclass (default:
|
| 183 |
+
``'default'``).
|
| 184 |
+
|
| 185 |
+
`full`
|
| 186 |
+
Tells the formatter to output a "full" document, i.e. a complete
|
| 187 |
+
self-contained document (default: ``False``).
|
| 188 |
+
|
| 189 |
+
`title`
|
| 190 |
+
If `full` is true, the title that should be used to caption the
|
| 191 |
+
document (default: ``''``).
|
| 192 |
+
|
| 193 |
+
`docclass`
|
| 194 |
+
If the `full` option is enabled, this is the document class to use
|
| 195 |
+
(default: ``'article'``).
|
| 196 |
+
|
| 197 |
+
`preamble`
|
| 198 |
+
If the `full` option is enabled, this can be further preamble commands,
|
| 199 |
+
e.g. ``\usepackage`` (default: ``''``).
|
| 200 |
+
|
| 201 |
+
`linenos`
|
| 202 |
+
If set to ``True``, output line numbers (default: ``False``).
|
| 203 |
+
|
| 204 |
+
`linenostart`
|
| 205 |
+
The line number for the first line (default: ``1``).
|
| 206 |
+
|
| 207 |
+
`linenostep`
|
| 208 |
+
If set to a number n > 1, only every nth line number is printed.
|
| 209 |
+
|
| 210 |
+
`verboptions`
|
| 211 |
+
Additional options given to the Verbatim environment (see the *fancyvrb*
|
| 212 |
+
docs for possible values) (default: ``''``).
|
| 213 |
+
|
| 214 |
+
`commandprefix`
|
| 215 |
+
The LaTeX commands used to produce colored output are constructed
|
| 216 |
+
using this prefix and some letters (default: ``'PY'``).
|
| 217 |
+
|
| 218 |
+
.. versionadded:: 0.7
|
| 219 |
+
.. versionchanged:: 0.10
|
| 220 |
+
The default is now ``'PY'`` instead of ``'C'``.
|
| 221 |
+
|
| 222 |
+
`texcomments`
|
| 223 |
+
If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
|
| 224 |
+
in comment tokens is not escaped so that LaTeX can render it (default:
|
| 225 |
+
``False``).
|
| 226 |
+
|
| 227 |
+
.. versionadded:: 1.2
|
| 228 |
+
|
| 229 |
+
`mathescape`
|
| 230 |
+
If set to ``True``, enables LaTeX math mode escape in comments. That
|
| 231 |
+
is, ``'$...$'`` inside a comment will trigger math mode (default:
|
| 232 |
+
``False``).
|
| 233 |
+
|
| 234 |
+
.. versionadded:: 1.2
|
| 235 |
+
|
| 236 |
+
`escapeinside`
|
| 237 |
+
If set to a string of length 2, enables escaping to LaTeX. Text
|
| 238 |
+
delimited by these 2 characters is read as LaTeX code and
|
| 239 |
+
typeset accordingly. It has no effect in string literals. It has
|
| 240 |
+
no effect in comments if `texcomments` or `mathescape` is
|
| 241 |
+
set. (default: ``''``).
|
| 242 |
+
|
| 243 |
+
.. versionadded:: 2.0
|
| 244 |
+
|
| 245 |
+
`envname`
|
| 246 |
+
Allows you to pick an alternative environment name replacing Verbatim.
|
| 247 |
+
The alternate environment still has to support Verbatim's option syntax.
|
| 248 |
+
(default: ``'Verbatim'``).
|
| 249 |
+
|
| 250 |
+
.. versionadded:: 2.0
|
| 251 |
+
"""
|
| 252 |
+
name = 'LaTeX'
|
| 253 |
+
aliases = ['latex', 'tex']
|
| 254 |
+
filenames = ['*.tex']
|
| 255 |
+
|
| 256 |
+
def __init__(self, **options):
|
| 257 |
+
Formatter.__init__(self, **options)
|
| 258 |
+
self.nowrap = get_bool_opt(options, 'nowrap', False)
|
| 259 |
+
self.docclass = options.get('docclass', 'article')
|
| 260 |
+
self.preamble = options.get('preamble', '')
|
| 261 |
+
self.linenos = get_bool_opt(options, 'linenos', False)
|
| 262 |
+
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
|
| 263 |
+
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
|
| 264 |
+
self.verboptions = options.get('verboptions', '')
|
| 265 |
+
self.nobackground = get_bool_opt(options, 'nobackground', False)
|
| 266 |
+
self.commandprefix = options.get('commandprefix', 'PY')
|
| 267 |
+
self.texcomments = get_bool_opt(options, 'texcomments', False)
|
| 268 |
+
self.mathescape = get_bool_opt(options, 'mathescape', False)
|
| 269 |
+
self.escapeinside = options.get('escapeinside', '')
|
| 270 |
+
if len(self.escapeinside) == 2:
|
| 271 |
+
self.left = self.escapeinside[0]
|
| 272 |
+
self.right = self.escapeinside[1]
|
| 273 |
+
else:
|
| 274 |
+
self.escapeinside = ''
|
| 275 |
+
self.envname = options.get('envname', 'Verbatim')
|
| 276 |
+
|
| 277 |
+
self._create_stylesheet()
|
| 278 |
+
|
| 279 |
+
def _create_stylesheet(self):
|
| 280 |
+
t2n = self.ttype2name = {Token: ''}
|
| 281 |
+
c2d = self.cmd2def = {}
|
| 282 |
+
cp = self.commandprefix
|
| 283 |
+
|
| 284 |
+
def rgbcolor(col):
|
| 285 |
+
if col:
|
| 286 |
+
return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
|
| 287 |
+
for i in (0, 2, 4)])
|
| 288 |
+
else:
|
| 289 |
+
return '1,1,1'
|
| 290 |
+
|
| 291 |
+
for ttype, ndef in self.style:
|
| 292 |
+
name = _get_ttype_name(ttype)
|
| 293 |
+
cmndef = ''
|
| 294 |
+
if ndef['bold']:
|
| 295 |
+
cmndef += r'\let\$$@bf=\textbf'
|
| 296 |
+
if ndef['italic']:
|
| 297 |
+
cmndef += r'\let\$$@it=\textit'
|
| 298 |
+
if ndef['underline']:
|
| 299 |
+
cmndef += r'\let\$$@ul=\underline'
|
| 300 |
+
if ndef['roman']:
|
| 301 |
+
cmndef += r'\let\$$@ff=\textrm'
|
| 302 |
+
if ndef['sans']:
|
| 303 |
+
cmndef += r'\let\$$@ff=\textsf'
|
| 304 |
+
if ndef['mono']:
|
| 305 |
+
cmndef += r'\let\$$@ff=\textsf'
|
| 306 |
+
if ndef['color']:
|
| 307 |
+
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
|
| 308 |
+
rgbcolor(ndef['color']))
|
| 309 |
+
if ndef['border']:
|
| 310 |
+
cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}'
|
| 311 |
+
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' %
|
| 312 |
+
(rgbcolor(ndef['border']),
|
| 313 |
+
rgbcolor(ndef['bgcolor'])))
|
| 314 |
+
elif ndef['bgcolor']:
|
| 315 |
+
cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}'
|
| 316 |
+
r'\colorbox[rgb]{%s}{\strut ##1}}}' %
|
| 317 |
+
rgbcolor(ndef['bgcolor']))
|
| 318 |
+
if cmndef == '':
|
| 319 |
+
continue
|
| 320 |
+
cmndef = cmndef.replace('$$', cp)
|
| 321 |
+
t2n[ttype] = name
|
| 322 |
+
c2d[name] = cmndef
|
| 323 |
+
|
| 324 |
+
def get_style_defs(self, arg=''):
|
| 325 |
+
"""
|
| 326 |
+
Return the command sequences needed to define the commands
|
| 327 |
+
used to format text in the verbatim environment. ``arg`` is ignored.
|
| 328 |
+
"""
|
| 329 |
+
cp = self.commandprefix
|
| 330 |
+
styles = []
|
| 331 |
+
for name, definition in self.cmd2def.items():
|
| 332 |
+
styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition))
|
| 333 |
+
return STYLE_TEMPLATE % {'cp': self.commandprefix,
|
| 334 |
+
'styles': '\n'.join(styles)}
|
| 335 |
+
|
| 336 |
+
def format_unencoded(self, tokensource, outfile):
|
| 337 |
+
# TODO: add support for background colors
|
| 338 |
+
t2n = self.ttype2name
|
| 339 |
+
cp = self.commandprefix
|
| 340 |
+
|
| 341 |
+
if self.full:
|
| 342 |
+
realoutfile = outfile
|
| 343 |
+
outfile = StringIO()
|
| 344 |
+
|
| 345 |
+
if not self.nowrap:
|
| 346 |
+
outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
|
| 347 |
+
if self.linenos:
|
| 348 |
+
start, step = self.linenostart, self.linenostep
|
| 349 |
+
outfile.write(',numbers=left' +
|
| 350 |
+
(start and ',firstnumber=%d' % start or '') +
|
| 351 |
+
(step and ',stepnumber=%d' % step or ''))
|
| 352 |
+
if self.mathescape or self.texcomments or self.escapeinside:
|
| 353 |
+
outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
|
| 354 |
+
'\\catcode`\\_=8\\relax}')
|
| 355 |
+
if self.verboptions:
|
| 356 |
+
outfile.write(',' + self.verboptions)
|
| 357 |
+
outfile.write(']\n')
|
| 358 |
+
|
| 359 |
+
for ttype, value in tokensource:
|
| 360 |
+
if ttype in Token.Comment:
|
| 361 |
+
if self.texcomments:
|
| 362 |
+
# Try to guess comment starting lexeme and escape it ...
|
| 363 |
+
start = value[0:1]
|
| 364 |
+
for i in range(1, len(value)):
|
| 365 |
+
if start[0] != value[i]:
|
| 366 |
+
break
|
| 367 |
+
start += value[i]
|
| 368 |
+
|
| 369 |
+
value = value[len(start):]
|
| 370 |
+
start = escape_tex(start, cp)
|
| 371 |
+
|
| 372 |
+
# ... but do not escape inside comment.
|
| 373 |
+
value = start + value
|
| 374 |
+
elif self.mathescape:
|
| 375 |
+
# Only escape parts not inside a math environment.
|
| 376 |
+
parts = value.split('$')
|
| 377 |
+
in_math = False
|
| 378 |
+
for i, part in enumerate(parts):
|
| 379 |
+
if not in_math:
|
| 380 |
+
parts[i] = escape_tex(part, cp)
|
| 381 |
+
in_math = not in_math
|
| 382 |
+
value = '$'.join(parts)
|
| 383 |
+
elif self.escapeinside:
|
| 384 |
+
text = value
|
| 385 |
+
value = ''
|
| 386 |
+
while text:
|
| 387 |
+
a, sep1, text = text.partition(self.left)
|
| 388 |
+
if sep1:
|
| 389 |
+
b, sep2, text = text.partition(self.right)
|
| 390 |
+
if sep2:
|
| 391 |
+
value += escape_tex(a, cp) + b
|
| 392 |
+
else:
|
| 393 |
+
value += escape_tex(a + sep1 + b, cp)
|
| 394 |
+
else:
|
| 395 |
+
value += escape_tex(a, cp)
|
| 396 |
+
else:
|
| 397 |
+
value = escape_tex(value, cp)
|
| 398 |
+
elif ttype not in Token.Escape:
|
| 399 |
+
value = escape_tex(value, cp)
|
| 400 |
+
styles = []
|
| 401 |
+
while ttype is not Token:
|
| 402 |
+
try:
|
| 403 |
+
styles.append(t2n[ttype])
|
| 404 |
+
except KeyError:
|
| 405 |
+
# not in current style
|
| 406 |
+
styles.append(_get_ttype_name(ttype))
|
| 407 |
+
ttype = ttype.parent
|
| 408 |
+
styleval = '+'.join(reversed(styles))
|
| 409 |
+
if styleval:
|
| 410 |
+
spl = value.split('\n')
|
| 411 |
+
for line in spl[:-1]:
|
| 412 |
+
if line:
|
| 413 |
+
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
|
| 414 |
+
outfile.write('\n')
|
| 415 |
+
if spl[-1]:
|
| 416 |
+
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
|
| 417 |
+
else:
|
| 418 |
+
outfile.write(value)
|
| 419 |
+
|
| 420 |
+
if not self.nowrap:
|
| 421 |
+
outfile.write('\\end{' + self.envname + '}\n')
|
| 422 |
+
|
| 423 |
+
if self.full:
|
| 424 |
+
encoding = self.encoding or 'utf8'
|
| 425 |
+
# map known existings encodings from LaTeX distribution
|
| 426 |
+
encoding = {
|
| 427 |
+
'utf_8': 'utf8',
|
| 428 |
+
'latin_1': 'latin1',
|
| 429 |
+
'iso_8859_1': 'latin1',
|
| 430 |
+
}.get(encoding.replace('-', '_'), encoding)
|
| 431 |
+
realoutfile.write(DOC_TEMPLATE %
|
| 432 |
+
dict(docclass = self.docclass,
|
| 433 |
+
preamble = self.preamble,
|
| 434 |
+
title = self.title,
|
| 435 |
+
encoding = encoding,
|
| 436 |
+
styledefs = self.get_style_defs(),
|
| 437 |
+
code = outfile.getvalue()))
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
class LatexEmbeddedLexer(Lexer):
|
| 441 |
+
"""
|
| 442 |
+
This lexer takes one lexer as argument, the lexer for the language
|
| 443 |
+
being formatted, and the left and right delimiters for escaped text.
|
| 444 |
+
|
| 445 |
+
First everything is scanned using the language lexer to obtain
|
| 446 |
+
strings and comments. All other consecutive tokens are merged and
|
| 447 |
+
the resulting text is scanned for escaped segments, which are given
|
| 448 |
+
the Token.Escape type. Finally text that is not escaped is scanned
|
| 449 |
+
again with the language lexer.
|
| 450 |
+
"""
|
| 451 |
+
def __init__(self, left, right, lang, **options):
|
| 452 |
+
self.left = left
|
| 453 |
+
self.right = right
|
| 454 |
+
self.lang = lang
|
| 455 |
+
Lexer.__init__(self, **options)
|
| 456 |
+
|
| 457 |
+
def get_tokens_unprocessed(self, text):
|
| 458 |
+
# find and remove all the escape tokens (replace with an empty string)
|
| 459 |
+
# this is very similar to DelegatingLexer.get_tokens_unprocessed.
|
| 460 |
+
buffered = ''
|
| 461 |
+
insertions = []
|
| 462 |
+
insertion_buf = []
|
| 463 |
+
for i, t, v in self._find_safe_escape_tokens(text):
|
| 464 |
+
if t is None:
|
| 465 |
+
if insertion_buf:
|
| 466 |
+
insertions.append((len(buffered), insertion_buf))
|
| 467 |
+
insertion_buf = []
|
| 468 |
+
buffered += v
|
| 469 |
+
else:
|
| 470 |
+
insertion_buf.append((i, t, v))
|
| 471 |
+
if insertion_buf:
|
| 472 |
+
insertions.append((len(buffered), insertion_buf))
|
| 473 |
+
return do_insertions(insertions,
|
| 474 |
+
self.lang.get_tokens_unprocessed(buffered))
|
| 475 |
+
|
| 476 |
+
def _find_safe_escape_tokens(self, text):
|
| 477 |
+
""" find escape tokens that are not in strings or comments """
|
| 478 |
+
for i, t, v in self._filter_to(
|
| 479 |
+
self.lang.get_tokens_unprocessed(text),
|
| 480 |
+
lambda t: t in Token.Comment or t in Token.String
|
| 481 |
+
):
|
| 482 |
+
if t is None:
|
| 483 |
+
for i2, t2, v2 in self._find_escape_tokens(v):
|
| 484 |
+
yield i + i2, t2, v2
|
| 485 |
+
else:
|
| 486 |
+
yield i, None, v
|
| 487 |
+
|
| 488 |
+
def _filter_to(self, it, pred):
|
| 489 |
+
""" Keep only the tokens that match `pred`, merge the others together """
|
| 490 |
+
buf = ''
|
| 491 |
+
idx = 0
|
| 492 |
+
for i, t, v in it:
|
| 493 |
+
if pred(t):
|
| 494 |
+
if buf:
|
| 495 |
+
yield idx, None, buf
|
| 496 |
+
buf = ''
|
| 497 |
+
yield i, t, v
|
| 498 |
+
else:
|
| 499 |
+
if not buf:
|
| 500 |
+
idx = i
|
| 501 |
+
buf += v
|
| 502 |
+
if buf:
|
| 503 |
+
yield idx, None, buf
|
| 504 |
+
|
| 505 |
+
def _find_escape_tokens(self, text):
|
| 506 |
+
""" Find escape tokens within text, give token=None otherwise """
|
| 507 |
+
index = 0
|
| 508 |
+
while text:
|
| 509 |
+
a, sep1, text = text.partition(self.left)
|
| 510 |
+
if a:
|
| 511 |
+
yield index, None, a
|
| 512 |
+
index += len(a)
|
| 513 |
+
if sep1:
|
| 514 |
+
b, sep2, text = text.partition(self.right)
|
| 515 |
+
if sep2:
|
| 516 |
+
yield index + len(sep1), Token.Escape, b
|
| 517 |
+
index += len(sep1) + len(b) + len(sep2)
|
| 518 |
+
else:
|
| 519 |
+
yield index, Token.Error, sep1
|
| 520 |
+
index += len(sep1)
|
| 521 |
+
text = b
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.pangomarkup
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for Pango markup output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
__all__ = ['PangoMarkupFormatter']
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
_escape_table = {
|
| 18 |
+
ord('&'): '&',
|
| 19 |
+
ord('<'): '<',
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def escape_special_chars(text, table=_escape_table):
|
| 24 |
+
"""Escape & and < for Pango Markup."""
|
| 25 |
+
return text.translate(table)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class PangoMarkupFormatter(Formatter):
|
| 29 |
+
"""
|
| 30 |
+
Format tokens as Pango Markup code. It can then be rendered to an SVG.
|
| 31 |
+
|
| 32 |
+
.. versionadded:: 2.9
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
name = 'Pango Markup'
|
| 36 |
+
aliases = ['pango', 'pangomarkup']
|
| 37 |
+
filenames = []
|
| 38 |
+
|
| 39 |
+
def __init__(self, **options):
|
| 40 |
+
Formatter.__init__(self, **options)
|
| 41 |
+
|
| 42 |
+
self.styles = {}
|
| 43 |
+
|
| 44 |
+
for token, style in self.style:
|
| 45 |
+
start = ''
|
| 46 |
+
end = ''
|
| 47 |
+
if style['color']:
|
| 48 |
+
start += '<span fgcolor="#%s">' % style['color']
|
| 49 |
+
end = '</span>' + end
|
| 50 |
+
if style['bold']:
|
| 51 |
+
start += '<b>'
|
| 52 |
+
end = '</b>' + end
|
| 53 |
+
if style['italic']:
|
| 54 |
+
start += '<i>'
|
| 55 |
+
end = '</i>' + end
|
| 56 |
+
if style['underline']:
|
| 57 |
+
start += '<u>'
|
| 58 |
+
end = '</u>' + end
|
| 59 |
+
self.styles[token] = (start, end)
|
| 60 |
+
|
| 61 |
+
def format_unencoded(self, tokensource, outfile):
|
| 62 |
+
lastval = ''
|
| 63 |
+
lasttype = None
|
| 64 |
+
|
| 65 |
+
outfile.write('<tt>')
|
| 66 |
+
|
| 67 |
+
for ttype, value in tokensource:
|
| 68 |
+
while ttype not in self.styles:
|
| 69 |
+
ttype = ttype.parent
|
| 70 |
+
if ttype == lasttype:
|
| 71 |
+
lastval += escape_special_chars(value)
|
| 72 |
+
else:
|
| 73 |
+
if lastval:
|
| 74 |
+
stylebegin, styleend = self.styles[lasttype]
|
| 75 |
+
outfile.write(stylebegin + lastval + styleend)
|
| 76 |
+
lastval = escape_special_chars(value)
|
| 77 |
+
lasttype = ttype
|
| 78 |
+
|
| 79 |
+
if lastval:
|
| 80 |
+
stylebegin, styleend = self.styles[lasttype]
|
| 81 |
+
outfile.write(stylebegin + lastval + styleend)
|
| 82 |
+
|
| 83 |
+
outfile.write('</tt>')
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/rtf.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.rtf
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
A formatter that generates RTF files.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.util import get_int_opt, surrogatepair
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
__all__ = ['RtfFormatter']
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class RtfFormatter(Formatter):
|
| 19 |
+
"""
|
| 20 |
+
Format tokens as RTF markup. This formatter automatically outputs full RTF
|
| 21 |
+
documents with color information and other useful stuff. Perfect for Copy and
|
| 22 |
+
Paste into Microsoft(R) Word(R) documents.
|
| 23 |
+
|
| 24 |
+
Please note that ``encoding`` and ``outencoding`` options are ignored.
|
| 25 |
+
The RTF format is ASCII natively, but handles unicode characters correctly
|
| 26 |
+
thanks to escape sequences.
|
| 27 |
+
|
| 28 |
+
.. versionadded:: 0.6
|
| 29 |
+
|
| 30 |
+
Additional options accepted:
|
| 31 |
+
|
| 32 |
+
`style`
|
| 33 |
+
The style to use, can be a string or a Style subclass (default:
|
| 34 |
+
``'default'``).
|
| 35 |
+
|
| 36 |
+
`fontface`
|
| 37 |
+
The used font family, for example ``Bitstream Vera Sans``. Defaults to
|
| 38 |
+
some generic font which is supposed to have fixed width.
|
| 39 |
+
|
| 40 |
+
`fontsize`
|
| 41 |
+
Size of the font used. Size is specified in half points. The
|
| 42 |
+
default is 24 half-points, giving a size 12 font.
|
| 43 |
+
|
| 44 |
+
.. versionadded:: 2.0
|
| 45 |
+
"""
|
| 46 |
+
name = 'RTF'
|
| 47 |
+
aliases = ['rtf']
|
| 48 |
+
filenames = ['*.rtf']
|
| 49 |
+
|
| 50 |
+
def __init__(self, **options):
|
| 51 |
+
r"""
|
| 52 |
+
Additional options accepted:
|
| 53 |
+
|
| 54 |
+
``fontface``
|
| 55 |
+
Name of the font used. Could for example be ``'Courier New'``
|
| 56 |
+
to further specify the default which is ``'\fmodern'``. The RTF
|
| 57 |
+
specification claims that ``\fmodern`` are "Fixed-pitch serif
|
| 58 |
+
and sans serif fonts". Hope every RTF implementation thinks
|
| 59 |
+
the same about modern...
|
| 60 |
+
|
| 61 |
+
"""
|
| 62 |
+
Formatter.__init__(self, **options)
|
| 63 |
+
self.fontface = options.get('fontface') or ''
|
| 64 |
+
self.fontsize = get_int_opt(options, 'fontsize', 0)
|
| 65 |
+
|
| 66 |
+
def _escape(self, text):
|
| 67 |
+
return text.replace('\\', '\\\\') \
|
| 68 |
+
.replace('{', '\\{') \
|
| 69 |
+
.replace('}', '\\}')
|
| 70 |
+
|
| 71 |
+
def _escape_text(self, text):
|
| 72 |
+
# empty strings, should give a small performance improvement
|
| 73 |
+
if not text:
|
| 74 |
+
return ''
|
| 75 |
+
|
| 76 |
+
# escape text
|
| 77 |
+
text = self._escape(text)
|
| 78 |
+
|
| 79 |
+
buf = []
|
| 80 |
+
for c in text:
|
| 81 |
+
cn = ord(c)
|
| 82 |
+
if cn < (2**7):
|
| 83 |
+
# ASCII character
|
| 84 |
+
buf.append(str(c))
|
| 85 |
+
elif (2**7) <= cn < (2**16):
|
| 86 |
+
# single unicode escape sequence
|
| 87 |
+
buf.append('{\\u%d}' % cn)
|
| 88 |
+
elif (2**16) <= cn:
|
| 89 |
+
# RTF limits unicode to 16 bits.
|
| 90 |
+
# Force surrogate pairs
|
| 91 |
+
buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
|
| 92 |
+
|
| 93 |
+
return ''.join(buf).replace('\n', '\\par\n')
|
| 94 |
+
|
| 95 |
+
def format_unencoded(self, tokensource, outfile):
|
| 96 |
+
# rtf 1.8 header
|
| 97 |
+
outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
|
| 98 |
+
'{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
|
| 99 |
+
'{\\colortbl;' % (self.fontface and
|
| 100 |
+
' ' + self._escape(self.fontface) or
|
| 101 |
+
''))
|
| 102 |
+
|
| 103 |
+
# convert colors and save them in a mapping to access them later.
|
| 104 |
+
color_mapping = {}
|
| 105 |
+
offset = 1
|
| 106 |
+
for _, style in self.style:
|
| 107 |
+
for color in style['color'], style['bgcolor'], style['border']:
|
| 108 |
+
if color and color not in color_mapping:
|
| 109 |
+
color_mapping[color] = offset
|
| 110 |
+
outfile.write('\\red%d\\green%d\\blue%d;' % (
|
| 111 |
+
int(color[0:2], 16),
|
| 112 |
+
int(color[2:4], 16),
|
| 113 |
+
int(color[4:6], 16)
|
| 114 |
+
))
|
| 115 |
+
offset += 1
|
| 116 |
+
outfile.write('}\\f0 ')
|
| 117 |
+
if self.fontsize:
|
| 118 |
+
outfile.write('\\fs%d' % self.fontsize)
|
| 119 |
+
|
| 120 |
+
# highlight stream
|
| 121 |
+
for ttype, value in tokensource:
|
| 122 |
+
while not self.style.styles_token(ttype) and ttype.parent:
|
| 123 |
+
ttype = ttype.parent
|
| 124 |
+
style = self.style.style_for_token(ttype)
|
| 125 |
+
buf = []
|
| 126 |
+
if style['bgcolor']:
|
| 127 |
+
buf.append('\\cb%d' % color_mapping[style['bgcolor']])
|
| 128 |
+
if style['color']:
|
| 129 |
+
buf.append('\\cf%d' % color_mapping[style['color']])
|
| 130 |
+
if style['bold']:
|
| 131 |
+
buf.append('\\b')
|
| 132 |
+
if style['italic']:
|
| 133 |
+
buf.append('\\i')
|
| 134 |
+
if style['underline']:
|
| 135 |
+
buf.append('\\ul')
|
| 136 |
+
if style['border']:
|
| 137 |
+
buf.append('\\chbrdr\\chcfpat%d' %
|
| 138 |
+
color_mapping[style['border']])
|
| 139 |
+
start = ''.join(buf)
|
| 140 |
+
if start:
|
| 141 |
+
outfile.write('{%s ' % start)
|
| 142 |
+
outfile.write(self._escape_text(value))
|
| 143 |
+
if start:
|
| 144 |
+
outfile.write('}')
|
| 145 |
+
|
| 146 |
+
outfile.write('}')
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/svg.py
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.svg
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for SVG output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.token import Comment
|
| 13 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
|
| 14 |
+
|
| 15 |
+
__all__ = ['SvgFormatter']
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def escape_html(text):
|
| 19 |
+
"""Escape &, <, > as well as single and double quotes for HTML."""
|
| 20 |
+
return text.replace('&', '&'). \
|
| 21 |
+
replace('<', '<'). \
|
| 22 |
+
replace('>', '>'). \
|
| 23 |
+
replace('"', '"'). \
|
| 24 |
+
replace("'", ''')
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class2style = {}
|
| 28 |
+
|
| 29 |
+
class SvgFormatter(Formatter):
|
| 30 |
+
"""
|
| 31 |
+
Format tokens as an SVG graphics file. This formatter is still experimental.
|
| 32 |
+
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
|
| 33 |
+
coordinates containing ``<tspan>`` elements with the individual token styles.
|
| 34 |
+
|
| 35 |
+
By default, this formatter outputs a full SVG document including doctype
|
| 36 |
+
declaration and the ``<svg>`` root element.
|
| 37 |
+
|
| 38 |
+
.. versionadded:: 0.9
|
| 39 |
+
|
| 40 |
+
Additional options accepted:
|
| 41 |
+
|
| 42 |
+
`nowrap`
|
| 43 |
+
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
|
| 44 |
+
don't add a XML declaration and a doctype. If true, the `fontfamily`
|
| 45 |
+
and `fontsize` options are ignored. Defaults to ``False``.
|
| 46 |
+
|
| 47 |
+
`fontfamily`
|
| 48 |
+
The value to give the wrapping ``<g>`` element's ``font-family``
|
| 49 |
+
attribute, defaults to ``"monospace"``.
|
| 50 |
+
|
| 51 |
+
`fontsize`
|
| 52 |
+
The value to give the wrapping ``<g>`` element's ``font-size``
|
| 53 |
+
attribute, defaults to ``"14px"``.
|
| 54 |
+
|
| 55 |
+
`linenos`
|
| 56 |
+
If ``True``, add line numbers (default: ``False``).
|
| 57 |
+
|
| 58 |
+
`linenostart`
|
| 59 |
+
The line number for the first line (default: ``1``).
|
| 60 |
+
|
| 61 |
+
`linenostep`
|
| 62 |
+
If set to a number n > 1, only every nth line number is printed.
|
| 63 |
+
|
| 64 |
+
`linenowidth`
|
| 65 |
+
Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
|
| 66 |
+
for up to 4-digit line numbers. Increase width for longer code blocks).
|
| 67 |
+
|
| 68 |
+
`xoffset`
|
| 69 |
+
Starting offset in X direction, defaults to ``0``.
|
| 70 |
+
|
| 71 |
+
`yoffset`
|
| 72 |
+
Starting offset in Y direction, defaults to the font size if it is given
|
| 73 |
+
in pixels, or ``20`` else. (This is necessary since text coordinates
|
| 74 |
+
refer to the text baseline, not the top edge.)
|
| 75 |
+
|
| 76 |
+
`ystep`
|
| 77 |
+
Offset to add to the Y coordinate for each subsequent line. This should
|
| 78 |
+
roughly be the text size plus 5. It defaults to that value if the text
|
| 79 |
+
size is given in pixels, or ``25`` else.
|
| 80 |
+
|
| 81 |
+
`spacehack`
|
| 82 |
+
Convert spaces in the source to `` ``, which are non-breaking
|
| 83 |
+
spaces. SVG provides the ``xml:space`` attribute to control how
|
| 84 |
+
whitespace inside tags is handled, in theory, the ``preserve`` value
|
| 85 |
+
could be used to keep all whitespace as-is. However, many current SVG
|
| 86 |
+
viewers don't obey that rule, so this option is provided as a workaround
|
| 87 |
+
and defaults to ``True``.
|
| 88 |
+
"""
|
| 89 |
+
name = 'SVG'
|
| 90 |
+
aliases = ['svg']
|
| 91 |
+
filenames = ['*.svg']
|
| 92 |
+
|
| 93 |
+
def __init__(self, **options):
|
| 94 |
+
Formatter.__init__(self, **options)
|
| 95 |
+
self.nowrap = get_bool_opt(options, 'nowrap', False)
|
| 96 |
+
self.fontfamily = options.get('fontfamily', 'monospace')
|
| 97 |
+
self.fontsize = options.get('fontsize', '14px')
|
| 98 |
+
self.xoffset = get_int_opt(options, 'xoffset', 0)
|
| 99 |
+
fs = self.fontsize.strip()
|
| 100 |
+
if fs.endswith('px'): fs = fs[:-2].strip()
|
| 101 |
+
try:
|
| 102 |
+
int_fs = int(fs)
|
| 103 |
+
except:
|
| 104 |
+
int_fs = 20
|
| 105 |
+
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
|
| 106 |
+
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
|
| 107 |
+
self.spacehack = get_bool_opt(options, 'spacehack', True)
|
| 108 |
+
self.linenos = get_bool_opt(options,'linenos',False)
|
| 109 |
+
self.linenostart = get_int_opt(options,'linenostart',1)
|
| 110 |
+
self.linenostep = get_int_opt(options,'linenostep',1)
|
| 111 |
+
self.linenowidth = get_int_opt(options,'linenowidth', 3*self.ystep)
|
| 112 |
+
self._stylecache = {}
|
| 113 |
+
|
| 114 |
+
def format_unencoded(self, tokensource, outfile):
|
| 115 |
+
"""
|
| 116 |
+
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
|
| 117 |
+
tuples and write it into ``outfile``.
|
| 118 |
+
|
| 119 |
+
For our implementation we put all lines in their own 'line group'.
|
| 120 |
+
"""
|
| 121 |
+
x = self.xoffset
|
| 122 |
+
y = self.yoffset
|
| 123 |
+
if not self.nowrap:
|
| 124 |
+
if self.encoding:
|
| 125 |
+
outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
|
| 126 |
+
self.encoding)
|
| 127 |
+
else:
|
| 128 |
+
outfile.write('<?xml version="1.0"?>\n')
|
| 129 |
+
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
|
| 130 |
+
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
|
| 131 |
+
'svg10.dtd">\n')
|
| 132 |
+
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
|
| 133 |
+
outfile.write('<g font-family="%s" font-size="%s">\n' %
|
| 134 |
+
(self.fontfamily, self.fontsize))
|
| 135 |
+
|
| 136 |
+
counter = self.linenostart
|
| 137 |
+
counter_step = self.linenostep
|
| 138 |
+
counter_style = self._get_style(Comment)
|
| 139 |
+
line_x = x
|
| 140 |
+
|
| 141 |
+
if self.linenos:
|
| 142 |
+
if counter % counter_step == 0:
|
| 143 |
+
outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' %
|
| 144 |
+
(x+self.linenowidth,y,counter_style,counter))
|
| 145 |
+
line_x += self.linenowidth + self.ystep
|
| 146 |
+
counter += 1
|
| 147 |
+
|
| 148 |
+
outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
|
| 149 |
+
for ttype, value in tokensource:
|
| 150 |
+
style = self._get_style(ttype)
|
| 151 |
+
tspan = style and '<tspan' + style + '>' or ''
|
| 152 |
+
tspanend = tspan and '</tspan>' or ''
|
| 153 |
+
value = escape_html(value)
|
| 154 |
+
if self.spacehack:
|
| 155 |
+
value = value.expandtabs().replace(' ', ' ')
|
| 156 |
+
parts = value.split('\n')
|
| 157 |
+
for part in parts[:-1]:
|
| 158 |
+
outfile.write(tspan + part + tspanend)
|
| 159 |
+
y += self.ystep
|
| 160 |
+
outfile.write('</text>\n')
|
| 161 |
+
if self.linenos and counter % counter_step == 0:
|
| 162 |
+
outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' %
|
| 163 |
+
(x+self.linenowidth,y,counter_style,counter))
|
| 164 |
+
|
| 165 |
+
counter += 1
|
| 166 |
+
outfile.write('<text x="%s" y="%s" ' 'xml:space="preserve">' % (line_x,y))
|
| 167 |
+
outfile.write(tspan + parts[-1] + tspanend)
|
| 168 |
+
outfile.write('</text>')
|
| 169 |
+
|
| 170 |
+
if not self.nowrap:
|
| 171 |
+
outfile.write('</g></svg>\n')
|
| 172 |
+
|
| 173 |
+
def _get_style(self, tokentype):
|
| 174 |
+
if tokentype in self._stylecache:
|
| 175 |
+
return self._stylecache[tokentype]
|
| 176 |
+
otokentype = tokentype
|
| 177 |
+
while not self.style.styles_token(tokentype):
|
| 178 |
+
tokentype = tokentype.parent
|
| 179 |
+
value = self.style.style_for_token(tokentype)
|
| 180 |
+
result = ''
|
| 181 |
+
if value['color']:
|
| 182 |
+
result = ' fill="#' + value['color'] + '"'
|
| 183 |
+
if value['bold']:
|
| 184 |
+
result += ' font-weight="bold"'
|
| 185 |
+
if value['italic']:
|
| 186 |
+
result += ' font-style="italic"'
|
| 187 |
+
self._stylecache[otokentype] = result
|
| 188 |
+
return result
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.terminal
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for terminal output with ANSI sequences.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
|
| 13 |
+
Number, Operator, Generic, Token, Whitespace
|
| 14 |
+
from pip._vendor.pygments.console import ansiformat
|
| 15 |
+
from pip._vendor.pygments.util import get_choice_opt
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
__all__ = ['TerminalFormatter']
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
#: Map token types to a pair of color names: the first entry is used on
#: light terminal backgrounds, the second on dark ones.
TERMINAL_COLORS = {
    Token:              ('', ''),

    # Whitespace, comments and keywords.
    Whitespace:         ('gray', 'brightblack'),
    Comment:            ('gray', 'brightblack'),
    Comment.Preproc:    ('cyan', 'brightcyan'),
    Keyword:            ('blue', 'brightblue'),
    Keyword.Type:       ('cyan', 'brightcyan'),
    Operator.Word:      ('magenta', 'brightmagenta'),

    # Names; '_..._' marks underline, '*...*' bold (see console.ansiformat).
    Name.Builtin:       ('cyan', 'brightcyan'),
    Name.Function:      ('green', 'brightgreen'),
    Name.Namespace:     ('_cyan_', '_brightcyan_'),
    Name.Class:         ('_green_', '_brightgreen_'),
    Name.Exception:     ('cyan', 'brightcyan'),
    Name.Decorator:     ('brightblack', 'gray'),
    Name.Variable:      ('red', 'brightred'),
    Name.Constant:      ('red', 'brightred'),
    Name.Attribute:     ('cyan', 'brightcyan'),
    Name.Tag:           ('brightblue', 'brightblue'),

    # Literals.
    String:             ('yellow', 'yellow'),
    Number:             ('blue', 'brightblue'),

    # Generic (diff/heading/prompt) tokens.
    Generic.Deleted:    ('brightred', 'brightred'),
    Generic.Inserted:   ('green', 'brightgreen'),
    Generic.Heading:    ('**', '**'),
    Generic.Subheading: ('*magenta*', '*brightmagenta*'),
    Generic.Prompt:     ('**', '**'),
    Generic.Error:      ('brightred', 'brightred'),

    Error:              ('_brightred_', '_brightred_'),
}
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # True when formatting for a dark terminal background; used as an
        # index (0/1) into the colorscheme value pairs in _get_color().
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
        self.linenos = options.get('linenos', False)
        # 1-based counter for the next line number; 0 until the first line.
        self._lineno = 0

    def format(self, tokensource, outfile):
        # Delegate to the base class, which handles output encoding and
        # eventually calls format_unencoded().
        return Formatter.format(self, tokensource, outfile)

    def _write_lineno(self, outfile):
        # Emit the next line number.  A newline is written *before* every
        # number except the first, so color sequences on the previous line
        # are already terminated when the number appears.
        self._lineno += 1
        outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))

    def _get_color(self, ttype):
        # self.colorscheme is a dict containing usually generic types, so we
        # have to walk the tree of dots. The base Token type must be a key,
        # even if it's empty string, as in the default above.
        colors = self.colorscheme.get(ttype)
        while colors is None:
            ttype = ttype.parent
            colors = self.colorscheme.get(ttype)
        return colors[self.darkbg]

    def format_unencoded(self, tokensource, outfile):
        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            color = self._get_color(ttype)

            # Color each line separately so escape sequences never span a
            # newline (keeps pagers happy).
            for line in value.splitlines(True):
                if color:
                    outfile.write(ansiformat(color, line.rstrip('\n')))
                else:
                    outfile.write(line.rstrip('\n'))
                if line.endswith('\n'):
                    if self.linenos:
                        # The line-number helper writes the newline itself.
                        self._write_lineno(outfile)
                    else:
                        outfile.write('\n')

        if self.linenos:
            # Terminate the last numbered line.
            outfile.write("\n")
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py
ADDED
|
@@ -0,0 +1,943 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.lexer
|
| 3 |
+
~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Base lexer classes.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import re
|
| 12 |
+
import sys
|
| 13 |
+
import time
|
| 14 |
+
|
| 15 |
+
from pip._vendor.pygments.filter import apply_filters, Filter
|
| 16 |
+
from pip._vendor.pygments.filters import get_filter_by_name
|
| 17 |
+
from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType
|
| 18 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
|
| 19 |
+
make_analysator, Future, guess_decode
|
| 20 |
+
from pip._vendor.pygments.regexopt import regex_opt
|
| 21 |
+
|
| 22 |
+
# Public API of this module.
__all__ = [
    'Lexer',
    'RegexLexer',
    'ExtendedRegexLexer',
    'DelegatingLexer',
    'LexerContext',
    'include',
    'inherit',
    'bygroups',
    'using',
    'this',
    'default',
    'words',
    'line_re',
]
|
| 25 |
+
|
| 26 |
+
line_re = re.compile('.*?\n')
|
| 27 |
+
|
| 28 |
+
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
|
| 29 |
+
(b'\xff\xfe\0\0', 'utf-32'),
|
| 30 |
+
(b'\0\0\xfe\xff', 'utf-32be'),
|
| 31 |
+
(b'\xff\xfe', 'utf-16'),
|
| 32 |
+
(b'\xfe\xff', 'utf-16be')]
|
| 33 |
+
|
| 34 |
+
_default_analyse = staticmethod(lambda x: 0.0)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class LexerMeta(type):
    """
    Metaclass for lexers: any ``analyse_text`` method found in the class
    body is wrapped so it behaves as a static method that always returns
    a float (see ``make_analysator``).
    """

    def __new__(mcs, name, bases, d):
        if 'analyse_text' in d:
            # Wrap the raw method; the wrapper drops self/cls and coerces
            # the return value to float.
            d['analyse_text'] = make_analysator(d['analyse_text'])
        return type.__new__(mcs, name, bases, d)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class Lexer(metaclass=LexerMeta):
    """
    Lexer for a specific language.

    See also :doc:`lexerdevelopment`, a high-level guide to writing
    lexers.

    Lexer classes have attributes used for choosing the most appropriate
    lexer based on various criteria.

    .. autoattribute:: name
       :no-value:
    .. autoattribute:: aliases
       :no-value:
    .. autoattribute:: filenames
       :no-value:
    .. autoattribute:: alias_filenames
    .. autoattribute:: mimetypes
       :no-value:
    .. autoattribute:: priority

    Lexers included in Pygments should have an additional attribute:

    .. autoattribute:: url
       :no-value:

    You can pass options to the constructor. The basic options recognized
    by all lexers and processed by the base `Lexer` class are:

    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.

        .. versionadded:: 1.3

    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection. Can also be ``'chardet'`` to use the chardet
        library, if it is installed.
    ``inencoding``
        Overrides the ``encoding`` if given.
    """

    #: Full name of the lexer, in human-readable form
    name = None

    #: A list of short, unique identifiers that can be used to look
    #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
    aliases = []

    #: A list of `fnmatch` patterns that match filenames which contain
    #: content for this lexer. The patterns in this list should be unique among
    #: all lexers.
    filenames = []

    #: A list of `fnmatch` patterns that match filenames which may or may not
    #: contain content for this lexer. This list is used by the
    #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
    #: are then included in guessing the correct one. That means that
    #: e.g. every lexer for HTML and a template language should include
    #: ``\*.html`` in this list.
    alias_filenames = []

    #: A list of MIME types for content that can be lexed with this lexer.
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    #: URL of the language specification/definition. Used in the Pygments
    #: documentation.
    url = None

    def __init__(self, **options):
        """
        This constructor takes arbitrary options as keyword arguments.
        Every subclass must first process its own options and then call
        the `Lexer` constructor, since it processes the basic
        options like `stripnl`.

        An example looks like this:

        .. sourcecode:: python

           def __init__(self, **options):
               self.compress = options.get('compress', '')
               Lexer.__init__(self, **options)

        As these options must all be specifiable as strings (due to the
        command line usage), there are various utility functions
        available to help with that, see `Utilities`_.
        """
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'guess')
        # 'inencoding' takes precedence over 'encoding' when both are given.
        self.encoding = options.get('inencoding') or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.
        """
        # Accept either a Filter instance or a registered filter name.
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)

    def analyse_text(text):
        """
        A static method which is called for lexer guessing.

        It should analyse the text and return a float in the range
        from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
        will not be selected as the most probable one, if it returns
        ``1.0``, it will be selected immediately. This is used by
        `guess_lexer`.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`
        it's the same as if the return values was ``0.0``.
        """

    def get_tokens(self, text, unfiltered=False):
        """
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
        """
        if not isinstance(text, str):
            # Bytes input: decode according to the 'encoding' option.
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    from pip._vendor import chardet
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    enc = chardet.detect(text[:1024])  # Guess using first 1KB
                    decoded = text.decode(enc.get('encoding') or 'utf-8',
                                          'replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            # Already a str: only strip a leading BOM character if present.
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string
        # Normalize all line endings to '\n'.
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        def streamer():
            # Drop the index from (index, token, value) triples.
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
        """
        raise NotImplementedError
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class DelegatingLexer(Lexer):
    """
    Combines two lexers: the *language* lexer scans the whole input first,
    and every run it reports as ``Other`` is then re-lexed by the *root*
    lexer, with the results spliced back in at the right positions.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        # Token type whose runs are handed to the root lexer.
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Accumulate the root-lexer input as pieces; `root_len` tracks the
        # length of their concatenation so insertion offsets stay correct.
        root_parts = []
        root_len = 0
        insertions = []
        pending = []
        for index, ttype, value in self.language_lexer.get_tokens_unprocessed(text):
            if ttype is self.needle:
                if pending:
                    insertions.append((root_len, pending))
                    pending = []
                root_parts.append(value)
                root_len += len(value)
            else:
                pending.append((index, ttype, value))
        if pending:
            insertions.append((root_len, pending))
        return do_insertions(
            insertions,
            self.root_lexer.get_tokens_unprocessed(''.join(root_parts)))
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
# ------------------------------------------------------------------------------
|
| 304 |
+
# RegexLexer and ExtendedRegexLexer
|
| 305 |
+
#
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
class include(str):  # pylint: disable=invalid-name
    """Marker string: a state that pulls in the rules of another state."""
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
class _inherit:
|
| 316 |
+
"""
|
| 317 |
+
Indicates the a state should inherit from its superclass.
|
| 318 |
+
"""
|
| 319 |
+
def __repr__(self):
|
| 320 |
+
return 'inherit'
|
| 321 |
+
|
| 322 |
+
inherit = _inherit() # pylint: disable=invalid-name
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
class combined(tuple):  # pylint: disable=invalid-name
    """Tuple subclass marking a state combined from several other states."""

    def __new__(cls, *args):
        # Store the state names themselves as the tuple's items.
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple is immutable; everything happens in __new__.
        pass
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
class _PseudoMatch:
|
| 339 |
+
"""
|
| 340 |
+
A pseudo match object constructed from a string.
|
| 341 |
+
"""
|
| 342 |
+
|
| 343 |
+
def __init__(self, start, text):
|
| 344 |
+
self._text = text
|
| 345 |
+
self._start = start
|
| 346 |
+
|
| 347 |
+
def start(self, arg=None):
|
| 348 |
+
return self._start
|
| 349 |
+
|
| 350 |
+
def end(self, arg=None):
|
| 351 |
+
return self._start + len(self._text)
|
| 352 |
+
|
| 353 |
+
def group(self, arg=None):
|
| 354 |
+
if arg:
|
| 355 |
+
raise IndexError('No such group')
|
| 356 |
+
return self._text
|
| 357 |
+
|
| 358 |
+
def groups(self):
|
| 359 |
+
return (self._text,)
|
| 360 |
+
|
| 361 |
+
def groupdict(self):
|
| 362 |
+
return {}
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.

    Each positional argument corresponds to one regex group (1-based);
    it may be ``None`` (skip the group), a token type (emit the group's
    text with that type), or another callback (invoked on the group via
    a ``_PseudoMatch``).
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                # Skip empty/unmatched groups entirely.
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        # Position the context at this group's start for
                        # the nested callback.
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        if ctx:
            # Resume lexing after the whole match.
            ctx.pos = match.end()
    return callback
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class _This:
|
| 392 |
+
"""
|
| 393 |
+
Special singleton used for indicating the caller class.
|
| 394 |
+
Used by ``using``.
|
| 395 |
+
"""
|
| 396 |
+
|
| 397 |
+
this = _This()
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    # Arguments forwarded to get_tokens_unprocessed (currently only 'stack').
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            # Offset sub-lexer token indices by the match position.
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            # Offset sub-lexer token indices by the match position.
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
class default:
    """
    Indicates a state or state action (e.g. #pop) to apply without
    consuming any input.  ``default('#pop')`` is equivalent to the rule
    ``('', Token, '#pop')``; state tuples may be used as well.

    .. versionadded:: 2.0
    """

    def __init__(self, state):
        self.state = state
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
class words(Future):
    """
    Indicates a list of literal words; lazily transformed (via ``get``)
    into one optimized regex that matches any of them, wrapped in the
    given prefix/suffix.

    .. versionadded:: 2.0
    """

    def __init__(self, words, prefix='', suffix=''):
        self.words = words
        self.prefix = prefix
        self.suffix = suffix

    def get(self):
        # Build the combined regex only when actually needed.
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        # Lazy constructs (e.g. `words`) resolve to a pattern string on demand.
        if isinstance(regex, Future):
            regex = regex.get()
        # Return the bound `match` method so the tokenizer loop avoids one
        # attribute lookup per rule application.
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            'token type must be simple type or callable, not %r' % (token,)
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition.

        Returns either a negative int (pop count), a tuple of state names
        to push, or the literal string '#push'.
        """
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                # '#pop:n' pops n states at once, encoded as -n.
                return -int(new_state[5:])
            else:
                assert False, 'unknown new state %r' % new_state
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, 'circular state ref %r' % istate
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, 'unknown new state def %r' % new_state

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition."""
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        # Register the (still empty) list before recursing so that
        # mutually-including states terminate instead of looping.
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # should be processed already, but may not in the case of:
                # 1. the state has no counterpart in any parent
                # 2. the state includes more than one 'inherit'
                continue
            if isinstance(tdef, default):
                # An always-matching rule (empty regex), no token, only a
                # state transition.
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                tokens.append((re.compile('').match, None, new_state))
                continue

            assert type(tdef) is tuple, "wrong rule def %r" % tdef

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                                 (tdef[0], state, cls, err)) from err

            token = cls._process_token(tdef[1])

            # Two-element rules have no state transition.
            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        # list() because _process_state may add '_tmp_*' combined states
        # to `processed` while we iterate.
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        # Maps state name -> index of the pending `inherit` marker in the
        # (already merged) rule list for that state.
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    # N.b. because this is assigned by reference, sufficiently
                    # deep hierarchies are processed incrementally (e.g. for
                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                    # will not see any inherits in B).
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # N.b. this is the index in items (that is, the superclass
                    # copy), so offset required when storing below.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        # Process lazily, once per class, on first instantiation.
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)
|
| 644 |
+
|
| 645 |
+
|
| 646 |
+
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all time there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensure
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        # Preprocessed by the metaclass: {state: [(matchfunc, action, new_state)]}
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            # action is a callback producing
                            # (index, token, value) triples itself.
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    # Emit a single Error character and keep going.
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    # pos ran past the end of text: done.
                    break
|
| 741 |
+
|
| 742 |
+
|
| 743 |
+
class LexerContext:
    """Mutable lexing-position state used by ExtendedRegexLexer.

    Bundles the input ``text``, the current offset ``pos``, the effective
    ``end`` offset and the state ``stack`` so that lexer callbacks can
    inspect and modify them.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        # A falsy end (None or 0) falls back to the full text length;
        # end=0 is therefore not representable.
        self.end = end if end else len(text)
        self.stack = stack if stack else ['root']

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text, self.pos, self.stack)
|
| 757 |
+
|
| 758 |
+
|
| 759 |
+
class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                # Unlike RegexLexer, matching is bounded by ctx.end as well.
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            # Callbacks receive the context and are
                            # responsible for advancing ctx.pos themselves.
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                    # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                # No rule matched at ctx.pos.
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        # NOTE: yields Text here, whereas RegexLexer yields
                        # Whitespace for the same newline case.
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
|
| 829 |
+
|
| 830 |
+
|
| 831 |
+
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions
        yield from tokens
        return

    # realpos: position in the combined output stream; None until the
    # first token from `tokens` fixes the starting offset.
    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        # oldi: how much of the current value `v` has been emitted already.
        oldi = 0
        while insleft and i + len(v) >= index:
            # Emit the part of `v` before the insertion point, then the
            # inserted tokens, then loop in case another insertion also
            # falls inside this same value.
            tmpval = v[oldi:index - i]
            if tmpval:
                yield realpos, t, tmpval
                realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        # Remainder of `v` after the last insertion inside it.
        if oldi < len(v):
            yield realpos, t, v[oldi:]
            realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
|
| 893 |
+
|
| 894 |
+
|
| 895 |
+
class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        """Compile *regex* and wrap its match function with timing code.

        Per-pattern call counts and cumulative match time are accumulated
        in ``cls._prof_data[-1]``, keyed by ``(state, pattern)``.
        """
        # Resolve `words(...)` to its optimized pattern string so the
        # profile output shows the actual regex.
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            # perf_counter() is monotonic and high-resolution, unlike
            # time.time(), which can jump (NTP adjustments) and is too
            # coarse to time individual regex matches reliably.
            t0 = time.perf_counter()
            res = compiled.match(text, pos, endpos)
            t1 = time.perf_counter()
            info[0] += 1
            info[1] += t1 - t0
            return res
        return match_func
|
| 915 |
+
|
| 916 |
+
|
| 917 |
+
class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    # Stack of {(state, regex): [ncalls, total_time]} dicts, one frame per
    # nested get_tokens_unprocessed call.
    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        # Build (state, pattern, ncalls, total_ms, ms_per_call) rows, sorted
        # by the configured column (descending).
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__init__.py
ADDED
|
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.lexers
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Pygments lexers.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import re
|
| 12 |
+
import sys
|
| 13 |
+
import types
|
| 14 |
+
import fnmatch
|
| 15 |
+
from os.path import basename
|
| 16 |
+
|
| 17 |
+
from pip._vendor.pygments.lexers._mapping import LEXERS
|
| 18 |
+
from pip._vendor.pygments.modeline import get_filetype_from_buffer
|
| 19 |
+
from pip._vendor.pygments.plugin import find_plugin_lexers
|
| 20 |
+
from pip._vendor.pygments.util import ClassNotFound, guess_decode
|
| 21 |
+
|
| 22 |
+
COMPAT = {
|
| 23 |
+
'Python3Lexer': 'PythonLexer',
|
| 24 |
+
'Python3TracebackLexer': 'PythonTracebackLexer',
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
|
| 28 |
+
'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
|
| 29 |
+
|
| 30 |
+
_lexer_cache = {}
|
| 31 |
+
_pattern_cache = {}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _fn_matches(fn, glob):
    """Return whether the supplied file name fn matches pattern filename."""
    # Translate each glob to a regex once and memoize it (EAFP lookup).
    try:
        pattern = _pattern_cache[glob]
    except KeyError:
        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
    return pattern.match(fn)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _load_lexers(module_name):
    """Import *module_name* and register every lexer it exports.

    Each class listed in the module's ``__all__`` is stored in the global
    ``_lexer_cache`` under its ``name`` attribute.
    """
    module = __import__(module_name, None, None, ['__all__'])
    for exported in module.__all__:
        lexer_cls = getattr(module, exported)
        _lexer_cache[lexer_cls.name] = lexer_cls
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def get_all_lexers(plugins=True):
    """Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all know lexers.

    If *plugins* is true (the default), plugin lexers supplied by entrypoints
    are also returned. Otherwise, only builtin ones are considered.
    """
    # Builtin lexers: drop the leading module-name entry from each record.
    for entry in LEXERS.values():
        yield entry[1:]
    if not plugins:
        return
    for plugin_lexer in find_plugin_lexers():
        yield (plugin_lexer.name, plugin_lexer.aliases,
               plugin_lexer.filenames, plugin_lexer.mimetypes)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def find_lexer_class(name):
    """
    Return the `Lexer` subclass that with the *name* attribute as given by
    the *name* argument.

    Returns ``None`` if no builtin or plugin lexer with that name exists.
    """
    if name in _lexer_cache:
        return _lexer_cache[name]
    # lookup builtin lexers
    for module_name, lname, aliases, _, _ in LEXERS.values():
        if name == lname:
            # Importing the module populates _lexer_cache for `name`.
            _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if cls.name == name:
            return cls
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def find_lexer_class_by_name(_alias):
    """
    Return the `Lexer` subclass that has `alias` in its aliases list, without
    instantiating it.

    Like `get_lexer_by_name`, but does not instantiate the class.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
    found.

    .. versionadded:: 2.2
    """
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)
    wanted = _alias.lower()
    # lookup builtin lexers first
    for module_name, name, aliases, _, _ in LEXERS.values():
        if wanted in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    # then lexers registered via setuptools entrypoints
    for cls in find_plugin_lexers():
        if wanted in cls.aliases:
            return cls
    raise ClassNotFound('no lexer for alias %r found' % _alias)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def get_lexer_by_name(_alias, **options):
    """
    Return an instance of a `Lexer` subclass that has `alias` in its
    aliases list. The lexer is given the `options` at its
    instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
    found.
    """
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)

    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.values():
        if _alias.lower() in aliases:
            if name not in _lexer_cache:
                # Importing the module populates _lexer_cache for `name`.
                _load_lexers(module_name)
            return _lexer_cache[name](**options)
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias.lower() in cls.aliases:
            return cls(**options)
    raise ClassNotFound('no lexer for alias %r found' % _alias)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
    """Load a lexer from a file.

    This method expects a file located relative to the current working
    directory, which contains a Lexer class. By default, it expects the
    Lexer to be name CustomLexer; you can specify your own class name
    as the second argument to this function.

    Users should be very careful with the input, because this method
    is equivalent to running eval on the input file.

    Raises ClassNotFound if there are any problems importing the Lexer.

    .. versionadded:: 2.2
    """
    try:
        # SECURITY: exec of arbitrary file contents — callers must only pass
        # trusted filenames (see docstring warning above).
        # This empty dict will contain the namespace for the exec'd file
        custom_namespace = {}
        with open(filename, 'rb') as f:
            exec(f.read(), custom_namespace)
        # Retrieve the class `lexername` from that namespace
        if lexername not in custom_namespace:
            raise ClassNotFound('no valid %s class found in %s' %
                                (lexername, filename))
        lexer_class = custom_namespace[lexername]
        # And finally instantiate it with the options
        return lexer_class(**options)
    except OSError as err:
        # Chain the original error so the root cause stays visible,
        # matching the `raise ... from err` style used elsewhere in
        # this package.
        raise ClassNotFound('cannot read %s: %s' % (filename, err)) from err
    except ClassNotFound:
        raise
    except Exception as err:
        raise ClassNotFound('error when loading custom lexer: %s' % err) from err
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def find_lexer_class_for_filename(_fn, code=None):
    """Get a lexer for a filename.

    If multiple lexers match the filename pattern, use ``analyse_text()`` to
    figure out which one is more appropriate.

    Returns None if not found.
    """
    # Collected as (lexer_class, matching_pattern) pairs.
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.values():
        for filename in filenames:
            if _fn_matches(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if _fn_matches(fn, filename):
                matches.append((cls, filename))

    if isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = guess_decode(code)

    def get_rating(info):
        cls, filename = info
        # explicit patterns get a bonus
        # (old-style `and/or` conditional: 0.5 if no wildcard, else 0)
        bonus = '*' not in filename and 0.5 or 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class. The default implementation returns None which
        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus, cls.__name__
        return cls.priority + bonus, cls.__name__

    if matches:
        matches.sort(key=get_rating)
        # print "Possible lexers, after sort:", matches
        return matches[-1][0]
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def get_lexer_for_filename(_fn, code=None, **options):
|
| 213 |
+
"""Get a lexer for a filename.
|
| 214 |
+
|
| 215 |
+
Return a `Lexer` subclass instance that has a filename pattern
|
| 216 |
+
matching `fn`. The lexer is given the `options` at its
|
| 217 |
+
instantiation.
|
| 218 |
+
|
| 219 |
+
Raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
|
| 220 |
+
is found.
|
| 221 |
+
|
| 222 |
+
If multiple lexers match the filename pattern, use their ``analyse_text()``
|
| 223 |
+
methods to figure out which one is more appropriate.
|
| 224 |
+
"""
|
| 225 |
+
res = find_lexer_class_for_filename(_fn, code)
|
| 226 |
+
if not res:
|
| 227 |
+
raise ClassNotFound('no lexer for filename %r found' % _fn)
|
| 228 |
+
return res(**options)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def get_lexer_for_mimetype(_mime, **options):
    """
    Return a `Lexer` subclass instance that has `mime` in its mimetype
    list. The lexer is given the `options` at its instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if not lexer for that mimetype
    is found.
    """
    # builtin lexers first
    for modname, name, _, _, mimetypes in LEXERS.values():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    # then plugin lexers from entrypoints
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def _iter_lexerclasses(plugins=True):
    """Return an iterator over all lexer classes."""
    # Builtin lexers in a stable (sorted) order, loaded on demand.
    for key in sorted(LEXERS):
        modname, lexname = LEXERS[key][:2]
        if lexname not in _lexer_cache:
            _load_lexers(modname)
        yield _lexer_cache[lexname]
    if not plugins:
        return
    yield from find_plugin_lexers()
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def guess_lexer_for_filename(_fn, _text, **options):
    """
    As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
    or `alias_filenames` that matches `filename` are taken into consideration.

    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
    handle the content.
    """
    fn = basename(_fn)
    # primary[lexer] records whether the match came from `filenames`
    # (True) or only from `alias_filenames` (False); primary matches win
    # ties in the sort below.
    primary = {}
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = True
        for filename in lexer.alias_filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = False
    if not matching_lexers:
        raise ClassNotFound('no lexer for filename %r found' % fn)
    if len(matching_lexers) == 1:
        return matching_lexers.pop()(**options)
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            # perfect confidence: stop searching
            return lexer(**options)
        result.append((rv, lexer))

    def type_sort(t):
        # sort by:
        # - analyse score
        # - is primary filename pattern?
        # - priority
        # - last resort: class name
        return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
    result.sort(key=type_sort)

    return result[-1][1](**options)
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def guess_lexer(_text, **options):
    """
    Return a `Lexer` subclass instance that's guessed from the text in
    `text`. For that, the :meth:`.analyse_text()` method of every known lexer
    class is called with the text as argument, and the lexer which returned the
    highest value will be instantiated and returned.

    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
    handle the content.
    """

    if not isinstance(_text, str):
        # Bytes input: decode with an explicitly requested encoding,
        # otherwise fall back to BOM/heuristic detection.
        inencoding = options.get('inencoding', options.get('encoding'))
        if inencoding:
            # `inencoding` is known truthy inside this branch, so the former
            # `inencoding or 'utf8'` fallback was dead code and is dropped.
            _text = _text.decode(inencoding)
        else:
            _text, _ = guess_decode(_text)

    # try to get a vim modeline first
    ft = get_filetype_from_buffer(_text)

    if ft is not None:
        try:
            return get_lexer_by_name(ft, **options)
        except ClassNotFound:
            pass

    # Track the best (score, lexer) pair seen so far; a perfect score
    # short-circuits the scan.
    best_lexer = [0.0, None]
    for lexer in _iter_lexerclasses():
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        if rv > best_lexer[0]:
            best_lexer[:] = (rv, lexer)
    if not best_lexer[0] or best_lexer[1] is None:
        raise ClassNotFound('no lexer matching the text found')
    return best_lexer[1](**options)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
class _automodule(types.ModuleType):
    """Module subclass that imports lexer classes lazily on attribute access."""

    def __getattr__(self, name):
        info = LEXERS.get(name)
        if info:
            module_name, cls_name = info[0], info[1]
            _load_lexers(module_name)
            cls = _lexer_cache[cls_name]
            # Cache the class on the module object so subsequent lookups
            # bypass __getattr__ entirely.
            setattr(self, name, cls)
            return cls
        if name in COMPAT:
            # Legacy name: resolve to its current replacement.
            return getattr(self, COMPAT[name])
        raise AttributeError(name)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
# Swap this module object in sys.modules for an _automodule instance so that
# attribute access (e.g. `pygments.lexers.PythonLexer`) triggers lazy
# importing via _automodule.__getattr__.
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
# Copy the existing module namespace so already-defined names keep working.
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
# Remove names that leaked into the new module's namespace; `sys` and `types`
# are deleted so the lazy __getattr__ path stays the only public surface.
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (16.3 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-311.pyc
ADDED
|
Binary file (64.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-311.pyc
ADDED
|
Binary file (43.3 kB). View file
|
|
|