Datasets:
Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .venv/Lib/site-packages/_virtualenv.pth +3 -0
- .venv/Lib/site-packages/distutils-precedence.pth +3 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/__init__.py +23 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/t32.exe +0 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe +0 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/t64.exe +0 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/util.py +1932 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/version.py +739 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/w32.exe +0 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe +0 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/w64.exe +0 -0
- .venv/Lib/site-packages/pip/_vendor/distlib/wheel.py +1082 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/__init__.py +82 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/__main__.py +17 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py +940 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py +158 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py +23 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/bbcode.py +108 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/groff.py +170 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/html.py +989 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/img.py +645 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/irc.py +154 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/latex.py +521 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/other.py +161 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py +83 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/rtf.py +146 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/svg.py +188 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/terminal.py +127 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/formatters/terminal256.py +338 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py +362 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py +559 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/lexers/python.py +1198 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/plugin.py +88 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/regexopt.py +91 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/scanner.py +104 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/sphinxext.py +217 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/style.py +197 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py +103 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/token.py +213 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/unistring.py +153 -0
- .venv/Lib/site-packages/pip/_vendor/pygments/util.py +330 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/__init__.py +322 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/actions.py +217 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/common.py +432 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/core.py +0 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/diagram/__init__.py +656 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/exceptions.py +299 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/helpers.py +1100 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/results.py +796 -0
- .venv/Lib/site-packages/pip/_vendor/pyparsing/testing.py +331 -0
.venv/Lib/site-packages/_virtualenv.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:69ac3d8f27e679c81b94ab30b3b56e9cd138219b1ba94a1fa3606d5a76a1433d
|
| 3 |
+
size 18
|
.venv/Lib/site-packages/distutils-precedence.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2638ce9e2500e572a5e0de7faed6661eb569d1b696fcba07b0dd223da5f5d224
|
| 3 |
+
size 151
|
.venv/Lib/site-packages/pip/_vendor/distlib/__init__.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2012-2022 Vinay Sajip.
|
| 4 |
+
# Licensed to the Python Software Foundation under a contributor agreement.
|
| 5 |
+
# See LICENSE.txt and CONTRIBUTORS.txt.
|
| 6 |
+
#
|
| 7 |
+
import logging
|
| 8 |
+
|
| 9 |
+
__version__ = '0.3.6'
|
| 10 |
+
|
| 11 |
+
class DistlibException(Exception):
|
| 12 |
+
pass
|
| 13 |
+
|
| 14 |
+
try:
|
| 15 |
+
from logging import NullHandler
|
| 16 |
+
except ImportError: # pragma: no cover
|
| 17 |
+
class NullHandler(logging.Handler):
|
| 18 |
+
def handle(self, record): pass
|
| 19 |
+
def emit(self, record): pass
|
| 20 |
+
def createLock(self): self.lock = None
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
logger.addHandler(NullHandler())
|
.venv/Lib/site-packages/pip/_vendor/distlib/t32.exe
ADDED
|
Binary file (97.8 kB). View file
|
|
|
.venv/Lib/site-packages/pip/_vendor/distlib/t64-arm.exe
ADDED
|
Binary file (183 kB). View file
|
|
|
.venv/Lib/site-packages/pip/_vendor/distlib/t64.exe
ADDED
|
Binary file (108 kB). View file
|
|
|
.venv/Lib/site-packages/pip/_vendor/distlib/util.py
ADDED
|
@@ -0,0 +1,1932 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Copyright (C) 2012-2021 The Python Software Foundation.
|
| 3 |
+
# See LICENSE.txt and CONTRIBUTORS.txt.
|
| 4 |
+
#
|
| 5 |
+
import codecs
|
| 6 |
+
from collections import deque
|
| 7 |
+
import contextlib
|
| 8 |
+
import csv
|
| 9 |
+
from glob import iglob as std_iglob
|
| 10 |
+
import io
|
| 11 |
+
import json
|
| 12 |
+
import logging
|
| 13 |
+
import os
|
| 14 |
+
import py_compile
|
| 15 |
+
import re
|
| 16 |
+
import socket
|
| 17 |
+
try:
|
| 18 |
+
import ssl
|
| 19 |
+
except ImportError: # pragma: no cover
|
| 20 |
+
ssl = None
|
| 21 |
+
import subprocess
|
| 22 |
+
import sys
|
| 23 |
+
import tarfile
|
| 24 |
+
import tempfile
|
| 25 |
+
import textwrap
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import threading
|
| 29 |
+
except ImportError: # pragma: no cover
|
| 30 |
+
import dummy_threading as threading
|
| 31 |
+
import time
|
| 32 |
+
|
| 33 |
+
from . import DistlibException
|
| 34 |
+
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
|
| 35 |
+
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
|
| 36 |
+
splittype, HTTPHandler, BaseConfigurator, valid_ident,
|
| 37 |
+
Container, configparser, URLError, ZipFile, fsdecode,
|
| 38 |
+
unquote, urlparse)
|
| 39 |
+
|
| 40 |
+
logger = logging.getLogger(__name__)
|
| 41 |
+
|
| 42 |
+
#
|
| 43 |
+
# Requirement parsing code as per PEP 508
|
| 44 |
+
#
|
| 45 |
+
|
| 46 |
+
IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
|
| 47 |
+
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
|
| 48 |
+
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
|
| 49 |
+
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
|
| 50 |
+
OR = re.compile(r'^or\b\s*')
|
| 51 |
+
AND = re.compile(r'^and\b\s*')
|
| 52 |
+
NON_SPACE = re.compile(r'(\S+)\s*')
|
| 53 |
+
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def parse_marker(marker_string):
|
| 57 |
+
"""
|
| 58 |
+
Parse a marker string and return a dictionary containing a marker expression.
|
| 59 |
+
|
| 60 |
+
The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
|
| 61 |
+
the expression grammar, or strings. A string contained in quotes is to be
|
| 62 |
+
interpreted as a literal string, and a string not contained in quotes is a
|
| 63 |
+
variable (such as os_name).
|
| 64 |
+
"""
|
| 65 |
+
def marker_var(remaining):
|
| 66 |
+
# either identifier, or literal string
|
| 67 |
+
m = IDENTIFIER.match(remaining)
|
| 68 |
+
if m:
|
| 69 |
+
result = m.groups()[0]
|
| 70 |
+
remaining = remaining[m.end():]
|
| 71 |
+
elif not remaining:
|
| 72 |
+
raise SyntaxError('unexpected end of input')
|
| 73 |
+
else:
|
| 74 |
+
q = remaining[0]
|
| 75 |
+
if q not in '\'"':
|
| 76 |
+
raise SyntaxError('invalid expression: %s' % remaining)
|
| 77 |
+
oq = '\'"'.replace(q, '')
|
| 78 |
+
remaining = remaining[1:]
|
| 79 |
+
parts = [q]
|
| 80 |
+
while remaining:
|
| 81 |
+
# either a string chunk, or oq, or q to terminate
|
| 82 |
+
if remaining[0] == q:
|
| 83 |
+
break
|
| 84 |
+
elif remaining[0] == oq:
|
| 85 |
+
parts.append(oq)
|
| 86 |
+
remaining = remaining[1:]
|
| 87 |
+
else:
|
| 88 |
+
m = STRING_CHUNK.match(remaining)
|
| 89 |
+
if not m:
|
| 90 |
+
raise SyntaxError('error in string literal: %s' % remaining)
|
| 91 |
+
parts.append(m.groups()[0])
|
| 92 |
+
remaining = remaining[m.end():]
|
| 93 |
+
else:
|
| 94 |
+
s = ''.join(parts)
|
| 95 |
+
raise SyntaxError('unterminated string: %s' % s)
|
| 96 |
+
parts.append(q)
|
| 97 |
+
result = ''.join(parts)
|
| 98 |
+
remaining = remaining[1:].lstrip() # skip past closing quote
|
| 99 |
+
return result, remaining
|
| 100 |
+
|
| 101 |
+
def marker_expr(remaining):
|
| 102 |
+
if remaining and remaining[0] == '(':
|
| 103 |
+
result, remaining = marker(remaining[1:].lstrip())
|
| 104 |
+
if remaining[0] != ')':
|
| 105 |
+
raise SyntaxError('unterminated parenthesis: %s' % remaining)
|
| 106 |
+
remaining = remaining[1:].lstrip()
|
| 107 |
+
else:
|
| 108 |
+
lhs, remaining = marker_var(remaining)
|
| 109 |
+
while remaining:
|
| 110 |
+
m = MARKER_OP.match(remaining)
|
| 111 |
+
if not m:
|
| 112 |
+
break
|
| 113 |
+
op = m.groups()[0]
|
| 114 |
+
remaining = remaining[m.end():]
|
| 115 |
+
rhs, remaining = marker_var(remaining)
|
| 116 |
+
lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
|
| 117 |
+
result = lhs
|
| 118 |
+
return result, remaining
|
| 119 |
+
|
| 120 |
+
def marker_and(remaining):
|
| 121 |
+
lhs, remaining = marker_expr(remaining)
|
| 122 |
+
while remaining:
|
| 123 |
+
m = AND.match(remaining)
|
| 124 |
+
if not m:
|
| 125 |
+
break
|
| 126 |
+
remaining = remaining[m.end():]
|
| 127 |
+
rhs, remaining = marker_expr(remaining)
|
| 128 |
+
lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
|
| 129 |
+
return lhs, remaining
|
| 130 |
+
|
| 131 |
+
def marker(remaining):
|
| 132 |
+
lhs, remaining = marker_and(remaining)
|
| 133 |
+
while remaining:
|
| 134 |
+
m = OR.match(remaining)
|
| 135 |
+
if not m:
|
| 136 |
+
break
|
| 137 |
+
remaining = remaining[m.end():]
|
| 138 |
+
rhs, remaining = marker_and(remaining)
|
| 139 |
+
lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
|
| 140 |
+
return lhs, remaining
|
| 141 |
+
|
| 142 |
+
return marker(marker_string)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def parse_requirement(req):
|
| 146 |
+
"""
|
| 147 |
+
Parse a requirement passed in as a string. Return a Container
|
| 148 |
+
whose attributes contain the various parts of the requirement.
|
| 149 |
+
"""
|
| 150 |
+
remaining = req.strip()
|
| 151 |
+
if not remaining or remaining.startswith('#'):
|
| 152 |
+
return None
|
| 153 |
+
m = IDENTIFIER.match(remaining)
|
| 154 |
+
if not m:
|
| 155 |
+
raise SyntaxError('name expected: %s' % remaining)
|
| 156 |
+
distname = m.groups()[0]
|
| 157 |
+
remaining = remaining[m.end():]
|
| 158 |
+
extras = mark_expr = versions = uri = None
|
| 159 |
+
if remaining and remaining[0] == '[':
|
| 160 |
+
i = remaining.find(']', 1)
|
| 161 |
+
if i < 0:
|
| 162 |
+
raise SyntaxError('unterminated extra: %s' % remaining)
|
| 163 |
+
s = remaining[1:i]
|
| 164 |
+
remaining = remaining[i + 1:].lstrip()
|
| 165 |
+
extras = []
|
| 166 |
+
while s:
|
| 167 |
+
m = IDENTIFIER.match(s)
|
| 168 |
+
if not m:
|
| 169 |
+
raise SyntaxError('malformed extra: %s' % s)
|
| 170 |
+
extras.append(m.groups()[0])
|
| 171 |
+
s = s[m.end():]
|
| 172 |
+
if not s:
|
| 173 |
+
break
|
| 174 |
+
if s[0] != ',':
|
| 175 |
+
raise SyntaxError('comma expected in extras: %s' % s)
|
| 176 |
+
s = s[1:].lstrip()
|
| 177 |
+
if not extras:
|
| 178 |
+
extras = None
|
| 179 |
+
if remaining:
|
| 180 |
+
if remaining[0] == '@':
|
| 181 |
+
# it's a URI
|
| 182 |
+
remaining = remaining[1:].lstrip()
|
| 183 |
+
m = NON_SPACE.match(remaining)
|
| 184 |
+
if not m:
|
| 185 |
+
raise SyntaxError('invalid URI: %s' % remaining)
|
| 186 |
+
uri = m.groups()[0]
|
| 187 |
+
t = urlparse(uri)
|
| 188 |
+
# there are issues with Python and URL parsing, so this test
|
| 189 |
+
# is a bit crude. See bpo-20271, bpo-23505. Python doesn't
|
| 190 |
+
# always parse invalid URLs correctly - it should raise
|
| 191 |
+
# exceptions for malformed URLs
|
| 192 |
+
if not (t.scheme and t.netloc):
|
| 193 |
+
raise SyntaxError('Invalid URL: %s' % uri)
|
| 194 |
+
remaining = remaining[m.end():].lstrip()
|
| 195 |
+
else:
|
| 196 |
+
|
| 197 |
+
def get_versions(ver_remaining):
|
| 198 |
+
"""
|
| 199 |
+
Return a list of operator, version tuples if any are
|
| 200 |
+
specified, else None.
|
| 201 |
+
"""
|
| 202 |
+
m = COMPARE_OP.match(ver_remaining)
|
| 203 |
+
versions = None
|
| 204 |
+
if m:
|
| 205 |
+
versions = []
|
| 206 |
+
while True:
|
| 207 |
+
op = m.groups()[0]
|
| 208 |
+
ver_remaining = ver_remaining[m.end():]
|
| 209 |
+
m = VERSION_IDENTIFIER.match(ver_remaining)
|
| 210 |
+
if not m:
|
| 211 |
+
raise SyntaxError('invalid version: %s' % ver_remaining)
|
| 212 |
+
v = m.groups()[0]
|
| 213 |
+
versions.append((op, v))
|
| 214 |
+
ver_remaining = ver_remaining[m.end():]
|
| 215 |
+
if not ver_remaining or ver_remaining[0] != ',':
|
| 216 |
+
break
|
| 217 |
+
ver_remaining = ver_remaining[1:].lstrip()
|
| 218 |
+
# Some packages have a trailing comma which would break things
|
| 219 |
+
# See issue #148
|
| 220 |
+
if not ver_remaining:
|
| 221 |
+
break
|
| 222 |
+
m = COMPARE_OP.match(ver_remaining)
|
| 223 |
+
if not m:
|
| 224 |
+
raise SyntaxError('invalid constraint: %s' % ver_remaining)
|
| 225 |
+
if not versions:
|
| 226 |
+
versions = None
|
| 227 |
+
return versions, ver_remaining
|
| 228 |
+
|
| 229 |
+
if remaining[0] != '(':
|
| 230 |
+
versions, remaining = get_versions(remaining)
|
| 231 |
+
else:
|
| 232 |
+
i = remaining.find(')', 1)
|
| 233 |
+
if i < 0:
|
| 234 |
+
raise SyntaxError('unterminated parenthesis: %s' % remaining)
|
| 235 |
+
s = remaining[1:i]
|
| 236 |
+
remaining = remaining[i + 1:].lstrip()
|
| 237 |
+
# As a special diversion from PEP 508, allow a version number
|
| 238 |
+
# a.b.c in parentheses as a synonym for ~= a.b.c (because this
|
| 239 |
+
# is allowed in earlier PEPs)
|
| 240 |
+
if COMPARE_OP.match(s):
|
| 241 |
+
versions, _ = get_versions(s)
|
| 242 |
+
else:
|
| 243 |
+
m = VERSION_IDENTIFIER.match(s)
|
| 244 |
+
if not m:
|
| 245 |
+
raise SyntaxError('invalid constraint: %s' % s)
|
| 246 |
+
v = m.groups()[0]
|
| 247 |
+
s = s[m.end():].lstrip()
|
| 248 |
+
if s:
|
| 249 |
+
raise SyntaxError('invalid constraint: %s' % s)
|
| 250 |
+
versions = [('~=', v)]
|
| 251 |
+
|
| 252 |
+
if remaining:
|
| 253 |
+
if remaining[0] != ';':
|
| 254 |
+
raise SyntaxError('invalid requirement: %s' % remaining)
|
| 255 |
+
remaining = remaining[1:].lstrip()
|
| 256 |
+
|
| 257 |
+
mark_expr, remaining = parse_marker(remaining)
|
| 258 |
+
|
| 259 |
+
if remaining and remaining[0] != '#':
|
| 260 |
+
raise SyntaxError('unexpected trailing data: %s' % remaining)
|
| 261 |
+
|
| 262 |
+
if not versions:
|
| 263 |
+
rs = distname
|
| 264 |
+
else:
|
| 265 |
+
rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
|
| 266 |
+
return Container(name=distname, extras=extras, constraints=versions,
|
| 267 |
+
marker=mark_expr, url=uri, requirement=rs)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def get_resources_dests(resources_root, rules):
|
| 271 |
+
"""Find destinations for resources files"""
|
| 272 |
+
|
| 273 |
+
def get_rel_path(root, path):
|
| 274 |
+
# normalizes and returns a lstripped-/-separated path
|
| 275 |
+
root = root.replace(os.path.sep, '/')
|
| 276 |
+
path = path.replace(os.path.sep, '/')
|
| 277 |
+
assert path.startswith(root)
|
| 278 |
+
return path[len(root):].lstrip('/')
|
| 279 |
+
|
| 280 |
+
destinations = {}
|
| 281 |
+
for base, suffix, dest in rules:
|
| 282 |
+
prefix = os.path.join(resources_root, base)
|
| 283 |
+
for abs_base in iglob(prefix):
|
| 284 |
+
abs_glob = os.path.join(abs_base, suffix)
|
| 285 |
+
for abs_path in iglob(abs_glob):
|
| 286 |
+
resource_file = get_rel_path(resources_root, abs_path)
|
| 287 |
+
if dest is None: # remove the entry if it was here
|
| 288 |
+
destinations.pop(resource_file, None)
|
| 289 |
+
else:
|
| 290 |
+
rel_path = get_rel_path(abs_base, abs_path)
|
| 291 |
+
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
|
| 292 |
+
destinations[resource_file] = rel_dest + '/' + rel_path
|
| 293 |
+
return destinations
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def in_venv():
|
| 297 |
+
if hasattr(sys, 'real_prefix'):
|
| 298 |
+
# virtualenv venvs
|
| 299 |
+
result = True
|
| 300 |
+
else:
|
| 301 |
+
# PEP 405 venvs
|
| 302 |
+
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
|
| 303 |
+
return result
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def get_executable():
|
| 307 |
+
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
|
| 308 |
+
# changes to the stub launcher mean that sys.executable always points
|
| 309 |
+
# to the stub on OS X
|
| 310 |
+
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
|
| 311 |
+
# in os.environ):
|
| 312 |
+
# result = os.environ['__PYVENV_LAUNCHER__']
|
| 313 |
+
# else:
|
| 314 |
+
# result = sys.executable
|
| 315 |
+
# return result
|
| 316 |
+
# Avoid normcasing: see issue #143
|
| 317 |
+
# result = os.path.normcase(sys.executable)
|
| 318 |
+
result = sys.executable
|
| 319 |
+
if not isinstance(result, text_type):
|
| 320 |
+
result = fsdecode(result)
|
| 321 |
+
return result
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
|
| 325 |
+
p = prompt
|
| 326 |
+
while True:
|
| 327 |
+
s = raw_input(p)
|
| 328 |
+
p = prompt
|
| 329 |
+
if not s and default:
|
| 330 |
+
s = default
|
| 331 |
+
if s:
|
| 332 |
+
c = s[0].lower()
|
| 333 |
+
if c in allowed_chars:
|
| 334 |
+
break
|
| 335 |
+
if error_prompt:
|
| 336 |
+
p = '%c: %s\n%s' % (c, error_prompt, prompt)
|
| 337 |
+
return c
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def extract_by_key(d, keys):
|
| 341 |
+
if isinstance(keys, string_types):
|
| 342 |
+
keys = keys.split()
|
| 343 |
+
result = {}
|
| 344 |
+
for key in keys:
|
| 345 |
+
if key in d:
|
| 346 |
+
result[key] = d[key]
|
| 347 |
+
return result
|
| 348 |
+
|
| 349 |
+
def read_exports(stream):
|
| 350 |
+
if sys.version_info[0] >= 3:
|
| 351 |
+
# needs to be a text stream
|
| 352 |
+
stream = codecs.getreader('utf-8')(stream)
|
| 353 |
+
# Try to load as JSON, falling back on legacy format
|
| 354 |
+
data = stream.read()
|
| 355 |
+
stream = StringIO(data)
|
| 356 |
+
try:
|
| 357 |
+
jdata = json.load(stream)
|
| 358 |
+
result = jdata['extensions']['python.exports']['exports']
|
| 359 |
+
for group, entries in result.items():
|
| 360 |
+
for k, v in entries.items():
|
| 361 |
+
s = '%s = %s' % (k, v)
|
| 362 |
+
entry = get_export_entry(s)
|
| 363 |
+
assert entry is not None
|
| 364 |
+
entries[k] = entry
|
| 365 |
+
return result
|
| 366 |
+
except Exception:
|
| 367 |
+
stream.seek(0, 0)
|
| 368 |
+
|
| 369 |
+
def read_stream(cp, stream):
|
| 370 |
+
if hasattr(cp, 'read_file'):
|
| 371 |
+
cp.read_file(stream)
|
| 372 |
+
else:
|
| 373 |
+
cp.readfp(stream)
|
| 374 |
+
|
| 375 |
+
cp = configparser.ConfigParser()
|
| 376 |
+
try:
|
| 377 |
+
read_stream(cp, stream)
|
| 378 |
+
except configparser.MissingSectionHeaderError:
|
| 379 |
+
stream.close()
|
| 380 |
+
data = textwrap.dedent(data)
|
| 381 |
+
stream = StringIO(data)
|
| 382 |
+
read_stream(cp, stream)
|
| 383 |
+
|
| 384 |
+
result = {}
|
| 385 |
+
for key in cp.sections():
|
| 386 |
+
result[key] = entries = {}
|
| 387 |
+
for name, value in cp.items(key):
|
| 388 |
+
s = '%s = %s' % (name, value)
|
| 389 |
+
entry = get_export_entry(s)
|
| 390 |
+
assert entry is not None
|
| 391 |
+
#entry.dist = self
|
| 392 |
+
entries[name] = entry
|
| 393 |
+
return result
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def write_exports(exports, stream):
    """Write an exports mapping to *stream* in INI format.

    :param exports: dict mapping group name -> {entry name: ExportEntry}.
    :param stream: a binary stream; wrapped in a UTF-8 writer on Python 3.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            s = entry.prefix if entry.suffix is None else (
                '%s:%s' % (entry.prefix, entry.suffix))
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream)
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory, removed on exit."""
    td = tempfile.mkdtemp()
    try:
        yield td
    finally:
        shutil.rmtree(td)
|
| 422 |
+
|
| 423 |
+
@contextlib.contextmanager
def chdir(d):
    """Context manager running its body with *d* as the working directory,
    restoring the previous one afterwards."""
    saved = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(saved)
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Temporarily set the global socket default timeout to *seconds*."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
class cached_property(object):
    """Non-data descriptor caching the wrapped method's result.

    On first access the wrapped function is called and its result is
    stored on the instance under the function's name, shadowing the
    descriptor so later accesses are plain attribute lookups.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        if obj is None:
            # Accessed on the class itself - return the descriptor.
            return self
        value = self.func(obj)
        # Use object.__setattr__ to bypass any overridden __setattr__.
        object.__setattr__(obj, self.func.__name__, value)
        return value
|
| 456 |
+
|
| 457 |
+
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator.  Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem.  Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname[0] == '/':
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname[-1] == '/':
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # Drop any '.' components before re-joining with the native separator.
    parts = [p for p in pathname.split('/') if p != os.curdir]
    if not parts:
        return os.curdir
    return os.path.join(*parts)
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
class FileOperator(object):
    """Carry out file-system operations, honouring dry-run and recording.

    With ``dry_run`` set, mutating operations are logged but skipped.
    Setting ``record`` to True makes the instance track written files and
    created directories, which ``commit()`` returns and ``rollback()``
    undoes.
    """

    def __init__(self, dry_run=False):
        self.dry_run = dry_run
        self.ensured = set()    # directories already known to exist
        self._init_record()

    def _init_record(self):
        # Recording starts disabled; commit()/rollback() call this to reset.
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        # Track files only while recording is enabled.
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True
        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        # Copy a stream's contents to *outfile*; text mode when an
        # encoding is given, binary otherwise.
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        # Replace any existing file at *path* with *data* (bytes).
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            if os.path.exists(path):
                os.remove(path)
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        # Encode *data* and delegate to write_binary_file.
        self.write_binary_file(path, data.encode(encoding))

    def set_mode(self, bits, mask, files):
        # POSIX only (including Jython-on-POSIX): OR *bits* into each
        # file's mode and AND the result with *mask*.
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    # Convenience wrapper: make files readable/executable by everyone.
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        # Recursively create *path*, caching known paths in self.ensured
        # to avoid repeated filesystem checks.
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None,
                     hashed_invalidation=False):
        # Byte-compile *path* to its cache location and return that path.
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
                compile_kwargs = {}
                if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
                    # PEP 552 hash-based pyc invalidation (Python 3.7+).
                    compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
                py_compile.compile(path, dpath, diagpath, True, **compile_kwargs)  # raise error
            self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        # Remove a file, symlink or directory tree, updating record sets.
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        # Walk up from *path* to the first existing ancestor and test
        # write access on it.
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                # reached the filesystem root without finding anything
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        # Undo recorded writes: remove files first, then directories
        # bottom-up (sorted reverse so subdirs precede their parents).
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)  # should fail if non-empty
        self._init_record()
|
| 666 |
+
|
| 667 |
+
def resolve(module_name, dotted_path):
    """Resolve *dotted_path* relative to module *module_name*.

    Imports the module if necessary, then walks attribute access along
    *dotted_path* (e.g. ``resolve('os', 'path.join')`` returns
    ``os.path.join``).  If *dotted_path* is None, the module itself is
    returned.

    :raises ImportError: if the module cannot be imported.
    :raises AttributeError: if an attribute along the path is missing.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        # __import__ returns the *top-level* package for a dotted name,
        # so fetch the actual submodule from sys.modules afterwards.
        __import__(module_name)
        mod = sys.modules[module_name]
    if dotted_path is None:
        result = mod
    else:
        parts = dotted_path.split('.')
        result = getattr(mod, parts.pop(0))
        for p in parts:
            result = getattr(result, p)
    return result
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
class ExportEntry(object):
    """A single export entry: ``name = prefix:suffix [flags]``."""

    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        # Resolved lazily and cached on the instance by cached_property.
        return resolve(self.prefix, self.suffix)

    def __repr__(self):  # pragma: no cover
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        if not isinstance(other, ExportEntry):
            return False
        return (self.name == other.name and
                self.prefix == other.prefix and
                self.suffix == other.suffix and
                self.flags == other.flags)

    # Equality is value-based but hashing stays identity-based.
    __hash__ = object.__hash__
|
| 708 |
+
|
| 709 |
+
|
| 710 |
+
# Matches 'name = prefix[:suffix] [flag, flag=value, ...]' specifications.
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)


def get_export_entry(specification):
    """Parse *specification* into an ExportEntry, or return None when it
    does not look like an export entry at all.

    :raises DistlibException: for malformed specifications (stray
        brackets, more than one ':' in the callable part).
    """
    m = ENTRY_RE.search(specification)
    if not m:
        result = None
        # Brackets without a matching entry shape indicate a malformed
        # specification rather than a non-entry.
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
    else:
        d = m.groupdict()
        name = d['name']
        path = d['callable']
        colons = path.count(':')
        if colons == 0:
            prefix, suffix = path, None
        else:
            if colons != 1:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            prefix, suffix = path.split(':')
        flags = d['flags']
        if flags is None:
            if '[' in specification or ']' in specification:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            flags = []
        else:
            flags = [f.strip() for f in flags.split(',')]
        result = ExportEntry(name, prefix, suffix, flags)
    return result
|
| 744 |
+
|
| 745 |
+
|
| 746 |
+
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        # Fall back to a throwaway temporary directory.
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return os.path.join(result, suffix)
|
| 785 |
+
|
| 786 |
+
|
| 787 |
+
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, tail = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    tail = tail.replace(os.sep, '--')
    return drive + tail + '.cache'
|
| 802 |
+
|
| 803 |
+
|
| 804 |
+
def ensure_slash(s):
    """Return *s*, appending a trailing '/' if it lacks one."""
    return s if s.endswith('/') else s + '/'
|
| 808 |
+
|
| 809 |
+
|
| 810 |
+
def parse_credentials(netloc):
    """Split a ``user:pass@host`` netloc into (username, password, host).

    Missing components come back as None; username and password are
    URL-unquoted.
    """
    username = password = None
    if '@' in netloc:
        # rsplit: the password itself may contain an (escaped) '@'.
        prefix, netloc = netloc.rsplit('@', 1)
        if ':' not in prefix:
            username = prefix
        else:
            username, password = prefix.split(':', 1)
        if username:
            username = unquote(username)
        if password:
            password = unquote(password)
    return username, password, netloc
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
def get_process_umask():
    """Return the current process umask without changing it.

    os.umask both sets and returns, so set a throwaway value and then
    immediately restore the original.
    """
    result = os.umask(0o22)
    os.umask(result)
    return result
|
| 829 |
+
|
| 830 |
+
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    *seq* must be non-empty: the trailing assert fires on an empty
    sequence (the loop variable stays None).
    """
    found = None
    ok = True
    for found, item in enumerate(seq):
        if not isinstance(item, string_types):
            ok = False
            break
    assert found is not None
    return ok
|
| 839 |
+
|
| 840 |
+
# 'name-version' shape for distribution filenames (case-insensitive).
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
# Trailing '-pyX[.Y]' Python-version tag.
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')


def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    result = None
    pyver = None
    filename = unquote(filename).replace(' ', '-')
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    if project_name and len(filename) > len(project_name) + 1:
        # Prefer an exact match on the known project name.
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            result = filename[:n], filename[n + 1:], pyver
    if result is None:
        m = PROJECT_NAME_AND_VERSION.match(filename)
        if m:
            result = m.group(1), m.group(3), pyver
    return result
|
| 868 |
+
|
| 869 |
+
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')


def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.

    From e.g. a Provides-Dist value.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    """
    m = NAME_VERSION_RE.match(p)
    if not m:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    d = m.groupdict()
    # Name is normalised: stripped and lower-cased.
    return d['name'].strip().lower(), d['ver']
|
| 887 |
+
|
| 888 |
+
def get_extras(requested, available):
    """Resolve a requested set of extras against those available.

    '*' selects all available extras; an entry '-name' removes one from
    the result; plain names are added (with a warning if undeclared).
    Returns a set of extra names.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.remove('*')
        result |= available
    for r in requested:
        if r == '-':
            # a lone '-' is kept literally
            result.add(r)
        elif r.startswith('-'):
            unwanted = r[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            if unwanted in result:
                result.remove(unwanted)
        else:
            if r not in available:
                logger.warning('undeclared extra: %s' % r)
            result.add(r)
    return result
|
| 909 |
+
#
|
| 910 |
+
# Extended metadata functionality
|
| 911 |
+
#
|
| 912 |
+
|
| 913 |
+
def _get_external_data(url):
    """Fetch and decode a JSON document from *url*.

    Returns {} on any failure (logged, never raised) or when the
    response is not served as application/json.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        ct = headers.get('Content-Type')
        if not ct.startswith('application/json'):
            logger.debug('Unexpected response for JSON request: %s', ct)
        else:
            reader = codecs.getreader('utf-8')(resp)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result


_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
|
| 934 |
+
|
| 935 |
+
def get_project_data(name):
    """Fetch the external project metadata JSON for project *name*."""
    # Layout on the server is <initial>/<name>/project.json.
    url = '%s/%s/project.json' % (name[0].upper(), name)
    url = urljoin(_external_data_base_url, url)
    return _get_external_data(url)
|
| 940 |
+
|
| 941 |
+
def get_package_data(name, version):
    """Fetch the external package metadata JSON for *name* at *version*."""
    url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
    url = urljoin(_external_data_base_url, url)
    return _get_external_data(url)
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """

    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # we use 'isdir' instead of 'exists', because we want to
        # fail if there's a file with that name
        if not os.path.isdir(base):  # pragma: no cover
            os.makedirs(base)
        # Warn when group/other bits are set on the cache directory.
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def clear(self):
        """
        Clear the cache.
        """
        not_removed = []
        for name in os.listdir(self.base):
            entry = os.path.join(self.base, name)
            try:
                if os.path.islink(entry) or os.path.isfile(entry):
                    os.remove(entry)
                elif os.path.isdir(entry):
                    shutil.rmtree(entry)
            except Exception:
                # Best effort: report what could not be removed.
                not_removed.append(entry)
        return not_removed
|
| 989 |
+
|
| 990 |
+
|
| 991 |
+
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """

    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        subs = self._subscribers
        if event not in subs:
            subs[event] = deque([subscriber])
        else:
            sq = subs[event]
            if append:
                sq.append(subscriber)
            else:
                sq.appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        """
        subs = self._subscribers
        if event not in subs:
            raise ValueError('No subscribers: %r' % event)
        subs[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.
        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        result = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                # A failing subscriber must not break the others; its
                # slot in the result list is None.
                logger.exception('Exception during event publication')
                value = None
            result.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, result)
        return result
|
| 1059 |
+
|
| 1060 |
+
#
|
| 1061 |
+
# Simple sequencing
|
| 1062 |
+
#
|
| 1063 |
+
class Sequencer(object):
    """Maintain a predecessor/successor dependency graph and derive step
    orderings from it."""

    def __init__(self):
        self._preds = {}
        self._succs = {}
        self._nodes = set()  # nodes with no preds/succs

    def add_node(self, node):
        self._nodes.add(node)

    def remove_node(self, node, edges=False):
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]

    def add(self, pred, succ):
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of %r' % (succ, pred))

    def is_step(self, step):
        # A step is known if it appears anywhere in the graph.
        return (step in self._preds or step in self._succs or
                step in self._nodes)

    def get_steps(self, final):
        # Breadth-first walk back through predecessors; the reversed
        # result lists prerequisites before the steps that need them.
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)

    @property
    def strong_connections(self):
        # http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []

        graph = self._succs

        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)

            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node], lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node], index[successor])

            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []

                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node:
                        break
                component = tuple(connected_component)
                # storing the result
                result.append(component)

        for node in graph:
            if node not in lowlinks:
                strongconnect(node)

        return result

    @property
    def dot(self):
        # Render the graph in Graphviz DOT format.
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('    %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('    %s;' % node)
        result.append('}')
        return '\n'.join(result)
|
| 1197 |
+
|
| 1198 |
+
#
|
| 1199 |
+
# Unarchiving functionality for zip, tar, tgz, tbz, whl
|
| 1200 |
+
#
|
| 1201 |
+
|
| 1202 |
+
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')


def unarchive(archive_filename, dest_dir, format=None, check=True):
    """Extract an archive (zip/whl or tar[.gz|.bz2]) into *dest_dir*.

    :param format: one of 'zip', 'tgz', 'tbz', 'tar'; inferred from the
        filename extension when None.
    :param check: when true, every member path is validated to stay
        inside *dest_dir* (guards against path-traversal entries).
    :raises ValueError: for an unknown extension or an unsafe member path.
    """

    def check_path(path):
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        if not p.startswith(dest_dir) or p[plen] != os.sep:
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:  # pragma: no cover
            raise ValueError('Unknown format for %r' % archive_filename)
    # Derive the tarfile open mode from the format rather than setting it
    # only during extension sniffing: previously, passing a tar-style
    # ``format`` explicitly left ``mode`` unbound (UnboundLocalError).
    tar_modes = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            archive = tarfile.open(archive_filename, tar_modes[format])
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
|
| 1257 |
+
|
| 1258 |
+
|
| 1259 |
+
def zip_dir(directory):
    """zip a directory tree into a BytesIO object"""
    buf = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(buf, "w") as zf:
        for root, dirs, files in os.walk(directory):
            # Archive names are the paths relative to *directory*.
            relative = root[prefix_len:]
            for fname in files:
                zf.write(os.path.join(root, fname),
                         os.path.join(relative, fname))
    return buf
|
| 1271 |
+
|
| 1272 |
+
#
|
| 1273 |
+
# Simple progress bar
|
| 1274 |
+
#
|
| 1275 |
+
|
| 1276 |
+
# Magnitude suffixes used by Progress.speed: successive factors of 1000.
UNITS = ('', 'K', 'M', 'G','T','P')
|
| 1277 |
+
|
| 1278 |
+
|
| 1279 |
+
class Progress(object):
    """
    Track progress of a long-running operation between a minimum and an
    (optionally unknown) maximum value, with percentage, ETA and
    transfer-speed reporting.
    """
    # Sentinel reported by ``maximum`` when no upper bound is known.
    unknown = 'UNKNOWN'

    def __init__(self, minval=0, maxval=100):
        # maxval may be None to indicate an unbounded operation.
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None   # wall-clock time of the first update()
        self.elapsed = 0      # seconds between first and latest update()
        self.done = False

    def update(self, curval):
        # Record a new current value and refresh the elapsed-time clock.
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started

    def increment(self, incr):
        # Convenience wrapper: advance the current value by a non-negative step.
        assert incr >= 0
        self.update(self.cur + incr)

    def start(self):
        # Reset to the minimum; returns self to allow chaining.
        self.update(self.min)
        return self

    def stop(self):
        # Snap to the maximum (when known) and mark the operation finished.
        if self.max is not None:
            self.update(self.max)
        self.done = True

    @property
    def maximum(self):
        # The known maximum, or the 'UNKNOWN' sentinel when unbounded.
        return self.unknown if self.max is None else self.max

    @property
    def percentage(self):
        """Progress rendered as a fixed-width string such as ' 42 %'."""
        if self.done:
            result = '100 %'
        elif self.max is None:
            # Unbounded operations cannot report a meaningful percentage.
            result = ' ?? %'
        else:
            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
            result = '%3d %%' % v
        return result

    def format_duration(self, duration):
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # ((duration <= 0 and self.max is None) or self.cur == self.min)
        # -- presumably intentional, but worth confirming against upstream.
        if (duration <= 0) and self.max is None or self.cur == self.min:
            result = '??:??:??'
        #elif duration < 1:
        #    result = '--:--:--'
        else:
            result = time.strftime('%H:%M:%S', time.gmtime(duration))
        return result

    @property
    def ETA(self):
        """Estimated time remaining (or total elapsed time once done)."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
            #import pdb; pdb.set_trace()
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                #import pdb; pdb.set_trace()
                # Linear extrapolation: remaining = (total/done - 1) * elapsed.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))

    @property
    def speed(self):
        """Average rate of progress formatted with K/M/G/T/P suffixes."""
        if self.elapsed == 0:
            result = 0.0
        else:
            result = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if result < 1000:
                break
            result /= 1000.0
        return '%d %sB/s' % (result, unit)
|
| 1367 |
+
|
| 1368 |
+
#
|
| 1369 |
+
# Glob functionality
|
| 1370 |
+
#
|
| 1371 |
+
|
| 1372 |
+
# {opt1,opt2,...} alternation marker.
RICH_GLOB = re.compile(r'\{([^}]*)\}')
# '**' adjacent to anything other than a path separator, comma or brace.
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
# A '}' with no preceding '{', or a '{' never closed.
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')


def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    # Validate eagerly, before handing off to the lazy generator, so that
    # malformed patterns raise immediately rather than on first iteration.
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        raise ValueError('invalid glob %r: recursive glob "**" must '
                         'be used alone' % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        raise ValueError("invalid glob %r: mismatching set marker '{' or '}'"
                         % path_glob)
    return _iglob(path_glob)


def _iglob(path_glob):
    # Expand one {a,b,...} alternation (if any), then recurse per option.
    pieces = RICH_GLOB.split(path_glob, 1)
    if len(pieces) > 1:
        assert len(pieces) == 3, pieces
        head, options, tail = pieces
        for option in options.split(','):
            for path in _iglob(''.join((head, option, tail))):
                yield path
    elif '**' not in path_glob:
        # Plain pattern: delegate to the standard library.
        for item in std_iglob(path_glob):
            yield item
    else:
        prefix, radical = path_glob.split('**', 1)
        if prefix == '':
            prefix = '.'
        if radical == '':
            radical = '*'
        else:
            # we support both '/' and '\\' separators after '**'
            radical = radical.lstrip('/')
            radical = radical.lstrip('\\')
        for path, dir, files in os.walk(prefix):
            path = os.path.normpath(path)
            for fn in _iglob(os.path.join(path, radical)):
                yield fn
|
| 1414 |
+
|
| 1415 |
+
if ssl:
|
| 1416 |
+
from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
|
| 1417 |
+
CertificateError)
|
| 1418 |
+
|
| 1419 |
+
|
| 1420 |
+
#
|
| 1421 |
+
# HTTPSConnection which verifies certificates/matches domains
|
| 1422 |
+
#
|
| 1423 |
+
|
| 1424 |
+
class HTTPSConnection(httplib.HTTPSConnection):
    """
    HTTPS connection that can verify the server certificate against a CA
    bundle and match the certificate against the requested host name.
    """
    ca_certs = None # set this to the path to the certs file (.pem)
    check_domain = True # only used if ca_certs is not None

    # noinspection PyPropertyAccess
    def connect(self):
        # Establish the plain TCP connection first; TLS is layered on below.
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if getattr(self, '_tunnel_host', False):
            # Going through a CONNECT proxy: set up the tunnel before TLS.
            self.sock = sock
            self._tunnel()

        # PROTOCOL_SSLv23 negotiates the highest mutually supported version.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        if hasattr(ssl, 'OP_NO_SSLv2'):
            # Explicitly disable the long-broken SSLv2.
            context.options |= ssl.OP_NO_SSLv2
        if self.cert_file:
            # Client-side certificate, if one was configured.
            context.load_cert_chain(self.cert_file, self.key_file)
        kwargs = {}
        if self.ca_certs:
            context.verify_mode = ssl.CERT_REQUIRED
            context.load_verify_locations(cafile=self.ca_certs)
            if getattr(ssl, 'HAS_SNI', False):
                # Send the host name during the handshake (SNI) when supported.
                kwargs['server_hostname'] = self.host

        self.sock = context.wrap_socket(sock, **kwargs)
        if self.ca_certs and self.check_domain:
            try:
                match_hostname(self.sock.getpeercert(), self.host)
                logger.debug('Host verified: %s', self.host)
            except CertificateError: # pragma: no cover
                # Host name mismatch: tear the socket down before re-raising.
                self.sock.shutdown(socket.SHUT_RDWR)
                self.sock.close()
                raise
|
| 1456 |
+
|
| 1457 |
+
class HTTPSHandler(BaseHTTPSHandler):
    """
    urllib-style opener handler that creates HTTPSConnection instances
    configured with an optional CA bundle and host-name checking.
    """
    def __init__(self, ca_certs, check_domain=True):
        BaseHTTPSHandler.__init__(self)
        self.ca_certs = ca_certs
        self.check_domain = check_domain

    def _conn_maker(self, *args, **kwargs):
        """
        This is called to create a connection instance. Normally you'd
        pass a connection class to do_open, but it doesn't actually check for
        a class, and just expects a callable. As long as we behave just as a
        constructor would have, we should be OK. If it ever changes so that
        we *must* pass a class, we'll create an UnsafeHTTPSConnection class
        which just sets check_domain to False in the class definition, and
        choose which one to pass to do_open.
        """
        conn = HTTPSConnection(*args, **kwargs)
        if self.ca_certs:
            conn.ca_certs = self.ca_certs
            conn.check_domain = self.check_domain
        return conn

    def https_open(self, req):
        try:
            return self.do_open(self._conn_maker, req)
        except URLError as e:
            # Re-raise anything that isn't a certificate failure unchanged.
            if 'certificate verify failed' not in str(e.reason):
                raise
            raise CertificateError('Unable to verify server certificate '
                                   'for %s' % req.host)
|
| 1488 |
+
|
| 1489 |
+
#
|
| 1490 |
+
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
|
| 1491 |
+
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
|
| 1492 |
+
# HTML containing a http://xyz link when it should be https://xyz),
|
| 1493 |
+
# you can use the following handler class, which does not allow HTTP traffic.
|
| 1494 |
+
#
|
| 1495 |
+
# It works by inheriting from HTTPHandler - so build_opener won't add a
|
| 1496 |
+
# handler for HTTP itself.
|
| 1497 |
+
#
|
| 1498 |
+
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
    """
    Handler that refuses plain-HTTP requests outright, guarding against
    accidental downgrade from HTTPS (e.g. a MITM proxy listening on 443,
    or an index serving http:// links where https:// was expected).
    """
    def http_open(self, req):
        raise URLError('Unexpected HTTP request on what should be a secure '
                       'connection: %s' % req)
|
| 1502 |
+
|
| 1503 |
+
#
|
| 1504 |
+
# XML-RPC with timeouts
|
| 1505 |
+
#
|
| 1506 |
+
class Transport(xmlrpclib.Transport):
    """
    xmlrpclib Transport subclass whose connections honour a caller-supplied
    timeout (the stock Transport offers no way to set one).
    """
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        """
        Return a (cached) HTTPConnection for *host*, creating a fresh one
        configured with self.timeout when the host changes.
        """
        h, eh, x509 = self.get_host_info(host)
        if not self._connection or host != self._connection[0]:
            self._extra_headers = eh
            # BUGFIX: self.timeout was previously never applied here, so the
            # whole point of this subclass was lost for plain-HTTP endpoints;
            # SafeTransport (below) already passes the timeout through.
            self._connection = host, httplib.HTTPConnection(
                h, timeout=self.timeout)
        return self._connection[1]
|
| 1517 |
+
|
| 1518 |
+
if ssl:
    class SafeTransport(xmlrpclib.SafeTransport):
        """
        xmlrpclib SafeTransport subclass that applies a caller-supplied
        timeout to the HTTPS connections it creates.
        """
        def __init__(self, timeout, use_datetime=0):
            self.timeout = timeout
            xmlrpclib.SafeTransport.__init__(self, use_datetime)

        def make_connection(self, host):
            # Reuse the cached connection while the host stays the same.
            h, eh, kwargs = self.get_host_info(host)
            if not kwargs:
                kwargs = {}
            kwargs['timeout'] = self.timeout
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPSConnection(
                    h, None, **kwargs)
            return self._connection[1]
|
| 1534 |
+
|
| 1535 |
+
|
| 1536 |
+
class ServerProxy(xmlrpclib.ServerProxy):
    """
    ServerProxy accepting an extra ``timeout`` keyword, routed through the
    timeout-aware Transport/SafeTransport classes defined above.
    """
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout is specified.
        if timeout is not None:
            # scheme = splittype(uri)  # deprecated as of Python 3.8
            scheme = urlparse(uri)[0]
            use_datetime = kwargs.get('use_datetime', 0)
            transport_cls = SafeTransport if scheme == 'https' else Transport
            t = transport_cls(timeout, use_datetime=use_datetime)
            kwargs['transport'] = t
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
|
| 1552 |
+
|
| 1553 |
+
#
|
| 1554 |
+
# CSV functionality. This is provided because on 2.x, the csv module can't
|
| 1555 |
+
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
|
| 1556 |
+
#
|
| 1557 |
+
|
| 1558 |
+
def _csv_open(fn, mode, **kwargs):
    # Open a file suitably for the csv module: binary mode on 2.x,
    # text mode with newline='' and forced utf-8 on 3.x.
    if sys.version_info[0] < 3:
        mode += 'b'
    else:
        kwargs['newline'] = ''
        # Python 3 determines encoding from locale. Force 'utf-8'
        # file encoding to match other forced utf-8 encoding
        kwargs['encoding'] = 'utf-8'
    return open(fn, mode, **kwargs)


class CSVBase(object):
    """Shared context-manager plumbing and csv dialect defaults."""
    defaults = {
        'delimiter': str(','),      # The strs are used because we need native
        'quotechar': str('"'),      # str in the csv API (2.x won't take
        'lineterminator': str('\n') # Unicode)
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.stream.close()


class CSVReader(CSVBase):
    """Iterate rows from a CSV byte stream or file path as text rows."""
    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # needs to be a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        row = next(self.reader)
        if sys.version_info[0] < 3:
            # 2.x csv yields bytes; decode each cell in place.
            for i, cell in enumerate(row):
                if not isinstance(cell, text_type):
                    row[i] = cell.decode('utf-8')
        return row

    __next__ = next


class CSVWriter(CSVBase):
    """Write rows to a file as CSV, encoding text cells on 2.x."""
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            row = [cell.encode('utf-8') if isinstance(cell, text_type)
                   else cell for cell in row]
        self.writer.writerow(row)
|
| 1622 |
+
|
| 1623 |
+
#
|
| 1624 |
+
# Configurator functionality
|
| 1625 |
+
#
|
| 1626 |
+
|
| 1627 |
+
class Configurator(BaseConfigurator):
    """
    Configurator extending the base converter protocol with an ``inc://``
    protocol for including JSON files, and able to instantiate custom
    objects described by dicts containing a '()' key.
    """

    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'

    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # Base directory used to resolve relative inc:// paths.
        self.base = base or os.getcwd()

    def configure_custom(self, config):
        """
        Instantiate an object from a dict: '()' names the callable,
        '[]' gives positional args, '.' gives post-construction attribute
        assignments, and remaining valid-identifier keys become kwargs.
        """
        def convert(o):
            # Recursively convert containers; delegate scalars to the base
            # class's protocol-aware convert().
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    # Nested object specification: build it recursively.
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result

        c = config.pop('()')
        if not callable(c):
            # A dotted-name string: resolve it to the actual callable.
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            # Apply post-construction attribute assignments.
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result

    def __getitem__(self, key):
        # Lazily instantiate '()' specifications on first access, caching
        # the constructed object back into the config.
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            self.config[key] = result = self.configure_custom(result)
        return result

    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
|
| 1680 |
+
|
| 1681 |
+
|
| 1682 |
+
class SubprocessMixin(object):
    """
    Mixin for running subprocesses and capturing their output
    """
    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        callback = self.progress
        chatty = self.verbose
        while True:
            line = stream.readline()
            if not line:
                break
            if callback is not None:
                callback(line, context)
            else:
                if chatty:
                    sys.stderr.write(line.decode('utf-8'))
                else:
                    sys.stderr.write('.')
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """Run *cmd*, draining stdout/stderr on two reader threads; return
        the finished Popen object."""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, **kwargs)
        workers = [
            threading.Thread(target=self.reader, args=(proc.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(proc.stderr, 'stderr')),
        ]
        for worker in workers:
            worker.start()
        proc.wait()
        for worker in workers:
            worker.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return proc
|
| 1726 |
+
|
| 1727 |
+
|
| 1728 |
+
def normalize_name(name):
    """Normalize a python package name a la PEP 503"""
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    return re.sub('[-_.]+', '-', name.lower())
|
| 1732 |
+
|
| 1733 |
+
# def _get_pypirc_command():
|
| 1734 |
+
# """
|
| 1735 |
+
# Get the distutils command for interacting with PyPI configurations.
|
| 1736 |
+
# :return: the command.
|
| 1737 |
+
# """
|
| 1738 |
+
# from distutils.core import Distribution
|
| 1739 |
+
# from distutils.config import PyPIRCCommand
|
| 1740 |
+
# d = Distribution()
|
| 1741 |
+
# return PyPIRCCommand(d)
|
| 1742 |
+
|
| 1743 |
+
class PyPIRCFile(object):
    """
    Reader/writer for the ~/.pypirc configuration file used to store PyPI
    repository credentials, supporting both the 'distutils' index-servers
    format and the legacy 'server-login' format.
    """

    DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
    DEFAULT_REALM = 'pypi'

    def __init__(self, fn=None, url=None):
        # fn: path to the .pypirc file (defaults to ~/.pypirc).
        # url: repository URL used to select the matching server entry.
        if fn is None:
            fn = os.path.join(os.path.expanduser('~'), '.pypirc')
        self.filename = fn
        self.url = url

    def read(self):
        """
        Parse the file and return a dict with 'username', 'password',
        'repository', 'server' and 'realm' keys for the matching server,
        or an empty dict when nothing matches.
        """
        result = {}

        if os.path.exists(self.filename):
            repository = self.url or self.DEFAULT_REPOSITORY

            config = configparser.RawConfigParser()
            config.read(self.filename)
            sections = config.sections()
            if 'distutils' in sections:
                # let's get the list of servers
                index_servers = config.get('distutils', 'index-servers')
                _servers = [server.strip() for server in
                            index_servers.split('\n')
                            if server.strip() != '']
                if _servers == []:
                    # nothing set, let's try to get the default pypi
                    if 'pypi' in sections:
                        _servers = ['pypi']
                else:
                    # NOTE(review): each iteration rebinds `result`, so the
                    # *last* server that survives the filtering below wins.
                    for server in _servers:
                        result = {'server': server}
                        result['username'] = config.get(server, 'username')

                        # optional params
                        for key, default in (('repository', self.DEFAULT_REPOSITORY),
                                             ('realm', self.DEFAULT_REALM),
                                             ('password', None)):
                            if config.has_option(server, key):
                                result[key] = config.get(server, key)
                            else:
                                result[key] = default

                        # work around people having "repository" for the "pypi"
                        # section of their config set to the HTTP (rather than
                        # HTTPS) URL
                        if (server == 'pypi' and
                                repository in (self.DEFAULT_REPOSITORY, 'pypi')):
                            result['repository'] = self.DEFAULT_REPOSITORY
                        elif (result['server'] != repository and
                                result['repository'] != repository):
                            # neither name nor URL matches the requested
                            # repository: discard this entry.
                            result = {}
            elif 'server-login' in sections:
                # old format
                server = 'server-login'
                if config.has_option(server, 'repository'):
                    repository = config.get(server, 'repository')
                else:
                    repository = self.DEFAULT_REPOSITORY
                result = {
                    'username': config.get(server, 'username'),
                    'password': config.get(server, 'password'),
                    'repository': repository,
                    'server': server,
                    'realm': self.DEFAULT_REALM
                }
        return result

    def update(self, username, password):
        """Store *username*/*password* in the [pypi] section, creating the
        section (and file) as needed."""
        # import pdb; pdb.set_trace()
        config = configparser.RawConfigParser()
        fn = self.filename
        config.read(fn)
        if not config.has_section('pypi'):
            config.add_section('pypi')
        config.set('pypi', 'username', username)
        config.set('pypi', 'password', password)
        with open(fn, 'w') as f:
            config.write(f)
|
| 1823 |
+
|
| 1824 |
+
def _load_pypirc(index):
    """
    Read the PyPI access configuration as supported by distutils.
    """
    pypirc = PyPIRCFile(url=index.url)
    return pypirc.read()
|
| 1829 |
+
|
| 1830 |
+
def _store_pypirc(index):
    # Persist the index's credentials into the default ~/.pypirc file.
    pypirc = PyPIRCFile()
    pypirc.update(index.username, index.password)
|
| 1832 |
+
|
| 1833 |
+
#
|
| 1834 |
+
# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor
|
| 1835 |
+
# tweaks
|
| 1836 |
+
#
|
| 1837 |
+
|
| 1838 |
+
def get_host_platform():
    """Return a string that identifies the current platform. This is used mainly to
    distinguish platform-specific build directories and platform-specific built
    distributions. Typically includes the OS name and version and the
    architecture (as supplied by 'os.uname()'), although the exact information
    included depends on the OS; eg. on Linux, the kernel version isn't
    particularly important.

    Examples of returned values:
        linux-i586
        linux-alpha (?)
        solaris-2.6-sun4u

    Windows will return one of:
        win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
        win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.

    """
    if os.name == 'nt':
        # sys.version embeds the build platform tag, e.g.
        # '... [MSC v.1916 64 bit (AMD64)]'.
        if 'amd64' in sys.version.lower():
            return 'win-amd64'
        if '(arm)' in sys.version.lower():
            return 'win-arm32'
        if '(arm64)' in sys.version.lower():
            return 'win-arm64'
        return sys.platform

    # Set for cross builds explicitly
    if "_PYTHON_HOST_PLATFORM" in os.environ:
        return os.environ["_PYTHON_HOST_PLATFORM"]

    if os.name != 'posix' or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform

    # Try to distinguish various flavours of Unix

    (osname, host, release, version, machine) = os.uname()

    # Convert the OS name to lowercase, remove '/' characters, and translate
    # spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_').replace('/', '-')

    if osname[:5] == 'linux':
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)

    elif osname[:5] == 'sunos':
        if release[0] >= '5': # SunOS 5 == Solaris 2
            osname = 'solaris'
            release = '%d.%s' % (int(release[0]) - 3, release[2:])
            # We can't use 'platform.architecture()[0]' because a
            # bootstrap problem. We use a dict to get an error
            # if some suspicious happens.
            bitness = {2147483647:'32bit', 9223372036854775807:'64bit'}
            machine += '.%s' % bitness[sys.maxsize]
        # fall through to standard osname-release-machine representation
    elif osname[:3] == 'aix':
        from _aix_support import aix_platform
        return aix_platform()
    elif osname[:6] == 'cygwin':
        osname = 'cygwin'
        # Keep only the leading numeric/dotted part of the release string.
        rel_re = re.compile (r'[\d.]+', re.ASCII)
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == 'darwin':
        # Delegate macOS naming to the interpreter's own support module.
        import _osx_support, distutils.sysconfig
        osname, release, machine = _osx_support.get_platform_osx(
                                               distutils.sysconfig.get_config_vars(),
                                               osname, release, machine)

    return '%s-%s-%s' % (osname, release, machine)
|
| 1917 |
+
|
| 1918 |
+
|
| 1919 |
+
# Map VSCMD_ARG_TGT_ARCH cross-compilation targets (set by the Visual
# Studio developer command prompt) to sysconfig-style platform tags.
_TARGET_TO_PLAT = {
    'x86': 'win32',
    'x64': 'win-amd64',
    'arm': 'win-arm32',
    # Added for ARM64 cross builds, matching newer CPython/distutils.
    'arm64': 'win-arm64',
}
|
| 1924 |
+
|
| 1925 |
+
|
| 1926 |
+
def get_platform():
    """Return the platform being built for: the host platform, unless a
    Windows cross-compilation target (VSCMD_ARG_TGT_ARCH) maps to a known
    platform tag."""
    if os.name != 'nt':
        return get_host_platform()
    target = os.environ.get('VSCMD_ARG_TGT_ARCH')
    if target in _TARGET_TO_PLAT:
        return _TARGET_TO_PLAT[target]
    return get_host_platform()
|
.venv/Lib/site-packages/pip/_vendor/distlib/version.py
ADDED
|
@@ -0,0 +1,739 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2012-2017 The Python Software Foundation.
|
| 4 |
+
# See LICENSE.txt and CONTRIBUTORS.txt.
|
| 5 |
+
#
|
| 6 |
+
"""
|
| 7 |
+
Implementation of a flexible versioning scheme providing support for PEP-440,
|
| 8 |
+
setuptools-compatible and semantic versioning.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
import re
|
| 13 |
+
|
| 14 |
+
from .compat import string_types
|
| 15 |
+
from .util import parse_requirement
|
| 16 |
+
|
| 17 |
+
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
|
| 18 |
+
'LegacyVersion', 'LegacyMatcher',
|
| 19 |
+
'SemanticVersion', 'SemanticMatcher',
|
| 20 |
+
'UnsupportedVersionError', 'get_scheme']
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class UnsupportedVersionError(ValueError):
    """Raised for a version string the scheme cannot handle."""
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class Version(object):
    """
    Base class for version objects.

    Subclasses supply parse(), which turns the (stripped) version string
    into a non-empty tuple; all comparisons, hashing and string conversion
    are driven by that tuple.
    """
    def __init__(self, s):
        # Keep both the normalized source string and its parsed form.
        self._string = s = s.strip()
        self._parts = parts = self.parse(s)
        assert isinstance(parts, tuple)
        assert len(parts) > 0

    def parse(self, s):
        raise NotImplementedError('please implement in a subclass')

    def _check_compatible(self, other):
        # Versions from different schemes must never be compared silently.
        if type(self) != type(other):
            raise TypeError('cannot compare %r and %r' % (self, other))

    def __eq__(self, other):
        self._check_compatible(other)
        return self._parts == other._parts

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        self._check_compatible(other)
        return self._parts < other._parts

    def __gt__(self, other):
        return not (self < other or self == other)

    def __le__(self, other):
        return (self < other) or (self == other)

    def __ge__(self, other):
        return (self > other) or (self == other)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self._parts)

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._string)

    def __str__(self):
        return self._string

    @property
    def is_prerelease(self):
        raise NotImplementedError('Please implement in subclasses.')
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class Matcher(object):
    """Base class for requirement matchers.

    Parses a requirement string such as ``foo (>= 1.0, != 1.3.*)`` into a
    distribution name plus a tuple of ``(operator, constraint, is_prefix)``
    triples, and checks candidate versions against them via :meth:`match`.
    Subclasses must set :attr:`version_class` to the Version subclass used
    to parse constraint values.
    """

    # Version subclass used to parse candidate/constraint strings; must be
    # set by subclasses.
    version_class = None

    # value is either a callable or the name of a method
    _operators = {
        '<': lambda v, c, p: v < c,
        '>': lambda v, c, p: v > c,
        '<=': lambda v, c, p: v == c or v < c,
        '>=': lambda v, c, p: v == c or v > c,
        '==': lambda v, c, p: v == c,
        '===': lambda v, c, p: v == c,
        # by default, compatible => >=.
        '~=': lambda v, c, p: v == c or v > c,
        '!=': lambda v, c, p: v != c,
    }

    # this is a method only to support alternative implementations
    # via overriding
    def parse_requirement(self, s):
        # Delegates to distlib.util.parse_requirement; presumably returns an
        # object with .name and .constraints, or a falsy value when the
        # string is unparseable — TODO confirm against distlib.util.
        return parse_requirement(s)

    def __init__(self, s):
        """Parse requirement string *s*; raise ValueError if it is invalid
        or if the subclass did not configure a version class."""
        if self.version_class is None:
            raise ValueError('Please specify a version class')
        self._string = s = s.strip()
        r = self.parse_requirement(s)
        if not r:
            raise ValueError('Not valid: %r' % s)
        self.name = r.name
        self.key = self.name.lower()    # for case-insensitive comparisons
        clist = []
        if r.constraints:
            # import pdb; pdb.set_trace()
            for op, s in r.constraints:
                if s.endswith('.*'):
                    # Prefix ('X.Y.*') notation: only equality operators
                    # support it.
                    if op not in ('==', '!='):
                        raise ValueError('\'.*\' not allowed for '
                                         '%r constraints' % op)
                    # Could be a partial version (e.g. for '2.*') which
                    # won't parse as a version, so keep it as a string
                    vn, prefix = s[:-2], True
                    # Just to check that vn is a valid version
                    self.version_class(vn)
                else:
                    # Should parse as a version, so we can create an
                    # instance for the comparison
                    vn, prefix = self.version_class(s), False
                clist.append((op, vn, prefix))
        self._parts = tuple(clist)

    def match(self, version):
        """
        Check if the provided version matches the constraints.

        :param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
        :return: True if *version* satisfies every constraint.
        """
        if isinstance(version, string_types):
            version = self.version_class(version)
        for operator, constraint, prefix in self._parts:
            # Table values may be callables or (in subclasses) method names.
            f = self._operators.get(operator)
            if isinstance(f, string_types):
                f = getattr(self, f)
            if not f:
                msg = ('%r not implemented '
                       'for %s' % (operator, self.__class__.__name__))
                raise NotImplementedError(msg)
            if not f(version, constraint, prefix):
                return False
        return True

    @property
    def exact_version(self):
        # The pinned version when this matcher is a single '=='/'==='
        # constraint; None otherwise.
        result = None
        if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
            result = self._parts[0][1]
        return result

    def _check_compatible(self, other):
        # Matchers compare only against same-type matchers for the same name.
        if type(self) != type(other) or self.name != other.name:
            raise TypeError('cannot compare %s and %s' % (self, other))

    def __eq__(self, other):
        self._check_compatible(other)
        return self.key == other.key and self._parts == other._parts

    def __ne__(self, other):
        return not self.__eq__(other)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self.key) + hash(self._parts)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._string)

    def __str__(self):
        return self._string
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
|
| 180 |
+
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
|
| 181 |
+
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def _pep_440_key(s):
|
| 185 |
+
s = s.strip()
|
| 186 |
+
m = PEP440_VERSION_RE.match(s)
|
| 187 |
+
if not m:
|
| 188 |
+
raise UnsupportedVersionError('Not a valid version: %s' % s)
|
| 189 |
+
groups = m.groups()
|
| 190 |
+
nums = tuple(int(v) for v in groups[1].split('.'))
|
| 191 |
+
while len(nums) > 1 and nums[-1] == 0:
|
| 192 |
+
nums = nums[:-1]
|
| 193 |
+
|
| 194 |
+
if not groups[0]:
|
| 195 |
+
epoch = 0
|
| 196 |
+
else:
|
| 197 |
+
epoch = int(groups[0][:-1])
|
| 198 |
+
pre = groups[4:6]
|
| 199 |
+
post = groups[7:9]
|
| 200 |
+
dev = groups[10:12]
|
| 201 |
+
local = groups[13]
|
| 202 |
+
if pre == (None, None):
|
| 203 |
+
pre = ()
|
| 204 |
+
else:
|
| 205 |
+
pre = pre[0], int(pre[1])
|
| 206 |
+
if post == (None, None):
|
| 207 |
+
post = ()
|
| 208 |
+
else:
|
| 209 |
+
post = post[0], int(post[1])
|
| 210 |
+
if dev == (None, None):
|
| 211 |
+
dev = ()
|
| 212 |
+
else:
|
| 213 |
+
dev = dev[0], int(dev[1])
|
| 214 |
+
if local is None:
|
| 215 |
+
local = ()
|
| 216 |
+
else:
|
| 217 |
+
parts = []
|
| 218 |
+
for part in local.split('.'):
|
| 219 |
+
# to ensure that numeric compares as > lexicographic, avoid
|
| 220 |
+
# comparing them directly, but encode a tuple which ensures
|
| 221 |
+
# correct sorting
|
| 222 |
+
if part.isdigit():
|
| 223 |
+
part = (1, int(part))
|
| 224 |
+
else:
|
| 225 |
+
part = (0, part)
|
| 226 |
+
parts.append(part)
|
| 227 |
+
local = tuple(parts)
|
| 228 |
+
if not pre:
|
| 229 |
+
# either before pre-release, or final release and after
|
| 230 |
+
if not post and dev:
|
| 231 |
+
# before pre-release
|
| 232 |
+
pre = ('a', -1) # to sort before a0
|
| 233 |
+
else:
|
| 234 |
+
pre = ('z',) # to sort after all pre-releases
|
| 235 |
+
# now look at the state of post and dev.
|
| 236 |
+
if not post:
|
| 237 |
+
post = ('_',) # sort before 'a'
|
| 238 |
+
if not dev:
|
| 239 |
+
dev = ('final',)
|
| 240 |
+
|
| 241 |
+
#print('%s -> %s' % (s, m.groups()))
|
| 242 |
+
return epoch, nums, pre, post, dev, local
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
_normalized_key = _pep_440_key
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
class NormalizedVersion(Version):
    """A rational (PEP 440) version.

    Good:
        1.2         # equivalent to "1.2.0"
        1.2.0
        1.2a1
        1.2.3a2
        1.2.3b1
        1.2.3c1
        1.2.3.4
        TODO: fill this out

    Bad:
        1           # minimum two numbers
        1.2a        # release level must have a release serial
        1.2.3b
    """

    def parse(self, s):
        key = _normalized_key(s)
        # _normalized_key discards trailing zeroes in the release clause,
        # since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0.
        # However, PEP 440 prefix matching needs the full clause: for
        # example, (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
        # Keep it separately on the instance.
        m = PEP440_VERSION_RE.match(s)      # must succeed; key parsed above
        self._release_clause = tuple(int(p) for p in m.groups()[1].split('.'))
        return key

    PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])

    @property
    def is_prerelease(self):
        # True when any non-empty key component starts with a pre-release tag.
        return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def _match_prefix(x, y):
|
| 285 |
+
x = str(x)
|
| 286 |
+
y = str(y)
|
| 287 |
+
if x == y:
|
| 288 |
+
return True
|
| 289 |
+
if not x.startswith(y):
|
| 290 |
+
return False
|
| 291 |
+
n = len(y)
|
| 292 |
+
return x[n] == '.'
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class NormalizedMatcher(Matcher):
    """Matcher implementing PEP 440 comparison semantics over
    NormalizedVersion.

    Each operator maps to a method name so that prefix ('X.Y.*') matching,
    local-version ('+local') stripping and '~=' compatible-release
    semantics can be handled individually.
    """

    version_class = NormalizedVersion

    # value is either a callable or the name of a method
    _operators = {
        '~=': '_match_compatible',
        '<': '_match_lt',
        '>': '_match_gt',
        '<=': '_match_le',
        '>=': '_match_ge',
        '==': '_match_eq',
        '===': '_match_arbitrary',
        '!=': '_match_ne',
    }

    def _adjust_local(self, version, constraint, prefix):
        # When the constraint carries no local ('+...') component, strip the
        # candidate's local component before comparing.
        if prefix:
            # constraint is still a string here (came from 'X.Y.*' notation).
            strip_local = '+' not in constraint and version._parts[-1]
        else:
            # both constraint and version are
            # NormalizedVersion instances.
            # If constraint does not have a local component,
            # ensure the version doesn't, either.
            strip_local = not constraint._parts[-1] and version._parts[-1]
        if strip_local:
            s = version._string.split('+', 1)[0]
            version = self.version_class(s)
        return version, constraint

    def _match_lt(self, version, constraint, prefix):
        # '< V' additionally excludes versions within V's own release series
        # (e.g. pre-releases of V itself).
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version >= constraint:
            return False
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)

    def _match_gt(self, version, constraint, prefix):
        # '> V' additionally excludes versions within V's own release series
        # (e.g. post-releases of V itself).
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version <= constraint:
            return False
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)

    def _match_le(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version <= constraint

    def _match_ge(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version >= constraint

    def _match_eq(self, version, constraint, prefix):
        # Plain equality, or series-prefix match when written as '== X.Y.*'.
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version == constraint)
        else:
            result = _match_prefix(version, constraint)
        return result

    def _match_arbitrary(self, version, constraint, prefix):
        # '===' is simple string equality.
        return str(version) == str(constraint)

    def _match_ne(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version != constraint)
        else:
            result = not _match_prefix(version, constraint)
        return result

    def _match_compatible(self, version, constraint, prefix):
        # '~= X.Y.Z' behaves like '>= X.Y.Z, == X.Y.*' (compatible release).
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version == constraint:
            return True
        if version < constraint:
            return False
        # if not prefix:
        #     return True
        release_clause = constraint._release_clause
        if len(release_clause) > 1:
            release_clause = release_clause[:-1]
        pfx = '.'.join([str(i) for i in release_clause])
        return _match_prefix(version, pfx)
|
| 380 |
+
|
| 381 |
+
# Whole-string clean-ups applied before looking for a numeric prefix.
_REPLACEMENTS = (
    (re.compile('[.+-]$'), ''),                     # remove trailing puncts
    (re.compile(r'^[.](\d)'), r'0.\1'),             # .N -> 0.N at start
    (re.compile('^[.-]'), ''),                      # remove leading puncts
    (re.compile(r'^\((.*)\)$'), r'\1'),             # remove parentheses
    (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'),    # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),        # remove leading r(ev)
    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
    (re.compile(r'\b(alfa|apha)\b'), 'alpha'),      # misspelt alpha
    (re.compile(r'\b(pre-alpha|prealpha)\b'),
        'pre.alpha'),                               # standardise
    (re.compile(r'\(beta\)$'), 'beta'),             # remove parentheses
)

# Clean-ups applied only to whatever follows the numeric prefix.
_SUFFIX_REPLACEMENTS = (
    (re.compile('^[:~._+-]+'), ''),       # remove leading puncts
    (re.compile('[,*")([\\]]'), ''),      # remove unwanted chars
    (re.compile('[~:+_ -]'), '.'),        # replace illegal chars
    (re.compile('[.]{2,}'), '.'),         # multiple runs of '.'
    (re.compile(r'\.$'), ''),             # trailing '.'
)

# Leading dotted-number run, e.g. '1.2.3' in '1.2.3-beta'.
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')


def _suggest_semantic_version(s):
    """
    Try to suggest a semantic form for a version for which
    _suggest_normalized_version couldn't come up with anything.

    Returns the suggestion, or None if the result still isn't valid semver.
    """
    result = s.strip().lower()
    for pat, repl in _REPLACEMENTS:
        result = pat.sub(repl, result)
    if not result:
        result = '0.0.0'

    # Now look for numeric prefix, and separate it out from
    # the rest.
    m = _NUMERIC_PREFIX.match(result)
    if not m:
        prefix = '0.0.0'
        suffix = result
    else:
        prefix = m.groups()[0].split('.')
        prefix = [int(i) for i in prefix]
        # Semver needs exactly major.minor.patch: pad short prefixes with
        # zeros, push extra components into the suffix.
        while len(prefix) < 3:
            prefix.append(0)
        if len(prefix) == 3:
            suffix = result[m.end():]
        else:
            suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
            prefix = prefix[:3]
        prefix = '.'.join([str(i) for i in prefix])
        suffix = suffix.strip()
    if suffix:
        # massage the suffix.
        for pat, repl in _SUFFIX_REPLACEMENTS:
            suffix = pat.sub(repl, suffix)

    if not suffix:
        result = prefix
    else:
        # 'dev' suffixes become pre-release ('-'), others become build
        # metadata ('+').
        sep = '-' if 'dev' in suffix else '+'
        result = prefix + sep + suffix
    # Only return the suggestion if it now parses as semver.
    if not is_semver(result):
        result = None
    return result
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def _suggest_normalized_version(s):
    """Suggest a normalized version close to the given version string.

    If you have a version string that isn't rational (i.e. NormalizedVersion
    doesn't like it) then you might be able to get an equivalent (or close)
    rational version from this function.

    This does a number of simple normalizations to the given string, based
    on observation of versions currently in use on PyPI. Given a dump of
    those version during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
      with the automatic suggestion
    - 3474 (81.04%) match when using this suggestion method

    @param s {str} An irrational version string.
    @returns A rational version string, or None, if couldn't determine one.
    """
    try:
        _normalized_key(s)
        return s   # already rational
    except UnsupportedVersionError:
        pass

    rs = s.lower()

    # part of this could use maketrans
    # NOTE: order matters — e.g. '-alpha' must be handled before 'alpha'.
    for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
                       ('beta', 'b'), ('rc', 'c'), ('-final', ''),
                       ('-pre', 'c'),
                       ('-release', ''), ('.release', ''), ('-stable', ''),
                       ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
                       ('final', '')):
        rs = rs.replace(orig, repl)

    # if something ends with dev or pre, we add a 0
    rs = re.sub(r"pre$", r"pre0", rs)
    rs = re.sub(r"dev$", r"dev0", rs)

    # if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
    # let's remove the dash or dot
    rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)

    # 1.0-dev-r371 -> 1.0.dev371
    # 0.1-dev-r79 -> 0.1.dev79
    rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)

    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
    rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)

    # Clean: v0.3, v1.0
    if rs.startswith('v'):
        rs = rs[1:]

    # Clean leading '0's on numbers.
    #TODO: unintended side-effect on, e.g., "2003.05.09"
    # PyPI stats: 77 (~2%) better
    rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)

    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
    # zero.
    # PyPI stats: 245 (7.56%) better
    rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)

    # the 'dev-rNNN' tag is a dev tag
    rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)

    # clean the - when used as a pre delimiter
    rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)

    # a terminal "dev" or "devel" can be changed into ".dev0"
    rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)

    # a terminal "dev" can be changed into ".dev0"
    rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)

    # a terminal "final" or "stable" can be removed
    rs = re.sub(r"(final|stable)$", "", rs)

    # The 'r' and the '-' tags are post release tags
    #   0.4a1.r10       ->  0.4a1.post10
    #   0.9.33-17222    ->  0.9.33.post17222
    #   0.9.33-r17222   ->  0.9.33.post17222
    rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)

    # Clean 'r' instead of 'dev' usage:
    #   0.9.33+r17222   ->  0.9.33.dev17222
    #   1.0dev123       ->  1.0.dev123
    #   1.0.git123      ->  1.0.dev123
    #   1.0.bzr123      ->  1.0.dev123
    #   0.1a0dev.123    ->  0.1a0.dev123
    # PyPI stats:  ~150 (~4%) better
    rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)

    # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
    #   0.2.pre1        ->  0.2c1
    #   0.2-c1          ->  0.2c1
    #   1.0preview123   ->  1.0c123
    # PyPI stats: ~21 (0.62%) better
    rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)

    # Tcl/Tk uses "px" for their post release markers
    rs = re.sub(r"p(\d+)$", r".post\1", rs)

    # Accept the suggestion only if it now parses as a normalized version.
    try:
        _normalized_key(rs)
    except UnsupportedVersionError:
        rs = None
    return rs
|
| 561 |
+
|
| 562 |
+
#
|
| 563 |
+
# Legacy version processing (distribute-compatible)
|
| 564 |
+
#
|
| 565 |
+
|
| 566 |
+
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
|
| 567 |
+
_VERSION_REPLACE = {
|
| 568 |
+
'pre': 'c',
|
| 569 |
+
'preview': 'c',
|
| 570 |
+
'-': 'final-',
|
| 571 |
+
'rc': 'c',
|
| 572 |
+
'dev': '@',
|
| 573 |
+
'': None,
|
| 574 |
+
'.': None,
|
| 575 |
+
}
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
def _legacy_key(s):
|
| 579 |
+
def get_parts(s):
|
| 580 |
+
result = []
|
| 581 |
+
for p in _VERSION_PART.split(s.lower()):
|
| 582 |
+
p = _VERSION_REPLACE.get(p, p)
|
| 583 |
+
if p:
|
| 584 |
+
if '0' <= p[:1] <= '9':
|
| 585 |
+
p = p.zfill(8)
|
| 586 |
+
else:
|
| 587 |
+
p = '*' + p
|
| 588 |
+
result.append(p)
|
| 589 |
+
result.append('*final')
|
| 590 |
+
return result
|
| 591 |
+
|
| 592 |
+
result = []
|
| 593 |
+
for p in get_parts(s):
|
| 594 |
+
if p.startswith('*'):
|
| 595 |
+
if p < '*final':
|
| 596 |
+
while result and result[-1] == '*final-':
|
| 597 |
+
result.pop()
|
| 598 |
+
while result and result[-1] == '00000000':
|
| 599 |
+
result.pop()
|
| 600 |
+
result.append(p)
|
| 601 |
+
return tuple(result)
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
class LegacyVersion(Version):
    """Version parsed under the legacy (setuptools-compatible) scheme."""

    def parse(self, s):
        return _legacy_key(s)

    @property
    def is_prerelease(self):
        # Pre-release markers are star-prefixed tokens that sort before
        # the '*final' sentinel.
        for part in self._parts:
            if (isinstance(part, string_types) and part.startswith('*') and
                    part < '*final'):
                return True
        return False
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
class LegacyMatcher(Matcher):
    """Matcher for the legacy scheme; only '~=' needs special handling."""

    version_class = LegacyVersion

    _operators = dict(Matcher._operators)
    _operators['~='] = '_match_compatible'

    # Leading dotted-number run of a constraint, used to derive the series.
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')

    def _match_compatible(self, version, constraint, prefix):
        # '~= X.Y.Z' means: at least X.Y.Z, and within the X.Y series.
        if version < constraint:
            return False
        m = self.numeric_re.match(str(constraint))
        if not m:
            logger.warning('Cannot compute compatible match for version %s '
                           ' and constraint %s', version, constraint)
            return True
        series = m.groups()[0]
        if '.' in series:
            # Drop the last numeric component to obtain the series prefix.
            series = series.rsplit('.', 1)[0]
        return _match_prefix(version, series)
|
| 639 |
+
|
| 640 |
+
#
|
| 641 |
+
# Semantic versioning
|
| 642 |
+
#
|
| 643 |
+
|
| 644 |
+
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
|
| 645 |
+
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
|
| 646 |
+
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
def is_semver(s):
|
| 650 |
+
return _SEMVER_RE.match(s)
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
def _semantic_key(s):
|
| 654 |
+
def make_tuple(s, absent):
|
| 655 |
+
if s is None:
|
| 656 |
+
result = (absent,)
|
| 657 |
+
else:
|
| 658 |
+
parts = s[1:].split('.')
|
| 659 |
+
# We can't compare ints and strings on Python 3, so fudge it
|
| 660 |
+
# by zero-filling numeric values so simulate a numeric comparison
|
| 661 |
+
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
|
| 662 |
+
return result
|
| 663 |
+
|
| 664 |
+
m = is_semver(s)
|
| 665 |
+
if not m:
|
| 666 |
+
raise UnsupportedVersionError(s)
|
| 667 |
+
groups = m.groups()
|
| 668 |
+
major, minor, patch = [int(i) for i in groups[:3]]
|
| 669 |
+
# choose the '|' and '*' so that versions sort correctly
|
| 670 |
+
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
|
| 671 |
+
return (major, minor, patch), pre, build
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
class SemanticVersion(Version):
    """Version following the semantic-versioning (semver) rules."""

    def parse(self, s):
        return _semantic_key(s)

    @property
    def is_prerelease(self):
        # Final releases carry the '|' sentinel in the pre-release slot.
        return self._parts[1][0] != '|'
|
| 681 |
+
|
| 682 |
+
|
| 683 |
+
class SemanticMatcher(Matcher):
    """Matcher over SemanticVersion; inherits the default operator table,
    which is sufficient because semver has no prefix/local semantics."""
    version_class = SemanticVersion
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
class VersionScheme(object):
    """Bundles a sort-key function, a Matcher class and an optional
    version-string suggester into a single named scheme."""

    def __init__(self, key, matcher, suggester=None):
        self.key = key              # sort-key function for raw version strings
        self.matcher = matcher      # Matcher subclass for constraint strings
        self.suggester = suggester  # optional repair function for bad strings

    def is_valid_version(self, s):
        """Return True if *s* parses under this scheme's version class."""
        try:
            self.matcher.version_class(s)
        except UnsupportedVersionError:
            return False
        return True

    def is_valid_matcher(self, s):
        """Return True if *s* parses as a requirement/constraint string."""
        try:
            self.matcher(s)
        except UnsupportedVersionError:
            return False
        return True

    def is_valid_constraint_list(self, s):
        """
        Used for processing some metadata fields
        """
        # See issue #140. Be tolerant of a single trailing comma.
        if s.endswith(','):
            s = s[:-1]
        return self.is_valid_matcher('dummy_name (%s)' % s)

    def suggest(self, s):
        """Return a repaired version string for *s*, or None when no
        suggester is configured (or it has no suggestion)."""
        return None if self.suggester is None else self.suggester(s)
|
| 724 |
+
|
| 725 |
+
# Registry of the supported version schemes. 'default' is an alias for the
# normalized (PEP 440) scheme.
_SCHEMES = {
    'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
                                _suggest_normalized_version),
    # The legacy scheme accepts any string, so the suggester is the identity.
    # NOTE: VersionScheme.suggest invokes suggester(s) with a single
    # argument, so the suggester must be unary — the previous
    # 'lambda self, s: s' signature raised TypeError whenever
    # get_scheme('legacy').suggest(...) was actually called.
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
    'semantic': VersionScheme(_semantic_key, SemanticMatcher,
                              _suggest_semantic_version),
}

_SCHEMES['default'] = _SCHEMES['normalized']


def get_scheme(name):
    """Return the VersionScheme registered under *name*.

    :param name: 'normalized', 'legacy', 'semantic' or 'default'.
    :raises ValueError: if *name* is not a registered scheme.
    """
    if name not in _SCHEMES:
        raise ValueError('unknown scheme name: %r' % name)
    return _SCHEMES[name]
|
.venv/Lib/site-packages/pip/_vendor/distlib/w32.exe
ADDED
|
Binary file (91.6 kB). View file
|
|
|
.venv/Lib/site-packages/pip/_vendor/distlib/w64-arm.exe
ADDED
|
Binary file (168 kB). View file
|
|
|
.venv/Lib/site-packages/pip/_vendor/distlib/w64.exe
ADDED
|
Binary file (102 kB). View file
|
|
|
.venv/Lib/site-packages/pip/_vendor/distlib/wheel.py
ADDED
|
@@ -0,0 +1,1082 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
#
|
| 3 |
+
# Copyright (C) 2013-2020 Vinay Sajip.
|
| 4 |
+
# Licensed to the Python Software Foundation under a contributor agreement.
|
| 5 |
+
# See LICENSE.txt and CONTRIBUTORS.txt.
|
| 6 |
+
#
|
| 7 |
+
from __future__ import unicode_literals
|
| 8 |
+
|
| 9 |
+
import base64
|
| 10 |
+
import codecs
|
| 11 |
+
import datetime
|
| 12 |
+
from email import message_from_file
|
| 13 |
+
import hashlib
|
| 14 |
+
import json
|
| 15 |
+
import logging
|
| 16 |
+
import os
|
| 17 |
+
import posixpath
|
| 18 |
+
import re
|
| 19 |
+
import shutil
|
| 20 |
+
import sys
|
| 21 |
+
import tempfile
|
| 22 |
+
import zipfile
|
| 23 |
+
|
| 24 |
+
from . import __version__, DistlibException
|
| 25 |
+
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
|
| 26 |
+
from .database import InstalledDistribution
|
| 27 |
+
from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
|
| 28 |
+
LEGACY_METADATA_FILENAME)
|
| 29 |
+
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
|
| 30 |
+
cached_property, get_cache_base, read_exports, tempdir,
|
| 31 |
+
get_platform)
|
| 32 |
+
from .version import NormalizedVersion, UnsupportedVersionError
|
| 33 |
+
|
| 34 |
+
logger = logging.getLogger(__name__)

# Module-level cache for extracted extension modules; created lazily by
# Wheel._get_dylib_cache() on first use.
cache = None

# Two-letter implementation prefix used in compatibility tags (PEP 425):
# pp = PyPy, jy = Jython, ip = IronPython, cp = CPython.
if hasattr(sys, 'pypy_version_info'):  # pragma: no cover
    IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):  # pragma: no cover
    IMP_PREFIX = 'jy'
elif sys.platform == 'cli':  # pragma: no cover
    IMP_PREFIX = 'ip'
else:
    IMP_PREFIX = 'cp'

# Dotless version suffix, e.g. '311' for Python 3.11.
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX:   # pragma: no cover
    VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX

# Platform tag, normalised as PEP 425 requires ('-' and '.' become '_').
ARCH = get_platform().replace('-', '_').replace('.', '_')

ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
    ABI = ABI.replace('cpython-', 'cp').split('-')[0]
else:
    def _derive_abi():
        # Reconstruct the ABI tag when SOABI is unavailable (e.g. on
        # Windows builds of CPython).
        parts = ['cp', VER_SUFFIX]
        if sysconfig.get_config_var('Py_DEBUG'):
            parts.append('d')
        if IMP_PREFIX == 'cp':
            vi = sys.version_info[:2]
            if vi < (3, 8):
                # The 'm' (pymalloc) ABI flag was dropped in Python 3.8.
                wpm = sysconfig.get_config_var('WITH_PYMALLOC')
                if wpm is None:
                    wpm = True
                if wpm:
                    parts.append('m')
                if vi < (3, 3):
                    # Wide-unicode flag only applies before PEP 393 (3.3).
                    us = sysconfig.get_config_var('Py_UNICODE_SIZE')
                    if us == 4 or (us is None and sys.maxunicode == 0x10FFFF):
                        parts.append('u')
        return ''.join(parts)
    ABI = _derive_abi()
    del _derive_abi

# Full wheel filename: name-version[-build]-pytag-abitag-archtag.whl
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)

# Just the name-version[-build] prefix, without tags or extension.
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)

# Patterns and replacement interpreters used when normalising the
# shebang lines of scripts packed into wheels.
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'

# Convert a native path to the forward-slash form used inside archives.
if os.sep == '/':
    to_posix = lambda o: o
else:
    to_posix = lambda o: o.replace(os.sep, '/')
|
| 104 |
+
|
| 105 |
+
# On Python 2 the (deprecated) 'imp' module handles extension loading;
# on Python 3 we use importlib instead, and 'imp' doubles as a feature
# flag (None means "use importlib") in the helpers below.
if sys.version_info[0] < 3:
    import imp
else:
    imp = None
    import importlib.machinery
    import importlib.util
|
| 111 |
+
|
| 112 |
+
def _get_suffixes():
    """Return the filename suffixes recognised for extension modules."""
    if imp is None:
        return importlib.machinery.EXTENSION_SUFFIXES
    return [s[0] for s in imp.get_suffixes()]
|
| 117 |
+
|
| 118 |
+
def _load_dynamic(name, path):
    """
    Load the extension module *name* from the shared library at *path*
    and return the module object.

    Uses imp.load_dynamic on Python 2, or the importlib spec machinery
    on Python 3. See
    https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
    """
    if imp:
        return imp.load_dynamic(name, path)
    else:
        spec = importlib.util.spec_from_file_location(name, path)
        module = importlib.util.module_from_spec(spec)
        # Register in sys.modules *before* exec, so self-referential
        # imports during module execution resolve correctly.
        sys.modules[name] = module
        spec.loader.exec_module(module)
        return module
|
| 128 |
+
|
| 129 |
+
class Mounter(object):
    """
    A sys.meta_path hook which resolves imports of C extension modules
    contained in mounted wheels.

    ``impure_wheels`` maps a wheel's pathname to the (module-name,
    extracted-path) pairs it contributed; ``libs`` is the flattened
    module-name -> path lookup consulted during import.
    """

    def __init__(self):
        self.impure_wheels = {}
        self.libs = {}

    def add(self, pathname, extensions):
        """Register the extension modules contributed by a wheel."""
        self.impure_wheels[pathname] = extensions
        self.libs.update(extensions)

    def remove(self, pathname):
        """Unregister a wheel's extensions from the import lookup."""
        for mod_name, _ in self.impure_wheels.pop(pathname):
            self.libs.pop(mod_name, None)

    def find_module(self, fullname, path=None):
        # Legacy finder protocol: return self when we can load it.
        return self if fullname in self.libs else None

    def load_module(self, fullname):
        # Legacy loader protocol: honour an already-imported module.
        if fullname in sys.modules:
            return sys.modules[fullname]
        if fullname not in self.libs:
            raise ImportError('unable to find extension for %s' % fullname)
        module = _load_dynamic(fullname, self.libs[fullname])
        module.__loader__ = self
        pkg, _, _tail = fullname.rpartition('.')
        if pkg:
            module.__package__ = pkg
        return module
|
| 163 |
+
|
| 164 |
+
# Singleton finder/loader shared by every mounted wheel; installed on
# sys.meta_path lazily by Wheel.mount().
_hook = Mounter()
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class Wheel(object):
|
| 168 |
+
"""
|
| 169 |
+
Class to build and install from Wheel files (PEP 427).
|
| 170 |
+
"""
|
| 171 |
+
|
| 172 |
+
wheel_version = (1, 1)
|
| 173 |
+
hash_kind = 'sha256'
|
| 174 |
+
|
| 175 |
+
def __init__(self, filename=None, sign=False, verify=False):
    """
    Initialise an instance using a (valid) filename.

    *filename* may be a bare ``name-version[-build]`` string, a full
    wheel filename (optionally with a leading directory), or None for a
    placeholder ('dummy-0.1'). *sign* and *verify* record whether
    signing / verification was requested.

    Raises DistlibException if *filename* matches neither form.
    """
    self.sign = sign
    self.should_verify = verify
    self.buildver = ''
    # Defaults describe a pure, universal wheel.
    self.pyver = [PYVER]
    self.abi = ['none']
    self.arch = ['any']
    self.dirname = os.getcwd()
    if filename is None:
        self.name = 'dummy'
        self.version = '0.1'
        self._filename = self.filename
    else:
        m = NAME_VERSION_RE.match(filename)
        if m:
            info = m.groupdict('')
            self.name = info['nm']
            # Reinstate the local version separator
            self.version = info['vn'].replace('_', '-')
            self.buildver = info['bn']
            self._filename = self.filename
        else:
            dirname, filename = os.path.split(filename)
            m = FILENAME_RE.match(filename)
            if not m:
                raise DistlibException('Invalid name or '
                                       'filename: %r' % filename)
            if dirname:
                self.dirname = os.path.abspath(dirname)
            self._filename = filename
            info = m.groupdict('')
            self.name = info['nm']
            self.version = info['vn']
            self.buildver = info['bn']
            # Tag fields may hold several '.'-separated values
            # (compressed tag sets).
            self.pyver = info['py'].split('.')
            self.abi = info['bi'].split('.')
            self.arch = info['ar'].split('.')
|
| 215 |
+
|
| 216 |
+
@property
def filename(self):
    """
    Build and return the wheel filename from the stored components.

    The version's local-version separator '-' is replaced by '_', and
    compressed tag sets are joined with '.', per PEP 427 naming rules.
    """
    build_part = '-%s' % self.buildver if self.buildver else ''
    tag_parts = ('.'.join(self.pyver), '.'.join(self.abi),
                 '.'.join(self.arch))
    safe_version = self.version.replace('-', '_')
    return '%s-%s%s-%s-%s-%s.whl' % ((self.name, safe_version, build_part) +
                                     tag_parts)
|
| 232 |
+
|
| 233 |
+
@property
def exists(self):
    """True if the wheel file currently exists on disk."""
    return os.path.isfile(os.path.join(self.dirname, self.filename))
|
| 237 |
+
|
| 238 |
+
@property
def tags(self):
    """Yield each (python tag, abi tag, arch tag) combination in order."""
    for py_tag in self.pyver:
        for abi_tag in self.abi:
            for arch_tag in self.arch:
                yield py_tag, abi_tag, arch_tag
|
| 244 |
+
|
| 245 |
+
@cached_property
def metadata(self):
    """
    Return the distribution metadata (a Metadata instance) read from the
    wheel's .dist-info directory.

    Tries each candidate metadata filename in turn and returns the first
    one that parses; raises ValueError if none is present in the archive.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    wrapper = codecs.getreader('utf-8')
    with ZipFile(pathname, 'r') as zf:
        wheel_metadata = self.get_wheel_metadata(zf)
        wv = wheel_metadata['Wheel-Version'].split('.', 1)
        # file_version is computed for potential future checks; it does
        # not currently affect which metadata filenames are tried.
        file_version = tuple([int(i) for i in wv])
        # Candidate filenames: the JSON-style name first, then the
        # legacy email-header METADATA file.
        fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
        result = None
        for fn in fns:
            try:
                metadata_filename = posixpath.join(info_dir, fn)
                with zf.open(metadata_filename) as bf:
                    wf = wrapper(bf)
                    result = Metadata(fileobj=wf)
                    if result:
                        break
            except KeyError:
                # Entry not present in the archive - try the next name.
                pass
        if not result:
            raise ValueError('Invalid wheel, because metadata is '
                             'missing: looked in %s' % ', '.join(fns))
    return result
|
| 276 |
+
|
| 277 |
+
def get_wheel_metadata(self, zf):
    """
    Read the WHEEL file from the open archive *zf* and return its
    headers as a plain dict (e.g. 'Wheel-Version', 'Root-Is-Purelib').
    """
    whl_entry = posixpath.join('%s-%s.dist-info' % (self.name, self.version),
                               'WHEEL')
    reader = codecs.getreader('utf-8')
    with zf.open(whl_entry) as stream:
        parsed = message_from_file(reader(stream))
    return dict(parsed)
|
| 285 |
+
|
| 286 |
+
@cached_property
def info(self):
    """The parsed contents of the wheel's WHEEL metadata file, as a dict."""
    wheel_path = os.path.join(self.dirname, self.filename)
    with ZipFile(wheel_path, 'r') as zf:
        return self.get_wheel_metadata(zf)
|
| 292 |
+
|
| 293 |
+
def process_shebang(self, data):
    """
    Normalise the shebang line of a script, given its contents as bytes.

    If *data* starts with a shebang, it is replaced with '#!python' (or
    '#!pythonw' if the original interpreter was a pythonw variant),
    preserving any arguments that followed the interpreter. If there is
    no shebang, '#!python' is prepended using the file's own
    line-terminator style. Returns the transformed bytes.
    """
    m = SHEBANG_RE.match(data)
    if m:
        end = m.end()
        shebang, data_after_shebang = data[:end], data[end:]
        # Preserve any arguments after the interpreter
        if b'pythonw' in shebang.lower():
            shebang_python = SHEBANG_PYTHONW
        else:
            shebang_python = SHEBANG_PYTHON
        m = SHEBANG_DETAIL_RE.match(shebang)
        if m:
            args = b' ' + m.groups()[-1]
        else:
            args = b''
        shebang = shebang_python + args
        data = shebang + data_after_shebang
    else:
        # No shebang present: detect the dominant line terminator
        # (CRLF, CR or LF) and prepend a shebang using it.
        cr = data.find(b'\r')
        lf = data.find(b'\n')
        if cr < 0 or cr > lf:
            term = b'\n'
        else:
            if data[cr:cr + 2] == b'\r\n':
                term = b'\r\n'
            else:
                term = b'\r'
        data = SHEBANG_PYTHON + term + data
    return data
|
| 322 |
+
|
| 323 |
+
def get_hash(self, data, hash_kind=None):
    """
    Hash *data* (bytes) and return a (hash_kind, digest) pair, where the
    digest is urlsafe-base64 encoded with trailing '=' padding stripped,
    as used in wheel RECORD files.

    Raises DistlibException for an unknown algorithm name.
    """
    if hash_kind is None:
        hash_kind = self.hash_kind
    hasher = getattr(hashlib, hash_kind, None)
    if hasher is None:
        raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
    encoded = base64.urlsafe_b64encode(hasher(data).digest())
    return hash_kind, encoded.rstrip(b'=').decode('ascii')
|
| 333 |
+
|
| 334 |
+
def write_record(self, records, record_path, archive_record_path):
    """
    Write RECORD rows to *record_path*, appending the RECORD file's own
    entry (*archive_record_path*) with empty hash and size fields.
    The input sequence is copied, not mutated.
    """
    rows = list(records)
    rows.append((archive_record_path, '', ''))
    with CSVWriter(record_path) as writer:
        for entry in rows:
            writer.writerow(entry)
|
| 340 |
+
|
| 341 |
+
def write_records(self, info, libdir, archive_paths):
    """
    Compute (archive-path, hash, size) RECORD entries for every file in
    *archive_paths* and write the RECORD file into the dist-info
    directory.

    *info* is a (distinfo-path, info-dir-name) pair. *libdir* is kept
    for interface compatibility but is not used here. The RECORD file's
    own (archive-path, filesystem-path) pair is appended to
    *archive_paths* as a side effect, so it ends up in the archive too.
    """
    records = []
    distinfo, info_dir = info
    # NOTE(review): hasher is unused - hashing goes through get_hash().
    hasher = getattr(hashlib, self.hash_kind)
    for ap, p in archive_paths:
        with open(p, 'rb') as f:
            data = f.read()
        digest = '%s=%s' % self.get_hash(data)
        size = os.path.getsize(p)
        records.append((ap, digest, size))

    p = os.path.join(distinfo, 'RECORD')
    ap = to_posix(os.path.join(info_dir, 'RECORD'))
    self.write_record(records, p, ap)
    archive_paths.append((ap, p))
|
| 356 |
+
|
| 357 |
+
def build_zip(self, pathname, archive_paths):
    """
    Create the wheel archive at *pathname* from *archive_paths*, a list
    of (archive-path, filesystem-path) pairs, using deflate compression.
    """
    with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
        for ap, p in archive_paths:
            zf.write(p, ap)
            # Log after the write so 'Wrote' is accurate and no message
            # is emitted for a write that raised.
            logger.debug('Wrote %s to %s in wheel', p, ap)
|
| 362 |
+
|
| 363 |
+
def build(self, paths, tags=None, wheel_version=None):
    """
    Build a wheel from files in specified paths, and use any specified tags
    when determining the name of the wheel.

    *paths* maps location keys (exactly one of 'purelib'/'platlib', plus
    optionally 'data', 'headers', 'scripts') to directories. *tags* may
    override the default 'pyver'/'abi'/'arch' tag lists. *wheel_version*
    overrides the Wheel-Version pair written to the WHEEL file.
    Returns the pathname of the wheel written.
    """
    if tags is None:
        tags = {}

    # 'platlib' present means an impure (platform-specific) wheel, and
    # picks implementation-specific default tags.
    libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
    if libkey == 'platlib':
        is_pure = 'false'
        default_pyver = [IMPVER]
        default_abi = [ABI]
        default_arch = [ARCH]
    else:
        is_pure = 'true'
        default_pyver = [PYVER]
        default_abi = ['none']
        default_arch = ['any']

    self.pyver = tags.get('pyver', default_pyver)
    self.abi = tags.get('abi', default_abi)
    self.arch = tags.get('arch', default_arch)

    libdir = paths[libkey]

    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver

    archive_paths = []

    # First, stuff which is not in site-packages
    for key in ('data', 'headers', 'scripts'):
        if key not in paths:
            continue
        path = paths[key]
        if os.path.isdir(path):
            for root, dirs, files in os.walk(path):
                for fn in files:
                    p = fsdecode(os.path.join(root, fn))
                    rp = os.path.relpath(p, path)
                    ap = to_posix(os.path.join(data_dir, key, rp))
                    archive_paths.append((ap, p))
                    # Scripts get their shebangs normalised in place;
                    # native .exe launchers are left untouched.
                    if key == 'scripts' and not p.endswith('.exe'):
                        with open(p, 'rb') as f:
                            data = f.read()
                        data = self.process_shebang(data)
                        with open(p, 'wb') as f:
                            f.write(data)

    # Now, stuff which is in site-packages, other than the
    # distinfo stuff.
    path = libdir
    distinfo = None
    for root, dirs, files in os.walk(path):
        if root == path:
            # At the top level only, save distinfo for later
            # and skip it for now
            for i, dn in enumerate(dirs):
                dn = fsdecode(dn)
                if dn.endswith('.dist-info'):
                    distinfo = os.path.join(root, dn)
                    # Removing from dirs stops os.walk descending into
                    # it; safe because we break straight after.
                    del dirs[i]
                    break
            assert distinfo, '.dist-info directory expected, not found'

        for fn in files:
            # comment out next suite to leave .pyc files in
            if fsdecode(fn).endswith(('.pyc', '.pyo')):
                continue
            p = os.path.join(root, fn)
            rp = to_posix(os.path.relpath(p, path))
            archive_paths.append((rp, p))

    # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
    files = os.listdir(distinfo)
    for fn in files:
        # These four are (re)generated below rather than copied verbatim.
        if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
            p = fsdecode(os.path.join(distinfo, fn))
            ap = to_posix(os.path.join(info_dir, fn))
            archive_paths.append((ap, p))

    wheel_metadata = [
        'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
        'Generator: distlib %s' % __version__,
        'Root-Is-Purelib: %s' % is_pure,
    ]
    for pyver, abi, arch in self.tags:
        wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
    p = os.path.join(distinfo, 'WHEEL')
    with open(p, 'w') as f:
        f.write('\n'.join(wheel_metadata))
    ap = to_posix(os.path.join(info_dir, 'WHEEL'))
    archive_paths.append((ap, p))

    # sort the entries by archive path. Not needed by any spec, but it
    # keeps the archive listing and RECORD tidier than they would otherwise
    # be. Use the number of path segments to keep directory entries together,
    # and keep the dist-info stuff at the end.
    def sorter(t):
        ap = t[0]
        n = ap.count('/')
        if '.dist-info' in ap:
            n += 10000
        return (n, ap)
    archive_paths = sorted(archive_paths, key=sorter)

    # Now, at last, RECORD.
    # Paths in here are archive paths - nothing else makes sense.
    self.write_records((distinfo, info_dir), libdir, archive_paths)
    # Now, ready to build the zip file
    pathname = os.path.join(self.dirname, self.filename)
    self.build_zip(pathname, archive_paths)
    return pathname
|
| 478 |
+
|
| 479 |
+
def skip_entry(self, arcname):
    """
    Determine whether an archive entry should be skipped when verifying
    or installing.

    Directory entries are skipped because they won't appear in RECORD,
    and the signature file (RECORD.jws) is skipped because it is not in
    RECORD either and we don't currently do anything with it. See:

    https://github.com/pypa/wheel/issues/294
    https://github.com/pypa/wheel/issues/287
    https://github.com/pypa/wheel/pull/289
    """
    if arcname.endswith('/'):
        return True
    return arcname.endswith('/RECORD.jws')
|
| 494 |
+
|
| 495 |
+
def install(self, paths, maker, **kwargs):
    """
    Install a wheel to the specified paths. If kwarg ``warner`` is
    specified, it should be a callable, which will be called with two
    tuples indicating the wheel version of this software and the wheel
    version in the file, if there is a discrepancy in the versions.
    This can be used to issue any warnings or raise any exceptions.
    If kwarg ``lib_only`` is True, only the purelib/platlib files are
    installed, and the headers, scripts, data and dist-info metadata are
    not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
    bytecode will try to use file-hash based invalidation (PEP 552) on
    supported interpreter versions (CPython 3.7+).

    The return value is a :class:`InstalledDistribution` instance unless
    ``lib_only`` is True, in which case the return value is ``None``.
    """

    dry_run = maker.dry_run
    warner = kwargs.get('warner')
    lib_only = kwargs.get('lib_only', False)
    bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)

    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver

    metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
    wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
    record_name = posixpath.join(info_dir, 'RECORD')

    wrapper = codecs.getreader('utf-8')

    with ZipFile(pathname, 'r') as zf:
        with zf.open(wheel_metadata_name) as bwf:
            wf = wrapper(bwf)
            message = message_from_file(wf)
        wv = message['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        if (file_version != self.wheel_version) and warner:
            warner(self.wheel_version, file_version)

        if message['Root-Is-Purelib'] == 'true':
            libdir = paths['purelib']
        else:
            libdir = paths['platlib']

        # Map archive path -> RECORD row (path, hash, size) for the
        # integrity checks performed while extracting.
        records = {}
        with zf.open(record_name) as bf:
            with CSVReader(stream=bf) as reader:
                for row in reader:
                    p = row[0]
                    records[p] = row

        data_pfx = posixpath.join(data_dir, '')
        info_pfx = posixpath.join(info_dir, '')
        script_pfx = posixpath.join(data_dir, 'scripts', '')

        # make a new instance rather than a copy of maker's,
        # as we mutate it
        fileop = FileOperator(dry_run=dry_run)
        fileop.record = True    # so we can rollback if needed

        bc = not sys.dont_write_bytecode    # Double negatives. Lovely!

        outfiles = []   # for RECORD writing

        # for script copying/shebang processing
        workdir = tempfile.mkdtemp()
        # set target dir later
        # we default add_launchers to False, as the
        # Python Launcher should be used instead
        maker.source_dir = workdir
        maker.target_dir = None
        try:
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                if self.skip_entry(u_arcname):
                    continue
                row = records[u_arcname]
                # Verify size and digest against RECORD before writing.
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)

                if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                    logger.debug('lib_only: skipping %s', u_arcname)
                    continue
                is_script = (u_arcname.startswith(script_pfx)
                             and not u_arcname.endswith('.exe'))

                if u_arcname.startswith(data_pfx):
                    _, where, rp = u_arcname.split('/', 2)
                    outfile = os.path.join(paths[where], convert_path(rp))
                else:
                    # meant for site-packages.
                    if u_arcname in (wheel_metadata_name, record_name):
                        continue
                    outfile = os.path.join(libdir, convert_path(u_arcname))
                if not is_script:
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, outfile)
                    # Issue #147: permission bits aren't preserved. Using
                    # zf.extract(zinfo, libdir) should have worked, but didn't,
                    # see https://www.thetopsites.net/article/53834422.shtml
                    # So ... manually preserve permission bits as given in zinfo
                    if os.name == 'posix':
                        # just set the normal permission bits
                        os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF)
                    outfiles.append(outfile)
                    # Double check the digest of the written file
                    if not dry_run and row[1]:
                        with open(outfile, 'rb') as bf:
                            data = bf.read()
                            _, newdigest = self.get_hash(data, kind)
                            if newdigest != digest:
                                raise DistlibException('digest mismatch '
                                                       'on write for '
                                                       '%s' % outfile)
                    if bc and outfile.endswith('.py'):
                        try:
                            pyc = fileop.byte_compile(outfile,
                                                      hashed_invalidation=bc_hashed_invalidation)
                            outfiles.append(pyc)
                        except Exception:
                            # Don't give up if byte-compilation fails,
                            # but log it and perhaps warn the user
                            logger.warning('Byte-compilation failed',
                                           exc_info=True)
                else:
                    # Scripts go via a temp dir so maker can rewrite
                    # shebangs / generate launchers into target_dir.
                    fn = os.path.basename(convert_path(arcname))
                    workname = os.path.join(workdir, fn)
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, workname)

                    dn, fn = os.path.split(outfile)
                    maker.target_dir = dn
                    filenames = maker.make(fn)
                    fileop.set_executable_mode(filenames)
                    outfiles.extend(filenames)

            if lib_only:
                logger.debug('lib_only: returning None')
                dist = None
            else:
                # Generate scripts

                # Try to get pydist.json so we can see if there are
                # any commands to generate. If this fails (e.g. because
                # of a legacy wheel), log a warning but don't give up.
                commands = None
                file_version = self.info['Wheel-Version']
                if file_version == '1.0':
                    # Use legacy info
                    ep = posixpath.join(info_dir, 'entry_points.txt')
                    try:
                        with zf.open(ep) as bwf:
                            epdata = read_exports(bwf)
                        commands = {}
                        for key in ('console', 'gui'):
                            k = '%s_scripts' % key
                            if k in epdata:
                                commands['wrap_%s' % key] = d = {}
                                for v in epdata[k].values():
                                    s = '%s:%s' % (v.prefix, v.suffix)
                                    if v.flags:
                                        s += ' [%s]' % ','.join(v.flags)
                                    d[v.name] = s
                    except Exception:
                        logger.warning('Unable to read legacy script '
                                       'metadata, so cannot generate '
                                       'scripts')
                else:
                    try:
                        with zf.open(metadata_name) as bwf:
                            wf = wrapper(bwf)
                            commands = json.load(wf).get('extensions')
                            if commands:
                                commands = commands.get('python.commands')
                    except Exception:
                        logger.warning('Unable to read JSON metadata, so '
                                       'cannot generate scripts')
                if commands:
                    console_scripts = commands.get('wrap_console', {})
                    gui_scripts = commands.get('wrap_gui', {})
                    if console_scripts or gui_scripts:
                        script_dir = paths.get('scripts', '')
                        if not os.path.isdir(script_dir):
                            raise ValueError('Valid script path not '
                                             'specified')
                        maker.target_dir = script_dir
                        for k, v in console_scripts.items():
                            script = '%s = %s' % (k, v)
                            filenames = maker.make(script)
                            fileop.set_executable_mode(filenames)

                        if gui_scripts:
                            options = {'gui': True }
                            for k, v in gui_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script, options)
                                fileop.set_executable_mode(filenames)

                p = os.path.join(libdir, info_dir)
                dist = InstalledDistribution(p)

                # Write SHARED
                paths = dict(paths)     # don't change passed in dict
                del paths['purelib']
                del paths['platlib']
                paths['lib'] = libdir
                p = dist.write_shared_locations(paths, dry_run)
                if p:
                    outfiles.append(p)

                # Write RECORD
                dist.write_installed_files(outfiles, paths['prefix'],
                                           dry_run)
            return dist
        except Exception:  # pragma: no cover
            logger.exception('installation failed.')
            fileop.rollback()
            raise
        finally:
            shutil.rmtree(workdir)
|
| 731 |
+
|
| 732 |
+
def _get_dylib_cache(self):
    """
    Return (creating on first use) the module-level Cache used for
    extracting extension modules from wheels.
    """
    global cache
    if cache is None:
        # Use native string to avoid issues on 2.x: see Python #20140.
        base = os.path.join(get_cache_base(), str('dylib-cache'),
                            '%s.%s' % sys.version_info[:2])
        cache = Cache(base)
    return cache
|
| 740 |
+
|
| 741 |
+
def _get_extensions(self):
    """
    Extract this wheel's C extension modules into the dylib cache and
    return a list of (module-name, extracted-path) pairs.

    The name -> relative-path mapping is read from the EXTENSIONS file
    in the dist-info directory; if that file is absent an empty list is
    returned. A cached copy is only re-extracted when the archive member
    is newer than the file already on disk.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    arcname = posixpath.join(info_dir, 'EXTENSIONS')
    wrapper = codecs.getreader('utf-8')
    result = []
    with ZipFile(pathname, 'r') as zf:
        try:
            with zf.open(arcname) as bf:
                wf = wrapper(bf)
                extensions = json.load(wf)
                cache = self._get_dylib_cache()
                prefix = cache.prefix_to_dir(pathname)
                cache_base = os.path.join(cache.base, prefix)
                if not os.path.isdir(cache_base):
                    os.makedirs(cache_base)
                for name, relpath in extensions.items():
                    dest = os.path.join(cache_base, convert_path(relpath))
                    if not os.path.exists(dest):
                        extract = True
                    else:
                        # Compare timestamps: re-extract only when the
                        # archive member is newer than the cached file.
                        file_time = os.stat(dest).st_mtime
                        file_time = datetime.datetime.fromtimestamp(file_time)
                        info = zf.getinfo(relpath)
                        wheel_time = datetime.datetime(*info.date_time)
                        extract = wheel_time > file_time
                    if extract:
                        zf.extract(relpath, cache_base)
                    result.append((name, dest))
        except KeyError:
            # No EXTENSIONS entry in the archive - a pure wheel.
            pass
    return result
|
| 774 |
+
|
| 775 |
+
def is_compatible(self):
    """
    Determine if a wheel is compatible with the running system.

    Delegates to the module-level ``is_compatible`` function.
    """
    return is_compatible(self)
|
| 780 |
+
|
| 781 |
+
def is_mountable(self):
    """
    Determine if a wheel is asserted as mountable by its metadata.

    Currently always True - the metadata details are still TBD.
    """
    return True
|
| 786 |
+
|
| 787 |
+
def mount(self, append=False):
    """
    Make this wheel importable by adding it to sys.path (at the front,
    or at the end if *append* is True) and registering any C extensions
    it contains with the module-level import hook.

    Raises DistlibException if the wheel is not compatible with the
    running Python, or is marked as not mountable.
    """
    pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
    if not self.is_compatible():
        msg = 'Wheel %s not compatible with this Python.' % pathname
        raise DistlibException(msg)
    if not self.is_mountable():
        msg = 'Wheel %s is marked as not mountable.' % pathname
        raise DistlibException(msg)
    if pathname in sys.path:
        logger.debug('%s already in path', pathname)
    else:
        if append:
            sys.path.append(pathname)
        else:
            sys.path.insert(0, pathname)
        extensions = self._get_extensions()
        if extensions:
            # Install the shared meta-path hook only once, on first use.
            if _hook not in sys.meta_path:
                sys.meta_path.append(_hook)
            _hook.add(pathname, extensions)
|
| 807 |
+
|
| 808 |
+
def unmount(self):
    """
    Reverse the effect of mount(): remove the wheel from sys.path and
    unregister its extensions, removing the import hook from
    sys.meta_path when no mounted wheels remain.
    """
    pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
    if pathname not in sys.path:
        logger.debug('%s not in path', pathname)
    else:
        sys.path.remove(pathname)
        if pathname in _hook.impure_wheels:
            _hook.remove(pathname)
        if not _hook.impure_wheels:
            if _hook in sys.meta_path:
                sys.meta_path.remove(_hook)
|
| 819 |
+
|
| 820 |
+
def verify(self):
    """
    Verify the wheel archive's integrity: every entry not skipped by
    ``skip_entry`` is checked against the size and digest recorded in
    the wheel's RECORD file.

    :raises DistlibException: If an archive entry contains ``..`` in a
                              directory component, or its size or digest
                              does not match the RECORD entry.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver

    metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
    wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
    record_name = posixpath.join(info_dir, 'RECORD')

    # Zip entries are bytes; decode them through a UTF-8 reader.
    wrapper = codecs.getreader('utf-8')

    with ZipFile(pathname, 'r') as zf:
        with zf.open(wheel_metadata_name) as bwf:
            wf = wrapper(bwf)
            message = message_from_file(wf)
        wv = message['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        # TODO version verification

        # Map archive path -> full RECORD row (path, digest, size).
        records = {}
        with zf.open(record_name) as bf:
            with CSVReader(stream=bf) as reader:
                for row in reader:
                    p = row[0]
                    records[p] = row

        for zinfo in zf.infolist():
            arcname = zinfo.filename
            if isinstance(arcname, text_type):
                u_arcname = arcname
            else:
                u_arcname = arcname.decode('utf-8')
            # See issue #115: some wheels have .. in their entries, but
            # in the filename ... e.g. __main__..py ! So the check is
            # updated to look for .. in the directory portions
            p = u_arcname.split('/')
            if '..' in p:
                raise DistlibException('invalid entry in '
                                       'wheel: %r' % u_arcname)

            if self.skip_entry(u_arcname):
                continue
            # NOTE(review): an entry present in the archive but missing
            # from RECORD raises a bare KeyError here - confirm whether a
            # DistlibException would be more appropriate.
            row = records[u_arcname]
            # row[2] is the recorded size; empty means "not recorded".
            if row[2] and str(zinfo.file_size) != row[2]:
                raise DistlibException('size mismatch for '
                                       '%s' % u_arcname)
            # row[1] is the recorded digest, formatted as "kind=value".
            if row[1]:
                kind, value = row[1].split('=', 1)
                with zf.open(arcname) as bf:
                    data = bf.read()
                _, digest = self.get_hash(data, kind)
                if digest != value:
                    raise DistlibException('digest mismatch for '
                                           '%s' % arcname)
|
| 875 |
+
|
| 876 |
+
def update(self, modifier, dest_dir=None, **kwargs):
    """
    Update the contents of a wheel in a generic way. The modifier should
    be a callable which expects a dictionary argument: its keys are
    archive-entry paths, and its values are absolute filesystem paths
    where the contents the corresponding archive entries can be found. The
    modifier is free to change the contents of the files pointed to, add
    new entries and remove entries, before returning. This method will
    extract the entire contents of the wheel to a temporary location, call
    the modifier, and then use the passed (and possibly updated)
    dictionary to write a new wheel. If ``dest_dir`` is specified, the new
    wheel is written there -- otherwise, the original wheel is overwritten.

    The modifier should return True if it updated the wheel, else False.
    This method returns the same value the modifier returns.
    """

    def get_version(path_map, info_dir):
        """Return (version, metadata-path) from the extracted metadata,
        preferring the legacy metadata file over PKG-INFO; both may be
        None if no metadata file was extracted."""
        version = path = None
        key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)
        if key not in path_map:
            key = '%s/PKG-INFO' % info_dir
        if key in path_map:
            path = path_map[key]
            version = Metadata(path=path).version
        return version, path

    def update_version(version, path):
        """Bump the local version segment of *version* (adding '+1' when
        there is none) and rewrite the metadata file at *path*, so the
        modified wheel is distinguishable from the original."""
        updated = None
        try:
            # Constructed only for validation: raises
            # UnsupportedVersionError for non-PEP-440 versions.
            v = NormalizedVersion(version)
            i = version.find('-')
            if i < 0:
                updated = '%s+1' % version
            else:
                # Increment the last numeric component after the dash.
                parts = [int(s) for s in version[i + 1:].split('.')]
                parts[-1] += 1
                updated = '%s+%s' % (version[:i],
                                     '.'.join(str(i) for i in parts))
        except UnsupportedVersionError:
            logger.debug('Cannot update non-compliant (PEP-440) '
                         'version %r', version)
        if updated:
            md = Metadata(path=path)
            md.version = updated
            legacy = path.endswith(LEGACY_METADATA_FILENAME)
            md.write(path=path, legacy=legacy)
            logger.debug('Version updated from %r to %r', version,
                         updated)

    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    record_name = posixpath.join(info_dir, 'RECORD')
    with tempdir() as workdir:
        with ZipFile(pathname, 'r') as zf:
            # Extract everything except RECORD (it will be regenerated),
            # building archive-path -> filesystem-path for the modifier.
            path_map = {}
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                if u_arcname == record_name:
                    continue
                if '..' in u_arcname:
                    raise DistlibException('invalid entry in '
                                           'wheel: %r' % u_arcname)
                zf.extract(zinfo, workdir)
                path = os.path.join(workdir, convert_path(u_arcname))
                path_map[u_arcname] = path

        # Remember the version.
        original_version, _ = get_version(path_map, info_dir)
        # Files extracted. Call the modifier.
        modified = modifier(path_map, **kwargs)
        if modified:
            # Something changed - need to build a new wheel.
            current_version, path = get_version(path_map, info_dir)
            if current_version and (current_version == original_version):
                # Add or update local version to signify changes.
                update_version(current_version, path)
            # Decide where the new wheel goes.
            if dest_dir is None:
                # Build in the temp dir first, then overwrite the
                # original only after a successful build.
                fd, newpath = tempfile.mkstemp(suffix='.whl',
                                               prefix='wheel-update-',
                                               dir=workdir)
                os.close(fd)
            else:
                if not os.path.isdir(dest_dir):
                    raise DistlibException('Not a directory: %r' % dest_dir)
                newpath = os.path.join(dest_dir, self.filename)
            archive_paths = list(path_map.items())
            distinfo = os.path.join(workdir, info_dir)
            info = distinfo, info_dir
            self.write_records(info, workdir, archive_paths)
            self.build_zip(newpath, archive_paths)
            if dest_dir is None:
                shutil.copyfile(newpath, pathname)
    return modified
|
| 976 |
+
|
| 977 |
+
def _get_glibc_version():
|
| 978 |
+
import platform
|
| 979 |
+
ver = platform.libc_ver()
|
| 980 |
+
result = []
|
| 981 |
+
if ver[0] == 'glibc':
|
| 982 |
+
for s in ver[1].split('.'):
|
| 983 |
+
result.append(int(s) if s.isdigit() else 0)
|
| 984 |
+
result = tuple(result)
|
| 985 |
+
return result
|
| 986 |
+
|
| 987 |
+
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.
    """
    # Python versions: the current one first, then every earlier minor
    # release of the same major series, newest first.
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))

    # ABIs advertised by the interpreter's extension-module suffixes
    # (e.g. '.abi3.so' contributes 'abi3'), with the current ABI first
    # and the 'none' fallback last.
    abis = []
    for suffix in _get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []

    arches = [ARCH]
    if sys.platform == 'darwin':
        # macOS arch tags look like 'macosx_10_9_x86_64'; also accept
        # the fat/universal variants and every earlier minor OS release.
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
        if m:
            name, major, minor, arch = m.groups()
            minor = int(minor)
            matches = [arch]
            if arch in ('i386', 'ppc'):
                matches.append('fat')
            if arch in ('i386', 'ppc', 'x86_64'):
                matches.append('fat3')
            if arch in ('ppc64', 'x86_64'):
                matches.append('fat64')
            if arch in ('i386', 'x86_64'):
                matches.append('intel')
            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                matches.append('universal')
            while minor >= 0:
                for match in matches:
                    s = '%s_%s_%s_%s' % (name, major, minor, match)
                    if s != ARCH:  # already there
                        arches.append(s)
                minor -= 1

    # Most specific - our Python version, ABI and arch
    for abi in abis:
        for arch in arches:
            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
            # manylinux
            if abi != 'none' and sys.platform.startswith('linux'):
                arch = arch.replace('linux_', '')
                parts = _get_glibc_version()
                if len(parts) == 2:
                    # Legacy manylinux tags gated on the glibc floor each
                    # profile requires, plus the PEP 600 perennial tag.
                    if parts >= (2, 5):
                        result.append((''.join((IMP_PREFIX, versions[0])), abi,
                                       'manylinux1_%s' % arch))
                    if parts >= (2, 12):
                        result.append((''.join((IMP_PREFIX, versions[0])), abi,
                                       'manylinux2010_%s' % arch))
                    if parts >= (2, 17):
                        result.append((''.join((IMP_PREFIX, versions[0])), abi,
                                       'manylinux2014_%s' % arch))
                    result.append((''.join((IMP_PREFIX, versions[0])), abi,
                                   'manylinux_%s_%s_%s' % (parts[0], parts[1],
                                                           arch)))

    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            # e.g. 'cp3' for the current version only.
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))

    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))

    return set(result)
|
| 1065 |
+
|
| 1066 |
+
|
| 1067 |
+
# Pre-compute the interpreter's compatible tag set once at import time.
COMPATIBLE_TAGS = compatible_tags()

# The helper is only needed to build the constant above; remove it from
# the module namespace so only COMPATIBLE_TAGS is exposed.
del compatible_tags
|
| 1070 |
+
|
| 1071 |
+
|
| 1072 |
+
def is_compatible(wheel, tags=None):
    """Return True if *wheel* matches any of the given compatibility tags.

    *wheel* may be a Wheel instance or a wheel filename. When *tags* is
    None, the interpreter's COMPATIBLE_TAGS set is used.
    """
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)  # assume it's a filename
    candidate_tags = COMPATIBLE_TAGS if tags is None else tags
    return any(ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch
               for ver, abi, arch in candidate_tags)
|
.venv/Lib/site-packages/pip/_vendor/pygments/__init__.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Pygments
|
| 3 |
+
~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Pygments is a syntax highlighting package written in Python.
|
| 6 |
+
|
| 7 |
+
It is a generic syntax highlighter for general use in all kinds of software
|
| 8 |
+
such as forum systems, wikis or other applications that need to prettify
|
| 9 |
+
source code. Highlights are:
|
| 10 |
+
|
| 11 |
+
* a wide range of common languages and markup formats is supported
|
| 12 |
+
* special attention is paid to details, increasing quality by a fair amount
|
| 13 |
+
* support for new languages and formats are added easily
|
| 14 |
+
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
|
| 15 |
+
formats that PIL supports, and ANSI sequences
|
| 16 |
+
* it is usable as a command-line tool and as a library
|
| 17 |
+
* ... and it highlights even Brainfuck!
|
| 18 |
+
|
| 19 |
+
The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
|
| 20 |
+
|
| 21 |
+
.. _Pygments master branch:
|
| 22 |
+
https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
|
| 23 |
+
|
| 24 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 25 |
+
:license: BSD, see LICENSE for details.
|
| 26 |
+
"""
|
| 27 |
+
from io import StringIO, BytesIO
|
| 28 |
+
|
| 29 |
+
__version__ = '2.15.1'
|
| 30 |
+
__docformat__ = 'restructuredtext'
|
| 31 |
+
|
| 32 |
+
__all__ = ['lex', 'format', 'highlight']
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def lex(code, lexer):
    """Tokenize *code* using *lexer* (a ``Lexer`` instance).

    Returns the iterable produced by ``lexer.get_tokens(code)``.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError:
        # A frequent mistake is passing the lexer *class* rather than an
        # instance; detect that and raise a clearer message.
        from pip._vendor.pygments.lexer import RegexLexer
        passed_a_class = isinstance(lexer, type) and issubclass(lexer, RegexLexer)
        if passed_a_class:
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """
    Format ``tokens`` (an iterable of tokens) with the formatter ``formatter``
    (a `Formatter` instance).

    If ``outfile`` is given and a valid file object (an object with a
    ``write`` method), the result will be written to it, otherwise it
    is returned as a string.
    """
    try:
        if not outfile:
            # A formatter with an ``encoding`` emits bytes, so collect into
            # a byte buffer; otherwise collect into a text buffer.
            # (Replaces the legacy ``cond and a or b`` truthiness hack.)
            realoutfile = BytesIO() if getattr(formatter, 'encoding', None) else StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError:
        # Heuristic to catch a common mistake: passing the formatter class
        # instead of an instance.
        from pip._vendor.pygments.formatter import Formatter
        if isinstance(formatter, type) and issubclass(formatter, Formatter):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def highlight(code, lexer, formatter, outfile=None):
    """
    This is the most high-level highlighting function. It combines `lex` and
    `format` in one function.
    """
    tokens = lex(code, lexer)
    return format(tokens, formatter, outfile)
|
.venv/Lib/site-packages/pip/_vendor/pygments/__main__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.__main__
|
| 3 |
+
~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Main entry point for ``python -m pygments``.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import sys
|
| 12 |
+
from pip._vendor.pygments.cmdline import main
|
| 13 |
+
|
| 14 |
+
try:
    # Run the command-line interface with the process arguments and
    # propagate its return code as the exit status.
    sys.exit(main(sys.argv))
except KeyboardInterrupt:
    # Ctrl-C: exit with a generic failure status instead of a traceback.
    sys.exit(1)
|
.venv/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py
ADDED
|
@@ -0,0 +1,940 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.filters
|
| 3 |
+
~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Module containing filter lookup functions and default
|
| 6 |
+
filters.
|
| 7 |
+
|
| 8 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 9 |
+
:license: BSD, see LICENSE for details.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import re
|
| 13 |
+
|
| 14 |
+
from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
|
| 15 |
+
string_to_tokentype
|
| 16 |
+
from pip._vendor.pygments.filter import Filter
|
| 17 |
+
from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
|
| 18 |
+
get_choice_opt, ClassNotFound, OptionError
|
| 19 |
+
from pip._vendor.pygments.plugin import find_plugin_filters
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def find_filter_class(filtername):
    """Lookup a filter by name. Return None if not found."""
    try:
        return FILTERS[filtername]
    except KeyError:
        pass
    # Fall back to filters registered by plugins.
    for plugin_name, plugin_cls in find_plugin_filters():
        if plugin_name == filtername:
            return plugin_cls
    return None
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def get_filter_by_name(filtername, **options):
    """Instantiate and return the filter named *filtername*.

    Keyword *options* are forwarded to the filter's initializer.
    Raise a ClassNotFound if no such filter exists.
    """
    cls = find_filter_class(filtername)
    if cls is None:
        raise ClassNotFound('filter %r not found' % filtername)
    return cls(**options)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def get_all_filters():
    """Return a generator of all filter names."""
    # Built-in filters first, then any contributed by plugins.
    for builtin_name in FILTERS:
        yield builtin_name
    for plugin_name, _ in find_plugin_filters():
        yield plugin_name
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _replace_special(ttype, value, regex, specialttype,
|
| 53 |
+
replacefunc=lambda x: x):
|
| 54 |
+
last = 0
|
| 55 |
+
for match in regex.finditer(value):
|
| 56 |
+
start, end = match.start(), match.end()
|
| 57 |
+
if start != last:
|
| 58 |
+
yield ttype, value[last:start]
|
| 59 |
+
yield specialttype, replacefunc(value[start:end])
|
| 60 |
+
last = end
|
| 61 |
+
if last != len(value):
|
| 62 |
+
yield ttype, value[last:]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class CodeTagFilter(Filter):
    """Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
        A list of strings that are flagged as code tags. The default is to
        highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.

    .. versionchanged:: 2.13
        Now recognizes ``FIXME`` by default.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        tags = get_list_opt(options, 'codetags',
                            ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
        # Word-boundary-anchored alternation of the configured tags;
        # empty strings are skipped so they cannot match everywhere.
        self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
            re.escape(tag) for tag in tags if tag
        ]))

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            # Re-type tag matches inside docstrings and non-preprocessor
            # comments. Note the precedence: the condition parses as
            # Doc-or-(Comment-and-not-Preproc).
            if ttype in String.Doc or \
               ttype in Comment and \
               ttype not in Comment.Preproc:
                yield from _replace_special(ttype, value, regex, Comment.Special)
            else:
                yield ttype, value
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class SymbolFilter(Filter):
    """Convert mathematical symbols such as \\<longrightarrow> in Isabelle
    or \\longrightarrow in LaTeX into Unicode characters.

    This is mostly useful for HTML or console output when you want to
    approximate the source rendering you'd see in an IDE.

    Options accepted:

    `lang` : string
        The symbol language. Must be one of ``'isabelle'`` or
        ``'latex'``. The default is ``'isabelle'``.
    """

    # Mapping of LaTeX command names (whole token values) to the Unicode
    # character they render as.  NOTE(review): the table covers a curated
    # subset of LaTeX symbols, not the full command set.
    latex_symbols = {
        '\\alpha' : '\U000003b1',
        '\\beta' : '\U000003b2',
        '\\gamma' : '\U000003b3',
        '\\delta' : '\U000003b4',
        '\\varepsilon' : '\U000003b5',
        '\\zeta' : '\U000003b6',
        '\\eta' : '\U000003b7',
        '\\vartheta' : '\U000003b8',
        '\\iota' : '\U000003b9',
        '\\kappa' : '\U000003ba',
        '\\lambda' : '\U000003bb',
        '\\mu' : '\U000003bc',
        '\\nu' : '\U000003bd',
        '\\xi' : '\U000003be',
        '\\pi' : '\U000003c0',
        '\\varrho' : '\U000003c1',
        '\\sigma' : '\U000003c3',
        '\\tau' : '\U000003c4',
        '\\upsilon' : '\U000003c5',
        '\\varphi' : '\U000003c6',
        '\\chi' : '\U000003c7',
        '\\psi' : '\U000003c8',
        '\\omega' : '\U000003c9',
        '\\Gamma' : '\U00000393',
        '\\Delta' : '\U00000394',
        '\\Theta' : '\U00000398',
        '\\Lambda' : '\U0000039b',
        '\\Xi' : '\U0000039e',
        '\\Pi' : '\U000003a0',
        '\\Sigma' : '\U000003a3',
        '\\Upsilon' : '\U000003a5',
        '\\Phi' : '\U000003a6',
        '\\Psi' : '\U000003a8',
        '\\Omega' : '\U000003a9',
        '\\leftarrow' : '\U00002190',
        '\\longleftarrow' : '\U000027f5',
        '\\rightarrow' : '\U00002192',
        '\\longrightarrow' : '\U000027f6',
        '\\Leftarrow' : '\U000021d0',
        '\\Longleftarrow' : '\U000027f8',
        '\\Rightarrow' : '\U000021d2',
        '\\Longrightarrow' : '\U000027f9',
        '\\leftrightarrow' : '\U00002194',
        '\\longleftrightarrow' : '\U000027f7',
        '\\Leftrightarrow' : '\U000021d4',
        '\\Longleftrightarrow' : '\U000027fa',
        '\\mapsto' : '\U000021a6',
        '\\longmapsto' : '\U000027fc',
        '\\relbar' : '\U00002500',
        '\\Relbar' : '\U00002550',
        '\\hookleftarrow' : '\U000021a9',
        '\\hookrightarrow' : '\U000021aa',
        '\\leftharpoondown' : '\U000021bd',
        '\\rightharpoondown' : '\U000021c1',
        '\\leftharpoonup' : '\U000021bc',
        '\\rightharpoonup' : '\U000021c0',
        '\\rightleftharpoons' : '\U000021cc',
        '\\leadsto' : '\U0000219d',
        '\\downharpoonleft' : '\U000021c3',
        '\\downharpoonright' : '\U000021c2',
        '\\upharpoonleft' : '\U000021bf',
        '\\upharpoonright' : '\U000021be',
        '\\restriction' : '\U000021be',
        '\\uparrow' : '\U00002191',
        '\\Uparrow' : '\U000021d1',
        '\\downarrow' : '\U00002193',
        '\\Downarrow' : '\U000021d3',
        '\\updownarrow' : '\U00002195',
        '\\Updownarrow' : '\U000021d5',
        '\\langle' : '\U000027e8',
        '\\rangle' : '\U000027e9',
        '\\lceil' : '\U00002308',
        '\\rceil' : '\U00002309',
        '\\lfloor' : '\U0000230a',
        '\\rfloor' : '\U0000230b',
        '\\flqq' : '\U000000ab',
        '\\frqq' : '\U000000bb',
        '\\bot' : '\U000022a5',
        '\\top' : '\U000022a4',
        '\\wedge' : '\U00002227',
        '\\bigwedge' : '\U000022c0',
        '\\vee' : '\U00002228',
        '\\bigvee' : '\U000022c1',
        '\\forall' : '\U00002200',
        '\\exists' : '\U00002203',
        '\\nexists' : '\U00002204',
        '\\neg' : '\U000000ac',
        '\\Box' : '\U000025a1',
        '\\Diamond' : '\U000025c7',
        '\\vdash' : '\U000022a2',
        '\\models' : '\U000022a8',
        '\\dashv' : '\U000022a3',
        '\\surd' : '\U0000221a',
        '\\le' : '\U00002264',
        '\\ge' : '\U00002265',
        '\\ll' : '\U0000226a',
        '\\gg' : '\U0000226b',
        '\\lesssim' : '\U00002272',
        '\\gtrsim' : '\U00002273',
        '\\lessapprox' : '\U00002a85',
        '\\gtrapprox' : '\U00002a86',
        '\\in' : '\U00002208',
        '\\notin' : '\U00002209',
        '\\subset' : '\U00002282',
        '\\supset' : '\U00002283',
        '\\subseteq' : '\U00002286',
        '\\supseteq' : '\U00002287',
        '\\sqsubset' : '\U0000228f',
        '\\sqsupset' : '\U00002290',
        '\\sqsubseteq' : '\U00002291',
        '\\sqsupseteq' : '\U00002292',
        '\\cap' : '\U00002229',
        '\\bigcap' : '\U000022c2',
        '\\cup' : '\U0000222a',
        '\\bigcup' : '\U000022c3',
        '\\sqcup' : '\U00002294',
        '\\bigsqcup' : '\U00002a06',
        '\\sqcap' : '\U00002293',
        '\\Bigsqcap' : '\U00002a05',
        '\\setminus' : '\U00002216',
        '\\propto' : '\U0000221d',
        '\\uplus' : '\U0000228e',
        '\\bigplus' : '\U00002a04',
        '\\sim' : '\U0000223c',
        '\\doteq' : '\U00002250',
        '\\simeq' : '\U00002243',
        '\\approx' : '\U00002248',
        '\\asymp' : '\U0000224d',
        '\\cong' : '\U00002245',
        '\\equiv' : '\U00002261',
        '\\Join' : '\U000022c8',
        '\\bowtie' : '\U00002a1d',
        '\\prec' : '\U0000227a',
        '\\succ' : '\U0000227b',
        '\\preceq' : '\U0000227c',
        '\\succeq' : '\U0000227d',
        '\\parallel' : '\U00002225',
        '\\mid' : '\U000000a6',
        '\\pm' : '\U000000b1',
        '\\mp' : '\U00002213',
        '\\times' : '\U000000d7',
        '\\div' : '\U000000f7',
        '\\cdot' : '\U000022c5',
        '\\star' : '\U000022c6',
        '\\circ' : '\U00002218',
        '\\dagger' : '\U00002020',
        '\\ddagger' : '\U00002021',
        '\\lhd' : '\U000022b2',
        '\\rhd' : '\U000022b3',
        '\\unlhd' : '\U000022b4',
        '\\unrhd' : '\U000022b5',
        '\\triangleleft' : '\U000025c3',
        '\\triangleright' : '\U000025b9',
        '\\triangle' : '\U000025b3',
        '\\triangleq' : '\U0000225c',
        '\\oplus' : '\U00002295',
        '\\bigoplus' : '\U00002a01',
        '\\otimes' : '\U00002297',
        '\\bigotimes' : '\U00002a02',
        '\\odot' : '\U00002299',
        '\\bigodot' : '\U00002a00',
        '\\ominus' : '\U00002296',
        '\\oslash' : '\U00002298',
        '\\dots' : '\U00002026',
        '\\cdots' : '\U000022ef',
        '\\sum' : '\U00002211',
        '\\prod' : '\U0000220f',
        '\\coprod' : '\U00002210',
        '\\infty' : '\U0000221e',
        '\\int' : '\U0000222b',
        '\\oint' : '\U0000222e',
        '\\clubsuit' : '\U00002663',
        '\\diamondsuit' : '\U00002662',
        '\\heartsuit' : '\U00002661',
        '\\spadesuit' : '\U00002660',
        '\\aleph' : '\U00002135',
        '\\emptyset' : '\U00002205',
        '\\nabla' : '\U00002207',
        '\\partial' : '\U00002202',
        '\\flat' : '\U0000266d',
        '\\natural' : '\U0000266e',
        '\\sharp' : '\U0000266f',
        '\\angle' : '\U00002220',
        '\\copyright' : '\U000000a9',
        '\\textregistered' : '\U000000ae',
        '\\textonequarter' : '\U000000bc',
        '\\textonehalf' : '\U000000bd',
        '\\textthreequarters' : '\U000000be',
        '\\textordfeminine' : '\U000000aa',
        '\\textordmasculine' : '\U000000ba',
        '\\euro' : '\U000020ac',
        '\\pounds' : '\U000000a3',
        '\\yen' : '\U000000a5',
        '\\textcent' : '\U000000a2',
        '\\textcurrency' : '\U000000a4',
        '\\textdegree' : '\U000000b0',
    }

    # Mapping of Isabelle ``\<name>`` symbol tokens to Unicode characters.
    isabelle_symbols = {
        '\\<zero>' : '\U0001d7ec',
        '\\<one>' : '\U0001d7ed',
        '\\<two>' : '\U0001d7ee',
        '\\<three>' : '\U0001d7ef',
        '\\<four>' : '\U0001d7f0',
        '\\<five>' : '\U0001d7f1',
        '\\<six>' : '\U0001d7f2',
        '\\<seven>' : '\U0001d7f3',
        '\\<eight>' : '\U0001d7f4',
        '\\<nine>' : '\U0001d7f5',
        '\\<A>' : '\U0001d49c',
        '\\<B>' : '\U0000212c',
        '\\<C>' : '\U0001d49e',
        '\\<D>' : '\U0001d49f',
        '\\<E>' : '\U00002130',
        '\\<F>' : '\U00002131',
        '\\<G>' : '\U0001d4a2',
        '\\<H>' : '\U0000210b',
        '\\<I>' : '\U00002110',
        '\\<J>' : '\U0001d4a5',
        '\\<K>' : '\U0001d4a6',
        '\\<L>' : '\U00002112',
        '\\<M>' : '\U00002133',
        '\\<N>' : '\U0001d4a9',
        '\\<O>' : '\U0001d4aa',
        '\\<P>' : '\U0001d4ab',
        '\\<Q>' : '\U0001d4ac',
        '\\<R>' : '\U0000211b',
        '\\<S>' : '\U0001d4ae',
        '\\<T>' : '\U0001d4af',
        '\\<U>' : '\U0001d4b0',
        '\\<V>' : '\U0001d4b1',
        '\\<W>' : '\U0001d4b2',
        '\\<X>' : '\U0001d4b3',
        '\\<Y>' : '\U0001d4b4',
        '\\<Z>' : '\U0001d4b5',
        '\\<a>' : '\U0001d5ba',
        '\\<b>' : '\U0001d5bb',
        '\\<c>' : '\U0001d5bc',
        '\\<d>' : '\U0001d5bd',
        '\\<e>' : '\U0001d5be',
        '\\<f>' : '\U0001d5bf',
        '\\<g>' : '\U0001d5c0',
        '\\<h>' : '\U0001d5c1',
        '\\<i>' : '\U0001d5c2',
        '\\<j>' : '\U0001d5c3',
        '\\<k>' : '\U0001d5c4',
        '\\<l>' : '\U0001d5c5',
        '\\<m>' : '\U0001d5c6',
        '\\<n>' : '\U0001d5c7',
        '\\<o>' : '\U0001d5c8',
        '\\<p>' : '\U0001d5c9',
        '\\<q>' : '\U0001d5ca',
        '\\<r>' : '\U0001d5cb',
        '\\<s>' : '\U0001d5cc',
        '\\<t>' : '\U0001d5cd',
        '\\<u>' : '\U0001d5ce',
        '\\<v>' : '\U0001d5cf',
        '\\<w>' : '\U0001d5d0',
        '\\<x>' : '\U0001d5d1',
        '\\<y>' : '\U0001d5d2',
        '\\<z>' : '\U0001d5d3',
        '\\<AA>' : '\U0001d504',
        '\\<BB>' : '\U0001d505',
        '\\<CC>' : '\U0000212d',
        '\\<DD>' : '\U0001d507',
        '\\<EE>' : '\U0001d508',
        '\\<FF>' : '\U0001d509',
        '\\<GG>' : '\U0001d50a',
        '\\<HH>' : '\U0000210c',
        '\\<II>' : '\U00002111',
        '\\<JJ>' : '\U0001d50d',
        '\\<KK>' : '\U0001d50e',
        '\\<LL>' : '\U0001d50f',
        '\\<MM>' : '\U0001d510',
        '\\<NN>' : '\U0001d511',
        '\\<OO>' : '\U0001d512',
        '\\<PP>' : '\U0001d513',
        '\\<QQ>' : '\U0001d514',
        '\\<RR>' : '\U0000211c',
        '\\<SS>' : '\U0001d516',
        '\\<TT>' : '\U0001d517',
        '\\<UU>' : '\U0001d518',
        '\\<VV>' : '\U0001d519',
        '\\<WW>' : '\U0001d51a',
        '\\<XX>' : '\U0001d51b',
        '\\<YY>' : '\U0001d51c',
        '\\<ZZ>' : '\U00002128',
        '\\<aa>' : '\U0001d51e',
        '\\<bb>' : '\U0001d51f',
        '\\<cc>' : '\U0001d520',
        '\\<dd>' : '\U0001d521',
        '\\<ee>' : '\U0001d522',
        '\\<ff>' : '\U0001d523',
        '\\<gg>' : '\U0001d524',
        '\\<hh>' : '\U0001d525',
        '\\<ii>' : '\U0001d526',
        '\\<jj>' : '\U0001d527',
        '\\<kk>' : '\U0001d528',
        '\\<ll>' : '\U0001d529',
        '\\<mm>' : '\U0001d52a',
        '\\<nn>' : '\U0001d52b',
        '\\<oo>' : '\U0001d52c',
        '\\<pp>' : '\U0001d52d',
        '\\<qq>' : '\U0001d52e',
        '\\<rr>' : '\U0001d52f',
        '\\<ss>' : '\U0001d530',
        '\\<tt>' : '\U0001d531',
        '\\<uu>' : '\U0001d532',
        '\\<vv>' : '\U0001d533',
        '\\<ww>' : '\U0001d534',
        '\\<xx>' : '\U0001d535',
        '\\<yy>' : '\U0001d536',
        '\\<zz>' : '\U0001d537',
        '\\<alpha>' : '\U000003b1',
        '\\<beta>' : '\U000003b2',
        '\\<gamma>' : '\U000003b3',
        '\\<delta>' : '\U000003b4',
        '\\<epsilon>' : '\U000003b5',
        '\\<zeta>' : '\U000003b6',
        '\\<eta>' : '\U000003b7',
        '\\<theta>' : '\U000003b8',
        '\\<iota>' : '\U000003b9',
        '\\<kappa>' : '\U000003ba',
        '\\<lambda>' : '\U000003bb',
        '\\<mu>' : '\U000003bc',
        '\\<nu>' : '\U000003bd',
        '\\<xi>' : '\U000003be',
        '\\<pi>' : '\U000003c0',
        '\\<rho>' : '\U000003c1',
        '\\<sigma>' : '\U000003c3',
        '\\<tau>' : '\U000003c4',
        '\\<upsilon>' : '\U000003c5',
        '\\<phi>' : '\U000003c6',
        '\\<chi>' : '\U000003c7',
        '\\<psi>' : '\U000003c8',
        '\\<omega>' : '\U000003c9',
        '\\<Gamma>' : '\U00000393',
        '\\<Delta>' : '\U00000394',
        '\\<Theta>' : '\U00000398',
        '\\<Lambda>' : '\U0000039b',
        '\\<Xi>' : '\U0000039e',
        '\\<Pi>' : '\U000003a0',
        '\\<Sigma>' : '\U000003a3',
        '\\<Upsilon>' : '\U000003a5',
        '\\<Phi>' : '\U000003a6',
        '\\<Psi>' : '\U000003a8',
        '\\<Omega>' : '\U000003a9',
        '\\<bool>' : '\U0001d539',
        '\\<complex>' : '\U00002102',
        '\\<nat>' : '\U00002115',
        '\\<rat>' : '\U0000211a',
        '\\<real>' : '\U0000211d',
        '\\<int>' : '\U00002124',
        '\\<leftarrow>' : '\U00002190',
        '\\<longleftarrow>' : '\U000027f5',
        '\\<rightarrow>' : '\U00002192',
        '\\<longrightarrow>' : '\U000027f6',
        '\\<Leftarrow>' : '\U000021d0',
        '\\<Longleftarrow>' : '\U000027f8',
        '\\<Rightarrow>' : '\U000021d2',
        '\\<Longrightarrow>' : '\U000027f9',
        '\\<leftrightarrow>' : '\U00002194',
        '\\<longleftrightarrow>' : '\U000027f7',
        '\\<Leftrightarrow>' : '\U000021d4',
        '\\<Longleftrightarrow>' : '\U000027fa',
        '\\<mapsto>' : '\U000021a6',
        '\\<longmapsto>' : '\U000027fc',
        '\\<midarrow>' : '\U00002500',
        '\\<Midarrow>' : '\U00002550',
        '\\<hookleftarrow>' : '\U000021a9',
        '\\<hookrightarrow>' : '\U000021aa',
        '\\<leftharpoondown>' : '\U000021bd',
        '\\<rightharpoondown>' : '\U000021c1',
        '\\<leftharpoonup>' : '\U000021bc',
        '\\<rightharpoonup>' : '\U000021c0',
        '\\<rightleftharpoons>' : '\U000021cc',
        '\\<leadsto>' : '\U0000219d',
        '\\<downharpoonleft>' : '\U000021c3',
        '\\<downharpoonright>' : '\U000021c2',
        '\\<upharpoonleft>' : '\U000021bf',
        '\\<upharpoonright>' : '\U000021be',
        '\\<restriction>' : '\U000021be',
        '\\<Colon>' : '\U00002237',
        '\\<up>' : '\U00002191',
        '\\<Up>' : '\U000021d1',
        '\\<down>' : '\U00002193',
        '\\<Down>' : '\U000021d3',
        '\\<updown>' : '\U00002195',
        '\\<Updown>' : '\U000021d5',
        '\\<langle>' : '\U000027e8',
        '\\<rangle>' : '\U000027e9',
        '\\<lceil>' : '\U00002308',
        '\\<rceil>' : '\U00002309',
        '\\<lfloor>' : '\U0000230a',
        '\\<rfloor>' : '\U0000230b',
        '\\<lparr>' : '\U00002987',
        '\\<rparr>' : '\U00002988',
        '\\<lbrakk>' : '\U000027e6',
        '\\<rbrakk>' : '\U000027e7',
        '\\<lbrace>' : '\U00002983',
        '\\<rbrace>' : '\U00002984',
        '\\<guillemotleft>' : '\U000000ab',
        '\\<guillemotright>' : '\U000000bb',
        '\\<bottom>' : '\U000022a5',
        '\\<top>' : '\U000022a4',
        '\\<and>' : '\U00002227',
        '\\<And>' : '\U000022c0',
        '\\<or>' : '\U00002228',
        '\\<Or>' : '\U000022c1',
        '\\<forall>' : '\U00002200',
        '\\<exists>' : '\U00002203',
        '\\<nexists>' : '\U00002204',
        '\\<not>' : '\U000000ac',
        '\\<box>' : '\U000025a1',
        '\\<diamond>' : '\U000025c7',
        '\\<turnstile>' : '\U000022a2',
        '\\<Turnstile>' : '\U000022a8',
        '\\<tturnstile>' : '\U000022a9',
        '\\<TTurnstile>' : '\U000022ab',
        '\\<stileturn>' : '\U000022a3',
        '\\<surd>' : '\U0000221a',
        '\\<le>' : '\U00002264',
        '\\<ge>' : '\U00002265',
        '\\<lless>' : '\U0000226a',
        '\\<ggreater>' : '\U0000226b',
        '\\<lesssim>' : '\U00002272',
        '\\<greatersim>' : '\U00002273',
        '\\<lessapprox>' : '\U00002a85',
        '\\<greaterapprox>' : '\U00002a86',
        '\\<in>' : '\U00002208',
        '\\<notin>' : '\U00002209',
        '\\<subset>' : '\U00002282',
        '\\<supset>' : '\U00002283',
        '\\<subseteq>' : '\U00002286',
        '\\<supseteq>' : '\U00002287',
        '\\<sqsubset>' : '\U0000228f',
        '\\<sqsupset>' : '\U00002290',
        '\\<sqsubseteq>' : '\U00002291',
        '\\<sqsupseteq>' : '\U00002292',
        '\\<inter>' : '\U00002229',
        '\\<Inter>' : '\U000022c2',
        '\\<union>' : '\U0000222a',
        '\\<Union>' : '\U000022c3',
        '\\<squnion>' : '\U00002294',
        '\\<Squnion>' : '\U00002a06',
        '\\<sqinter>' : '\U00002293',
        '\\<Sqinter>' : '\U00002a05',
        '\\<setminus>' : '\U00002216',
        '\\<propto>' : '\U0000221d',
        '\\<uplus>' : '\U0000228e',
        '\\<Uplus>' : '\U00002a04',
        '\\<noteq>' : '\U00002260',
        '\\<sim>' : '\U0000223c',
        '\\<doteq>' : '\U00002250',
        '\\<simeq>' : '\U00002243',
        '\\<approx>' : '\U00002248',
        '\\<asymp>' : '\U0000224d',
        '\\<cong>' : '\U00002245',
        '\\<smile>' : '\U00002323',
        '\\<equiv>' : '\U00002261',
        '\\<frown>' : '\U00002322',
        '\\<Join>' : '\U000022c8',
        '\\<bowtie>' : '\U00002a1d',
        '\\<prec>' : '\U0000227a',
        '\\<succ>' : '\U0000227b',
        '\\<preceq>' : '\U0000227c',
        '\\<succeq>' : '\U0000227d',
        '\\<parallel>' : '\U00002225',
        '\\<bar>' : '\U000000a6',
        '\\<plusminus>' : '\U000000b1',
        '\\<minusplus>' : '\U00002213',
        '\\<times>' : '\U000000d7',
        '\\<div>' : '\U000000f7',
        '\\<cdot>' : '\U000022c5',
        '\\<star>' : '\U000022c6',
        '\\<bullet>' : '\U00002219',
        '\\<circ>' : '\U00002218',
        '\\<dagger>' : '\U00002020',
        '\\<ddagger>' : '\U00002021',
        '\\<lhd>' : '\U000022b2',
        '\\<rhd>' : '\U000022b3',
        '\\<unlhd>' : '\U000022b4',
        '\\<unrhd>' : '\U000022b5',
        '\\<triangleleft>' : '\U000025c3',
        '\\<triangleright>' : '\U000025b9',
        '\\<triangle>' : '\U000025b3',
        '\\<triangleq>' : '\U0000225c',
        '\\<oplus>' : '\U00002295',
        '\\<Oplus>' : '\U00002a01',
        '\\<otimes>' : '\U00002297',
        '\\<Otimes>' : '\U00002a02',
        '\\<odot>' : '\U00002299',
        '\\<Odot>' : '\U00002a00',
        '\\<ominus>' : '\U00002296',
        '\\<oslash>' : '\U00002298',
        '\\<dots>' : '\U00002026',
        '\\<cdots>' : '\U000022ef',
        '\\<Sum>' : '\U00002211',
        '\\<Prod>' : '\U0000220f',
        '\\<Coprod>' : '\U00002210',
        '\\<infinity>' : '\U0000221e',
        '\\<integral>' : '\U0000222b',
        '\\<ointegral>' : '\U0000222e',
        '\\<clubsuit>' : '\U00002663',
        '\\<diamondsuit>' : '\U00002662',
        '\\<heartsuit>' : '\U00002661',
        '\\<spadesuit>' : '\U00002660',
        '\\<aleph>' : '\U00002135',
        '\\<emptyset>' : '\U00002205',
        '\\<nabla>' : '\U00002207',
        '\\<partial>' : '\U00002202',
        '\\<flat>' : '\U0000266d',
        '\\<natural>' : '\U0000266e',
        '\\<sharp>' : '\U0000266f',
        '\\<angle>' : '\U00002220',
        '\\<copyright>' : '\U000000a9',
        '\\<registered>' : '\U000000ae',
        '\\<hyphen>' : '\U000000ad',
        '\\<inverse>' : '\U000000af',
        '\\<onequarter>' : '\U000000bc',
        '\\<onehalf>' : '\U000000bd',
        '\\<threequarters>' : '\U000000be',
        '\\<ordfeminine>' : '\U000000aa',
        '\\<ordmasculine>' : '\U000000ba',
        '\\<section>' : '\U000000a7',
        '\\<paragraph>' : '\U000000b6',
        '\\<exclamdown>' : '\U000000a1',
        '\\<questiondown>' : '\U000000bf',
        '\\<euro>' : '\U000020ac',
        '\\<pounds>' : '\U000000a3',
        '\\<yen>' : '\U000000a5',
        '\\<cent>' : '\U000000a2',
        '\\<currency>' : '\U000000a4',
        '\\<degree>' : '\U000000b0',
        '\\<amalg>' : '\U00002a3f',
        '\\<mho>' : '\U00002127',
        '\\<lozenge>' : '\U000025ca',
        '\\<wp>' : '\U00002118',
        '\\<wrong>' : '\U00002240',
        '\\<struct>' : '\U000022c4',
        '\\<acute>' : '\U000000b4',
        '\\<index>' : '\U00000131',
        '\\<dieresis>' : '\U000000a8',
        '\\<cedilla>' : '\U000000b8',
        '\\<hungarumlaut>' : '\U000002dd',
        '\\<some>' : '\U000003f5',
        '\\<newline>' : '\U000023ce',
        '\\<open>' : '\U00002039',
        '\\<close>' : '\U0000203a',
        '\\<here>' : '\U00002302',
        '\\<^sub>' : '\U000021e9',
        '\\<^sup>' : '\U000021e7',
        '\\<^bold>' : '\U00002759',
        '\\<^bsub>' : '\U000021d8',
        '\\<^esub>' : '\U000021d9',
        '\\<^bsup>' : '\U000021d7',
        '\\<^esup>' : '\U000021d6',
    }

    # Selects which of the two tables above is used, keyed by the `lang`
    # option value.
    lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}

    def __init__(self, **options):
        """Select the symbol table according to the ``lang`` option
        (``'isabelle'`` by default); ``get_choice_opt`` rejects other values."""
        Filter.__init__(self, **options)
        lang = get_choice_opt(options, 'lang',
                              ['isabelle', 'latex'], 'isabelle')
        self.symbols = self.lang_map[lang]

    def filter(self, lexer, stream):
        """Replace a token's text when its *entire* value is a known symbol;
        symbols embedded inside a larger token value are left untouched."""
        for ttype, value in stream:
            if value in self.symbols:
                yield ttype, self.symbols[value]
            else:
                yield ttype, value
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
class KeywordCaseFilter(Filter):
    """Convert keywords to lowercase or uppercase or capitalize them, which
    means first letter uppercase, rest lowercase.

    This can be useful e.g. if you highlight Pascal code and want to adapt the
    code to your styleguide.

    Options accepted:

    `case` : string
        The casing to convert keywords to. Must be one of ``'lower'``,
        ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        chosen = get_choice_opt(options, 'case',
                                ['lower', 'upper', 'capitalize'], 'lower')
        # Bind the matching unbound str method (str.lower / str.upper /
        # str.capitalize) so filtering is a plain function call.
        self.convert = getattr(str, chosen)

    def filter(self, lexer, stream):
        convert = self.convert
        for toktype, text in stream:
            if toktype in Keyword:
                yield toktype, convert(text)
            else:
                yield toktype, text
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
class NameHighlightFilter(Filter):
    """Highlight a normal Name (and Name.*) token with a different token type.

    Example::

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    This would highlight the names "foo", "bar" and "baz"
    as functions. `Name.Function` is the default token type.

    Options accepted:

    `names` : list of strings
        A list of names that should be given the different token type.
        There is no default.
    `tokentype` : TokenType or string
        A token type or a string containing a token type name that is
        used for highlighting the strings in `names`. The default is
        `Name.Function`.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.names = set(get_list_opt(options, 'names', []))
        requested = options.get('tokentype')
        # A string option is resolved to a real token type; with no option
        # given we fall back to Name.Function.
        self.tokentype = (string_to_tokentype(requested)
                          if requested else Name.Function)

    def filter(self, lexer, stream):
        highlight_type = self.tokentype
        wanted = self.names
        for ttype, value in stream:
            if ttype in Name and value in wanted:
                yield highlight_type, value
            else:
                yield ttype, value
|
| 754 |
+
|
| 755 |
+
|
| 756 |
+
class ErrorToken(Exception):
    """Default exception raised by `RaiseOnErrorTokenFilter` on error tokens."""
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
class RaiseOnErrorTokenFilter(Filter):
    """Raise an exception when the lexer generates an error token.

    Options accepted:

    `excclass` : Exception class
        The exception class to raise.
        The default is `pygments.filters.ErrorToken`.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        try:
            # issubclass() itself raises TypeError for non-classes; both that
            # case and a non-Exception class end up in the same except branch.
            if not issubclass(self.exception, Exception):
                raise TypeError
        except TypeError:
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        exc = self.exception
        for ttype, value in stream:
            if ttype is Error:
                # The error token's text becomes the exception message.
                raise exc(value)
            yield ttype, value
|
| 787 |
+
|
| 788 |
+
|
| 789 |
+
class VisibleWhitespaceFilter(Filter):
    """Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    `spaces` : string or bool
        If this is a one-character string, spaces will be replaces by this string.
        If it is another true value, spaces will be replaced by ``·`` (unicode
        MIDDLE DOT). If it is a false value, spaces will not be replaced. The
        default is ``False``.
    `tabs` : string or bool
        The same as for `spaces`, but the default replacement character is ``»``
        (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
        is ``False``. Note: this will not work if the `tabsize` option for the
        lexer is nonzero, as tabs will already have been expanded then.
    `tabsize` : int
        If tabs are to be replaced by this filter (see the `tabs` option), this
        is the total number of characters that a tab should be expanded to.
        The default is ``8``.
    `newlines` : string or bool
        The same as for `spaces`, but the default replacement character is ``¶``
        (unicode PILCROW SIGN). The default value is ``False``.
    `wstokentype` : bool
        If true, give whitespace the special `Whitespace` token type. This allows
        styling the visible whitespace differently (e.g. greyed out), but it can
        disrupt background colors. The default is ``True``.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        for opt_name, marker in (('spaces', '·'),
                                 ('tabs', '»'),
                                 ('newlines', '¶')):
            requested = options.get(opt_name, False)
            if isinstance(requested, str) and len(requested) == 1:
                # An explicit one-character replacement string wins.
                replacement = requested
            elif requested:
                # Any other truthy value selects the default marker.
                replacement = marker
            else:
                replacement = ''
            setattr(self, opt_name, replacement)
        tabsize = get_int_opt(options, 'tabsize', 8)
        if self.tabs:
            # Pad so a replaced tab still occupies `tabsize` columns.
            self.tabs += ' ' * (tabsize - 1)
        if self.newlines:
            # Keep the real newline so line structure is preserved.
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            # Re-tokenize whitespace as `Whitespace` so styles can treat it
            # specially; unreplaced kinds keep their literal character.
            replacements = {' ': self.spaces or ' ',
                            '\t': self.tabs or '\t',
                            '\n': self.newlines or '\n'}
            regex = re.compile(r'\s')

            def replacefunc(wschar):
                return replacements.get(wschar, wschar)

            for ttype, value in stream:
                yield from _replace_special(ttype, value, regex, Whitespace,
                                            replacefunc)
        else:
            # Simpler path: plain textual substitution, token types unchanged.
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value
|
| 866 |
+
|
| 867 |
+
|
| 868 |
+
class GobbleFilter(Filter):
    """Gobbles source code lines (eats initial characters).

    This filter drops the first ``n`` characters off every line of code. This
    may be useful when the source code fed to the lexer is indented by a fixed
    amount of space that isn't desired in the output.

    Options accepted:

    `n` : int
        The number of characters to gobble.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        """Consume up to *left* characters from *value*; return the remainder
        and however much of the budget is still unconsumed."""
        if left < len(value):
            return value[left:], 0
        return '', left - len(value)

    def filter(self, lexer, stream):
        per_line = self.n
        # Budget carried across tokens within the current line.
        remaining = per_line
        for ttype, value in stream:
            # The first piece continues the current line; every subsequent
            # piece starts a new line with a fresh budget of `per_line`.
            pieces = value.split('\n')
            pieces[0], remaining = self.gobble(pieces[0], remaining)
            for idx in range(1, len(pieces)):
                pieces[idx], remaining = self.gobble(pieces[idx], per_line)
            joined = '\n'.join(pieces)
            if joined:
                yield ttype, joined
|
| 905 |
+
|
| 906 |
+
|
| 907 |
+
class TokenMergeFilter(Filter):
|
| 908 |
+
"""Merges consecutive tokens with the same token type in the output
|
| 909 |
+
stream of a lexer.
|
| 910 |
+
|
| 911 |
+
.. versionadded:: 1.2
|
| 912 |
+
"""
|
| 913 |
+
def __init__(self, **options):
|
| 914 |
+
Filter.__init__(self, **options)
|
| 915 |
+
|
| 916 |
+
def filter(self, lexer, stream):
|
| 917 |
+
current_type = None
|
| 918 |
+
current_value = None
|
| 919 |
+
for ttype, value in stream:
|
| 920 |
+
if ttype is current_type:
|
| 921 |
+
current_value += value
|
| 922 |
+
else:
|
| 923 |
+
if current_type is not None:
|
| 924 |
+
yield current_type, current_value
|
| 925 |
+
current_type = ttype
|
| 926 |
+
current_value = value
|
| 927 |
+
if current_type is not None:
|
| 928 |
+
yield current_type, current_value
|
| 929 |
+
|
| 930 |
+
|
| 931 |
+
FILTERS = {
|
| 932 |
+
'codetagify': CodeTagFilter,
|
| 933 |
+
'keywordcase': KeywordCaseFilter,
|
| 934 |
+
'highlight': NameHighlightFilter,
|
| 935 |
+
'raiseonerror': RaiseOnErrorTokenFilter,
|
| 936 |
+
'whitespace': VisibleWhitespaceFilter,
|
| 937 |
+
'gobble': GobbleFilter,
|
| 938 |
+
'tokenmerge': TokenMergeFilter,
|
| 939 |
+
'symbols': SymbolFilter,
|
| 940 |
+
}
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Pygments formatters.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import re
|
| 12 |
+
import sys
|
| 13 |
+
import types
|
| 14 |
+
import fnmatch
|
| 15 |
+
from os.path import basename
|
| 16 |
+
|
| 17 |
+
from pip._vendor.pygments.formatters._mapping import FORMATTERS
|
| 18 |
+
from pip._vendor.pygments.plugin import find_plugin_formatters
|
| 19 |
+
from pip._vendor.pygments.util import ClassNotFound
|
| 20 |
+
|
| 21 |
+
__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
|
| 22 |
+
'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
|
| 23 |
+
|
| 24 |
+
_formatter_cache = {} # classes by name
|
| 25 |
+
_pattern_cache = {}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _fn_matches(fn, glob):
|
| 29 |
+
"""Return whether the supplied file name fn matches pattern filename."""
|
| 30 |
+
if glob not in _pattern_cache:
|
| 31 |
+
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
|
| 32 |
+
return pattern.match(fn)
|
| 33 |
+
return _pattern_cache[glob].match(fn)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def _load_formatters(module_name):
|
| 37 |
+
"""Load a formatter (and all others in the module too)."""
|
| 38 |
+
mod = __import__(module_name, None, None, ['__all__'])
|
| 39 |
+
for formatter_name in mod.__all__:
|
| 40 |
+
cls = getattr(mod, formatter_name)
|
| 41 |
+
_formatter_cache[cls.name] = cls
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def get_all_formatters():
|
| 45 |
+
"""Return a generator for all formatter classes."""
|
| 46 |
+
# NB: this returns formatter classes, not info like get_all_lexers().
|
| 47 |
+
for info in FORMATTERS.values():
|
| 48 |
+
if info[1] not in _formatter_cache:
|
| 49 |
+
_load_formatters(info[0])
|
| 50 |
+
yield _formatter_cache[info[1]]
|
| 51 |
+
for _, formatter in find_plugin_formatters():
|
| 52 |
+
yield formatter
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def find_formatter_class(alias):
|
| 56 |
+
"""Lookup a formatter by alias.
|
| 57 |
+
|
| 58 |
+
Returns None if not found.
|
| 59 |
+
"""
|
| 60 |
+
for module_name, name, aliases, _, _ in FORMATTERS.values():
|
| 61 |
+
if alias in aliases:
|
| 62 |
+
if name not in _formatter_cache:
|
| 63 |
+
_load_formatters(module_name)
|
| 64 |
+
return _formatter_cache[name]
|
| 65 |
+
for _, cls in find_plugin_formatters():
|
| 66 |
+
if alias in cls.aliases:
|
| 67 |
+
return cls
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_formatter_by_name(_alias, **options):
|
| 71 |
+
"""
|
| 72 |
+
Return an instance of a :class:`.Formatter` subclass that has `alias` in its
|
| 73 |
+
aliases list. The formatter is given the `options` at its instantiation.
|
| 74 |
+
|
| 75 |
+
Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
|
| 76 |
+
alias is found.
|
| 77 |
+
"""
|
| 78 |
+
cls = find_formatter_class(_alias)
|
| 79 |
+
if cls is None:
|
| 80 |
+
raise ClassNotFound("no formatter found for name %r" % _alias)
|
| 81 |
+
return cls(**options)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def load_formatter_from_file(filename, formattername="CustomFormatter", **options):
|
| 85 |
+
"""
|
| 86 |
+
Return a `Formatter` subclass instance loaded from the provided file, relative
|
| 87 |
+
to the current directory.
|
| 88 |
+
|
| 89 |
+
The file is expected to contain a Formatter class named ``formattername``
|
| 90 |
+
(by default, CustomFormatter). Users should be very careful with the input, because
|
| 91 |
+
this method is equivalent to running ``eval()`` on the input file. The formatter is
|
| 92 |
+
given the `options` at its instantiation.
|
| 93 |
+
|
| 94 |
+
:exc:`pygments.util.ClassNotFound` is raised if there are any errors loading
|
| 95 |
+
the formatter.
|
| 96 |
+
|
| 97 |
+
.. versionadded:: 2.2
|
| 98 |
+
"""
|
| 99 |
+
try:
|
| 100 |
+
# This empty dict will contain the namespace for the exec'd file
|
| 101 |
+
custom_namespace = {}
|
| 102 |
+
with open(filename, 'rb') as f:
|
| 103 |
+
exec(f.read(), custom_namespace)
|
| 104 |
+
# Retrieve the class `formattername` from that namespace
|
| 105 |
+
if formattername not in custom_namespace:
|
| 106 |
+
raise ClassNotFound('no valid %s class found in %s' %
|
| 107 |
+
(formattername, filename))
|
| 108 |
+
formatter_class = custom_namespace[formattername]
|
| 109 |
+
# And finally instantiate it with the options
|
| 110 |
+
return formatter_class(**options)
|
| 111 |
+
except OSError as err:
|
| 112 |
+
raise ClassNotFound('cannot read %s: %s' % (filename, err))
|
| 113 |
+
except ClassNotFound:
|
| 114 |
+
raise
|
| 115 |
+
except Exception as err:
|
| 116 |
+
raise ClassNotFound('error when loading custom formatter: %s' % err)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def get_formatter_for_filename(fn, **options):
|
| 120 |
+
"""
|
| 121 |
+
Return a :class:`.Formatter` subclass instance that has a filename pattern
|
| 122 |
+
matching `fn`. The formatter is given the `options` at its instantiation.
|
| 123 |
+
|
| 124 |
+
Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
|
| 125 |
+
is found.
|
| 126 |
+
"""
|
| 127 |
+
fn = basename(fn)
|
| 128 |
+
for modname, name, _, filenames, _ in FORMATTERS.values():
|
| 129 |
+
for filename in filenames:
|
| 130 |
+
if _fn_matches(fn, filename):
|
| 131 |
+
if name not in _formatter_cache:
|
| 132 |
+
_load_formatters(modname)
|
| 133 |
+
return _formatter_cache[name](**options)
|
| 134 |
+
for cls in find_plugin_formatters():
|
| 135 |
+
for filename in cls.filenames:
|
| 136 |
+
if _fn_matches(fn, filename):
|
| 137 |
+
return cls(**options)
|
| 138 |
+
raise ClassNotFound("no formatter found for file name %r" % fn)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class _automodule(types.ModuleType):
|
| 142 |
+
"""Automatically import formatters."""
|
| 143 |
+
|
| 144 |
+
def __getattr__(self, name):
|
| 145 |
+
info = FORMATTERS.get(name)
|
| 146 |
+
if info:
|
| 147 |
+
_load_formatters(info[0])
|
| 148 |
+
cls = _formatter_cache[info[1]]
|
| 149 |
+
setattr(self, name, cls)
|
| 150 |
+
return cls
|
| 151 |
+
raise AttributeError(name)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
oldmod = sys.modules[__name__]
|
| 155 |
+
newmod = _automodule(__name__)
|
| 156 |
+
newmod.__dict__.update(oldmod.__dict__)
|
| 157 |
+
sys.modules[__name__] = newmod
|
| 158 |
+
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/_mapping.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Automatically generated by scripts/gen_mapfiles.py.
|
| 2 |
+
# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
|
| 3 |
+
|
| 4 |
+
FORMATTERS = {
|
| 5 |
+
'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
|
| 6 |
+
'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
|
| 7 |
+
'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
|
| 8 |
+
'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
|
| 9 |
+
'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option). The ``<div>``'s CSS class can be set by the `cssclass` option."),
|
| 10 |
+
'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
|
| 11 |
+
'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
|
| 12 |
+
'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
|
| 13 |
+
'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
|
| 14 |
+
'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
|
| 15 |
+
'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
|
| 16 |
+
'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
|
| 17 |
+
'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
|
| 18 |
+
'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
|
| 19 |
+
'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
|
| 20 |
+
'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
|
| 21 |
+
'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
|
| 22 |
+
'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
|
| 23 |
+
}
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/bbcode.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.bbcode
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
BBcode formatter.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 13 |
+
from pip._vendor.pygments.util import get_bool_opt
|
| 14 |
+
|
| 15 |
+
__all__ = ['BBCodeFormatter']
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class BBCodeFormatter(Formatter):
|
| 19 |
+
"""
|
| 20 |
+
Format tokens with BBcodes. These formatting codes are used by many
|
| 21 |
+
bulletin boards, so you can highlight your sourcecode with pygments before
|
| 22 |
+
posting it there.
|
| 23 |
+
|
| 24 |
+
This formatter has no support for background colors and borders, as there
|
| 25 |
+
are no common BBcode tags for that.
|
| 26 |
+
|
| 27 |
+
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
|
| 28 |
+
so you can't use the highlighting together with that tag.
|
| 29 |
+
Text in a [code] tag usually is shown with a monospace font (which this
|
| 30 |
+
formatter can do with the ``monofont`` option) and no spaces (which you
|
| 31 |
+
need for indentation) are removed.
|
| 32 |
+
|
| 33 |
+
Additional options accepted:
|
| 34 |
+
|
| 35 |
+
`style`
|
| 36 |
+
The style to use, can be a string or a Style subclass (default:
|
| 37 |
+
``'default'``).
|
| 38 |
+
|
| 39 |
+
`codetag`
|
| 40 |
+
If set to true, put the output into ``[code]`` tags (default:
|
| 41 |
+
``false``)
|
| 42 |
+
|
| 43 |
+
`monofont`
|
| 44 |
+
If set to true, add a tag to show the code with a monospace font
|
| 45 |
+
(default: ``false``).
|
| 46 |
+
"""
|
| 47 |
+
name = 'BBCode'
|
| 48 |
+
aliases = ['bbcode', 'bb']
|
| 49 |
+
filenames = []
|
| 50 |
+
|
| 51 |
+
def __init__(self, **options):
|
| 52 |
+
Formatter.__init__(self, **options)
|
| 53 |
+
self._code = get_bool_opt(options, 'codetag', False)
|
| 54 |
+
self._mono = get_bool_opt(options, 'monofont', False)
|
| 55 |
+
|
| 56 |
+
self.styles = {}
|
| 57 |
+
self._make_styles()
|
| 58 |
+
|
| 59 |
+
def _make_styles(self):
|
| 60 |
+
for ttype, ndef in self.style:
|
| 61 |
+
start = end = ''
|
| 62 |
+
if ndef['color']:
|
| 63 |
+
start += '[color=#%s]' % ndef['color']
|
| 64 |
+
end = '[/color]' + end
|
| 65 |
+
if ndef['bold']:
|
| 66 |
+
start += '[b]'
|
| 67 |
+
end = '[/b]' + end
|
| 68 |
+
if ndef['italic']:
|
| 69 |
+
start += '[i]'
|
| 70 |
+
end = '[/i]' + end
|
| 71 |
+
if ndef['underline']:
|
| 72 |
+
start += '[u]'
|
| 73 |
+
end = '[/u]' + end
|
| 74 |
+
# there are no common BBcodes for background-color and border
|
| 75 |
+
|
| 76 |
+
self.styles[ttype] = start, end
|
| 77 |
+
|
| 78 |
+
def format_unencoded(self, tokensource, outfile):
|
| 79 |
+
if self._code:
|
| 80 |
+
outfile.write('[code]')
|
| 81 |
+
if self._mono:
|
| 82 |
+
outfile.write('[font=monospace]')
|
| 83 |
+
|
| 84 |
+
lastval = ''
|
| 85 |
+
lasttype = None
|
| 86 |
+
|
| 87 |
+
for ttype, value in tokensource:
|
| 88 |
+
while ttype not in self.styles:
|
| 89 |
+
ttype = ttype.parent
|
| 90 |
+
if ttype == lasttype:
|
| 91 |
+
lastval += value
|
| 92 |
+
else:
|
| 93 |
+
if lastval:
|
| 94 |
+
start, end = self.styles[lasttype]
|
| 95 |
+
outfile.write(''.join((start, lastval, end)))
|
| 96 |
+
lastval = value
|
| 97 |
+
lasttype = ttype
|
| 98 |
+
|
| 99 |
+
if lastval:
|
| 100 |
+
start, end = self.styles[lasttype]
|
| 101 |
+
outfile.write(''.join((start, lastval, end)))
|
| 102 |
+
|
| 103 |
+
if self._mono:
|
| 104 |
+
outfile.write('[/font]')
|
| 105 |
+
if self._code:
|
| 106 |
+
outfile.write('[/code]')
|
| 107 |
+
if self._code or self._mono:
|
| 108 |
+
outfile.write('\n')
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/groff.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.groff
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for groff output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import math
|
| 12 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 13 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
|
| 14 |
+
|
| 15 |
+
__all__ = ['GroffFormatter']
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class GroffFormatter(Formatter):
|
| 19 |
+
"""
|
| 20 |
+
Format tokens with groff escapes to change their color and font style.
|
| 21 |
+
|
| 22 |
+
.. versionadded:: 2.11
|
| 23 |
+
|
| 24 |
+
Additional options accepted:
|
| 25 |
+
|
| 26 |
+
`style`
|
| 27 |
+
The style to use, can be a string or a Style subclass (default:
|
| 28 |
+
``'default'``).
|
| 29 |
+
|
| 30 |
+
`monospaced`
|
| 31 |
+
If set to true, monospace font will be used (default: ``true``).
|
| 32 |
+
|
| 33 |
+
`linenos`
|
| 34 |
+
If set to true, print the line numbers (default: ``false``).
|
| 35 |
+
|
| 36 |
+
`wrap`
|
| 37 |
+
Wrap lines to the specified number of characters. Disabled if set to 0
|
| 38 |
+
(default: ``0``).
|
| 39 |
+
"""
|
| 40 |
+
|
| 41 |
+
name = 'groff'
|
| 42 |
+
aliases = ['groff','troff','roff']
|
| 43 |
+
filenames = []
|
| 44 |
+
|
| 45 |
+
def __init__(self, **options):
|
| 46 |
+
Formatter.__init__(self, **options)
|
| 47 |
+
|
| 48 |
+
self.monospaced = get_bool_opt(options, 'monospaced', True)
|
| 49 |
+
self.linenos = get_bool_opt(options, 'linenos', False)
|
| 50 |
+
self._lineno = 0
|
| 51 |
+
self.wrap = get_int_opt(options, 'wrap', 0)
|
| 52 |
+
self._linelen = 0
|
| 53 |
+
|
| 54 |
+
self.styles = {}
|
| 55 |
+
self._make_styles()
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _make_styles(self):
|
| 59 |
+
regular = '\\f[CR]' if self.monospaced else '\\f[R]'
|
| 60 |
+
bold = '\\f[CB]' if self.monospaced else '\\f[B]'
|
| 61 |
+
italic = '\\f[CI]' if self.monospaced else '\\f[I]'
|
| 62 |
+
|
| 63 |
+
for ttype, ndef in self.style:
|
| 64 |
+
start = end = ''
|
| 65 |
+
if ndef['color']:
|
| 66 |
+
start += '\\m[%s]' % ndef['color']
|
| 67 |
+
end = '\\m[]' + end
|
| 68 |
+
if ndef['bold']:
|
| 69 |
+
start += bold
|
| 70 |
+
end = regular + end
|
| 71 |
+
if ndef['italic']:
|
| 72 |
+
start += italic
|
| 73 |
+
end = regular + end
|
| 74 |
+
if ndef['bgcolor']:
|
| 75 |
+
start += '\\M[%s]' % ndef['bgcolor']
|
| 76 |
+
end = '\\M[]' + end
|
| 77 |
+
|
| 78 |
+
self.styles[ttype] = start, end
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def _define_colors(self, outfile):
|
| 82 |
+
colors = set()
|
| 83 |
+
for _, ndef in self.style:
|
| 84 |
+
if ndef['color'] is not None:
|
| 85 |
+
colors.add(ndef['color'])
|
| 86 |
+
|
| 87 |
+
for color in sorted(colors):
|
| 88 |
+
outfile.write('.defcolor ' + color + ' rgb #' + color + '\n')
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def _write_lineno(self, outfile):
|
| 92 |
+
self._lineno += 1
|
| 93 |
+
outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno))
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _wrap_line(self, line):
|
| 97 |
+
length = len(line.rstrip('\n'))
|
| 98 |
+
space = ' ' if self.linenos else ''
|
| 99 |
+
newline = ''
|
| 100 |
+
|
| 101 |
+
if length > self.wrap:
|
| 102 |
+
for i in range(0, math.floor(length / self.wrap)):
|
| 103 |
+
chunk = line[i*self.wrap:i*self.wrap+self.wrap]
|
| 104 |
+
newline += (chunk + '\n' + space)
|
| 105 |
+
remainder = length % self.wrap
|
| 106 |
+
if remainder > 0:
|
| 107 |
+
newline += line[-remainder-1:]
|
| 108 |
+
self._linelen = remainder
|
| 109 |
+
elif self._linelen + length > self.wrap:
|
| 110 |
+
newline = ('\n' + space) + line
|
| 111 |
+
self._linelen = length
|
| 112 |
+
else:
|
| 113 |
+
newline = line
|
| 114 |
+
self._linelen += length
|
| 115 |
+
|
| 116 |
+
return newline
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def _escape_chars(self, text):
|
| 120 |
+
text = text.replace('\\', '\\[u005C]'). \
|
| 121 |
+
replace('.', '\\[char46]'). \
|
| 122 |
+
replace('\'', '\\[u0027]'). \
|
| 123 |
+
replace('`', '\\[u0060]'). \
|
| 124 |
+
replace('~', '\\[u007E]')
|
| 125 |
+
copy = text
|
| 126 |
+
|
| 127 |
+
for char in copy:
|
| 128 |
+
if len(char) != len(char.encode()):
|
| 129 |
+
uni = char.encode('unicode_escape') \
|
| 130 |
+
.decode()[1:] \
|
| 131 |
+
.replace('x', 'u00') \
|
| 132 |
+
.upper()
|
| 133 |
+
text = text.replace(char, '\\[u' + uni[1:] + ']')
|
| 134 |
+
|
| 135 |
+
return text
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def format_unencoded(self, tokensource, outfile):
|
| 139 |
+
self._define_colors(outfile)
|
| 140 |
+
|
| 141 |
+
outfile.write('.nf\n\\f[CR]\n')
|
| 142 |
+
|
| 143 |
+
if self.linenos:
|
| 144 |
+
self._write_lineno(outfile)
|
| 145 |
+
|
| 146 |
+
for ttype, value in tokensource:
|
| 147 |
+
while ttype not in self.styles:
|
| 148 |
+
ttype = ttype.parent
|
| 149 |
+
start, end = self.styles[ttype]
|
| 150 |
+
|
| 151 |
+
for line in value.splitlines(True):
|
| 152 |
+
if self.wrap > 0:
|
| 153 |
+
line = self._wrap_line(line)
|
| 154 |
+
|
| 155 |
+
if start and end:
|
| 156 |
+
text = self._escape_chars(line.rstrip('\n'))
|
| 157 |
+
if text != '':
|
| 158 |
+
outfile.write(''.join((start, text, end)))
|
| 159 |
+
else:
|
| 160 |
+
outfile.write(self._escape_chars(line.rstrip('\n')))
|
| 161 |
+
|
| 162 |
+
if line.endswith('\n'):
|
| 163 |
+
if self.linenos:
|
| 164 |
+
self._write_lineno(outfile)
|
| 165 |
+
self._linelen = 0
|
| 166 |
+
else:
|
| 167 |
+
outfile.write('\n')
|
| 168 |
+
self._linelen = 0
|
| 169 |
+
|
| 170 |
+
outfile.write('\n.fi')
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/html.py
ADDED
|
@@ -0,0 +1,989 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.html
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for HTML output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import functools
|
| 12 |
+
import os
|
| 13 |
+
import sys
|
| 14 |
+
import os.path
|
| 15 |
+
from io import StringIO
|
| 16 |
+
|
| 17 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 18 |
+
from pip._vendor.pygments.token import Token, Text, STANDARD_TYPES
|
| 19 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt
|
| 20 |
+
|
| 21 |
+
try:
|
| 22 |
+
import ctags
|
| 23 |
+
except ImportError:
|
| 24 |
+
ctags = None
|
| 25 |
+
|
| 26 |
+
__all__ = ['HtmlFormatter']
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
_escape_html_table = {
|
| 30 |
+
ord('&'): '&',
|
| 31 |
+
ord('<'): '<',
|
| 32 |
+
ord('>'): '>',
|
| 33 |
+
ord('"'): '"',
|
| 34 |
+
ord("'"): ''',
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def escape_html(text, table=_escape_html_table):
|
| 39 |
+
"""Escape &, <, > as well as single and double quotes for HTML."""
|
| 40 |
+
return text.translate(table)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def webify(color):
|
| 44 |
+
if color.startswith('calc') or color.startswith('var'):
|
| 45 |
+
return color
|
| 46 |
+
else:
|
| 47 |
+
return '#' + color
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _get_ttype_class(ttype):
|
| 51 |
+
fname = STANDARD_TYPES.get(ttype)
|
| 52 |
+
if fname:
|
| 53 |
+
return fname
|
| 54 |
+
aname = ''
|
| 55 |
+
while fname is None:
|
| 56 |
+
aname = '-' + ttype[-1] + aname
|
| 57 |
+
ttype = ttype.parent
|
| 58 |
+
fname = STANDARD_TYPES.get(ttype)
|
| 59 |
+
return fname + aname
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
CSSFILE_TEMPLATE = '''\
|
| 63 |
+
/*
|
| 64 |
+
generated by Pygments <https://pygments.org/>
|
| 65 |
+
Copyright 2006-2023 by the Pygments team.
|
| 66 |
+
Licensed under the BSD license, see LICENSE for details.
|
| 67 |
+
*/
|
| 68 |
+
%(styledefs)s
|
| 69 |
+
'''
|
| 70 |
+
|
| 71 |
+
DOC_HEADER = '''\
|
| 72 |
+
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
|
| 73 |
+
"http://www.w3.org/TR/html4/strict.dtd">
|
| 74 |
+
<!--
|
| 75 |
+
generated by Pygments <https://pygments.org/>
|
| 76 |
+
Copyright 2006-2023 by the Pygments team.
|
| 77 |
+
Licensed under the BSD license, see LICENSE for details.
|
| 78 |
+
-->
|
| 79 |
+
<html>
|
| 80 |
+
<head>
|
| 81 |
+
<title>%(title)s</title>
|
| 82 |
+
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
|
| 83 |
+
<style type="text/css">
|
| 84 |
+
''' + CSSFILE_TEMPLATE + '''
|
| 85 |
+
</style>
|
| 86 |
+
</head>
|
| 87 |
+
<body>
|
| 88 |
+
<h2>%(title)s</h2>
|
| 89 |
+
|
| 90 |
+
'''
|
| 91 |
+
|
| 92 |
+
DOC_HEADER_EXTERNALCSS = '''\
|
| 93 |
+
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
|
| 94 |
+
"http://www.w3.org/TR/html4/strict.dtd">
|
| 95 |
+
|
| 96 |
+
<html>
|
| 97 |
+
<head>
|
| 98 |
+
<title>%(title)s</title>
|
| 99 |
+
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
|
| 100 |
+
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
|
| 101 |
+
</head>
|
| 102 |
+
<body>
|
| 103 |
+
<h2>%(title)s</h2>
|
| 104 |
+
|
| 105 |
+
'''
|
| 106 |
+
|
| 107 |
+
DOC_FOOTER = '''\
|
| 108 |
+
</body>
|
| 109 |
+
</html>
|
| 110 |
+
'''
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class HtmlFormatter(Formatter):
|
| 114 |
+
r"""
|
| 115 |
+
Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed
|
| 116 |
+
in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option).
|
| 117 |
+
The ``<div>``'s CSS class can be set by the `cssclass` option.
|
| 118 |
+
|
| 119 |
+
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
|
| 120 |
+
additionally wrapped inside a ``<table>`` which has one row and two
|
| 121 |
+
cells: one containing the line numbers and one containing the code.
|
| 122 |
+
Example:
|
| 123 |
+
|
| 124 |
+
.. sourcecode:: html
|
| 125 |
+
|
| 126 |
+
<div class="highlight" >
|
| 127 |
+
<table><tr>
|
| 128 |
+
<td class="linenos" title="click to toggle"
|
| 129 |
+
onclick="with (this.firstChild.style)
|
| 130 |
+
{ display = (display == '') ? 'none' : '' }">
|
| 131 |
+
<pre>1
|
| 132 |
+
2</pre>
|
| 133 |
+
</td>
|
| 134 |
+
<td class="code">
|
| 135 |
+
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
|
| 136 |
+
<span class="Ke">pass</span>
|
| 137 |
+
</pre>
|
| 138 |
+
</td>
|
| 139 |
+
</tr></table></div>
|
| 140 |
+
|
| 141 |
+
(whitespace added to improve clarity).
|
| 142 |
+
|
| 143 |
+
A list of lines can be specified using the `hl_lines` option to make these
|
| 144 |
+
lines highlighted (as of Pygments 0.11).
|
| 145 |
+
|
| 146 |
+
With the `full` option, a complete HTML 4 document is output, including
|
| 147 |
+
the style definitions inside a ``<style>`` tag, or in a separate file if
|
| 148 |
+
the `cssfile` option is given.
|
| 149 |
+
|
| 150 |
+
When `tagsfile` is set to the path of a ctags index file, it is used to
|
| 151 |
+
generate hyperlinks from names to their definition. You must enable
|
| 152 |
+
`lineanchors` and run ctags with the `-n` option for this to work. The
|
| 153 |
+
`python-ctags` module from PyPI must be installed to use this feature;
|
| 154 |
+
otherwise a `RuntimeError` will be raised.
|
| 155 |
+
|
| 156 |
+
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
|
| 157 |
+
containing CSS rules for the CSS classes used by the formatter. The
|
| 158 |
+
argument `arg` can be used to specify additional CSS selectors that
|
| 159 |
+
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
|
| 160 |
+
would result in the following CSS classes:
|
| 161 |
+
|
| 162 |
+
.. sourcecode:: css
|
| 163 |
+
|
| 164 |
+
td .code .kw { font-weight: bold; color: #00FF00 }
|
| 165 |
+
td .code .cm { color: #999999 }
|
| 166 |
+
...
|
| 167 |
+
|
| 168 |
+
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
|
| 169 |
+
`get_style_defs()` method to request multiple prefixes for the tokens:
|
| 170 |
+
|
| 171 |
+
.. sourcecode:: python
|
| 172 |
+
|
| 173 |
+
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
|
| 174 |
+
|
| 175 |
+
The output would then look like this:
|
| 176 |
+
|
| 177 |
+
.. sourcecode:: css
|
| 178 |
+
|
| 179 |
+
div.syntax pre .kw,
|
| 180 |
+
pre.syntax .kw { font-weight: bold; color: #00FF00 }
|
| 181 |
+
div.syntax pre .cm,
|
| 182 |
+
pre.syntax .cm { color: #999999 }
|
| 183 |
+
...
|
| 184 |
+
|
| 185 |
+
Additional options accepted:
|
| 186 |
+
|
| 187 |
+
`nowrap`
|
| 188 |
+
If set to ``True``, don't add a ``<pre>`` and a ``<div>`` tag
|
| 189 |
+
around the tokens. This disables most other options (default: ``False``).
|
| 190 |
+
|
| 191 |
+
`full`
|
| 192 |
+
Tells the formatter to output a "full" document, i.e. a complete
|
| 193 |
+
self-contained document (default: ``False``).
|
| 194 |
+
|
| 195 |
+
`title`
|
| 196 |
+
If `full` is true, the title that should be used to caption the
|
| 197 |
+
document (default: ``''``).
|
| 198 |
+
|
| 199 |
+
`style`
|
| 200 |
+
The style to use, can be a string or a Style subclass (default:
|
| 201 |
+
``'default'``). This option has no effect if the `cssfile`
|
| 202 |
+
and `noclobber_cssfile` option are given and the file specified in
|
| 203 |
+
`cssfile` exists.
|
| 204 |
+
|
| 205 |
+
`noclasses`
|
| 206 |
+
If set to true, token ``<span>`` tags (as well as line number elements)
|
| 207 |
+
will not use CSS classes, but inline styles. This is not recommended
|
| 208 |
+
for larger pieces of code since it increases output size by quite a bit
|
| 209 |
+
(default: ``False``).
|
| 210 |
+
|
| 211 |
+
`classprefix`
|
| 212 |
+
Since the token types use relatively short class names, they may clash
|
| 213 |
+
with some of your own class names. In this case you can use the
|
| 214 |
+
`classprefix` option to give a string to prepend to all Pygments-generated
|
| 215 |
+
CSS class names for token types.
|
| 216 |
+
Note that this option also affects the output of `get_style_defs()`.
|
| 217 |
+
|
| 218 |
+
`cssclass`
|
| 219 |
+
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
|
| 220 |
+
If you set this option, the default selector for `get_style_defs()`
|
| 221 |
+
will be this class.
|
| 222 |
+
|
| 223 |
+
.. versionadded:: 0.9
|
| 224 |
+
If you select the ``'table'`` line numbers, the wrapping table will
|
| 225 |
+
have a CSS class of this string plus ``'table'``, the default is
|
| 226 |
+
accordingly ``'highlighttable'``.
|
| 227 |
+
|
| 228 |
+
`cssstyles`
|
| 229 |
+
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
|
| 230 |
+
|
| 231 |
+
`prestyles`
|
| 232 |
+
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
|
| 233 |
+
|
| 234 |
+
.. versionadded:: 0.11
|
| 235 |
+
|
| 236 |
+
`cssfile`
|
| 237 |
+
If the `full` option is true and this option is given, it must be the
|
| 238 |
+
name of an external file. If the filename does not include an absolute
|
| 239 |
+
path, the file's path will be assumed to be relative to the main output
|
| 240 |
+
file's path, if the latter can be found. The stylesheet is then written
|
| 241 |
+
to this file instead of the HTML file.
|
| 242 |
+
|
| 243 |
+
.. versionadded:: 0.6
|
| 244 |
+
|
| 245 |
+
`noclobber_cssfile`
|
| 246 |
+
If `cssfile` is given and the specified file exists, the css file will
|
| 247 |
+
not be overwritten. This allows the use of the `full` option in
|
| 248 |
+
combination with a user specified css file. Default is ``False``.
|
| 249 |
+
|
| 250 |
+
.. versionadded:: 1.1
|
| 251 |
+
|
| 252 |
+
`linenos`
|
| 253 |
+
If set to ``'table'``, output line numbers as a table with two cells,
|
| 254 |
+
one containing the line numbers, the other the whole code. This is
|
| 255 |
+
copy-and-paste-friendly, but may cause alignment problems with some
|
| 256 |
+
browsers or fonts. If set to ``'inline'``, the line numbers will be
|
| 257 |
+
integrated in the ``<pre>`` tag that contains the code (that setting
|
| 258 |
+
is *new in Pygments 0.8*).
|
| 259 |
+
|
| 260 |
+
For compatibility with Pygments 0.7 and earlier, every true value
|
| 261 |
+
except ``'inline'`` means the same as ``'table'`` (in particular, that
|
| 262 |
+
means also ``True``).
|
| 263 |
+
|
| 264 |
+
The default value is ``False``, which means no line numbers at all.
|
| 265 |
+
|
| 266 |
+
**Note:** with the default ("table") line number mechanism, the line
|
| 267 |
+
numbers and code can have different line heights in Internet Explorer
|
| 268 |
+
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
|
| 269 |
+
CSS property (you get the default line spacing with ``line-height:
|
| 270 |
+
125%``).
|
| 271 |
+
|
| 272 |
+
`hl_lines`
|
| 273 |
+
Specify a list of lines to be highlighted. The line numbers are always
|
| 274 |
+
relative to the input (i.e. the first line is line 1) and are
|
| 275 |
+
independent of `linenostart`.
|
| 276 |
+
|
| 277 |
+
.. versionadded:: 0.11
|
| 278 |
+
|
| 279 |
+
`linenostart`
|
| 280 |
+
The line number for the first line (default: ``1``).
|
| 281 |
+
|
| 282 |
+
`linenostep`
|
| 283 |
+
If set to a number n > 1, only every nth line number is printed.
|
| 284 |
+
|
| 285 |
+
`linenospecial`
|
| 286 |
+
If set to a number n > 0, every nth line number is given the CSS
|
| 287 |
+
class ``"special"`` (default: ``0``).
|
| 288 |
+
|
| 289 |
+
`nobackground`
|
| 290 |
+
If set to ``True``, the formatter won't output the background color
|
| 291 |
+
for the wrapping element (this automatically defaults to ``False``
|
| 292 |
+
when there is no wrapping element [eg: no argument for the
|
| 293 |
+
`get_syntax_defs` method given]) (default: ``False``).
|
| 294 |
+
|
| 295 |
+
.. versionadded:: 0.6
|
| 296 |
+
|
| 297 |
+
`lineseparator`
|
| 298 |
+
This string is output between lines of code. It defaults to ``"\n"``,
|
| 299 |
+
which is enough to break a line inside ``<pre>`` tags, but you can
|
| 300 |
+
e.g. set it to ``"<br>"`` to get HTML line breaks.
|
| 301 |
+
|
| 302 |
+
.. versionadded:: 0.7
|
| 303 |
+
|
| 304 |
+
`lineanchors`
|
| 305 |
+
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
|
| 306 |
+
output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``.
|
| 307 |
+
This allows easy linking to certain lines.
|
| 308 |
+
|
| 309 |
+
.. versionadded:: 0.9
|
| 310 |
+
|
| 311 |
+
`linespans`
|
| 312 |
+
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
|
| 313 |
+
output line in a span tag with an ``id`` of ``foo-linenumber``.
|
| 314 |
+
This allows easy access to lines via javascript.
|
| 315 |
+
|
| 316 |
+
.. versionadded:: 1.6
|
| 317 |
+
|
| 318 |
+
`anchorlinenos`
|
| 319 |
+
If set to `True`, will wrap line numbers in <a> tags. Used in
|
| 320 |
+
combination with `linenos` and `lineanchors`.
|
| 321 |
+
|
| 322 |
+
`tagsfile`
|
| 323 |
+
If set to the path of a ctags file, wrap names in anchor tags that
|
| 324 |
+
link to their definitions. `lineanchors` should be used, and the
|
| 325 |
+
tags file should specify line numbers (see the `-n` option to ctags).
|
| 326 |
+
|
| 327 |
+
.. versionadded:: 1.6
|
| 328 |
+
|
| 329 |
+
`tagurlformat`
|
| 330 |
+
A string formatting pattern used to generate links to ctags definitions.
|
| 331 |
+
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
|
| 332 |
+
Defaults to an empty string, resulting in just `#prefix-number` links.
|
| 333 |
+
|
| 334 |
+
.. versionadded:: 1.6
|
| 335 |
+
|
| 336 |
+
`filename`
|
| 337 |
+
A string used to generate a filename when rendering ``<pre>`` blocks,
|
| 338 |
+
for example if displaying source code. If `linenos` is set to
|
| 339 |
+
``'table'`` then the filename will be rendered in an initial row
|
| 340 |
+
containing a single `<th>` which spans both columns.
|
| 341 |
+
|
| 342 |
+
.. versionadded:: 2.1
|
| 343 |
+
|
| 344 |
+
`wrapcode`
|
| 345 |
+
Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
|
| 346 |
+
by the HTML5 specification.
|
| 347 |
+
|
| 348 |
+
.. versionadded:: 2.4
|
| 349 |
+
|
| 350 |
+
`debug_token_types`
|
| 351 |
+
Add ``title`` attributes to all token ``<span>`` tags that show the
|
| 352 |
+
name of the token.
|
| 353 |
+
|
| 354 |
+
.. versionadded:: 2.10
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
**Subclassing the HTML formatter**
|
| 358 |
+
|
| 359 |
+
.. versionadded:: 0.7
|
| 360 |
+
|
| 361 |
+
The HTML formatter is now built in a way that allows easy subclassing, thus
|
| 362 |
+
customizing the output HTML code. The `format()` method calls
|
| 363 |
+
`self._format_lines()` which returns a generator that yields tuples of ``(1,
|
| 364 |
+
line)``, where the ``1`` indicates that the ``line`` is a line of the
|
| 365 |
+
formatted source code.
|
| 366 |
+
|
| 367 |
+
If the `nowrap` option is set, the generator is the iterated over and the
|
| 368 |
+
resulting HTML is output.
|
| 369 |
+
|
| 370 |
+
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
|
| 371 |
+
other generators. These may add some HTML code to the one generated by
|
| 372 |
+
`_format_lines()`, either by modifying the lines generated by the latter,
|
| 373 |
+
then yielding them again with ``(1, line)``, and/or by yielding other HTML
|
| 374 |
+
code before or after the lines, with ``(0, html)``. The distinction between
|
| 375 |
+
source lines and other code makes it possible to wrap the generator multiple
|
| 376 |
+
times.
|
| 377 |
+
|
| 378 |
+
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
|
| 379 |
+
|
| 380 |
+
A custom `HtmlFormatter` subclass could look like this:
|
| 381 |
+
|
| 382 |
+
.. sourcecode:: python
|
| 383 |
+
|
| 384 |
+
class CodeHtmlFormatter(HtmlFormatter):
|
| 385 |
+
|
| 386 |
+
def wrap(self, source, *, include_div):
|
| 387 |
+
return self._wrap_code(source)
|
| 388 |
+
|
| 389 |
+
def _wrap_code(self, source):
|
| 390 |
+
yield 0, '<code>'
|
| 391 |
+
for i, t in source:
|
| 392 |
+
if i == 1:
|
| 393 |
+
# it's a line of formatted code
|
| 394 |
+
t += '<br>'
|
| 395 |
+
yield i, t
|
| 396 |
+
yield 0, '</code>'
|
| 397 |
+
|
| 398 |
+
This results in wrapping the formatted lines with a ``<code>`` tag, where the
|
| 399 |
+
source lines are broken using ``<br>`` tags.
|
| 400 |
+
|
| 401 |
+
After calling `wrap()`, the `format()` method also adds the "line numbers"
|
| 402 |
+
and/or "full document" wrappers if the respective options are set. Then, all
|
| 403 |
+
HTML yielded by the wrapped generator is output.
|
| 404 |
+
"""
|
| 405 |
+
|
| 406 |
+
name = 'HTML'
|
| 407 |
+
aliases = ['html']
|
| 408 |
+
filenames = ['*.html', '*.htm']
|
| 409 |
+
|
| 410 |
+
def __init__(self, **options):
|
| 411 |
+
Formatter.__init__(self, **options)
|
| 412 |
+
self.title = self._decodeifneeded(self.title)
|
| 413 |
+
self.nowrap = get_bool_opt(options, 'nowrap', False)
|
| 414 |
+
self.noclasses = get_bool_opt(options, 'noclasses', False)
|
| 415 |
+
self.classprefix = options.get('classprefix', '')
|
| 416 |
+
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
|
| 417 |
+
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
|
| 418 |
+
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
|
| 419 |
+
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
|
| 420 |
+
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
|
| 421 |
+
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
|
| 422 |
+
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
|
| 423 |
+
self.filename = self._decodeifneeded(options.get('filename', ''))
|
| 424 |
+
self.wrapcode = get_bool_opt(options, 'wrapcode', False)
|
| 425 |
+
self.span_element_openers = {}
|
| 426 |
+
self.debug_token_types = get_bool_opt(options, 'debug_token_types', False)
|
| 427 |
+
|
| 428 |
+
if self.tagsfile:
|
| 429 |
+
if not ctags:
|
| 430 |
+
raise RuntimeError('The "ctags" package must to be installed '
|
| 431 |
+
'to be able to use the "tagsfile" feature.')
|
| 432 |
+
self._ctags = ctags.CTags(self.tagsfile)
|
| 433 |
+
|
| 434 |
+
linenos = options.get('linenos', False)
|
| 435 |
+
if linenos == 'inline':
|
| 436 |
+
self.linenos = 2
|
| 437 |
+
elif linenos:
|
| 438 |
+
# compatibility with <= 0.7
|
| 439 |
+
self.linenos = 1
|
| 440 |
+
else:
|
| 441 |
+
self.linenos = 0
|
| 442 |
+
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
|
| 443 |
+
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
|
| 444 |
+
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
|
| 445 |
+
self.nobackground = get_bool_opt(options, 'nobackground', False)
|
| 446 |
+
self.lineseparator = options.get('lineseparator', '\n')
|
| 447 |
+
self.lineanchors = options.get('lineanchors', '')
|
| 448 |
+
self.linespans = options.get('linespans', '')
|
| 449 |
+
self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False)
|
| 450 |
+
self.hl_lines = set()
|
| 451 |
+
for lineno in get_list_opt(options, 'hl_lines', []):
|
| 452 |
+
try:
|
| 453 |
+
self.hl_lines.add(int(lineno))
|
| 454 |
+
except ValueError:
|
| 455 |
+
pass
|
| 456 |
+
|
| 457 |
+
self._create_stylesheet()
|
| 458 |
+
|
| 459 |
+
def _get_css_class(self, ttype):
|
| 460 |
+
"""Return the css class of this token type prefixed with
|
| 461 |
+
the classprefix option."""
|
| 462 |
+
ttypeclass = _get_ttype_class(ttype)
|
| 463 |
+
if ttypeclass:
|
| 464 |
+
return self.classprefix + ttypeclass
|
| 465 |
+
return ''
|
| 466 |
+
|
| 467 |
+
def _get_css_classes(self, ttype):
|
| 468 |
+
"""Return the CSS classes of this token type prefixed with the classprefix option."""
|
| 469 |
+
cls = self._get_css_class(ttype)
|
| 470 |
+
while ttype not in STANDARD_TYPES:
|
| 471 |
+
ttype = ttype.parent
|
| 472 |
+
cls = self._get_css_class(ttype) + ' ' + cls
|
| 473 |
+
return cls or ''
|
| 474 |
+
|
| 475 |
+
def _get_css_inline_styles(self, ttype):
|
| 476 |
+
"""Return the inline CSS styles for this token type."""
|
| 477 |
+
cclass = self.ttype2class.get(ttype)
|
| 478 |
+
while cclass is None:
|
| 479 |
+
ttype = ttype.parent
|
| 480 |
+
cclass = self.ttype2class.get(ttype)
|
| 481 |
+
return cclass or ''
|
| 482 |
+
|
| 483 |
+
def _create_stylesheet(self):
|
| 484 |
+
t2c = self.ttype2class = {Token: ''}
|
| 485 |
+
c2s = self.class2style = {}
|
| 486 |
+
for ttype, ndef in self.style:
|
| 487 |
+
name = self._get_css_class(ttype)
|
| 488 |
+
style = ''
|
| 489 |
+
if ndef['color']:
|
| 490 |
+
style += 'color: %s; ' % webify(ndef['color'])
|
| 491 |
+
if ndef['bold']:
|
| 492 |
+
style += 'font-weight: bold; '
|
| 493 |
+
if ndef['italic']:
|
| 494 |
+
style += 'font-style: italic; '
|
| 495 |
+
if ndef['underline']:
|
| 496 |
+
style += 'text-decoration: underline; '
|
| 497 |
+
if ndef['bgcolor']:
|
| 498 |
+
style += 'background-color: %s; ' % webify(ndef['bgcolor'])
|
| 499 |
+
if ndef['border']:
|
| 500 |
+
style += 'border: 1px solid %s; ' % webify(ndef['border'])
|
| 501 |
+
if style:
|
| 502 |
+
t2c[ttype] = name
|
| 503 |
+
# save len(ttype) to enable ordering the styles by
|
| 504 |
+
# hierarchy (necessary for CSS cascading rules!)
|
| 505 |
+
c2s[name] = (style[:-2], ttype, len(ttype))
|
| 506 |
+
|
| 507 |
+
def get_style_defs(self, arg=None):
|
| 508 |
+
"""
|
| 509 |
+
Return CSS style definitions for the classes produced by the current
|
| 510 |
+
highlighting style. ``arg`` can be a string or list of selectors to
|
| 511 |
+
insert before the token type classes.
|
| 512 |
+
"""
|
| 513 |
+
style_lines = []
|
| 514 |
+
|
| 515 |
+
style_lines.extend(self.get_linenos_style_defs())
|
| 516 |
+
style_lines.extend(self.get_background_style_defs(arg))
|
| 517 |
+
style_lines.extend(self.get_token_style_defs(arg))
|
| 518 |
+
|
| 519 |
+
return '\n'.join(style_lines)
|
| 520 |
+
|
| 521 |
+
def get_token_style_defs(self, arg=None):
|
| 522 |
+
prefix = self.get_css_prefix(arg)
|
| 523 |
+
|
| 524 |
+
styles = [
|
| 525 |
+
(level, ttype, cls, style)
|
| 526 |
+
for cls, (style, ttype, level) in self.class2style.items()
|
| 527 |
+
if cls and style
|
| 528 |
+
]
|
| 529 |
+
styles.sort()
|
| 530 |
+
|
| 531 |
+
lines = [
|
| 532 |
+
'%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
|
| 533 |
+
for (level, ttype, cls, style) in styles
|
| 534 |
+
]
|
| 535 |
+
|
| 536 |
+
return lines
|
| 537 |
+
|
| 538 |
+
def get_background_style_defs(self, arg=None):
|
| 539 |
+
prefix = self.get_css_prefix(arg)
|
| 540 |
+
bg_color = self.style.background_color
|
| 541 |
+
hl_color = self.style.highlight_color
|
| 542 |
+
|
| 543 |
+
lines = []
|
| 544 |
+
|
| 545 |
+
if arg and not self.nobackground and bg_color is not None:
|
| 546 |
+
text_style = ''
|
| 547 |
+
if Text in self.ttype2class:
|
| 548 |
+
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
|
| 549 |
+
lines.insert(
|
| 550 |
+
0, '%s{ background: %s;%s }' % (
|
| 551 |
+
prefix(''), bg_color, text_style
|
| 552 |
+
)
|
| 553 |
+
)
|
| 554 |
+
if hl_color is not None:
|
| 555 |
+
lines.insert(
|
| 556 |
+
0, '%s { background-color: %s }' % (prefix('hll'), hl_color)
|
| 557 |
+
)
|
| 558 |
+
|
| 559 |
+
return lines
|
| 560 |
+
|
| 561 |
+
def get_linenos_style_defs(self):
|
| 562 |
+
lines = [
|
| 563 |
+
'pre { %s }' % self._pre_style,
|
| 564 |
+
'td.linenos .normal { %s }' % self._linenos_style,
|
| 565 |
+
'span.linenos { %s }' % self._linenos_style,
|
| 566 |
+
'td.linenos .special { %s }' % self._linenos_special_style,
|
| 567 |
+
'span.linenos.special { %s }' % self._linenos_special_style,
|
| 568 |
+
]
|
| 569 |
+
|
| 570 |
+
return lines
|
| 571 |
+
|
| 572 |
+
def get_css_prefix(self, arg):
|
| 573 |
+
if arg is None:
|
| 574 |
+
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
|
| 575 |
+
if isinstance(arg, str):
|
| 576 |
+
args = [arg]
|
| 577 |
+
else:
|
| 578 |
+
args = list(arg)
|
| 579 |
+
|
| 580 |
+
def prefix(cls):
|
| 581 |
+
if cls:
|
| 582 |
+
cls = '.' + cls
|
| 583 |
+
tmp = []
|
| 584 |
+
for arg in args:
|
| 585 |
+
tmp.append((arg and arg + ' ' or '') + cls)
|
| 586 |
+
return ', '.join(tmp)
|
| 587 |
+
|
| 588 |
+
return prefix
|
| 589 |
+
|
| 590 |
+
@property
|
| 591 |
+
def _pre_style(self):
|
| 592 |
+
return 'line-height: 125%;'
|
| 593 |
+
|
| 594 |
+
@property
|
| 595 |
+
def _linenos_style(self):
|
| 596 |
+
return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
|
| 597 |
+
self.style.line_number_color,
|
| 598 |
+
self.style.line_number_background_color
|
| 599 |
+
)
|
| 600 |
+
|
| 601 |
+
@property
|
| 602 |
+
def _linenos_special_style(self):
|
| 603 |
+
return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
|
| 604 |
+
self.style.line_number_special_color,
|
| 605 |
+
self.style.line_number_special_background_color
|
| 606 |
+
)
|
| 607 |
+
|
| 608 |
+
def _decodeifneeded(self, value):
|
| 609 |
+
if isinstance(value, bytes):
|
| 610 |
+
if self.encoding:
|
| 611 |
+
return value.decode(self.encoding)
|
| 612 |
+
return value.decode()
|
| 613 |
+
return value
|
| 614 |
+
|
| 615 |
+
def _wrap_full(self, inner, outfile):
|
| 616 |
+
if self.cssfile:
|
| 617 |
+
if os.path.isabs(self.cssfile):
|
| 618 |
+
# it's an absolute filename
|
| 619 |
+
cssfilename = self.cssfile
|
| 620 |
+
else:
|
| 621 |
+
try:
|
| 622 |
+
filename = outfile.name
|
| 623 |
+
if not filename or filename[0] == '<':
|
| 624 |
+
# pseudo files, e.g. name == '<fdopen>'
|
| 625 |
+
raise AttributeError
|
| 626 |
+
cssfilename = os.path.join(os.path.dirname(filename),
|
| 627 |
+
self.cssfile)
|
| 628 |
+
except AttributeError:
|
| 629 |
+
print('Note: Cannot determine output file name, '
|
| 630 |
+
'using current directory as base for the CSS file name',
|
| 631 |
+
file=sys.stderr)
|
| 632 |
+
cssfilename = self.cssfile
|
| 633 |
+
# write CSS file only if noclobber_cssfile isn't given as an option.
|
| 634 |
+
try:
|
| 635 |
+
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
|
| 636 |
+
with open(cssfilename, "w", encoding="utf-8") as cf:
|
| 637 |
+
cf.write(CSSFILE_TEMPLATE %
|
| 638 |
+
{'styledefs': self.get_style_defs('body')})
|
| 639 |
+
except OSError as err:
|
| 640 |
+
err.strerror = 'Error writing CSS file: ' + err.strerror
|
| 641 |
+
raise
|
| 642 |
+
|
| 643 |
+
yield 0, (DOC_HEADER_EXTERNALCSS %
|
| 644 |
+
dict(title=self.title,
|
| 645 |
+
cssfile=self.cssfile,
|
| 646 |
+
encoding=self.encoding))
|
| 647 |
+
else:
|
| 648 |
+
yield 0, (DOC_HEADER %
|
| 649 |
+
dict(title=self.title,
|
| 650 |
+
styledefs=self.get_style_defs('body'),
|
| 651 |
+
encoding=self.encoding))
|
| 652 |
+
|
| 653 |
+
yield from inner
|
| 654 |
+
yield 0, DOC_FOOTER
|
| 655 |
+
|
| 656 |
+
def _wrap_tablelinenos(self, inner):
|
| 657 |
+
dummyoutfile = StringIO()
|
| 658 |
+
lncount = 0
|
| 659 |
+
for t, line in inner:
|
| 660 |
+
if t:
|
| 661 |
+
lncount += 1
|
| 662 |
+
dummyoutfile.write(line)
|
| 663 |
+
|
| 664 |
+
fl = self.linenostart
|
| 665 |
+
mw = len(str(lncount + fl - 1))
|
| 666 |
+
sp = self.linenospecial
|
| 667 |
+
st = self.linenostep
|
| 668 |
+
anchor_name = self.lineanchors or self.linespans
|
| 669 |
+
aln = self.anchorlinenos
|
| 670 |
+
nocls = self.noclasses
|
| 671 |
+
|
| 672 |
+
lines = []
|
| 673 |
+
|
| 674 |
+
for i in range(fl, fl+lncount):
|
| 675 |
+
print_line = i % st == 0
|
| 676 |
+
special_line = sp and i % sp == 0
|
| 677 |
+
|
| 678 |
+
if print_line:
|
| 679 |
+
line = '%*d' % (mw, i)
|
| 680 |
+
if aln:
|
| 681 |
+
line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
|
| 682 |
+
else:
|
| 683 |
+
line = ' ' * mw
|
| 684 |
+
|
| 685 |
+
if nocls:
|
| 686 |
+
if special_line:
|
| 687 |
+
style = ' style="%s"' % self._linenos_special_style
|
| 688 |
+
else:
|
| 689 |
+
style = ' style="%s"' % self._linenos_style
|
| 690 |
+
else:
|
| 691 |
+
if special_line:
|
| 692 |
+
style = ' class="special"'
|
| 693 |
+
else:
|
| 694 |
+
style = ' class="normal"'
|
| 695 |
+
|
| 696 |
+
if style:
|
| 697 |
+
line = '<span%s>%s</span>' % (style, line)
|
| 698 |
+
|
| 699 |
+
lines.append(line)
|
| 700 |
+
|
| 701 |
+
ls = '\n'.join(lines)
|
| 702 |
+
|
| 703 |
+
# If a filename was specified, we can't put it into the code table as it
|
| 704 |
+
# would misalign the line numbers. Hence we emit a separate row for it.
|
| 705 |
+
filename_tr = ""
|
| 706 |
+
if self.filename:
|
| 707 |
+
filename_tr = (
|
| 708 |
+
'<tr><th colspan="2" class="filename">'
|
| 709 |
+
'<span class="filename">' + self.filename + '</span>'
|
| 710 |
+
'</th></tr>')
|
| 711 |
+
|
| 712 |
+
# in case you wonder about the seemingly redundant <div> here: since the
|
| 713 |
+
# content in the other cell also is wrapped in a div, some browsers in
|
| 714 |
+
# some configurations seem to mess up the formatting...
|
| 715 |
+
yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
|
| 716 |
+
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
|
| 717 |
+
ls + '</pre></div></td><td class="code">')
|
| 718 |
+
yield 0, '<div>'
|
| 719 |
+
yield 0, dummyoutfile.getvalue()
|
| 720 |
+
yield 0, '</div>'
|
| 721 |
+
yield 0, '</td></tr></table>'
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
def _wrap_inlinelinenos(self, inner):
|
| 725 |
+
# need a list of lines since we need the width of a single number :(
|
| 726 |
+
inner_lines = list(inner)
|
| 727 |
+
sp = self.linenospecial
|
| 728 |
+
st = self.linenostep
|
| 729 |
+
num = self.linenostart
|
| 730 |
+
mw = len(str(len(inner_lines) + num - 1))
|
| 731 |
+
anchor_name = self.lineanchors or self.linespans
|
| 732 |
+
aln = self.anchorlinenos
|
| 733 |
+
nocls = self.noclasses
|
| 734 |
+
|
| 735 |
+
for _, inner_line in inner_lines:
|
| 736 |
+
print_line = num % st == 0
|
| 737 |
+
special_line = sp and num % sp == 0
|
| 738 |
+
|
| 739 |
+
if print_line:
|
| 740 |
+
line = '%*d' % (mw, num)
|
| 741 |
+
else:
|
| 742 |
+
line = ' ' * mw
|
| 743 |
+
|
| 744 |
+
if nocls:
|
| 745 |
+
if special_line:
|
| 746 |
+
style = ' style="%s"' % self._linenos_special_style
|
| 747 |
+
else:
|
| 748 |
+
style = ' style="%s"' % self._linenos_style
|
| 749 |
+
else:
|
| 750 |
+
if special_line:
|
| 751 |
+
style = ' class="linenos special"'
|
| 752 |
+
else:
|
| 753 |
+
style = ' class="linenos"'
|
| 754 |
+
|
| 755 |
+
if style:
|
| 756 |
+
linenos = '<span%s>%s</span>' % (style, line)
|
| 757 |
+
else:
|
| 758 |
+
linenos = line
|
| 759 |
+
|
| 760 |
+
if aln:
|
| 761 |
+
yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
|
| 762 |
+
inner_line)
|
| 763 |
+
else:
|
| 764 |
+
yield 1, linenos + inner_line
|
| 765 |
+
num += 1
|
| 766 |
+
|
| 767 |
+
def _wrap_lineanchors(self, inner):
|
| 768 |
+
s = self.lineanchors
|
| 769 |
+
# subtract 1 since we have to increment i *before* yielding
|
| 770 |
+
i = self.linenostart - 1
|
| 771 |
+
for t, line in inner:
|
| 772 |
+
if t:
|
| 773 |
+
i += 1
|
| 774 |
+
href = "" if self.linenos else ' href="#%s-%d"' % (s, i)
|
| 775 |
+
yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line
|
| 776 |
+
else:
|
| 777 |
+
yield 0, line
|
| 778 |
+
|
| 779 |
+
def _wrap_linespans(self, inner):
|
| 780 |
+
s = self.linespans
|
| 781 |
+
i = self.linenostart - 1
|
| 782 |
+
for t, line in inner:
|
| 783 |
+
if t:
|
| 784 |
+
i += 1
|
| 785 |
+
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
|
| 786 |
+
else:
|
| 787 |
+
yield 0, line
|
| 788 |
+
|
| 789 |
+
def _wrap_div(self, inner):
|
| 790 |
+
style = []
|
| 791 |
+
if (self.noclasses and not self.nobackground and
|
| 792 |
+
self.style.background_color is not None):
|
| 793 |
+
style.append('background: %s' % (self.style.background_color,))
|
| 794 |
+
if self.cssstyles:
|
| 795 |
+
style.append(self.cssstyles)
|
| 796 |
+
style = '; '.join(style)
|
| 797 |
+
|
| 798 |
+
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) +
|
| 799 |
+
(style and (' style="%s"' % style)) + '>')
|
| 800 |
+
yield from inner
|
| 801 |
+
yield 0, '</div>\n'
|
| 802 |
+
|
| 803 |
+
def _wrap_pre(self, inner):
|
| 804 |
+
style = []
|
| 805 |
+
if self.prestyles:
|
| 806 |
+
style.append(self.prestyles)
|
| 807 |
+
if self.noclasses:
|
| 808 |
+
style.append(self._pre_style)
|
| 809 |
+
style = '; '.join(style)
|
| 810 |
+
|
| 811 |
+
if self.filename and self.linenos != 1:
|
| 812 |
+
yield 0, ('<span class="filename">' + self.filename + '</span>')
|
| 813 |
+
|
| 814 |
+
# the empty span here is to keep leading empty lines from being
|
| 815 |
+
# ignored by HTML parsers
|
| 816 |
+
yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>')
|
| 817 |
+
yield from inner
|
| 818 |
+
yield 0, '</pre>'
|
| 819 |
+
|
| 820 |
+
def _wrap_code(self, inner):
|
| 821 |
+
yield 0, '<code>'
|
| 822 |
+
yield from inner
|
| 823 |
+
yield 0, '</code>'
|
| 824 |
+
|
| 825 |
+
@functools.lru_cache(maxsize=100)
|
| 826 |
+
def _translate_parts(self, value):
|
| 827 |
+
"""HTML-escape a value and split it by newlines."""
|
| 828 |
+
return value.translate(_escape_html_table).split('\n')
|
| 829 |
+
|
| 830 |
+
def _format_lines(self, tokensource):
    """
    Just format the tokens, without any wrapping tags.
    Yield individual lines.
    """
    nocls = self.noclasses
    lsep = self.lineseparator
    tagsfile = self.tagsfile

    # lspan is the <span ...> opener currently "open" across line breaks,
    # '' when no span is open; line accumulates pieces of the current line.
    lspan = ''
    line = []
    for ttype, value in tokensource:
        try:
            # Cache of per-token-type span openers, filled lazily below.
            cspan = self.span_element_openers[ttype]
        except KeyError:
            title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else ''
            if nocls:
                # Inline-style mode: resolve the token type to a concrete
                # CSS style string; '' means the token needs no span.
                css_style = self._get_css_inline_styles(ttype)
                if css_style:
                    css_style = self.class2style[css_style][0]
                    cspan = '<span style="%s"%s>' % (css_style, title)
                else:
                    cspan = ''
            else:
                # Class mode: map the token type to CSS class names.
                css_class = self._get_css_classes(ttype)
                if css_class:
                    cspan = '<span class="%s"%s>' % (css_class, title)
                else:
                    cspan = ''
            self.span_element_openers[ttype] = cspan

        # HTML-escape the value and split it on newlines (cached helper).
        parts = self._translate_parts(value)

        # Optionally link Name tokens to their ctags definition.
        if tagsfile and ttype in Token.Name:
            filename, linenumber = self._lookup_ctag(value)
            if linenumber:
                base, filename = os.path.split(filename)
                if base:
                    base += '/'
                filename, extension = os.path.splitext(filename)
                url = self.tagurlformat % {'path': base, 'fname': filename,
                                           'fext': extension}
                parts[0] = "<a href=\"%s#%s-%d\">%s" % \
                    (url, self.lineanchors, linenumber, parts[0])
                parts[-1] = parts[-1] + "</a>"

        # for all but the last line
        for part in parts[:-1]:
            if line:
                # Also check for part being non-empty, so we avoid creating
                # empty <span> tags
                if lspan != cspan and part:
                    line.extend(((lspan and '</span>'), cspan, part,
                                 (cspan and '</span>'), lsep))
                else:  # both are the same, or the current part was empty
                    line.extend((part, (lspan and '</span>'), lsep))
                yield 1, ''.join(line)
                line = []
            elif part:
                # No pending line: emit a complete self-contained line.
                yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
            else:
                # Empty line: just the separator.
                yield 1, lsep
        # for the last line
        if line and parts[-1]:
            if lspan != cspan:
                line.extend(((lspan and '</span>'), cspan, parts[-1]))
                lspan = cspan
            else:
                line.append(parts[-1])
        elif parts[-1]:
            line = [cspan, parts[-1]]
            lspan = cspan
        # else we neither have to open a new span nor set lspan

    # Flush the trailing (unfinished) line, if any.
    if line:
        line.extend(((lspan and '</span>'), lsep))
        yield 1, ''.join(line)
|
| 907 |
+
|
| 908 |
+
def _lookup_ctag(self, token):
    """Look up *token* in the ctags index.

    Returns ``(filename, linenumber)`` on a hit, ``(None, None)``
    otherwise.
    """
    entry = ctags.TagEntry()
    found = self._ctags.find(entry, token.encode(), 0)
    if not found:
        return None, None
    return entry['file'], entry['lineNumber']
|
| 914 |
+
|
| 915 |
+
def _highlight_lines(self, tokensource):
    """
    Highlighted the lines specified in the `hl_lines` option by
    post-processing the token stream coming from `_format_lines`.
    """
    hls = self.hl_lines

    for i, (t, value) in enumerate(tokensource):
        # NOTE(review): pieces with t != 1 are yielded verbatim here and
        # then fall through to be yielded (wrapped) again below; in
        # practice `_format_lines` emits only t == 1 items so the branch
        # never double-emits — confirm against upstream before changing.
        if t != 1:
            yield t, value
        if i + 1 in hls:  # i + 1 because Python indexes start at 0
            if self.noclasses:
                # Inline-style mode: emit a background-color span.
                style = ''
                if self.style.highlight_color is not None:
                    style = (' style="background-color: %s"' %
                             (self.style.highlight_color,))
                yield 1, '<span%s>%s</span>' % (style, value)
            else:
                # Class mode: use the "hll" highlight class.
                yield 1, '<span class="hll">%s</span>' % value
        else:
            yield 1, value
|
| 936 |
+
|
| 937 |
+
def wrap(self, source):
    """
    Wrap ``source`` — a generator yielding individual lines — in the
    standard wrappers: an optional ``<code>`` element (when ``wrapcode``
    is set) inside a ``<pre>``. See the docstring of `format` for the
    tuple protocol. Can be overridden.
    """
    if self.wrapcode:
        source = self._wrap_code(source)
    return self._wrap_pre(source)
|
| 951 |
+
|
| 952 |
+
def format_unencoded(self, tokensource, outfile):
    """
    The formatting process uses several nested generators; which of
    them are used is determined by the user's options.

    Each generator should take at least one argument, ``inner``,
    and wrap the pieces of text generated by this.

    Always yield 2-tuples: (code, text). If "code" is 1, the text
    is part of the original tokensource being highlighted, if it's
    0, the text is some piece of wrapping. This makes it possible to
    use several different wrappers that process the original source
    linewise, e.g. line number generators.
    """
    # Innermost stage: escaped, span-tagged source lines.
    source = self._format_lines(tokensource)

    # As a special case, we wrap line numbers before line highlighting
    # so the line numbers get wrapped in the highlighting tag.
    if not self.nowrap and self.linenos == 2:
        source = self._wrap_inlinelinenos(source)

    if self.hl_lines:
        source = self._highlight_lines(source)

    # Outer wrappers are applied innermost-first; each stage consumes the
    # previous generator, so the order below determines nesting.
    if not self.nowrap:
        if self.lineanchors:
            source = self._wrap_lineanchors(source)
        if self.linespans:
            source = self._wrap_linespans(source)
        source = self.wrap(source)
        if self.linenos == 1:
            # Table mode: line numbers live in a separate <td>.
            source = self._wrap_tablelinenos(source)
        source = self._wrap_div(source)
        if self.full:
            source = self._wrap_full(source, outfile)

    # Drive the generator chain and write everything out; the (code,
    # text) distinction is irrelevant at this point.
    for t, piece in source:
        outfile.write(piece)
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/img.py
ADDED
|
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.img
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for Pixmap output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import os
|
| 12 |
+
import sys
|
| 13 |
+
|
| 14 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 15 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
|
| 16 |
+
get_choice_opt
|
| 17 |
+
|
| 18 |
+
import subprocess
|
| 19 |
+
|
| 20 |
+
# Import this carefully
|
| 21 |
+
try:
|
| 22 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 23 |
+
pil_available = True
|
| 24 |
+
except ImportError:
|
| 25 |
+
pil_available = False
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import _winreg
|
| 29 |
+
except ImportError:
|
| 30 |
+
try:
|
| 31 |
+
import winreg as _winreg
|
| 32 |
+
except ImportError:
|
| 33 |
+
_winreg = None
|
| 34 |
+
|
| 35 |
+
# Public API of this module.
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
           'BmpImageFormatter']


# For some unknown reason every font calls it something different
# (candidate sub-family names tried in order when resolving a font file).
STYLES = {
    'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
    'ITALIC': ['Oblique', 'Italic'],
    'BOLD': ['Bold'],
    'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}

# A sane default for modern systems (per-platform default font names,
# used by FontManager when no font_name option is given).
DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
DEFAULT_FONT_NAME_MAC = 'Menlo'
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class PilNotAvailable(ImportError):
    """When Python imaging library is not available"""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class FontNotFound(Exception):
    """When there are no usable fonts specified"""
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class FontManager:
    """
    Manages a set of fonts: normal, italic, bold, etc...

    Resolves the requested font family to concrete font files using the
    platform's facilities (fc-list on *nix, font directories on macOS,
    the registry on Windows) and exposes PIL ``ImageFont`` objects.
    """

    def __init__(self, font_name, font_size=14):
        self.font_name = font_name
        self.font_size = font_size
        # Maps style key ('NORMAL', 'ITALIC', 'BOLD', 'BOLDITALIC') to an
        # ImageFont instance; filled by the per-platform _create_* method.
        self.fonts = {}
        self.encoding = None
        if sys.platform.startswith('win'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_WIN
            self._create_win()
        elif sys.platform.startswith('darwin'):
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_MAC
            self._create_mac()
        else:
            if not font_name:
                self.font_name = DEFAULT_FONT_NAME_NIX
            self._create_nix()

    def _get_nix_font_path(self, name, style):
        # Query fontconfig for the file path of the given family/style;
        # returns None (implicitly or explicitly) when nothing matches.
        proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
                                stdout=subprocess.PIPE, stderr=None)
        stdout, _ = proc.communicate()
        if proc.returncode == 0:
            lines = stdout.splitlines()
            for line in lines:
                if line.startswith(b'Fontconfig warning:'):
                    continue
                path = line.decode().strip().strip(':')
                if path:
                    return path
            return None

    def _create_nix(self):
        # for/else: the else runs only when no candidate name matched,
        # i.e. the loop completed without break.
        for name in STYLES['NORMAL']:
            path = self._get_nix_font_path(self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_nix_font_path(self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                # Fall back to a heavier/plainer variant already resolved.
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']

    def _get_mac_font_path(self, font_map, name, style):
        # font_map keys are lower-cased basenames without extension.
        return font_map.get((name + ' ' + style).strip().lower())

    def _create_mac(self):
        # Build a basename -> path map from the standard font directories.
        font_map = {}
        for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
                         '/Library/Fonts/', '/System/Library/Fonts/'):
            font_map.update(
                (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
                for f in os.listdir(font_dir)
                if f.lower().endswith(('ttf', 'ttc')))

        for name in STYLES['NORMAL']:
            path = self._get_mac_font_path(font_map, self.font_name, name)
            if path is not None:
                self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                break
        else:
            raise FontNotFound('No usable fonts named: "%s"' %
                               self.font_name)
        for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
            for stylename in STYLES[style]:
                path = self._get_mac_font_path(font_map, self.font_name, stylename)
                if path is not None:
                    self.fonts[style] = ImageFont.truetype(path, self.font_size)
                    break
            else:
                # Fall back to a heavier/plainer variant already resolved.
                if style == 'BOLDITALIC':
                    self.fonts[style] = self.fonts['BOLD']
                else:
                    self.fonts[style] = self.fonts['NORMAL']

    def _lookup_win(self, key, basename, styles, fail=False):
        # Try each candidate registry value name; return on the first hit.
        for suffix in ('', ' (TrueType)'):
            for style in styles:
                try:
                    valname = '%s%s%s' % (basename, style and ' '+style, suffix)
                    val, _ = _winreg.QueryValueEx(key, valname)
                    return val
                except OSError:
                    continue
        else:
            # for/else: reached only when every candidate failed (the loop
            # never breaks, only returns on success).
            if fail:
                raise FontNotFound('Font %s (%s) not found in registry' %
                                   (basename, styles[0]))
            return None

    def _create_win(self):
        lookuperror = None
        keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
                     (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
                     (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
                     (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
        for keyname in keynames:
            try:
                key = _winreg.OpenKey(*keyname)
                try:
                    path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
                    self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
                    for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
                        path = self._lookup_win(key, self.font_name, STYLES[style])
                        if path:
                            self.fonts[style] = ImageFont.truetype(path, self.font_size)
                        else:
                            if style == 'BOLDITALIC':
                                self.fonts[style] = self.fonts['BOLD']
                            else:
                                self.fonts[style] = self.fonts['NORMAL']
                    return
                except FontNotFound as err:
                    lookuperror = err
                finally:
                    _winreg.CloseKey(key)
            except OSError:
                pass
        else:
            # If we get here, we checked all registry keys and had no luck
            # We can be in one of two situations now:
            # * All key lookups failed. In this case lookuperror is None and we
            #   will raise a generic error
            # * At least one lookup failed with a FontNotFound error. In this
            #   case, we will raise that as a more specific error
            if lookuperror:
                raise lookuperror
            raise FontNotFound('Can\'t open Windows font registry key')

    def get_char_size(self):
        """
        Get the character size.
        """
        # 'M' is used as a representative glyph for a monospace cell.
        return self.get_text_size('M')

    def get_text_size(self, text):
        """
        Get the text size (width, height).
        """
        font = self.fonts['NORMAL']
        if hasattr(font, 'getbbox'):  # Pillow >= 9.2.0
            return font.getbbox(text)[2:4]
        else:
            return font.getsize(text)

    def get_font(self, bold, oblique):
        """
        Get the font based on bold and italic flags.
        """
        if bold and oblique:
            return self.fonts['BOLDITALIC']
        elif bold:
            return self.fonts['BOLD']
        elif oblique:
            return self.fonts['ITALIC']
        else:
            return self.fonts['NORMAL']
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
class ImageFormatter(Formatter):
    """
    Create a PNG image from source code. This uses the Python Imaging Library to
    generate a pixmap from the source code.

    .. versionadded:: 0.10

    Additional options accepted:

    `image_format`
        An image format to output to that is recognised by PIL, these include:

        * "PNG" (default)
        * "JPEG"
        * "BMP"
        * "GIF"

    `line_pad`
        The extra spacing (in pixels) between each line of text.

        Default: 2

    `font_name`
        The font name to be used as the base font from which others, such as
        bold and italic fonts will be generated. This really should be a
        monospace font to look sane.

        Default: "Courier New" on Windows, "Menlo" on Mac OS, and
                 "DejaVu Sans Mono" on \\*nix

    `font_size`
        The font size in points to be used.

        Default: 14

    `image_pad`
        The padding, in pixels to be used at each edge of the resulting image.

        Default: 10

    `line_numbers`
        Whether line numbers should be shown: True/False

        Default: True

    `line_number_start`
        The line number of the first line.

        Default: 1

    `line_number_step`
        The step used when printing line numbers.

        Default: 1

    `line_number_bg`
        The background colour (in "#123456" format) of the line number bar, or
        None to use the style background color.

        Default: "#eed"

    `line_number_fg`
        The text color of the line numbers (in "#123456"-like format).

        Default: "#886"

    `line_number_chars`
        The number of columns of line numbers allowable in the line number
        margin.

        Default: 2

    `line_number_bold`
        Whether line numbers will be bold: True/False

        Default: False

    `line_number_italic`
        Whether line numbers will be italicized: True/False

        Default: False

    `line_number_separator`
        Whether a line will be drawn between the line number area and the
        source code area: True/False

        Default: True

    `line_number_pad`
        The horizontal padding (in pixels) between the line number margin, and
        the source code area.

        Default: 6

    `hl_lines`
        Specify a list of lines to be highlighted.

        .. versionadded:: 1.2

        Default: empty list

    `hl_color`
        Specify the color for highlighting lines.

        .. versionadded:: 1.2

        Default: highlight color of the selected style
    """

    # Required by the pygments mapper
    name = 'img'
    aliases = ['img', 'IMG', 'png']
    filenames = ['*.png']

    unicodeoutput = False

    default_image_format = 'png'

    def __init__(self, **options):
        """
        See the class docstring for explanation of options.
        """
        if not pil_available:
            raise PilNotAvailable(
                'Python Imaging Library is required for this formatter')
        Formatter.__init__(self, **options)
        self.encoding = 'latin1'  # let pygments.format() do the right thing
        # Read the style
        self.styles = dict(self.style)
        if self.style.background_color is None:
            self.background_color = '#fff'
        else:
            self.background_color = self.style.background_color
        # Image options
        self.image_format = get_choice_opt(
            options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
            self.default_image_format, normcase=True)
        self.image_pad = get_int_opt(options, 'image_pad', 10)
        self.line_pad = get_int_opt(options, 'line_pad', 2)
        # The fonts
        fontsize = get_int_opt(options, 'font_size', 14)
        self.fonts = FontManager(options.get('font_name', ''), fontsize)
        self.fontw, self.fonth = self.fonts.get_char_size()
        # Line number options
        self.line_number_fg = options.get('line_number_fg', '#886')
        self.line_number_bg = options.get('line_number_bg', '#eed')
        self.line_number_chars = get_int_opt(options,
                                             'line_number_chars', 2)
        self.line_number_bold = get_bool_opt(options,
                                             'line_number_bold', False)
        self.line_number_italic = get_bool_opt(options,
                                               'line_number_italic', False)
        self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
        self.line_numbers = get_bool_opt(options, 'line_numbers', True)
        self.line_number_separator = get_bool_opt(options,
                                                  'line_number_separator', True)
        self.line_number_step = get_int_opt(options, 'line_number_step', 1)
        self.line_number_start = get_int_opt(options, 'line_number_start', 1)
        if self.line_numbers:
            self.line_number_width = (self.fontw * self.line_number_chars +
                                      self.line_number_pad * 2)
        else:
            self.line_number_width = 0
        # Non-integer entries in hl_lines are silently ignored.
        self.hl_lines = []
        hl_lines_str = get_list_opt(options, 'hl_lines', [])
        for line in hl_lines_str:
            try:
                self.hl_lines.append(int(line))
            except ValueError:
                pass
        self.hl_color = options.get('hl_color',
                                    self.style.highlight_color) or '#f90'
        # List of (pos, text, font, text_fg, text_bg) tuples painted in format().
        self.drawables = []

    def get_style_defs(self, arg=''):
        raise NotImplementedError('The -S option is meaningless for the image '
                                  'formatter. Use -O style=<stylename> instead.')

    def _get_line_height(self):
        """
        Get the height of a line.
        """
        return self.fonth + self.line_pad

    def _get_line_y(self, lineno):
        """
        Get the Y coordinate of a line number.
        """
        return lineno * self._get_line_height() + self.image_pad

    def _get_char_width(self):
        """
        Get the width of a character.
        """
        return self.fontw

    def _get_char_x(self, linelength):
        """
        Get the X coordinate of a character position.
        """
        return linelength + self.image_pad + self.line_number_width

    def _get_text_pos(self, linelength, lineno):
        """
        Get the actual position for a character and line position.
        """
        return self._get_char_x(linelength), self._get_line_y(lineno)

    def _get_linenumber_pos(self, lineno):
        """
        Get the actual position for the start of a line number.
        """
        return (self.image_pad, self._get_line_y(lineno))

    def _get_text_color(self, style):
        """
        Get the correct color for the token from the style.
        """
        if style['color'] is not None:
            fill = '#' + style['color']
        else:
            fill = '#000'
        return fill

    def _get_text_bg_color(self, style):
        """
        Get the correct background color for the token from the style.
        """
        if style['bgcolor'] is not None:
            bg_color = '#' + style['bgcolor']
        else:
            bg_color = None
        return bg_color

    def _get_style_font(self, style):
        """
        Get the correct font for the style.
        """
        return self.fonts.get_font(style['bold'], style['italic'])

    def _get_image_size(self, maxlinelength, maxlineno):
        """
        Get the required image size.
        """
        return (self._get_char_x(maxlinelength) + self.image_pad,
                self._get_line_y(maxlineno + 0) + self.image_pad)

    def _draw_linenumber(self, posno, lineno):
        """
        Remember a line number drawable to paint later.
        """
        self._draw_text(
            self._get_linenumber_pos(posno),
            str(lineno).rjust(self.line_number_chars),
            font=self.fonts.get_font(self.line_number_bold,
                                     self.line_number_italic),
            text_fg=self.line_number_fg,
            text_bg=None,
        )

    def _draw_text(self, pos, text, font, text_fg, text_bg):
        """
        Remember a single drawable tuple to paint later.
        """
        self.drawables.append((pos, text, font, text_fg, text_bg))

    def _create_drawables(self, tokensource):
        """
        Create drawables for the token content.
        """
        lineno = charno = maxcharno = 0
        maxlinelength = linelength = 0
        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled ancestor is found.
            while ttype not in self.styles:
                ttype = ttype.parent
            style = self.styles[ttype]
            # TODO: make sure tab expansion happens earlier in the chain.  It
            # really ought to be done on the input, as to do it right here is
            # quite complex.
            value = value.expandtabs(4)
            lines = value.splitlines(True)
            # print lines
            for i, line in enumerate(lines):
                temp = line.rstrip('\n')
                if temp:
                    self._draw_text(
                        self._get_text_pos(linelength, lineno),
                        temp,
                        font = self._get_style_font(style),
                        text_fg = self._get_text_color(style),
                        text_bg = self._get_text_bg_color(style),
                    )
                    temp_width, _ = self.fonts.get_text_size(temp)
                    linelength += temp_width
                    maxlinelength = max(maxlinelength, linelength)
                    charno += len(temp)
                    maxcharno = max(maxcharno, charno)
                if line.endswith('\n'):
                    # add a line for each extra line in the value
                    linelength = 0
                    charno = 0
                    lineno += 1
        self.maxlinelength = maxlinelength
        self.maxcharno = maxcharno
        self.maxlineno = lineno

    def _draw_line_numbers(self):
        """
        Create drawables for the line numbers.
        """
        if not self.line_numbers:
            return
        for p in range(self.maxlineno):
            n = p + self.line_number_start
            if (n % self.line_number_step) == 0:
                self._draw_linenumber(p, n)

    def _paint_line_number_bg(self, im):
        """
        Paint the line number background on the image.
        """
        if not self.line_numbers:
            return
        if self.line_number_fg is None:
            return
        draw = ImageDraw.Draw(im)
        recth = im.size[-1]
        rectw = self.image_pad + self.line_number_width - self.line_number_pad
        draw.rectangle([(0, 0), (rectw, recth)],
                       fill=self.line_number_bg)
        if self.line_number_separator:
            draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
        del draw

    def format(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        This implementation calculates where it should draw each token on the
        pixmap, then calculates the required pixmap size and draws the items.
        """
        self._create_drawables(tokensource)
        self._draw_line_numbers()
        im = Image.new(
            'RGB',
            self._get_image_size(self.maxlinelength, self.maxlineno),
            self.background_color
        )
        self._paint_line_number_bg(im)
        draw = ImageDraw.Draw(im)
        # Highlight
        if self.hl_lines:
            x = self.image_pad + self.line_number_width - self.line_number_pad + 1
            recth = self._get_line_height()
            rectw = im.size[0] - x
            for linenumber in self.hl_lines:
                y = self._get_line_y(linenumber - 1)
                draw.rectangle([(x, y), (x + rectw, y + recth)],
                               fill=self.hl_color)
        for pos, value, font, text_fg, text_bg in self.drawables:
            if text_bg:
                # ImageDraw.textsize was deprecated in Pillow 9.2 and removed
                # in Pillow 10; fall back to the font's bounding box (the same
                # strategy FontManager.get_text_size uses).
                if hasattr(draw, 'textsize'):
                    text_size = draw.textsize(text=value, font=font)
                else:
                    text_size = font.getbbox(value)[2:]
                draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
            draw.text(pos, value, font=font, fill=text_fg)
        im.save(outfile, self.image_format.upper())
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
# Add one formatter per format, so that the "-f gif" option gives the correct result
|
| 604 |
+
# when used in pygmentize.
|
| 605 |
+
|
| 606 |
+
class GifImageFormatter(ImageFormatter):
|
| 607 |
+
"""
|
| 608 |
+
Create a GIF image from source code. This uses the Python Imaging Library to
|
| 609 |
+
generate a pixmap from the source code.
|
| 610 |
+
|
| 611 |
+
.. versionadded:: 1.0
|
| 612 |
+
"""
|
| 613 |
+
|
| 614 |
+
name = 'img_gif'
|
| 615 |
+
aliases = ['gif']
|
| 616 |
+
filenames = ['*.gif']
|
| 617 |
+
default_image_format = 'gif'
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
class JpgImageFormatter(ImageFormatter):
|
| 621 |
+
"""
|
| 622 |
+
Create a JPEG image from source code. This uses the Python Imaging Library to
|
| 623 |
+
generate a pixmap from the source code.
|
| 624 |
+
|
| 625 |
+
.. versionadded:: 1.0
|
| 626 |
+
"""
|
| 627 |
+
|
| 628 |
+
name = 'img_jpg'
|
| 629 |
+
aliases = ['jpg', 'jpeg']
|
| 630 |
+
filenames = ['*.jpg']
|
| 631 |
+
default_image_format = 'jpeg'
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
class BmpImageFormatter(ImageFormatter):
|
| 635 |
+
"""
|
| 636 |
+
Create a bitmap image from source code. This uses the Python Imaging Library to
|
| 637 |
+
generate a pixmap from the source code.
|
| 638 |
+
|
| 639 |
+
.. versionadded:: 1.0
|
| 640 |
+
"""
|
| 641 |
+
|
| 642 |
+
name = 'img_bmp'
|
| 643 |
+
aliases = ['bmp', 'bitmap']
|
| 644 |
+
filenames = ['*.bmp']
|
| 645 |
+
default_image_format = 'bmp'
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/irc.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.irc
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for IRC output
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
|
| 13 |
+
Number, Operator, Generic, Token, Whitespace
|
| 14 |
+
from pip._vendor.pygments.util import get_choice_opt
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
__all__ = ['IRCFormatter']
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#: Map token types to a tuple of color values for light and dark
|
| 21 |
+
#: backgrounds.
|
| 22 |
+
IRC_COLORS = {
|
| 23 |
+
Token: ('', ''),
|
| 24 |
+
|
| 25 |
+
Whitespace: ('gray', 'brightblack'),
|
| 26 |
+
Comment: ('gray', 'brightblack'),
|
| 27 |
+
Comment.Preproc: ('cyan', 'brightcyan'),
|
| 28 |
+
Keyword: ('blue', 'brightblue'),
|
| 29 |
+
Keyword.Type: ('cyan', 'brightcyan'),
|
| 30 |
+
Operator.Word: ('magenta', 'brightcyan'),
|
| 31 |
+
Name.Builtin: ('cyan', 'brightcyan'),
|
| 32 |
+
Name.Function: ('green', 'brightgreen'),
|
| 33 |
+
Name.Namespace: ('_cyan_', '_brightcyan_'),
|
| 34 |
+
Name.Class: ('_green_', '_brightgreen_'),
|
| 35 |
+
Name.Exception: ('cyan', 'brightcyan'),
|
| 36 |
+
Name.Decorator: ('brightblack', 'gray'),
|
| 37 |
+
Name.Variable: ('red', 'brightred'),
|
| 38 |
+
Name.Constant: ('red', 'brightred'),
|
| 39 |
+
Name.Attribute: ('cyan', 'brightcyan'),
|
| 40 |
+
Name.Tag: ('brightblue', 'brightblue'),
|
| 41 |
+
String: ('yellow', 'yellow'),
|
| 42 |
+
Number: ('blue', 'brightblue'),
|
| 43 |
+
|
| 44 |
+
Generic.Deleted: ('brightred', 'brightred'),
|
| 45 |
+
Generic.Inserted: ('green', 'brightgreen'),
|
| 46 |
+
Generic.Heading: ('**', '**'),
|
| 47 |
+
Generic.Subheading: ('*magenta*', '*brightmagenta*'),
|
| 48 |
+
Generic.Error: ('brightred', 'brightred'),
|
| 49 |
+
|
| 50 |
+
Error: ('_brightred_', '_brightred_'),
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
IRC_COLOR_MAP = {
|
| 55 |
+
'white': 0,
|
| 56 |
+
'black': 1,
|
| 57 |
+
'blue': 2,
|
| 58 |
+
'brightgreen': 3,
|
| 59 |
+
'brightred': 4,
|
| 60 |
+
'yellow': 5,
|
| 61 |
+
'magenta': 6,
|
| 62 |
+
'orange': 7,
|
| 63 |
+
'green': 7, #compat w/ ansi
|
| 64 |
+
'brightyellow': 8,
|
| 65 |
+
'lightgreen': 9,
|
| 66 |
+
'brightcyan': 9, # compat w/ ansi
|
| 67 |
+
'cyan': 10,
|
| 68 |
+
'lightblue': 11,
|
| 69 |
+
'red': 11, # compat w/ ansi
|
| 70 |
+
'brightblue': 12,
|
| 71 |
+
'brightmagenta': 13,
|
| 72 |
+
'brightblack': 14,
|
| 73 |
+
'gray': 15,
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
def ircformat(color, text):
|
| 77 |
+
if len(color) < 1:
|
| 78 |
+
return text
|
| 79 |
+
add = sub = ''
|
| 80 |
+
if '_' in color: # italic
|
| 81 |
+
add += '\x1D'
|
| 82 |
+
sub = '\x1D' + sub
|
| 83 |
+
color = color.strip('_')
|
| 84 |
+
if '*' in color: # bold
|
| 85 |
+
add += '\x02'
|
| 86 |
+
sub = '\x02' + sub
|
| 87 |
+
color = color.strip('*')
|
| 88 |
+
# underline (\x1F) not supported
|
| 89 |
+
# backgrounds (\x03FF,BB) not supported
|
| 90 |
+
if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
|
| 91 |
+
add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
|
| 92 |
+
sub = '\x03' + sub
|
| 93 |
+
return add + text + sub
|
| 94 |
+
return '<'+add+'>'+text+'</'+sub+'>'
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class IRCFormatter(Formatter):
|
| 98 |
+
r"""
|
| 99 |
+
Format tokens with IRC color sequences
|
| 100 |
+
|
| 101 |
+
The `get_style_defs()` method doesn't do anything special since there is
|
| 102 |
+
no support for common styles.
|
| 103 |
+
|
| 104 |
+
Options accepted:
|
| 105 |
+
|
| 106 |
+
`bg`
|
| 107 |
+
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
|
| 108 |
+
(default: ``"light"``).
|
| 109 |
+
|
| 110 |
+
`colorscheme`
|
| 111 |
+
A dictionary mapping token types to (lightbg, darkbg) color names or
|
| 112 |
+
``None`` (default: ``None`` = use builtin colorscheme).
|
| 113 |
+
|
| 114 |
+
`linenos`
|
| 115 |
+
Set to ``True`` to have line numbers in the output as well
|
| 116 |
+
(default: ``False`` = no line numbers).
|
| 117 |
+
"""
|
| 118 |
+
name = 'IRC'
|
| 119 |
+
aliases = ['irc', 'IRC']
|
| 120 |
+
filenames = []
|
| 121 |
+
|
| 122 |
+
def __init__(self, **options):
|
| 123 |
+
Formatter.__init__(self, **options)
|
| 124 |
+
self.darkbg = get_choice_opt(options, 'bg',
|
| 125 |
+
['light', 'dark'], 'light') == 'dark'
|
| 126 |
+
self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
|
| 127 |
+
self.linenos = options.get('linenos', False)
|
| 128 |
+
self._lineno = 0
|
| 129 |
+
|
| 130 |
+
def _write_lineno(self, outfile):
|
| 131 |
+
if self.linenos:
|
| 132 |
+
self._lineno += 1
|
| 133 |
+
outfile.write("%04d: " % self._lineno)
|
| 134 |
+
|
| 135 |
+
def format_unencoded(self, tokensource, outfile):
|
| 136 |
+
self._write_lineno(outfile)
|
| 137 |
+
|
| 138 |
+
for ttype, value in tokensource:
|
| 139 |
+
color = self.colorscheme.get(ttype)
|
| 140 |
+
while color is None:
|
| 141 |
+
ttype = ttype[:-1]
|
| 142 |
+
color = self.colorscheme.get(ttype)
|
| 143 |
+
if color:
|
| 144 |
+
color = color[self.darkbg]
|
| 145 |
+
spl = value.split('\n')
|
| 146 |
+
for line in spl[:-1]:
|
| 147 |
+
if line:
|
| 148 |
+
outfile.write(ircformat(color, line))
|
| 149 |
+
outfile.write('\n')
|
| 150 |
+
self._write_lineno(outfile)
|
| 151 |
+
if spl[-1]:
|
| 152 |
+
outfile.write(ircformat(color, spl[-1]))
|
| 153 |
+
else:
|
| 154 |
+
outfile.write(value)
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/latex.py
ADDED
|
@@ -0,0 +1,521 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.latex
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for LaTeX fancyvrb output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from io import StringIO
|
| 12 |
+
|
| 13 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 14 |
+
from pip._vendor.pygments.lexer import Lexer, do_insertions
|
| 15 |
+
from pip._vendor.pygments.token import Token, STANDARD_TYPES
|
| 16 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
__all__ = ['LatexFormatter']
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def escape_tex(text, commandprefix):
|
| 23 |
+
return text.replace('\\', '\x00'). \
|
| 24 |
+
replace('{', '\x01'). \
|
| 25 |
+
replace('}', '\x02'). \
|
| 26 |
+
replace('\x00', r'\%sZbs{}' % commandprefix). \
|
| 27 |
+
replace('\x01', r'\%sZob{}' % commandprefix). \
|
| 28 |
+
replace('\x02', r'\%sZcb{}' % commandprefix). \
|
| 29 |
+
replace('^', r'\%sZca{}' % commandprefix). \
|
| 30 |
+
replace('_', r'\%sZus{}' % commandprefix). \
|
| 31 |
+
replace('&', r'\%sZam{}' % commandprefix). \
|
| 32 |
+
replace('<', r'\%sZlt{}' % commandprefix). \
|
| 33 |
+
replace('>', r'\%sZgt{}' % commandprefix). \
|
| 34 |
+
replace('#', r'\%sZsh{}' % commandprefix). \
|
| 35 |
+
replace('%', r'\%sZpc{}' % commandprefix). \
|
| 36 |
+
replace('$', r'\%sZdl{}' % commandprefix). \
|
| 37 |
+
replace('-', r'\%sZhy{}' % commandprefix). \
|
| 38 |
+
replace("'", r'\%sZsq{}' % commandprefix). \
|
| 39 |
+
replace('"', r'\%sZdq{}' % commandprefix). \
|
| 40 |
+
replace('~', r'\%sZti{}' % commandprefix)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
DOC_TEMPLATE = r'''
|
| 44 |
+
\documentclass{%(docclass)s}
|
| 45 |
+
\usepackage{fancyvrb}
|
| 46 |
+
\usepackage{color}
|
| 47 |
+
\usepackage[%(encoding)s]{inputenc}
|
| 48 |
+
%(preamble)s
|
| 49 |
+
|
| 50 |
+
%(styledefs)s
|
| 51 |
+
|
| 52 |
+
\begin{document}
|
| 53 |
+
|
| 54 |
+
\section*{%(title)s}
|
| 55 |
+
|
| 56 |
+
%(code)s
|
| 57 |
+
\end{document}
|
| 58 |
+
'''
|
| 59 |
+
|
| 60 |
+
## Small explanation of the mess below :)
|
| 61 |
+
#
|
| 62 |
+
# The previous version of the LaTeX formatter just assigned a command to
|
| 63 |
+
# each token type defined in the current style. That obviously is
|
| 64 |
+
# problematic if the highlighted code is produced for a different style
|
| 65 |
+
# than the style commands themselves.
|
| 66 |
+
#
|
| 67 |
+
# This version works much like the HTML formatter which assigns multiple
|
| 68 |
+
# CSS classes to each <span> tag, from the most specific to the least
|
| 69 |
+
# specific token type, thus falling back to the parent token type if one
|
| 70 |
+
# is not defined. Here, the classes are there too and use the same short
|
| 71 |
+
# forms given in token.STANDARD_TYPES.
|
| 72 |
+
#
|
| 73 |
+
# Highlighted code now only uses one custom command, which by default is
|
| 74 |
+
# \PY and selectable by the commandprefix option (and in addition the
|
| 75 |
+
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
|
| 76 |
+
# backwards compatibility purposes).
|
| 77 |
+
#
|
| 78 |
+
# \PY has two arguments: the classes, separated by +, and the text to
|
| 79 |
+
# render in that style. The classes are resolved into the respective
|
| 80 |
+
# style commands by magic, which serves to ignore unknown classes.
|
| 81 |
+
#
|
| 82 |
+
# The magic macros are:
|
| 83 |
+
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
|
| 84 |
+
# to render in \PY@do. Their definition determines the style.
|
| 85 |
+
# * \PY@reset resets \PY@it etc. to do nothing.
|
| 86 |
+
# * \PY@toks parses the list of classes, using magic inspired by the
|
| 87 |
+
# keyval package (but modified to use plusses instead of commas
|
| 88 |
+
# because fancyvrb redefines commas inside its environments).
|
| 89 |
+
# * \PY@tok processes one class, calling the \PY@tok@classname command
|
| 90 |
+
# if it exists.
|
| 91 |
+
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
|
| 92 |
+
# for its class.
|
| 93 |
+
# * \PY resets the style, parses the classnames and then calls \PY@do.
|
| 94 |
+
#
|
| 95 |
+
# Tip: to read this code, print it out in substituted form using e.g.
|
| 96 |
+
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
|
| 97 |
+
|
| 98 |
+
STYLE_TEMPLATE = r'''
|
| 99 |
+
\makeatletter
|
| 100 |
+
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
|
| 101 |
+
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
|
| 102 |
+
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
|
| 103 |
+
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
|
| 104 |
+
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
|
| 105 |
+
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
|
| 106 |
+
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
|
| 107 |
+
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
|
| 108 |
+
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
|
| 109 |
+
|
| 110 |
+
%(styles)s
|
| 111 |
+
|
| 112 |
+
\def\%(cp)sZbs{\char`\\}
|
| 113 |
+
\def\%(cp)sZus{\char`\_}
|
| 114 |
+
\def\%(cp)sZob{\char`\{}
|
| 115 |
+
\def\%(cp)sZcb{\char`\}}
|
| 116 |
+
\def\%(cp)sZca{\char`\^}
|
| 117 |
+
\def\%(cp)sZam{\char`\&}
|
| 118 |
+
\def\%(cp)sZlt{\char`\<}
|
| 119 |
+
\def\%(cp)sZgt{\char`\>}
|
| 120 |
+
\def\%(cp)sZsh{\char`\#}
|
| 121 |
+
\def\%(cp)sZpc{\char`\%%}
|
| 122 |
+
\def\%(cp)sZdl{\char`\$}
|
| 123 |
+
\def\%(cp)sZhy{\char`\-}
|
| 124 |
+
\def\%(cp)sZsq{\char`\'}
|
| 125 |
+
\def\%(cp)sZdq{\char`\"}
|
| 126 |
+
\def\%(cp)sZti{\char`\~}
|
| 127 |
+
%% for compatibility with earlier versions
|
| 128 |
+
\def\%(cp)sZat{@}
|
| 129 |
+
\def\%(cp)sZlb{[}
|
| 130 |
+
\def\%(cp)sZrb{]}
|
| 131 |
+
\makeatother
|
| 132 |
+
'''
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _get_ttype_name(ttype):
|
| 136 |
+
fname = STANDARD_TYPES.get(ttype)
|
| 137 |
+
if fname:
|
| 138 |
+
return fname
|
| 139 |
+
aname = ''
|
| 140 |
+
while fname is None:
|
| 141 |
+
aname = ttype[-1] + aname
|
| 142 |
+
ttype = ttype.parent
|
| 143 |
+
fname = STANDARD_TYPES.get(ttype)
|
| 144 |
+
return fname + aname
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class LatexFormatter(Formatter):
|
| 148 |
+
r"""
|
| 149 |
+
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
|
| 150 |
+
standard packages.
|
| 151 |
+
|
| 152 |
+
Without the `full` option, code is formatted as one ``Verbatim``
|
| 153 |
+
environment, like this:
|
| 154 |
+
|
| 155 |
+
.. sourcecode:: latex
|
| 156 |
+
|
| 157 |
+
\begin{Verbatim}[commandchars=\\\{\}]
|
| 158 |
+
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
|
| 159 |
+
\PY{k}{pass}
|
| 160 |
+
\end{Verbatim}
|
| 161 |
+
|
| 162 |
+
Wrapping can be disabled using the `nowrap` option.
|
| 163 |
+
|
| 164 |
+
The special command used here (``\PY``) and all the other macros it needs
|
| 165 |
+
are output by the `get_style_defs` method.
|
| 166 |
+
|
| 167 |
+
With the `full` option, a complete LaTeX document is output, including
|
| 168 |
+
the command definitions in the preamble.
|
| 169 |
+
|
| 170 |
+
The `get_style_defs()` method of a `LatexFormatter` returns a string
|
| 171 |
+
containing ``\def`` commands defining the macros needed inside the
|
| 172 |
+
``Verbatim`` environments.
|
| 173 |
+
|
| 174 |
+
Additional options accepted:
|
| 175 |
+
|
| 176 |
+
`nowrap`
|
| 177 |
+
If set to ``True``, don't wrap the tokens at all, not even inside a
|
| 178 |
+
``\begin{Verbatim}`` environment. This disables most other options
|
| 179 |
+
(default: ``False``).
|
| 180 |
+
|
| 181 |
+
`style`
|
| 182 |
+
The style to use, can be a string or a Style subclass (default:
|
| 183 |
+
``'default'``).
|
| 184 |
+
|
| 185 |
+
`full`
|
| 186 |
+
Tells the formatter to output a "full" document, i.e. a complete
|
| 187 |
+
self-contained document (default: ``False``).
|
| 188 |
+
|
| 189 |
+
`title`
|
| 190 |
+
If `full` is true, the title that should be used to caption the
|
| 191 |
+
document (default: ``''``).
|
| 192 |
+
|
| 193 |
+
`docclass`
|
| 194 |
+
If the `full` option is enabled, this is the document class to use
|
| 195 |
+
(default: ``'article'``).
|
| 196 |
+
|
| 197 |
+
`preamble`
|
| 198 |
+
If the `full` option is enabled, this can be further preamble commands,
|
| 199 |
+
e.g. ``\usepackage`` (default: ``''``).
|
| 200 |
+
|
| 201 |
+
`linenos`
|
| 202 |
+
If set to ``True``, output line numbers (default: ``False``).
|
| 203 |
+
|
| 204 |
+
`linenostart`
|
| 205 |
+
The line number for the first line (default: ``1``).
|
| 206 |
+
|
| 207 |
+
`linenostep`
|
| 208 |
+
If set to a number n > 1, only every nth line number is printed.
|
| 209 |
+
|
| 210 |
+
`verboptions`
|
| 211 |
+
Additional options given to the Verbatim environment (see the *fancyvrb*
|
| 212 |
+
docs for possible values) (default: ``''``).
|
| 213 |
+
|
| 214 |
+
`commandprefix`
|
| 215 |
+
The LaTeX commands used to produce colored output are constructed
|
| 216 |
+
using this prefix and some letters (default: ``'PY'``).
|
| 217 |
+
|
| 218 |
+
.. versionadded:: 0.7
|
| 219 |
+
.. versionchanged:: 0.10
|
| 220 |
+
The default is now ``'PY'`` instead of ``'C'``.
|
| 221 |
+
|
| 222 |
+
`texcomments`
|
| 223 |
+
If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
|
| 224 |
+
in comment tokens is not escaped so that LaTeX can render it (default:
|
| 225 |
+
``False``).
|
| 226 |
+
|
| 227 |
+
.. versionadded:: 1.2
|
| 228 |
+
|
| 229 |
+
`mathescape`
|
| 230 |
+
If set to ``True``, enables LaTeX math mode escape in comments. That
|
| 231 |
+
is, ``'$...$'`` inside a comment will trigger math mode (default:
|
| 232 |
+
``False``).
|
| 233 |
+
|
| 234 |
+
.. versionadded:: 1.2
|
| 235 |
+
|
| 236 |
+
`escapeinside`
|
| 237 |
+
If set to a string of length 2, enables escaping to LaTeX. Text
|
| 238 |
+
delimited by these 2 characters is read as LaTeX code and
|
| 239 |
+
typeset accordingly. It has no effect in string literals. It has
|
| 240 |
+
no effect in comments if `texcomments` or `mathescape` is
|
| 241 |
+
set. (default: ``''``).
|
| 242 |
+
|
| 243 |
+
.. versionadded:: 2.0
|
| 244 |
+
|
| 245 |
+
`envname`
|
| 246 |
+
Allows you to pick an alternative environment name replacing Verbatim.
|
| 247 |
+
The alternate environment still has to support Verbatim's option syntax.
|
| 248 |
+
(default: ``'Verbatim'``).
|
| 249 |
+
|
| 250 |
+
.. versionadded:: 2.0
|
| 251 |
+
"""
|
| 252 |
+
name = 'LaTeX'
|
| 253 |
+
aliases = ['latex', 'tex']
|
| 254 |
+
filenames = ['*.tex']
|
| 255 |
+
|
| 256 |
+
def __init__(self, **options):
|
| 257 |
+
Formatter.__init__(self, **options)
|
| 258 |
+
self.nowrap = get_bool_opt(options, 'nowrap', False)
|
| 259 |
+
self.docclass = options.get('docclass', 'article')
|
| 260 |
+
self.preamble = options.get('preamble', '')
|
| 261 |
+
self.linenos = get_bool_opt(options, 'linenos', False)
|
| 262 |
+
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
|
| 263 |
+
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
|
| 264 |
+
self.verboptions = options.get('verboptions', '')
|
| 265 |
+
self.nobackground = get_bool_opt(options, 'nobackground', False)
|
| 266 |
+
self.commandprefix = options.get('commandprefix', 'PY')
|
| 267 |
+
self.texcomments = get_bool_opt(options, 'texcomments', False)
|
| 268 |
+
self.mathescape = get_bool_opt(options, 'mathescape', False)
|
| 269 |
+
self.escapeinside = options.get('escapeinside', '')
|
| 270 |
+
if len(self.escapeinside) == 2:
|
| 271 |
+
self.left = self.escapeinside[0]
|
| 272 |
+
self.right = self.escapeinside[1]
|
| 273 |
+
else:
|
| 274 |
+
self.escapeinside = ''
|
| 275 |
+
self.envname = options.get('envname', 'Verbatim')
|
| 276 |
+
|
| 277 |
+
self._create_stylesheet()
|
| 278 |
+
|
| 279 |
+
def _create_stylesheet(self):
|
| 280 |
+
t2n = self.ttype2name = {Token: ''}
|
| 281 |
+
c2d = self.cmd2def = {}
|
| 282 |
+
cp = self.commandprefix
|
| 283 |
+
|
| 284 |
+
def rgbcolor(col):
|
| 285 |
+
if col:
|
| 286 |
+
return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
|
| 287 |
+
for i in (0, 2, 4)])
|
| 288 |
+
else:
|
| 289 |
+
return '1,1,1'
|
| 290 |
+
|
| 291 |
+
for ttype, ndef in self.style:
|
| 292 |
+
name = _get_ttype_name(ttype)
|
| 293 |
+
cmndef = ''
|
| 294 |
+
if ndef['bold']:
|
| 295 |
+
cmndef += r'\let\$$@bf=\textbf'
|
| 296 |
+
if ndef['italic']:
|
| 297 |
+
cmndef += r'\let\$$@it=\textit'
|
| 298 |
+
if ndef['underline']:
|
| 299 |
+
cmndef += r'\let\$$@ul=\underline'
|
| 300 |
+
if ndef['roman']:
|
| 301 |
+
cmndef += r'\let\$$@ff=\textrm'
|
| 302 |
+
if ndef['sans']:
|
| 303 |
+
cmndef += r'\let\$$@ff=\textsf'
|
| 304 |
+
if ndef['mono']:
|
| 305 |
+
cmndef += r'\let\$$@ff=\textsf'
|
| 306 |
+
if ndef['color']:
|
| 307 |
+
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
|
| 308 |
+
rgbcolor(ndef['color']))
|
| 309 |
+
if ndef['border']:
|
| 310 |
+
cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}'
|
| 311 |
+
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' %
|
| 312 |
+
(rgbcolor(ndef['border']),
|
| 313 |
+
rgbcolor(ndef['bgcolor'])))
|
| 314 |
+
elif ndef['bgcolor']:
|
| 315 |
+
cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}'
|
| 316 |
+
r'\colorbox[rgb]{%s}{\strut ##1}}}' %
|
| 317 |
+
rgbcolor(ndef['bgcolor']))
|
| 318 |
+
if cmndef == '':
|
| 319 |
+
continue
|
| 320 |
+
cmndef = cmndef.replace('$$', cp)
|
| 321 |
+
t2n[ttype] = name
|
| 322 |
+
c2d[name] = cmndef
|
| 323 |
+
|
| 324 |
+
def get_style_defs(self, arg=''):
|
| 325 |
+
"""
|
| 326 |
+
Return the command sequences needed to define the commands
|
| 327 |
+
used to format text in the verbatim environment. ``arg`` is ignored.
|
| 328 |
+
"""
|
| 329 |
+
cp = self.commandprefix
|
| 330 |
+
styles = []
|
| 331 |
+
for name, definition in self.cmd2def.items():
|
| 332 |
+
styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition))
|
| 333 |
+
return STYLE_TEMPLATE % {'cp': self.commandprefix,
|
| 334 |
+
'styles': '\n'.join(styles)}
|
| 335 |
+
|
| 336 |
+
def format_unencoded(self, tokensource, outfile):
|
| 337 |
+
# TODO: add support for background colors
|
| 338 |
+
t2n = self.ttype2name
|
| 339 |
+
cp = self.commandprefix
|
| 340 |
+
|
| 341 |
+
if self.full:
|
| 342 |
+
realoutfile = outfile
|
| 343 |
+
outfile = StringIO()
|
| 344 |
+
|
| 345 |
+
if not self.nowrap:
|
| 346 |
+
outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
|
| 347 |
+
if self.linenos:
|
| 348 |
+
start, step = self.linenostart, self.linenostep
|
| 349 |
+
outfile.write(',numbers=left' +
|
| 350 |
+
(start and ',firstnumber=%d' % start or '') +
|
| 351 |
+
(step and ',stepnumber=%d' % step or ''))
|
| 352 |
+
if self.mathescape or self.texcomments or self.escapeinside:
|
| 353 |
+
outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
|
| 354 |
+
'\\catcode`\\_=8\\relax}')
|
| 355 |
+
if self.verboptions:
|
| 356 |
+
outfile.write(',' + self.verboptions)
|
| 357 |
+
outfile.write(']\n')
|
| 358 |
+
|
| 359 |
+
for ttype, value in tokensource:
|
| 360 |
+
if ttype in Token.Comment:
|
| 361 |
+
if self.texcomments:
|
| 362 |
+
# Try to guess comment starting lexeme and escape it ...
|
| 363 |
+
start = value[0:1]
|
| 364 |
+
for i in range(1, len(value)):
|
| 365 |
+
if start[0] != value[i]:
|
| 366 |
+
break
|
| 367 |
+
start += value[i]
|
| 368 |
+
|
| 369 |
+
value = value[len(start):]
|
| 370 |
+
start = escape_tex(start, cp)
|
| 371 |
+
|
| 372 |
+
# ... but do not escape inside comment.
|
| 373 |
+
value = start + value
|
| 374 |
+
elif self.mathescape:
|
| 375 |
+
# Only escape parts not inside a math environment.
|
| 376 |
+
parts = value.split('$')
|
| 377 |
+
in_math = False
|
| 378 |
+
for i, part in enumerate(parts):
|
| 379 |
+
if not in_math:
|
| 380 |
+
parts[i] = escape_tex(part, cp)
|
| 381 |
+
in_math = not in_math
|
| 382 |
+
value = '$'.join(parts)
|
| 383 |
+
elif self.escapeinside:
|
| 384 |
+
text = value
|
| 385 |
+
value = ''
|
| 386 |
+
while text:
|
| 387 |
+
a, sep1, text = text.partition(self.left)
|
| 388 |
+
if sep1:
|
| 389 |
+
b, sep2, text = text.partition(self.right)
|
| 390 |
+
if sep2:
|
| 391 |
+
value += escape_tex(a, cp) + b
|
| 392 |
+
else:
|
| 393 |
+
value += escape_tex(a + sep1 + b, cp)
|
| 394 |
+
else:
|
| 395 |
+
value += escape_tex(a, cp)
|
| 396 |
+
else:
|
| 397 |
+
value = escape_tex(value, cp)
|
| 398 |
+
elif ttype not in Token.Escape:
|
| 399 |
+
value = escape_tex(value, cp)
|
| 400 |
+
styles = []
|
| 401 |
+
while ttype is not Token:
|
| 402 |
+
try:
|
| 403 |
+
styles.append(t2n[ttype])
|
| 404 |
+
except KeyError:
|
| 405 |
+
# not in current style
|
| 406 |
+
styles.append(_get_ttype_name(ttype))
|
| 407 |
+
ttype = ttype.parent
|
| 408 |
+
styleval = '+'.join(reversed(styles))
|
| 409 |
+
if styleval:
|
| 410 |
+
spl = value.split('\n')
|
| 411 |
+
for line in spl[:-1]:
|
| 412 |
+
if line:
|
| 413 |
+
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
|
| 414 |
+
outfile.write('\n')
|
| 415 |
+
if spl[-1]:
|
| 416 |
+
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
|
| 417 |
+
else:
|
| 418 |
+
outfile.write(value)
|
| 419 |
+
|
| 420 |
+
if not self.nowrap:
|
| 421 |
+
outfile.write('\\end{' + self.envname + '}\n')
|
| 422 |
+
|
| 423 |
+
if self.full:
|
| 424 |
+
encoding = self.encoding or 'utf8'
|
| 425 |
+
# map known existings encodings from LaTeX distribution
|
| 426 |
+
encoding = {
|
| 427 |
+
'utf_8': 'utf8',
|
| 428 |
+
'latin_1': 'latin1',
|
| 429 |
+
'iso_8859_1': 'latin1',
|
| 430 |
+
}.get(encoding.replace('-', '_'), encoding)
|
| 431 |
+
realoutfile.write(DOC_TEMPLATE %
|
| 432 |
+
dict(docclass = self.docclass,
|
| 433 |
+
preamble = self.preamble,
|
| 434 |
+
title = self.title,
|
| 435 |
+
encoding = encoding,
|
| 436 |
+
styledefs = self.get_style_defs(),
|
| 437 |
+
code = outfile.getvalue()))
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
class LatexEmbeddedLexer(Lexer):
|
| 441 |
+
"""
|
| 442 |
+
This lexer takes one lexer as argument, the lexer for the language
|
| 443 |
+
being formatted, and the left and right delimiters for escaped text.
|
| 444 |
+
|
| 445 |
+
First everything is scanned using the language lexer to obtain
|
| 446 |
+
strings and comments. All other consecutive tokens are merged and
|
| 447 |
+
the resulting text is scanned for escaped segments, which are given
|
| 448 |
+
the Token.Escape type. Finally text that is not escaped is scanned
|
| 449 |
+
again with the language lexer.
|
| 450 |
+
"""
|
| 451 |
+
def __init__(self, left, right, lang, **options):
|
| 452 |
+
self.left = left
|
| 453 |
+
self.right = right
|
| 454 |
+
self.lang = lang
|
| 455 |
+
Lexer.__init__(self, **options)
|
| 456 |
+
|
| 457 |
+
def get_tokens_unprocessed(self, text):
|
| 458 |
+
# find and remove all the escape tokens (replace with an empty string)
|
| 459 |
+
# this is very similar to DelegatingLexer.get_tokens_unprocessed.
|
| 460 |
+
buffered = ''
|
| 461 |
+
insertions = []
|
| 462 |
+
insertion_buf = []
|
| 463 |
+
for i, t, v in self._find_safe_escape_tokens(text):
|
| 464 |
+
if t is None:
|
| 465 |
+
if insertion_buf:
|
| 466 |
+
insertions.append((len(buffered), insertion_buf))
|
| 467 |
+
insertion_buf = []
|
| 468 |
+
buffered += v
|
| 469 |
+
else:
|
| 470 |
+
insertion_buf.append((i, t, v))
|
| 471 |
+
if insertion_buf:
|
| 472 |
+
insertions.append((len(buffered), insertion_buf))
|
| 473 |
+
return do_insertions(insertions,
|
| 474 |
+
self.lang.get_tokens_unprocessed(buffered))
|
| 475 |
+
|
| 476 |
+
def _find_safe_escape_tokens(self, text):
|
| 477 |
+
""" find escape tokens that are not in strings or comments """
|
| 478 |
+
for i, t, v in self._filter_to(
|
| 479 |
+
self.lang.get_tokens_unprocessed(text),
|
| 480 |
+
lambda t: t in Token.Comment or t in Token.String
|
| 481 |
+
):
|
| 482 |
+
if t is None:
|
| 483 |
+
for i2, t2, v2 in self._find_escape_tokens(v):
|
| 484 |
+
yield i + i2, t2, v2
|
| 485 |
+
else:
|
| 486 |
+
yield i, None, v
|
| 487 |
+
|
| 488 |
+
def _filter_to(self, it, pred):
|
| 489 |
+
""" Keep only the tokens that match `pred`, merge the others together """
|
| 490 |
+
buf = ''
|
| 491 |
+
idx = 0
|
| 492 |
+
for i, t, v in it:
|
| 493 |
+
if pred(t):
|
| 494 |
+
if buf:
|
| 495 |
+
yield idx, None, buf
|
| 496 |
+
buf = ''
|
| 497 |
+
yield i, t, v
|
| 498 |
+
else:
|
| 499 |
+
if not buf:
|
| 500 |
+
idx = i
|
| 501 |
+
buf += v
|
| 502 |
+
if buf:
|
| 503 |
+
yield idx, None, buf
|
| 504 |
+
|
| 505 |
+
def _find_escape_tokens(self, text):
|
| 506 |
+
""" Find escape tokens within text, give token=None otherwise """
|
| 507 |
+
index = 0
|
| 508 |
+
while text:
|
| 509 |
+
a, sep1, text = text.partition(self.left)
|
| 510 |
+
if a:
|
| 511 |
+
yield index, None, a
|
| 512 |
+
index += len(a)
|
| 513 |
+
if sep1:
|
| 514 |
+
b, sep2, text = text.partition(self.right)
|
| 515 |
+
if sep2:
|
| 516 |
+
yield index + len(sep1), Token.Escape, b
|
| 517 |
+
index += len(sep1) + len(b) + len(sep2)
|
| 518 |
+
else:
|
| 519 |
+
yield index, Token.Error, sep1
|
| 520 |
+
index += len(sep1)
|
| 521 |
+
text = b
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/other.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.other
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Other formatters: NullFormatter, RawTokenFormatter.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.util import get_choice_opt
|
| 13 |
+
from pip._vendor.pygments.token import Token
|
| 14 |
+
from pip._vendor.pygments.console import colorize
|
| 15 |
+
|
| 16 |
+
__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class NullFormatter(Formatter):
    """Pass token text through verbatim, applying no markup at all."""

    name = 'Text only'
    aliases = ['text', 'null']
    filenames = ['*.txt']

    def format(self, tokensource, outfile):
        """Write every token's text to *outfile*, encoding if requested."""
        encoding = self.encoding
        if encoding:
            for _ttype, value in tokensource:
                outfile.write(value.encode(encoding))
        else:
            for _ttype, value in tokensource:
                outfile.write(value)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class RawTokenFormatter(Formatter):
    r"""
    Format tokens as a raw representation for storing token streams.

    The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
    be converted to a token stream with the `RawTokenLexer`, described in the
    :doc:`lexer list <lexers>`.

    Only two options are accepted:

    `compress`
        If set to ``'gz'`` or ``'bz2'``, compress the output with the given
        compression algorithm after encoding (default: ``''``).
    `error_color`
        If set to a color name, highlight error tokens using that color.  If
        set but with no value, defaults to ``'red'``.

    .. versionadded:: 0.11

    """
    name = 'Raw tokens'
    aliases = ['raw', 'tokens']
    filenames = ['*.raw']

    unicodeoutput = False

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # We ignore self.encoding if it is set, since it gets set for lexer
        # and formatter if given with -Oencoding on the command line.
        # The RawTokenFormatter outputs only ASCII. Override here.
        self.encoding = 'ascii'  # let pygments.format() do the right thing
        self.compress = get_choice_opt(options, 'compress',
                                       ['', 'none', 'gz', 'bz2'], '')
        self.error_color = options.get('error_color', None)
        if self.error_color is True:
            self.error_color = 'red'
        if self.error_color is not None:
            try:
                # probe the color name now so a bad value fails fast
                colorize(self.error_color, '')
            except KeyError:
                raise ValueError("Invalid color %r specified" %
                                 self.error_color)

    def format(self, tokensource, outfile):
        """Write the raw token stream to *outfile*.

        *outfile* must be opened in binary mode; a TypeError is raised
        otherwise.  Output is optionally gzip/bz2-compressed according to
        the ``compress`` option.
        """
        try:
            outfile.write(b'')
        except TypeError:
            raise TypeError('The raw tokens formatter needs a binary '
                            'output file')
        if self.compress == 'gz':
            import gzip
            outfile = gzip.GzipFile('', 'wb', 9, outfile)

            write = outfile.write
            flush = outfile.close
        elif self.compress == 'bz2':
            import bz2
            compressor = bz2.BZ2Compressor(9)

            def write(text):
                outfile.write(compressor.compress(text))

            def flush():
                outfile.write(compressor.flush())
                outfile.flush()
        else:
            write = outfile.write
            flush = outfile.flush

        if self.error_color:
            for ttype, value in tokensource:
                # FIX: build the line as text first -- '%r' is not a valid
                # conversion for bytes %-formatting (PEP 461), and colorize()
                # operates on str.  Encode once, at the write.
                line = "%r\t%r\n" % (ttype, value)
                if ttype is Token.Error:
                    write(colorize(self.error_color, line).encode())
                else:
                    write(line.encode())
        else:
            for ttype, value in tokensource:
                write(("%r\t%r\n" % (ttype, value)).encode())
        flush()
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
# Source templates wrapped around the per-token lines emitted by
# TestcaseFormatter below.  TESTCASE_BEFORE receives the highlighted
# fragment via %r; TESTCASE_AFTER closes the token list and asserts that
# re-lexing the fragment reproduces it.
TESTCASE_BEFORE = '''\
    def testNeedsName(lexer):
        fragment = %r
        tokens = [
'''
TESTCASE_AFTER = '''\
        ]
        assert list(lexer.get_tokens(fragment)) == tokens
'''
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class TestcaseFormatter(Formatter):
    """
    Format tokens as appropriate for a new testcase.

    .. versionadded:: 2.0
    """

    name = 'Testcase'
    aliases = ['testcase']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # The generated test is Python source; only utf-8 (or no
        # encoding at all) yields a usable file.
        if self.encoding not in (None, 'utf-8'):
            raise ValueError("Only None and utf-8 are allowed encodings.")

    def format(self, tokensource, outfile):
        """Emit a complete pytest-style test function for the stream."""
        indentation = ' ' * 12
        fragment_parts = []   # raw text; becomes the test's fixture string
        token_lines = []      # one "(ttype, value)," line per token
        for ttype, value in tokensource:
            fragment_parts.append(value)
            token_lines.append('%s(%s, %r),\n' % (indentation, ttype, value))

        pieces = (TESTCASE_BEFORE % (''.join(fragment_parts),),
                  ''.join(token_lines),
                  TESTCASE_AFTER)
        if self.encoding is None:
            outfile.write(''.join(pieces))
        else:
            for piece in pieces:
                outfile.write(piece.encode('utf-8'))
        outfile.flush()
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.pangomarkup
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for Pango markup output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
__all__ = ['PangoMarkupFormatter']
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Translation table for str.translate(): Pango Markup only requires
# '&' and '<' to be escaped.
_escape_table = str.maketrans({
    '&': '&amp;',
    '<': '&lt;',
})


def escape_special_chars(text, table=_escape_table):
    """Replace the characters Pango Markup treats specially (& and <)."""
    return text.translate(table)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class PangoMarkupFormatter(Formatter):
    """
    Format tokens as Pango Markup code. It can then be rendered to an SVG.

    .. versionadded:: 2.9
    """

    name = 'Pango Markup'
    aliases = ['pango', 'pangomarkup']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        # Pre-compute an (opening-tags, closing-tags) pair per token type;
        # closing tags are kept in reverse order of the opening ones.
        self.styles = {}

        for token, style in self.style:
            open_tags = []
            close_tags = []
            if style['color']:
                open_tags.append('<span fgcolor="#%s">' % style['color'])
                close_tags.insert(0, '</span>')
            if style['bold']:
                open_tags.append('<b>')
                close_tags.insert(0, '</b>')
            if style['italic']:
                open_tags.append('<i>')
                close_tags.insert(0, '</i>')
            if style['underline']:
                open_tags.append('<u>')
                close_tags.insert(0, '</u>')
            self.styles[token] = (''.join(open_tags), ''.join(close_tags))

    def format_unencoded(self, tokensource, outfile):
        """Write the stream wrapped in <tt>, merging same-style runs."""
        pending_text = ''
        pending_type = None

        outfile.write('<tt>')

        for ttype, value in tokensource:
            # climb to the nearest ancestor type that has a style entry
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == pending_type:
                # same style as the run in progress: extend it
                pending_text += escape_special_chars(value)
                continue
            if pending_text:
                begin, end = self.styles[pending_type]
                outfile.write(begin + pending_text + end)
            pending_text = escape_special_chars(value)
            pending_type = ttype

        if pending_text:
            begin, end = self.styles[pending_type]
            outfile.write(begin + pending_text + end)

        outfile.write('</tt>')
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/rtf.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.rtf
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
A formatter that generates RTF files.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.util import get_int_opt, surrogatepair
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
__all__ = ['RtfFormatter']
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class RtfFormatter(Formatter):
    """
    Format tokens as RTF markup. This formatter automatically outputs full RTF
    documents with color information and other useful stuff. Perfect for Copy and
    Paste into Microsoft(R) Word(R) documents.

    Please note that ``encoding`` and ``outencoding`` options are ignored.
    The RTF format is ASCII natively, but handles unicode characters correctly
    thanks to escape sequences.

    .. versionadded:: 0.6

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `fontface`
        The used font family, for example ``Bitstream Vera Sans``. Defaults to
        some generic font which is supposed to have fixed width.

    `fontsize`
        Size of the font used. Size is specified in half points. The
        default is 24 half-points, giving a size 12 font.

        .. versionadded:: 2.0
    """
    name = 'RTF'
    aliases = ['rtf']
    filenames = ['*.rtf']

    def __init__(self, **options):
        r"""
        Additional options accepted:

        ``fontface``
            Name of the font used. Could for example be ``'Courier New'``
            to further specify the default which is ``'\fmodern'``. The RTF
            specification claims that ``\fmodern`` are "Fixed-pitch serif
            and sans serif fonts". Hope every RTF implementation thinks
            the same about modern...

        """
        Formatter.__init__(self, **options)
        self.fontface = options.get('fontface') or ''
        # 0 means "emit no \fsN control word" (use the viewer's default size)
        self.fontsize = get_int_opt(options, 'fontsize', 0)

    def _escape(self, text):
        # Backslash and braces are the only characters with syntactic
        # meaning in RTF; escape each with a leading backslash.
        return text.replace('\\', '\\\\') \
                   .replace('{', '\\{') \
                   .replace('}', '\\}')

    def _escape_text(self, text):
        # empty strings, should give a small performance improvement
        if not text:
            return ''

        # escape text
        text = self._escape(text)

        buf = []
        for c in text:
            cn = ord(c)
            if cn < (2**7):
                # ASCII character
                buf.append(str(c))
            elif (2**7) <= cn < (2**16):
                # single unicode escape sequence
                buf.append('{\\u%d}' % cn)
            elif (2**16) <= cn:
                # RTF limits unicode to 16 bits.
                # Force surrogate pairs
                buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))

        # RTF paragraphs are delimited by \par, not raw newlines
        return ''.join(buf).replace('\n', '\\par\n')

    def format_unencoded(self, tokensource, outfile):
        # rtf 1.8 header: font table with a single \fmodern entry, followed
        # by the colour table built from the style below.
        outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
                      '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
                      '{\\colortbl;' % (self.fontface and
                                        ' ' + self._escape(self.fontface) or
                                        ''))

        # convert colors and save them in a mapping to access them later.
        # RTF colour-table indices are 1-based (index 0 = "auto").
        color_mapping = {}
        offset = 1
        for _, style in self.style:
            for color in style['color'], style['bgcolor'], style['border']:
                if color and color not in color_mapping:
                    color_mapping[color] = offset
                    # colours are 'rrggbb' hex strings; split into components
                    outfile.write('\\red%d\\green%d\\blue%d;' % (
                        int(color[0:2], 16),
                        int(color[2:4], 16),
                        int(color[4:6], 16)
                    ))
                    offset += 1
        outfile.write('}\\f0 ')
        if self.fontsize:
            outfile.write('\\fs%d' % self.fontsize)

        # highlight stream
        for ttype, value in tokensource:
            # climb the token hierarchy until a styled ancestor is found
            while not self.style.styles_token(ttype) and ttype.parent:
                ttype = ttype.parent
            style = self.style.style_for_token(ttype)
            buf = []
            if style['bgcolor']:
                buf.append('\\cb%d' % color_mapping[style['bgcolor']])
            if style['color']:
                buf.append('\\cf%d' % color_mapping[style['color']])
            if style['bold']:
                buf.append('\\b')
            if style['italic']:
                buf.append('\\i')
            if style['underline']:
                buf.append('\\ul')
            if style['border']:
                buf.append('\\chbrdr\\chcfpat%d' %
                           color_mapping[style['border']])
            start = ''.join(buf)
            if start:
                # wrap the styled run in a group so formatting is scoped
                outfile.write('{%s ' % start)
            outfile.write(self._escape_text(value))
            if start:
                outfile.write('}')

        outfile.write('}')
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/svg.py
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.svg
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for SVG output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.token import Comment
|
| 13 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt
|
| 14 |
+
|
| 15 |
+
__all__ = ['SvgFormatter']
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def escape_html(text):
    """Escape the characters special in HTML: &, <, >, double and single quote."""
    # '&' must be handled first so later replacements aren't re-escaped.
    for char, entity in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&#39;')):
        text = text.replace(char, entity)
    return text
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# NOTE(review): this module-level dict is never referenced within this
# module -- presumably a leftover; verify no external code imports it
# before removing.
class2style = {}
|
| 28 |
+
|
| 29 |
+
class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file.  This formatter is still experimental.
    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
    coordinates containing ``<tspan>`` elements with the individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    .. versionadded:: 0.9

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype.  If true, the `fontfamily`
        and `fontsize` options are ignored.  Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `linenos`
        If ``True``, add line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `linenowidth`
        Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
        for up to 4-digit line numbers.  Increase width for longer code blocks).

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` else.  (This is necessary since text coordinates
        refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line.  This should
        roughly be the text size plus 5.  It defaults to that value if the text
        size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces.  SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is.  However, many current SVG
        viewers don't obey that rule, so this option is provided as a workaround
        and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        # Derive a numeric font size (pixels) to base yoffset/ystep on.
        fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except ValueError:
            # FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; int() only raises ValueError here.
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = get_int_opt(options, 'linenostart', 1)
        self.linenostep = get_int_opt(options, 'linenostep', 1)
        self.linenowidth = get_int_opt(options, 'linenowidth', 3 * self.ystep)
        # cache of tokentype -> rendered tspan attribute string
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            if self.encoding:
                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
                              self.encoding)
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write('<g font-family="%s" font-size="%s">\n' %
                          (self.fontfamily, self.fontsize))

        counter = self.linenostart
        counter_step = self.linenostep
        # line numbers are rendered in the style of comments
        counter_style = self._get_style(Comment)
        line_x = x

        if self.linenos:
            if counter % counter_step == 0:
                outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' %
                              (x + self.linenowidth, y, counter_style, counter))
            line_x += self.linenowidth + self.ystep
            counter += 1

        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = '<tspan' + style + '>' if style else ''
            tspanend = '</tspan>' if tspan else ''
            value = escape_html(value)
            if self.spacehack:
                # non-breaking spaces survive viewers that collapse whitespace
                value = value.expandtabs().replace(' ', '&#160;')
            parts = value.split('\n')
            # every part except the last ends a line: close the current
            # <text>, optionally emit a line number, open the next <text>
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n')
                if self.linenos and counter % counter_step == 0:
                    outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' %
                                  (x + self.linenowidth, y, counter_style, counter))

                counter += 1
                outfile.write('<text x="%s" y="%s" ' 'xml:space="preserve">' % (line_x, y))
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')

        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        # Render (and cache) the tspan attribute string for *tokentype*,
        # walking up the token hierarchy to the nearest styled ancestor.
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/terminal.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.terminal
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for terminal output with ANSI sequences.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
|
| 13 |
+
Number, Operator, Generic, Token, Whitespace
|
| 14 |
+
from pip._vendor.pygments.console import ansiformat
|
| 15 |
+
from pip._vendor.pygments.util import get_choice_opt
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
__all__ = ['TerminalFormatter']
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
#: Map token types to a tuple of color values for light and dark
#: backgrounds.  Lookups walk up the token hierarchy (see
#: TerminalFormatter._get_color), so the root ``Token`` entry acts as the
#: uncolored fallback.  Names wrapped in ``*``/``_`` request bold/underline.
TERMINAL_COLORS = {
    Token:              ('',            ''),

    Whitespace:         ('gray',   'brightblack'),
    Comment:            ('gray',   'brightblack'),
    Comment.Preproc:    ('cyan',        'brightcyan'),
    Keyword:            ('blue',    'brightblue'),
    Keyword.Type:       ('cyan',        'brightcyan'),
    Operator.Word:      ('magenta',      'brightmagenta'),
    Name.Builtin:       ('cyan',        'brightcyan'),
    Name.Function:      ('green',   'brightgreen'),
    Name.Namespace:     ('_cyan_',      '_brightcyan_'),
    Name.Class:         ('_green_', '_brightgreen_'),
    Name.Exception:     ('cyan',        'brightcyan'),
    Name.Decorator:     ('brightblack',    'gray'),
    Name.Variable:      ('red',     'brightred'),
    Name.Constant:      ('red',     'brightred'),
    Name.Attribute:     ('cyan',        'brightcyan'),
    Name.Tag:           ('brightblue',        'brightblue'),
    String:             ('yellow',       'yellow'),
    Number:             ('blue',    'brightblue'),

    Generic.Deleted:    ('brightred',        'brightred'),
    Generic.Inserted:   ('green',  'brightgreen'),
    Generic.Heading:    ('**',         '**'),
    Generic.Subheading: ('*magenta*',   '*brightmagenta*'),
    Generic.Prompt:     ('**',         '**'),
    Generic.Error:      ('brightred',        'brightred'),

    Error:              ('_brightred_',      '_brightred_'),
}
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # darkbg is used as an index (False/True -> 0/1) into the
        # (light, dark) tuples of the colorscheme.
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def format(self, tokensource, outfile):
        # Delegate to the base class, which handles output encoding and
        # dispatches to format_unencoded() below.
        return Formatter.format(self, tokensource, outfile)

    def _write_lineno(self, outfile):
        # Emit "NNNN: " for the next line; every line but the first is
        # preceded by the newline that terminates the previous line.
        self._lineno += 1
        outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))

    def _get_color(self, ttype):
        # self.colorscheme is a dict containing usually generic types, so we
        # have to walk the tree of dots. The base Token type must be a key,
        # even if it's empty string, as in the default above.
        colors = self.colorscheme.get(ttype)
        while colors is None:
            ttype = ttype.parent
            colors = self.colorscheme.get(ttype)
        return colors[self.darkbg]

    def format_unencoded(self, tokensource, outfile):
        # Write the (optional) number for the first line before any tokens.
        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            color = self._get_color(ttype)

            # splitlines(True) keeps the trailing '\n' so we can detect
            # line ends; the color sequence is closed before each newline
            # so paging the output works.
            for line in value.splitlines(True):
                if color:
                    outfile.write(ansiformat(color, line.rstrip('\n')))
                else:
                    outfile.write(line.rstrip('\n'))
                if line.endswith('\n'):
                    if self.linenos:
                        self._write_lineno(outfile)
                    else:
                        outfile.write('\n')

        if self.linenos:
            outfile.write("\n")
|
.venv/Lib/site-packages/pip/_vendor/pygments/formatters/terminal256.py
ADDED
|
@@ -0,0 +1,338 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.terminal256
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Formatter for 256-color terminal output with ANSI sequences.
|
| 6 |
+
|
| 7 |
+
RGB-to-XTERM color conversion routines adapted from xterm256-conv
|
| 8 |
+
tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
|
| 9 |
+
by Wolfgang Frisch.
|
| 10 |
+
|
| 11 |
+
Formatter version 1.
|
| 12 |
+
|
| 13 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 14 |
+
:license: BSD, see LICENSE for details.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
# TODO:
|
| 18 |
+
# - Options to map style's bold/underline/italic/border attributes
|
| 19 |
+
# to some ANSI attrbutes (something like 'italic=underline')
|
| 20 |
+
# - An option to output "style RGB to xterm RGB/index" conversion table
|
| 21 |
+
# - An option to indicate that we are running in "reverse background"
|
| 22 |
+
# xterm. This means that default colors are white-on-black, not
|
| 23 |
+
# black-on-while, so colors like "white background" need to be converted
|
| 24 |
+
# to "white background, black foreground", etc...
|
| 25 |
+
|
| 26 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 27 |
+
from pip._vendor.pygments.console import codes
|
| 28 |
+
from pip._vendor.pygments.style import ansicolors
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class EscapeSequence:
|
| 35 |
+
def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
|
| 36 |
+
self.fg = fg
|
| 37 |
+
self.bg = bg
|
| 38 |
+
self.bold = bold
|
| 39 |
+
self.underline = underline
|
| 40 |
+
self.italic = italic
|
| 41 |
+
|
| 42 |
+
def escape(self, attrs):
|
| 43 |
+
if len(attrs):
|
| 44 |
+
return "\x1b[" + ";".join(attrs) + "m"
|
| 45 |
+
return ""
|
| 46 |
+
|
| 47 |
+
def color_string(self):
|
| 48 |
+
attrs = []
|
| 49 |
+
if self.fg is not None:
|
| 50 |
+
if self.fg in ansicolors:
|
| 51 |
+
esc = codes[self.fg.replace('ansi','')]
|
| 52 |
+
if ';01m' in esc:
|
| 53 |
+
self.bold = True
|
| 54 |
+
# extract fg color code.
|
| 55 |
+
attrs.append(esc[2:4])
|
| 56 |
+
else:
|
| 57 |
+
attrs.extend(("38", "5", "%i" % self.fg))
|
| 58 |
+
if self.bg is not None:
|
| 59 |
+
if self.bg in ansicolors:
|
| 60 |
+
esc = codes[self.bg.replace('ansi','')]
|
| 61 |
+
# extract fg color code, add 10 for bg.
|
| 62 |
+
attrs.append(str(int(esc[2:4])+10))
|
| 63 |
+
else:
|
| 64 |
+
attrs.extend(("48", "5", "%i" % self.bg))
|
| 65 |
+
if self.bold:
|
| 66 |
+
attrs.append("01")
|
| 67 |
+
if self.underline:
|
| 68 |
+
attrs.append("04")
|
| 69 |
+
if self.italic:
|
| 70 |
+
attrs.append("03")
|
| 71 |
+
return self.escape(attrs)
|
| 72 |
+
|
| 73 |
+
def true_color_string(self):
|
| 74 |
+
attrs = []
|
| 75 |
+
if self.fg:
|
| 76 |
+
attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
|
| 77 |
+
if self.bg:
|
| 78 |
+
attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
|
| 79 |
+
if self.bold:
|
| 80 |
+
attrs.append("01")
|
| 81 |
+
if self.underline:
|
| 82 |
+
attrs.append("04")
|
| 83 |
+
if self.italic:
|
| 84 |
+
attrs.append("03")
|
| 85 |
+
return self.escape(attrs)
|
| 86 |
+
|
| 87 |
+
def reset_string(self):
|
| 88 |
+
attrs = []
|
| 89 |
+
if self.fg is not None:
|
| 90 |
+
attrs.append("39")
|
| 91 |
+
if self.bg is not None:
|
| 92 |
+
attrs.append("49")
|
| 93 |
+
if self.bold or self.underline or self.italic:
|
| 94 |
+
attrs.append("00")
|
| 95 |
+
return self.escape(attrs)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class Terminal256Formatter(Formatter):
|
| 99 |
+
"""
|
| 100 |
+
Format tokens with ANSI color sequences, for output in a 256-color
|
| 101 |
+
terminal or console. Like in `TerminalFormatter` color sequences
|
| 102 |
+
are terminated at newlines, so that paging the output works correctly.
|
| 103 |
+
|
| 104 |
+
The formatter takes colors from a style defined by the `style` option
|
| 105 |
+
and converts them to nearest ANSI 256-color escape sequences. Bold and
|
| 106 |
+
underline attributes from the style are preserved (and displayed).
|
| 107 |
+
|
| 108 |
+
.. versionadded:: 0.9
|
| 109 |
+
|
| 110 |
+
.. versionchanged:: 2.2
|
| 111 |
+
If the used style defines foreground colors in the form ``#ansi*``, then
|
| 112 |
+
`Terminal256Formatter` will map these to non extended foreground color.
|
| 113 |
+
See :ref:`AnsiTerminalStyle` for more information.
|
| 114 |
+
|
| 115 |
+
.. versionchanged:: 2.4
|
| 116 |
+
The ANSI color names have been updated with names that are easier to
|
| 117 |
+
understand and align with colornames of other projects and terminals.
|
| 118 |
+
See :ref:`this table <new-ansi-color-names>` for more information.
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
Options accepted:
|
| 122 |
+
|
| 123 |
+
`style`
|
| 124 |
+
The style to use, can be a string or a Style subclass (default:
|
| 125 |
+
``'default'``).
|
| 126 |
+
|
| 127 |
+
`linenos`
|
| 128 |
+
Set to ``True`` to have line numbers on the terminal output as well
|
| 129 |
+
(default: ``False`` = no line numbers).
|
| 130 |
+
"""
|
| 131 |
+
name = 'Terminal256'
|
| 132 |
+
aliases = ['terminal256', 'console256', '256']
|
| 133 |
+
filenames = []
|
| 134 |
+
|
| 135 |
+
def __init__(self, **options):
|
| 136 |
+
Formatter.__init__(self, **options)
|
| 137 |
+
|
| 138 |
+
self.xterm_colors = []
|
| 139 |
+
self.best_match = {}
|
| 140 |
+
self.style_string = {}
|
| 141 |
+
|
| 142 |
+
self.usebold = 'nobold' not in options
|
| 143 |
+
self.useunderline = 'nounderline' not in options
|
| 144 |
+
self.useitalic = 'noitalic' not in options
|
| 145 |
+
|
| 146 |
+
self._build_color_table() # build an RGB-to-256 color conversion table
|
| 147 |
+
self._setup_styles() # convert selected style's colors to term. colors
|
| 148 |
+
|
| 149 |
+
self.linenos = options.get('linenos', False)
|
| 150 |
+
self._lineno = 0
|
| 151 |
+
|
| 152 |
+
def _build_color_table(self):
|
| 153 |
+
# colors 0..15: 16 basic colors
|
| 154 |
+
|
| 155 |
+
self.xterm_colors.append((0x00, 0x00, 0x00)) # 0
|
| 156 |
+
self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1
|
| 157 |
+
self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2
|
| 158 |
+
self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3
|
| 159 |
+
self.xterm_colors.append((0x00, 0x00, 0xee)) # 4
|
| 160 |
+
self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5
|
| 161 |
+
self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6
|
| 162 |
+
self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7
|
| 163 |
+
self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8
|
| 164 |
+
self.xterm_colors.append((0xff, 0x00, 0x00)) # 9
|
| 165 |
+
self.xterm_colors.append((0x00, 0xff, 0x00)) # 10
|
| 166 |
+
self.xterm_colors.append((0xff, 0xff, 0x00)) # 11
|
| 167 |
+
self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12
|
| 168 |
+
self.xterm_colors.append((0xff, 0x00, 0xff)) # 13
|
| 169 |
+
self.xterm_colors.append((0x00, 0xff, 0xff)) # 14
|
| 170 |
+
self.xterm_colors.append((0xff, 0xff, 0xff)) # 15
|
| 171 |
+
|
| 172 |
+
# colors 16..232: the 6x6x6 color cube
|
| 173 |
+
|
| 174 |
+
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
|
| 175 |
+
|
| 176 |
+
for i in range(217):
|
| 177 |
+
r = valuerange[(i // 36) % 6]
|
| 178 |
+
g = valuerange[(i // 6) % 6]
|
| 179 |
+
b = valuerange[i % 6]
|
| 180 |
+
self.xterm_colors.append((r, g, b))
|
| 181 |
+
|
| 182 |
+
# colors 233..253: grayscale
|
| 183 |
+
|
| 184 |
+
for i in range(1, 22):
|
| 185 |
+
v = 8 + i * 10
|
| 186 |
+
self.xterm_colors.append((v, v, v))
|
| 187 |
+
|
| 188 |
+
def _closest_color(self, r, g, b):
|
| 189 |
+
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
|
| 190 |
+
match = 0
|
| 191 |
+
|
| 192 |
+
for i in range(0, 254):
|
| 193 |
+
values = self.xterm_colors[i]
|
| 194 |
+
|
| 195 |
+
rd = r - values[0]
|
| 196 |
+
gd = g - values[1]
|
| 197 |
+
bd = b - values[2]
|
| 198 |
+
d = rd*rd + gd*gd + bd*bd
|
| 199 |
+
|
| 200 |
+
if d < distance:
|
| 201 |
+
match = i
|
| 202 |
+
distance = d
|
| 203 |
+
return match
|
| 204 |
+
|
| 205 |
+
def _color_index(self, color):
|
| 206 |
+
index = self.best_match.get(color, None)
|
| 207 |
+
if color in ansicolors:
|
| 208 |
+
# strip the `ansi/#ansi` part and look up code
|
| 209 |
+
index = color
|
| 210 |
+
self.best_match[color] = index
|
| 211 |
+
if index is None:
|
| 212 |
+
try:
|
| 213 |
+
rgb = int(str(color), 16)
|
| 214 |
+
except ValueError:
|
| 215 |
+
rgb = 0
|
| 216 |
+
|
| 217 |
+
r = (rgb >> 16) & 0xff
|
| 218 |
+
g = (rgb >> 8) & 0xff
|
| 219 |
+
b = rgb & 0xff
|
| 220 |
+
index = self._closest_color(r, g, b)
|
| 221 |
+
self.best_match[color] = index
|
| 222 |
+
return index
|
| 223 |
+
|
| 224 |
+
def _setup_styles(self):
|
| 225 |
+
for ttype, ndef in self.style:
|
| 226 |
+
escape = EscapeSequence()
|
| 227 |
+
# get foreground from ansicolor if set
|
| 228 |
+
if ndef['ansicolor']:
|
| 229 |
+
escape.fg = self._color_index(ndef['ansicolor'])
|
| 230 |
+
elif ndef['color']:
|
| 231 |
+
escape.fg = self._color_index(ndef['color'])
|
| 232 |
+
if ndef['bgansicolor']:
|
| 233 |
+
escape.bg = self._color_index(ndef['bgansicolor'])
|
| 234 |
+
elif ndef['bgcolor']:
|
| 235 |
+
escape.bg = self._color_index(ndef['bgcolor'])
|
| 236 |
+
if self.usebold and ndef['bold']:
|
| 237 |
+
escape.bold = True
|
| 238 |
+
if self.useunderline and ndef['underline']:
|
| 239 |
+
escape.underline = True
|
| 240 |
+
if self.useitalic and ndef['italic']:
|
| 241 |
+
escape.italic = True
|
| 242 |
+
self.style_string[str(ttype)] = (escape.color_string(),
|
| 243 |
+
escape.reset_string())
|
| 244 |
+
|
| 245 |
+
def _write_lineno(self, outfile):
|
| 246 |
+
self._lineno += 1
|
| 247 |
+
outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
|
| 248 |
+
|
| 249 |
+
def format(self, tokensource, outfile):
|
| 250 |
+
return Formatter.format(self, tokensource, outfile)
|
| 251 |
+
|
| 252 |
+
def format_unencoded(self, tokensource, outfile):
|
| 253 |
+
if self.linenos:
|
| 254 |
+
self._write_lineno(outfile)
|
| 255 |
+
|
| 256 |
+
for ttype, value in tokensource:
|
| 257 |
+
not_found = True
|
| 258 |
+
while ttype and not_found:
|
| 259 |
+
try:
|
| 260 |
+
# outfile.write( "<" + str(ttype) + ">" )
|
| 261 |
+
on, off = self.style_string[str(ttype)]
|
| 262 |
+
|
| 263 |
+
# Like TerminalFormatter, add "reset colors" escape sequence
|
| 264 |
+
# on newline.
|
| 265 |
+
spl = value.split('\n')
|
| 266 |
+
for line in spl[:-1]:
|
| 267 |
+
if line:
|
| 268 |
+
outfile.write(on + line + off)
|
| 269 |
+
if self.linenos:
|
| 270 |
+
self._write_lineno(outfile)
|
| 271 |
+
else:
|
| 272 |
+
outfile.write('\n')
|
| 273 |
+
|
| 274 |
+
if spl[-1]:
|
| 275 |
+
outfile.write(on + spl[-1] + off)
|
| 276 |
+
|
| 277 |
+
not_found = False
|
| 278 |
+
# outfile.write( '#' + str(ttype) + '#' )
|
| 279 |
+
|
| 280 |
+
except KeyError:
|
| 281 |
+
# ottype = ttype
|
| 282 |
+
ttype = ttype.parent
|
| 283 |
+
# outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )
|
| 284 |
+
|
| 285 |
+
if not_found:
|
| 286 |
+
outfile.write(value)
|
| 287 |
+
|
| 288 |
+
if self.linenos:
|
| 289 |
+
outfile.write("\n")
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
class TerminalTrueColorFormatter(Terminal256Formatter):
|
| 294 |
+
r"""
|
| 295 |
+
Format tokens with ANSI color sequences, for output in a true-color
|
| 296 |
+
terminal or console. Like in `TerminalFormatter` color sequences
|
| 297 |
+
are terminated at newlines, so that paging the output works correctly.
|
| 298 |
+
|
| 299 |
+
.. versionadded:: 2.1
|
| 300 |
+
|
| 301 |
+
Options accepted:
|
| 302 |
+
|
| 303 |
+
`style`
|
| 304 |
+
The style to use, can be a string or a Style subclass (default:
|
| 305 |
+
``'default'``).
|
| 306 |
+
"""
|
| 307 |
+
name = 'TerminalTrueColor'
|
| 308 |
+
aliases = ['terminal16m', 'console16m', '16m']
|
| 309 |
+
filenames = []
|
| 310 |
+
|
| 311 |
+
def _build_color_table(self):
|
| 312 |
+
pass
|
| 313 |
+
|
| 314 |
+
def _color_tuple(self, color):
|
| 315 |
+
try:
|
| 316 |
+
rgb = int(str(color), 16)
|
| 317 |
+
except ValueError:
|
| 318 |
+
return None
|
| 319 |
+
r = (rgb >> 16) & 0xff
|
| 320 |
+
g = (rgb >> 8) & 0xff
|
| 321 |
+
b = rgb & 0xff
|
| 322 |
+
return (r, g, b)
|
| 323 |
+
|
| 324 |
+
def _setup_styles(self):
|
| 325 |
+
for ttype, ndef in self.style:
|
| 326 |
+
escape = EscapeSequence()
|
| 327 |
+
if ndef['color']:
|
| 328 |
+
escape.fg = self._color_tuple(ndef['color'])
|
| 329 |
+
if ndef['bgcolor']:
|
| 330 |
+
escape.bg = self._color_tuple(ndef['bgcolor'])
|
| 331 |
+
if self.usebold and ndef['bold']:
|
| 332 |
+
escape.bold = True
|
| 333 |
+
if self.useunderline and ndef['underline']:
|
| 334 |
+
escape.underline = True
|
| 335 |
+
if self.useitalic and ndef['italic']:
|
| 336 |
+
escape.italic = True
|
| 337 |
+
self.style_string[str(ttype)] = (escape.true_color_string(),
|
| 338 |
+
escape.reset_string())
|
.venv/Lib/site-packages/pip/_vendor/pygments/lexers/__init__.py
ADDED
|
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.lexers
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Pygments lexers.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import re
|
| 12 |
+
import sys
|
| 13 |
+
import types
|
| 14 |
+
import fnmatch
|
| 15 |
+
from os.path import basename
|
| 16 |
+
|
| 17 |
+
from pip._vendor.pygments.lexers._mapping import LEXERS
|
| 18 |
+
from pip._vendor.pygments.modeline import get_filetype_from_buffer
|
| 19 |
+
from pip._vendor.pygments.plugin import find_plugin_lexers
|
| 20 |
+
from pip._vendor.pygments.util import ClassNotFound, guess_decode
|
| 21 |
+
|
| 22 |
+
COMPAT = {
|
| 23 |
+
'Python3Lexer': 'PythonLexer',
|
| 24 |
+
'Python3TracebackLexer': 'PythonTracebackLexer',
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
|
| 28 |
+
'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
|
| 29 |
+
|
| 30 |
+
_lexer_cache = {}
|
| 31 |
+
_pattern_cache = {}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _fn_matches(fn, glob):
|
| 35 |
+
"""Return whether the supplied file name fn matches pattern filename."""
|
| 36 |
+
if glob not in _pattern_cache:
|
| 37 |
+
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
|
| 38 |
+
return pattern.match(fn)
|
| 39 |
+
return _pattern_cache[glob].match(fn)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def _load_lexers(module_name):
|
| 43 |
+
"""Load a lexer (and all others in the module too)."""
|
| 44 |
+
mod = __import__(module_name, None, None, ['__all__'])
|
| 45 |
+
for lexer_name in mod.__all__:
|
| 46 |
+
cls = getattr(mod, lexer_name)
|
| 47 |
+
_lexer_cache[cls.name] = cls
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def get_all_lexers(plugins=True):
|
| 51 |
+
"""Return a generator of tuples in the form ``(name, aliases,
|
| 52 |
+
filenames, mimetypes)`` of all know lexers.
|
| 53 |
+
|
| 54 |
+
If *plugins* is true (the default), plugin lexers supplied by entrypoints
|
| 55 |
+
are also returned. Otherwise, only builtin ones are considered.
|
| 56 |
+
"""
|
| 57 |
+
for item in LEXERS.values():
|
| 58 |
+
yield item[1:]
|
| 59 |
+
if plugins:
|
| 60 |
+
for lexer in find_plugin_lexers():
|
| 61 |
+
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def find_lexer_class(name):
|
| 65 |
+
"""
|
| 66 |
+
Return the `Lexer` subclass that with the *name* attribute as given by
|
| 67 |
+
the *name* argument.
|
| 68 |
+
"""
|
| 69 |
+
if name in _lexer_cache:
|
| 70 |
+
return _lexer_cache[name]
|
| 71 |
+
# lookup builtin lexers
|
| 72 |
+
for module_name, lname, aliases, _, _ in LEXERS.values():
|
| 73 |
+
if name == lname:
|
| 74 |
+
_load_lexers(module_name)
|
| 75 |
+
return _lexer_cache[name]
|
| 76 |
+
# continue with lexers from setuptools entrypoints
|
| 77 |
+
for cls in find_plugin_lexers():
|
| 78 |
+
if cls.name == name:
|
| 79 |
+
return cls
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def find_lexer_class_by_name(_alias):
|
| 83 |
+
"""
|
| 84 |
+
Return the `Lexer` subclass that has `alias` in its aliases list, without
|
| 85 |
+
instantiating it.
|
| 86 |
+
|
| 87 |
+
Like `get_lexer_by_name`, but does not instantiate the class.
|
| 88 |
+
|
| 89 |
+
Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
|
| 90 |
+
found.
|
| 91 |
+
|
| 92 |
+
.. versionadded:: 2.2
|
| 93 |
+
"""
|
| 94 |
+
if not _alias:
|
| 95 |
+
raise ClassNotFound('no lexer for alias %r found' % _alias)
|
| 96 |
+
# lookup builtin lexers
|
| 97 |
+
for module_name, name, aliases, _, _ in LEXERS.values():
|
| 98 |
+
if _alias.lower() in aliases:
|
| 99 |
+
if name not in _lexer_cache:
|
| 100 |
+
_load_lexers(module_name)
|
| 101 |
+
return _lexer_cache[name]
|
| 102 |
+
# continue with lexers from setuptools entrypoints
|
| 103 |
+
for cls in find_plugin_lexers():
|
| 104 |
+
if _alias.lower() in cls.aliases:
|
| 105 |
+
return cls
|
| 106 |
+
raise ClassNotFound('no lexer for alias %r found' % _alias)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def get_lexer_by_name(_alias, **options):
|
| 110 |
+
"""
|
| 111 |
+
Return an instance of a `Lexer` subclass that has `alias` in its
|
| 112 |
+
aliases list. The lexer is given the `options` at its
|
| 113 |
+
instantiation.
|
| 114 |
+
|
| 115 |
+
Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
|
| 116 |
+
found.
|
| 117 |
+
"""
|
| 118 |
+
if not _alias:
|
| 119 |
+
raise ClassNotFound('no lexer for alias %r found' % _alias)
|
| 120 |
+
|
| 121 |
+
# lookup builtin lexers
|
| 122 |
+
for module_name, name, aliases, _, _ in LEXERS.values():
|
| 123 |
+
if _alias.lower() in aliases:
|
| 124 |
+
if name not in _lexer_cache:
|
| 125 |
+
_load_lexers(module_name)
|
| 126 |
+
return _lexer_cache[name](**options)
|
| 127 |
+
# continue with lexers from setuptools entrypoints
|
| 128 |
+
for cls in find_plugin_lexers():
|
| 129 |
+
if _alias.lower() in cls.aliases:
|
| 130 |
+
return cls(**options)
|
| 131 |
+
raise ClassNotFound('no lexer for alias %r found' % _alias)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
|
| 135 |
+
"""Load a lexer from a file.
|
| 136 |
+
|
| 137 |
+
This method expects a file located relative to the current working
|
| 138 |
+
directory, which contains a Lexer class. By default, it expects the
|
| 139 |
+
Lexer to be name CustomLexer; you can specify your own class name
|
| 140 |
+
as the second argument to this function.
|
| 141 |
+
|
| 142 |
+
Users should be very careful with the input, because this method
|
| 143 |
+
is equivalent to running eval on the input file.
|
| 144 |
+
|
| 145 |
+
Raises ClassNotFound if there are any problems importing the Lexer.
|
| 146 |
+
|
| 147 |
+
.. versionadded:: 2.2
|
| 148 |
+
"""
|
| 149 |
+
try:
|
| 150 |
+
# This empty dict will contain the namespace for the exec'd file
|
| 151 |
+
custom_namespace = {}
|
| 152 |
+
with open(filename, 'rb') as f:
|
| 153 |
+
exec(f.read(), custom_namespace)
|
| 154 |
+
# Retrieve the class `lexername` from that namespace
|
| 155 |
+
if lexername not in custom_namespace:
|
| 156 |
+
raise ClassNotFound('no valid %s class found in %s' %
|
| 157 |
+
(lexername, filename))
|
| 158 |
+
lexer_class = custom_namespace[lexername]
|
| 159 |
+
# And finally instantiate it with the options
|
| 160 |
+
return lexer_class(**options)
|
| 161 |
+
except OSError as err:
|
| 162 |
+
raise ClassNotFound('cannot read %s: %s' % (filename, err))
|
| 163 |
+
except ClassNotFound:
|
| 164 |
+
raise
|
| 165 |
+
except Exception as err:
|
| 166 |
+
raise ClassNotFound('error when loading custom lexer: %s' % err)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def find_lexer_class_for_filename(_fn, code=None):
|
| 170 |
+
"""Get a lexer for a filename.
|
| 171 |
+
|
| 172 |
+
If multiple lexers match the filename pattern, use ``analyse_text()`` to
|
| 173 |
+
figure out which one is more appropriate.
|
| 174 |
+
|
| 175 |
+
Returns None if not found.
|
| 176 |
+
"""
|
| 177 |
+
matches = []
|
| 178 |
+
fn = basename(_fn)
|
| 179 |
+
for modname, name, _, filenames, _ in LEXERS.values():
|
| 180 |
+
for filename in filenames:
|
| 181 |
+
if _fn_matches(fn, filename):
|
| 182 |
+
if name not in _lexer_cache:
|
| 183 |
+
_load_lexers(modname)
|
| 184 |
+
matches.append((_lexer_cache[name], filename))
|
| 185 |
+
for cls in find_plugin_lexers():
|
| 186 |
+
for filename in cls.filenames:
|
| 187 |
+
if _fn_matches(fn, filename):
|
| 188 |
+
matches.append((cls, filename))
|
| 189 |
+
|
| 190 |
+
if isinstance(code, bytes):
|
| 191 |
+
# decode it, since all analyse_text functions expect unicode
|
| 192 |
+
code = guess_decode(code)
|
| 193 |
+
|
| 194 |
+
def get_rating(info):
|
| 195 |
+
cls, filename = info
|
| 196 |
+
# explicit patterns get a bonus
|
| 197 |
+
bonus = '*' not in filename and 0.5 or 0
|
| 198 |
+
# The class _always_ defines analyse_text because it's included in
|
| 199 |
+
# the Lexer class. The default implementation returns None which
|
| 200 |
+
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
|
| 201 |
+
# to find lexers which need it overridden.
|
| 202 |
+
if code:
|
| 203 |
+
return cls.analyse_text(code) + bonus, cls.__name__
|
| 204 |
+
return cls.priority + bonus, cls.__name__
|
| 205 |
+
|
| 206 |
+
if matches:
|
| 207 |
+
matches.sort(key=get_rating)
|
| 208 |
+
# print "Possible lexers, after sort:", matches
|
| 209 |
+
return matches[-1][0]
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def get_lexer_for_filename(_fn, code=None, **options):
|
| 213 |
+
"""Get a lexer for a filename.
|
| 214 |
+
|
| 215 |
+
Return a `Lexer` subclass instance that has a filename pattern
|
| 216 |
+
matching `fn`. The lexer is given the `options` at its
|
| 217 |
+
instantiation.
|
| 218 |
+
|
| 219 |
+
Raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
|
| 220 |
+
is found.
|
| 221 |
+
|
| 222 |
+
If multiple lexers match the filename pattern, use their ``analyse_text()``
|
| 223 |
+
methods to figure out which one is more appropriate.
|
| 224 |
+
"""
|
| 225 |
+
res = find_lexer_class_for_filename(_fn, code)
|
| 226 |
+
if not res:
|
| 227 |
+
raise ClassNotFound('no lexer for filename %r found' % _fn)
|
| 228 |
+
return res(**options)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def get_lexer_for_mimetype(_mime, **options):
|
| 232 |
+
"""
|
| 233 |
+
Return a `Lexer` subclass instance that has `mime` in its mimetype
|
| 234 |
+
list. The lexer is given the `options` at its instantiation.
|
| 235 |
+
|
| 236 |
+
Will raise :exc:`pygments.util.ClassNotFound` if not lexer for that mimetype
|
| 237 |
+
is found.
|
| 238 |
+
"""
|
| 239 |
+
for modname, name, _, _, mimetypes in LEXERS.values():
|
| 240 |
+
if _mime in mimetypes:
|
| 241 |
+
if name not in _lexer_cache:
|
| 242 |
+
_load_lexers(modname)
|
| 243 |
+
return _lexer_cache[name](**options)
|
| 244 |
+
for cls in find_plugin_lexers():
|
| 245 |
+
if _mime in cls.mimetypes:
|
| 246 |
+
return cls(**options)
|
| 247 |
+
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def _iter_lexerclasses(plugins=True):
|
| 251 |
+
"""Return an iterator over all lexer classes."""
|
| 252 |
+
for key in sorted(LEXERS):
|
| 253 |
+
module_name, name = LEXERS[key][:2]
|
| 254 |
+
if name not in _lexer_cache:
|
| 255 |
+
_load_lexers(module_name)
|
| 256 |
+
yield _lexer_cache[name]
|
| 257 |
+
if plugins:
|
| 258 |
+
yield from find_plugin_lexers()
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def guess_lexer_for_filename(_fn, _text, **options):
|
| 262 |
+
"""
|
| 263 |
+
As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
|
| 264 |
+
or `alias_filenames` that matches `filename` are taken into consideration.
|
| 265 |
+
|
| 266 |
+
:exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
|
| 267 |
+
handle the content.
|
| 268 |
+
"""
|
| 269 |
+
fn = basename(_fn)
|
| 270 |
+
primary = {}
|
| 271 |
+
matching_lexers = set()
|
| 272 |
+
for lexer in _iter_lexerclasses():
|
| 273 |
+
for filename in lexer.filenames:
|
| 274 |
+
if _fn_matches(fn, filename):
|
| 275 |
+
matching_lexers.add(lexer)
|
| 276 |
+
primary[lexer] = True
|
| 277 |
+
for filename in lexer.alias_filenames:
|
| 278 |
+
if _fn_matches(fn, filename):
|
| 279 |
+
matching_lexers.add(lexer)
|
| 280 |
+
primary[lexer] = False
|
| 281 |
+
if not matching_lexers:
|
| 282 |
+
raise ClassNotFound('no lexer for filename %r found' % fn)
|
| 283 |
+
if len(matching_lexers) == 1:
|
| 284 |
+
return matching_lexers.pop()(**options)
|
| 285 |
+
result = []
|
| 286 |
+
for lexer in matching_lexers:
|
| 287 |
+
rv = lexer.analyse_text(_text)
|
| 288 |
+
if rv == 1.0:
|
| 289 |
+
return lexer(**options)
|
| 290 |
+
result.append((rv, lexer))
|
| 291 |
+
|
| 292 |
+
def type_sort(t):
|
| 293 |
+
# sort by:
|
| 294 |
+
# - analyse score
|
| 295 |
+
# - is primary filename pattern?
|
| 296 |
+
# - priority
|
| 297 |
+
# - last resort: class name
|
| 298 |
+
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
|
| 299 |
+
result.sort(key=type_sort)
|
| 300 |
+
|
| 301 |
+
return result[-1][1](**options)
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def guess_lexer(_text, **options):
|
| 305 |
+
"""
|
| 306 |
+
Return a `Lexer` subclass instance that's guessed from the text in
|
| 307 |
+
`text`. For that, the :meth:`.analyse_text()` method of every known lexer
|
| 308 |
+
class is called with the text as argument, and the lexer which returned the
|
| 309 |
+
highest value will be instantiated and returned.
|
| 310 |
+
|
| 311 |
+
:exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
|
| 312 |
+
handle the content.
|
| 313 |
+
"""
|
| 314 |
+
|
| 315 |
+
if not isinstance(_text, str):
|
| 316 |
+
inencoding = options.get('inencoding', options.get('encoding'))
|
| 317 |
+
if inencoding:
|
| 318 |
+
_text = _text.decode(inencoding or 'utf8')
|
| 319 |
+
else:
|
| 320 |
+
_text, _ = guess_decode(_text)
|
| 321 |
+
|
| 322 |
+
# try to get a vim modeline first
|
| 323 |
+
ft = get_filetype_from_buffer(_text)
|
| 324 |
+
|
| 325 |
+
if ft is not None:
|
| 326 |
+
try:
|
| 327 |
+
return get_lexer_by_name(ft, **options)
|
| 328 |
+
except ClassNotFound:
|
| 329 |
+
pass
|
| 330 |
+
|
| 331 |
+
best_lexer = [0.0, None]
|
| 332 |
+
for lexer in _iter_lexerclasses():
|
| 333 |
+
rv = lexer.analyse_text(_text)
|
| 334 |
+
if rv == 1.0:
|
| 335 |
+
return lexer(**options)
|
| 336 |
+
if rv > best_lexer[0]:
|
| 337 |
+
best_lexer[:] = (rv, lexer)
|
| 338 |
+
if not best_lexer[0] or best_lexer[1] is None:
|
| 339 |
+
raise ClassNotFound('no lexer matching the text found')
|
| 340 |
+
return best_lexer[1](**options)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
class _automodule(types.ModuleType):
|
| 344 |
+
"""Automatically import lexers."""
|
| 345 |
+
|
| 346 |
+
def __getattr__(self, name):
|
| 347 |
+
info = LEXERS.get(name)
|
| 348 |
+
if info:
|
| 349 |
+
_load_lexers(info[0])
|
| 350 |
+
cls = _lexer_cache[info[1]]
|
| 351 |
+
setattr(self, name, cls)
|
| 352 |
+
return cls
|
| 353 |
+
if name in COMPAT:
|
| 354 |
+
return getattr(self, COMPAT[name])
|
| 355 |
+
raise AttributeError(name)
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
oldmod = sys.modules[__name__]
|
| 359 |
+
newmod = _automodule(__name__)
|
| 360 |
+
newmod.__dict__.update(oldmod.__dict__)
|
| 361 |
+
sys.modules[__name__] = newmod
|
| 362 |
+
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
|
.venv/Lib/site-packages/pip/_vendor/pygments/lexers/_mapping.py
ADDED
|
@@ -0,0 +1,559 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Automatically generated by scripts/gen_mapfiles.py.
|
| 2 |
+
# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
|
| 3 |
+
|
| 4 |
+
LEXERS = {
|
| 5 |
+
'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
|
| 6 |
+
'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
|
| 7 |
+
'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()),
|
| 8 |
+
'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
|
| 9 |
+
'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
|
| 10 |
+
'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
|
| 11 |
+
'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
|
| 12 |
+
'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
|
| 13 |
+
'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
|
| 14 |
+
'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
|
| 15 |
+
'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
|
| 16 |
+
'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)),
|
| 17 |
+
'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
|
| 18 |
+
'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
|
| 19 |
+
'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
|
| 20 |
+
'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()),
|
| 21 |
+
'AntlrCSharpLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
|
| 22 |
+
'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
|
| 23 |
+
'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
|
| 24 |
+
'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
|
| 25 |
+
'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
|
| 26 |
+
'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
|
| 27 |
+
'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
|
| 28 |
+
'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
|
| 29 |
+
'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
|
| 30 |
+
'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
|
| 31 |
+
'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
|
| 32 |
+
'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
|
| 33 |
+
'ArturoLexer': ('pip._vendor.pygments.lexers.arturo', 'Arturo', ('arturo', 'art'), ('*.art',), ()),
|
| 34 |
+
'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature')),
|
| 35 |
+
'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
|
| 36 |
+
'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),
|
| 37 |
+
'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
|
| 38 |
+
'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
|
| 39 |
+
'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
|
| 40 |
+
'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
|
| 41 |
+
'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
|
| 42 |
+
'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
|
| 43 |
+
'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
|
| 44 |
+
'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
|
| 45 |
+
'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
|
| 46 |
+
'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
|
| 47 |
+
'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
|
| 48 |
+
'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
|
| 49 |
+
'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
|
| 50 |
+
'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
|
| 51 |
+
'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
|
| 52 |
+
'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
|
| 53 |
+
'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
|
| 54 |
+
'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
|
| 55 |
+
'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
|
| 56 |
+
'BnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
|
| 57 |
+
'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
|
| 58 |
+
'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
|
| 59 |
+
'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
|
| 60 |
+
'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
|
| 61 |
+
'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
|
| 62 |
+
'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
|
| 63 |
+
'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')),
|
| 64 |
+
'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
|
| 65 |
+
'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
|
| 66 |
+
'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
|
| 67 |
+
'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
|
| 68 |
+
'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
|
| 69 |
+
'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
|
| 70 |
+
'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
|
| 71 |
+
'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
|
| 72 |
+
'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
|
| 73 |
+
'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
|
| 74 |
+
'CarbonLexer': ('pip._vendor.pygments.lexers.carbon', 'Carbon', ('carbon',), ('*.carbon',), ('text/x-carbon',)),
|
| 75 |
+
'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
|
| 76 |
+
'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
|
| 77 |
+
'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
|
| 78 |
+
'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
|
| 79 |
+
'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
|
| 80 |
+
'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
|
| 81 |
+
'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
|
| 82 |
+
'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
|
| 83 |
+
'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
|
| 84 |
+
'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
|
| 85 |
+
'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
|
| 86 |
+
'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
|
| 87 |
+
'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
|
| 88 |
+
'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
|
| 89 |
+
'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
|
| 90 |
+
'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
|
| 91 |
+
'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
|
| 92 |
+
'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
|
| 93 |
+
'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
|
| 94 |
+
'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
|
| 95 |
+
'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
|
| 96 |
+
'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
|
| 97 |
+
'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()),
|
| 98 |
+
'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
|
| 99 |
+
'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
|
| 100 |
+
'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
|
| 101 |
+
'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
|
| 102 |
+
'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
|
| 103 |
+
'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
|
| 104 |
+
'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
|
| 105 |
+
'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
|
| 106 |
+
'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
|
| 107 |
+
'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
|
| 108 |
+
'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
|
| 109 |
+
'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
|
| 110 |
+
'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
|
| 111 |
+
'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
|
| 112 |
+
'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
|
| 113 |
+
'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
|
| 114 |
+
'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
|
| 115 |
+
'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
|
| 116 |
+
'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
|
| 117 |
+
'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
|
| 118 |
+
'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
|
| 119 |
+
'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
|
| 120 |
+
'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
|
| 121 |
+
'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
|
| 122 |
+
'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
|
| 123 |
+
'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
|
| 124 |
+
'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
|
| 125 |
+
'DaxLexer': ('pip._vendor.pygments.lexers.dax', 'Dax', ('dax',), ('*.dax',), ()),
|
| 126 |
+
'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()),
|
| 127 |
+
'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
|
| 128 |
+
'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
|
| 129 |
+
'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
|
| 130 |
+
'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
|
| 131 |
+
'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
|
| 132 |
+
'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
|
| 133 |
+
'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
|
| 134 |
+
'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
|
| 135 |
+
'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
|
| 136 |
+
'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
|
| 137 |
+
'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
|
| 138 |
+
'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
|
| 139 |
+
'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
|
| 140 |
+
'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
|
| 141 |
+
'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
|
| 142 |
+
'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
|
| 143 |
+
'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
|
| 144 |
+
'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
|
| 145 |
+
'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)),
|
| 146 |
+
'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
|
| 147 |
+
'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)),
|
| 148 |
+
'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
|
| 149 |
+
'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
|
| 150 |
+
'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
|
| 151 |
+
'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
|
| 152 |
+
'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
|
| 153 |
+
'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
|
| 154 |
+
'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
|
| 155 |
+
'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
|
| 156 |
+
'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
|
| 157 |
+
'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
|
| 158 |
+
'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi', '*.fsx'), ('text/x-fsharp',)),
|
| 159 |
+
'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
|
| 160 |
+
'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
|
| 161 |
+
'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
|
| 162 |
+
'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
|
| 163 |
+
'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
|
| 164 |
+
'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
|
| 165 |
+
'FiftLexer': ('pip._vendor.pygments.lexers.fift', 'Fift', ('fift', 'fif'), ('*.fif',), ()),
|
| 166 |
+
'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
|
| 167 |
+
'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
|
| 168 |
+
'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
|
| 169 |
+
'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
|
| 170 |
+
'FortranFixedLexer': ('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
|
| 171 |
+
'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
|
| 172 |
+
'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
|
| 173 |
+
'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
|
| 174 |
+
'FuncLexer': ('pip._vendor.pygments.lexers.func', 'FunC', ('func', 'fc'), ('*.fc', '*.func'), ()),
|
| 175 |
+
'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
|
| 176 |
+
'GAPConsoleLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP session', ('gap-console', 'gap-repl'), ('*.tst',), ()),
|
| 177 |
+
'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
|
| 178 |
+
'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
|
| 179 |
+
'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
|
| 180 |
+
'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()),
|
| 181 |
+
'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
|
| 182 |
+
'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()),
|
| 183 |
+
'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
|
| 184 |
+
'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
|
| 185 |
+
'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
|
| 186 |
+
'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)),
|
| 187 |
+
'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
|
| 188 |
+
'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)),
|
| 189 |
+
'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
|
| 190 |
+
'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
|
| 191 |
+
'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
|
| 192 |
+
'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
|
| 193 |
+
'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
|
| 194 |
+
'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
|
| 195 |
+
'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
|
| 196 |
+
'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
|
| 197 |
+
'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
|
| 198 |
+
'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
|
| 199 |
+
'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
|
| 200 |
+
'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
|
| 201 |
+
'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
|
| 202 |
+
'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
|
| 203 |
+
'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
|
| 204 |
+
'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
|
| 205 |
+
'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), ('*Spec.hs',), ()),
|
| 206 |
+
'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
|
| 207 |
+
'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
|
| 208 |
+
'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
|
| 209 |
+
'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
|
| 210 |
+
'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
|
| 211 |
+
'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
|
| 212 |
+
'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
|
| 213 |
+
'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
|
| 214 |
+
'HybrisLexer': ('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
|
| 215 |
+
'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
|
| 216 |
+
'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
|
| 217 |
+
'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
|
| 218 |
+
'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
|
| 219 |
+
'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
|
| 220 |
+
'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
|
| 221 |
+
'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
|
| 222 |
+
'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig', '*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ('text/x-ini', 'text/inf')),
|
| 223 |
+
'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
|
| 224 |
+
'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
|
| 225 |
+
'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
|
| 226 |
+
'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
|
| 227 |
+
'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
|
| 228 |
+
'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()),
|
| 229 |
+
'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)),
|
| 230 |
+
'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
|
| 231 |
+
'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
|
| 232 |
+
'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
|
| 233 |
+
'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
|
| 234 |
+
'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
|
| 235 |
+
'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
|
| 236 |
+
'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
|
| 237 |
+
'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
|
| 238 |
+
'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
|
| 239 |
+
'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
|
| 240 |
+
'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
|
| 241 |
+
'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
|
| 242 |
+
'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()),
|
| 243 |
+
'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
|
| 244 |
+
'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')),
|
| 245 |
+
'JsonnetLexer': ('pip._vendor.pygments.lexers.jsonnet', 'Jsonnet', ('jsonnet',), ('*.jsonnet', '*.libsonnet'), ()),
|
| 246 |
+
'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
|
| 247 |
+
'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
|
| 248 |
+
'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
|
| 249 |
+
'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
|
| 250 |
+
'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
|
| 251 |
+
'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
|
| 252 |
+
'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
|
| 253 |
+
'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
|
| 254 |
+
'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
|
| 255 |
+
'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
|
| 256 |
+
'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()),
|
| 257 |
+
'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
|
| 258 |
+
'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
|
| 259 |
+
'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
|
| 260 |
+
'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
|
| 261 |
+
'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
|
| 262 |
+
'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
|
| 263 |
+
'LeanLexer': ('pip._vendor.pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
|
| 264 |
+
'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
|
| 265 |
+
'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)),
|
| 266 |
+
'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()),
|
| 267 |
+
'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
|
| 268 |
+
'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
|
| 269 |
+
'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)),
|
| 270 |
+
'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)),
|
| 271 |
+
'LiterateHaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)),
|
| 272 |
+
'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)),
|
| 273 |
+
'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)),
|
| 274 |
+
'LlvmLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
|
| 275 |
+
'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
|
| 276 |
+
'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
|
| 277 |
+
'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
|
| 278 |
+
'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
|
| 279 |
+
'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
|
| 280 |
+
'MCFunctionLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
|
| 281 |
+
'MCSchemaLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCSchema', ('mcschema',), ('*.mcschema',), ('text/mcschema',)),
|
| 282 |
+
'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
|
| 283 |
+
'MIPSLexer': ('pip._vendor.pygments.lexers.mips', 'MIPS', ('mips',), ('*.mips', '*.MIPS'), ()),
|
| 284 |
+
'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
|
| 285 |
+
'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
|
| 286 |
+
'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
|
| 287 |
+
'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
|
| 288 |
+
'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
|
| 289 |
+
'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
|
| 290 |
+
'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
|
| 291 |
+
'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
|
| 292 |
+
'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
|
| 293 |
+
'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
|
| 294 |
+
'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)),
|
| 295 |
+
'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
|
| 296 |
+
'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
|
| 297 |
+
'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
|
| 298 |
+
'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
|
| 299 |
+
'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
|
| 300 |
+
'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()),
|
| 301 |
+
'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)),
|
| 302 |
+
'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
|
| 303 |
+
'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
|
| 304 |
+
'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
|
| 305 |
+
'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
|
| 306 |
+
'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
|
| 307 |
+
'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
|
| 308 |
+
'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
|
| 309 |
+
'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
|
| 310 |
+
'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
|
| 311 |
+
'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
|
| 312 |
+
'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
|
| 313 |
+
'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
|
| 314 |
+
'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
|
| 315 |
+
'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
|
| 316 |
+
'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
|
| 317 |
+
'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
|
| 318 |
+
'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
|
| 319 |
+
'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
|
| 320 |
+
'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
|
| 321 |
+
'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
|
| 322 |
+
'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
|
| 323 |
+
'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
|
| 324 |
+
'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
|
| 325 |
+
'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
|
| 326 |
+
'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
|
| 327 |
+
'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
|
| 328 |
+
'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM', '*.nasm'), ('text/x-nasm',)),
|
| 329 |
+
'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
|
| 330 |
+
'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
|
| 331 |
+
'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
|
| 332 |
+
'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()),
|
| 333 |
+
'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
|
| 334 |
+
'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
|
| 335 |
+
'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
|
| 336 |
+
'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
|
| 337 |
+
'NitLexer': ('pip._vendor.pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
|
| 338 |
+
'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
|
| 339 |
+
'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)),
|
| 340 |
+
'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
|
| 341 |
+
'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
|
| 342 |
+
'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
|
| 343 |
+
'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
|
| 344 |
+
'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
|
| 345 |
+
'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
|
| 346 |
+
'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
|
| 347 |
+
'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
|
| 348 |
+
'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
|
| 349 |
+
'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
|
| 350 |
+
'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()),
|
| 351 |
+
'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
|
| 352 |
+
'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
|
| 353 |
+
'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
|
| 354 |
+
'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()),
|
| 355 |
+
'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
|
| 356 |
+
'PanLexer': ('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
|
| 357 |
+
'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
|
| 358 |
+
'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
|
| 359 |
+
'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
|
| 360 |
+
'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
|
| 361 |
+
'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
|
| 362 |
+
'PhixLexer': ('pip._vendor.pygments.lexers.phix', 'Phix', ('phix',), ('*.exw',), ('text/x-phix',)),
|
| 363 |
+
'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
|
| 364 |
+
'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
|
| 365 |
+
'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
|
| 366 |
+
'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
|
| 367 |
+
'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
|
| 368 |
+
'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
|
| 369 |
+
'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
|
| 370 |
+
'PortugolLexer': ('pip._vendor.pygments.lexers.pascal', 'Portugol', ('portugol',), ('*.alg', '*.portugol'), ()),
|
| 371 |
+
'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
|
| 372 |
+
'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
|
| 373 |
+
'PostgresExplainLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL EXPLAIN dialect', ('postgres-explain',), ('*.explain',), ('text/x-postgresql-explain',)),
|
| 374 |
+
'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
|
| 375 |
+
'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
|
| 376 |
+
'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
|
| 377 |
+
'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()),
|
| 378 |
+
'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
|
| 379 |
+
'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()),
|
| 380 |
+
'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
|
| 381 |
+
'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
|
| 382 |
+
'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
|
| 383 |
+
'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
|
| 384 |
+
'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
|
| 385 |
+
'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
|
| 386 |
+
'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
|
| 387 |
+
'PyPyLogLexer': ('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
|
| 388 |
+
'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
|
| 389 |
+
'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
|
| 390 |
+
'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
|
| 391 |
+
'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.pyi', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
|
| 392 |
+
'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
|
| 393 |
+
'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
|
| 394 |
+
'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
|
| 395 |
+
'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
|
| 396 |
+
'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
|
| 397 |
+
'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
|
| 398 |
+
'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
|
| 399 |
+
'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
|
| 400 |
+
'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
|
| 401 |
+
'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
|
| 402 |
+
'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
|
| 403 |
+
'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
|
| 404 |
+
'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
|
| 405 |
+
'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
|
| 406 |
+
'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
|
| 407 |
+
'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
|
| 408 |
+
'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
|
| 409 |
+
'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
|
| 410 |
+
'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
|
| 411 |
+
'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
|
| 412 |
+
'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
|
| 413 |
+
'ReasonLexer': ('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)),
|
| 414 |
+
'RebolLexer': ('pip._vendor.pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
|
| 415 |
+
'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
|
| 416 |
+
'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
|
| 417 |
+
'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
|
| 418 |
+
'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()),
|
| 419 |
+
'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
|
| 420 |
+
'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
|
| 421 |
+
'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
|
| 422 |
+
'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
|
| 423 |
+
'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
|
| 424 |
+
'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
|
| 425 |
+
'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
|
| 426 |
+
'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
|
| 427 |
+
'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
|
| 428 |
+
'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
|
| 429 |
+
'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()),
|
| 430 |
+
'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
|
| 431 |
+
'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')),
|
| 432 |
+
'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
|
| 433 |
+
'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
|
| 434 |
+
'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
|
| 435 |
+
'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
|
| 436 |
+
'SNBTLexer': ('pip._vendor.pygments.lexers.minecraft', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
|
| 437 |
+
'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
|
| 438 |
+
'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
|
| 439 |
+
'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),
|
| 440 |
+
'ScalaLexer': ('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
|
| 441 |
+
'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
|
| 442 |
+
'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
|
| 443 |
+
'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
|
| 444 |
+
'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
|
| 445 |
+
'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
|
| 446 |
+
'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)),
|
| 447 |
+
'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
|
| 448 |
+
'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
|
| 449 |
+
'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
|
| 450 |
+
'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
|
| 451 |
+
'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
|
| 452 |
+
'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
|
| 453 |
+
'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
|
| 454 |
+
'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
|
| 455 |
+
'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
|
| 456 |
+
'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
|
| 457 |
+
'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
|
| 458 |
+
'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
|
| 459 |
+
'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()),
|
| 460 |
+
'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
|
| 461 |
+
'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
|
| 462 |
+
'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
|
| 463 |
+
'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()),
|
| 464 |
+
'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
|
| 465 |
+
'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()),
|
| 466 |
+
'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
|
| 467 |
+
'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)),
|
| 468 |
+
'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()),
|
| 469 |
+
'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
|
| 470 |
+
'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
|
| 471 |
+
'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
|
| 472 |
+
'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()),
|
| 473 |
+
'SspLexer': ('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
|
| 474 |
+
'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
|
| 475 |
+
'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
|
| 476 |
+
'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
|
| 477 |
+
'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
|
| 478 |
+
'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
|
| 479 |
+
'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
|
| 480 |
+
'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
|
| 481 |
+
'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
|
| 482 |
+
'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
|
| 483 |
+
'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
|
| 484 |
+
'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)),
|
| 485 |
+
'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
|
| 486 |
+
'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
|
| 487 |
+
'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
|
| 488 |
+
'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
|
| 489 |
+
'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
|
| 490 |
+
'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()),
|
| 491 |
+
'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)),
|
| 492 |
+
'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
|
| 493 |
+
'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
|
| 494 |
+
'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf', 'hcl'), ('*.tf', '*.hcl'), ('application/x-tf', 'application/x-terraform')),
|
| 495 |
+
'TexLexer': ('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
|
| 496 |
+
'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
|
| 497 |
+
'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
|
| 498 |
+
'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
|
| 499 |
+
'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
|
| 500 |
+
'TlbLexer': ('pip._vendor.pygments.lexers.tlb', 'Tl-b', ('tlb',), ('*.tlb',), ()),
|
| 501 |
+
'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
|
| 502 |
+
'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
|
| 503 |
+
'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
|
| 504 |
+
'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
|
| 505 |
+
'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
|
| 506 |
+
'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
|
| 507 |
+
'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')),
|
| 508 |
+
'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
|
| 509 |
+
'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
|
| 510 |
+
'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
|
| 511 |
+
'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()),
|
| 512 |
+
'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
|
| 513 |
+
'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
|
| 514 |
+
'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()),
|
| 515 |
+
'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
|
| 516 |
+
'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
|
| 517 |
+
'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
|
| 518 |
+
'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
|
| 519 |
+
'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
|
| 520 |
+
'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
|
| 521 |
+
'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
|
| 522 |
+
'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
|
| 523 |
+
'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
|
| 524 |
+
'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
|
| 525 |
+
'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
|
| 526 |
+
'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
|
| 527 |
+
'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
|
| 528 |
+
'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
|
| 529 |
+
'VhdlLexer': ('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
|
| 530 |
+
'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
|
| 531 |
+
'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
|
| 532 |
+
'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
|
| 533 |
+
'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
|
| 534 |
+
'WgslLexer': ('pip._vendor.pygments.lexers.wgsl', 'WebGPU Shading Language', ('wgsl',), ('*.wgsl',), ('text/wgsl',)),
|
| 535 |
+
'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
|
| 536 |
+
'WikitextLexer': ('pip._vendor.pygments.lexers.markup', 'Wikitext', ('wikitext', 'mediawiki'), (), ('text/x-wiki',)),
|
| 537 |
+
'WoWTocLexer': ('pip._vendor.pygments.lexers.wowtoc', 'World of Warcraft TOC', ('wowtoc',), ('*.toc',), ()),
|
| 538 |
+
'WrenLexer': ('pip._vendor.pygments.lexers.wren', 'Wren', ('wren',), ('*.wren',), ()),
|
| 539 |
+
'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
|
| 540 |
+
'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
|
| 541 |
+
'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
|
| 542 |
+
'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')),
|
| 543 |
+
'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
|
| 544 |
+
'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
|
| 545 |
+
'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
|
| 546 |
+
'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
|
| 547 |
+
'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
|
| 548 |
+
'XppLexer': ('pip._vendor.pygments.lexers.dotnet', 'X++', ('xpp', 'x++'), ('*.xpp',), ()),
|
| 549 |
+
'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
|
| 550 |
+
'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
|
| 551 |
+
'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
|
| 552 |
+
'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')),
|
| 553 |
+
'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
|
| 554 |
+
'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
|
| 555 |
+
'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
|
| 556 |
+
'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
|
| 557 |
+
'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
|
| 558 |
+
'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()),
|
| 559 |
+
}
|
.venv/Lib/site-packages/pip/_vendor/pygments/lexers/python.py
ADDED
|
@@ -0,0 +1,1198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.lexers.python
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Lexers for Python and related languages.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import re
|
| 12 |
+
import keyword
|
| 13 |
+
|
| 14 |
+
from pip._vendor.pygments.lexer import DelegatingLexer, Lexer, RegexLexer, include, \
|
| 15 |
+
bygroups, using, default, words, combined, do_insertions, this, line_re
|
| 16 |
+
from pip._vendor.pygments.util import get_bool_opt, shebang_matches
|
| 17 |
+
from pip._vendor.pygments.token import Text, Comment, Operator, Keyword, Name, String, \
|
| 18 |
+
Number, Punctuation, Generic, Other, Error, Whitespace
|
| 19 |
+
from pip._vendor.pygments import unistring as uni
|
| 20 |
+
|
| 21 |
+
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
|
| 22 |
+
'Python2Lexer', 'Python2TracebackLexer',
|
| 23 |
+
'CythonLexer', 'DgLexer', 'NumPyLexer']
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class PythonLexer(RegexLexer):
|
| 27 |
+
"""
|
| 28 |
+
For Python source code (version 3.x).
|
| 29 |
+
|
| 30 |
+
.. versionadded:: 0.10
|
| 31 |
+
|
| 32 |
+
.. versionchanged:: 2.5
|
| 33 |
+
This is now the default ``PythonLexer``. It is still available as the
|
| 34 |
+
alias ``Python3Lexer``.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
name = 'Python'
|
| 38 |
+
url = 'http://www.python.org'
|
| 39 |
+
aliases = ['python', 'py', 'sage', 'python3', 'py3']
|
| 40 |
+
filenames = [
|
| 41 |
+
'*.py',
|
| 42 |
+
'*.pyw',
|
| 43 |
+
# Type stubs
|
| 44 |
+
'*.pyi',
|
| 45 |
+
# Jython
|
| 46 |
+
'*.jy',
|
| 47 |
+
# Sage
|
| 48 |
+
'*.sage',
|
| 49 |
+
# SCons
|
| 50 |
+
'*.sc',
|
| 51 |
+
'SConstruct',
|
| 52 |
+
'SConscript',
|
| 53 |
+
# Skylark/Starlark (used by Bazel, Buck, and Pants)
|
| 54 |
+
'*.bzl',
|
| 55 |
+
'BUCK',
|
| 56 |
+
'BUILD',
|
| 57 |
+
'BUILD.bazel',
|
| 58 |
+
'WORKSPACE',
|
| 59 |
+
# Twisted Application infrastructure
|
| 60 |
+
'*.tac',
|
| 61 |
+
]
|
| 62 |
+
mimetypes = ['text/x-python', 'application/x-python',
|
| 63 |
+
'text/x-python3', 'application/x-python3']
|
| 64 |
+
|
| 65 |
+
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
|
| 66 |
+
|
| 67 |
+
def innerstring_rules(ttype):
|
| 68 |
+
return [
|
| 69 |
+
# the old style '%s' % (...) string formatting (still valid in Py3)
|
| 70 |
+
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
|
| 71 |
+
'[hlL]?[E-GXc-giorsaux%]', String.Interpol),
|
| 72 |
+
# the new style '{}'.format(...) string formatting
|
| 73 |
+
(r'\{'
|
| 74 |
+
r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name
|
| 75 |
+
r'(\![sra])?' # conversion
|
| 76 |
+
r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
|
| 77 |
+
r'\}', String.Interpol),
|
| 78 |
+
|
| 79 |
+
# backslashes, quotes and formatting signs must be parsed one at a time
|
| 80 |
+
(r'[^\\\'"%{\n]+', ttype),
|
| 81 |
+
(r'[\'"\\]', ttype),
|
| 82 |
+
# unhandled string formatting sign
|
| 83 |
+
(r'%|(\{{1,2})', ttype)
|
| 84 |
+
# newlines are an error (use "nl" state)
|
| 85 |
+
]
|
| 86 |
+
|
| 87 |
+
def fstring_rules(ttype):
|
| 88 |
+
return [
|
| 89 |
+
# Assuming that a '}' is the closing brace after format specifier.
|
| 90 |
+
# Sadly, this means that we won't detect syntax error. But it's
|
| 91 |
+
# more important to parse correct syntax correctly, than to
|
| 92 |
+
# highlight invalid syntax.
|
| 93 |
+
(r'\}', String.Interpol),
|
| 94 |
+
(r'\{', String.Interpol, 'expr-inside-fstring'),
|
| 95 |
+
# backslashes, quotes and formatting signs must be parsed one at a time
|
| 96 |
+
(r'[^\\\'"{}\n]+', ttype),
|
| 97 |
+
(r'[\'"\\]', ttype),
|
| 98 |
+
# newlines are an error (use "nl" state)
|
| 99 |
+
]
|
| 100 |
+
|
| 101 |
+
tokens = {
|
| 102 |
+
'root': [
|
| 103 |
+
(r'\n', Whitespace),
|
| 104 |
+
(r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
|
| 105 |
+
bygroups(Whitespace, String.Affix, String.Doc)),
|
| 106 |
+
(r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
|
| 107 |
+
bygroups(Whitespace, String.Affix, String.Doc)),
|
| 108 |
+
(r'\A#!.+$', Comment.Hashbang),
|
| 109 |
+
(r'#.*$', Comment.Single),
|
| 110 |
+
(r'\\\n', Text),
|
| 111 |
+
(r'\\', Text),
|
| 112 |
+
include('keywords'),
|
| 113 |
+
include('soft-keywords'),
|
| 114 |
+
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
|
| 115 |
+
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
|
| 116 |
+
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
|
| 117 |
+
'fromimport'),
|
| 118 |
+
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
|
| 119 |
+
'import'),
|
| 120 |
+
include('expr'),
|
| 121 |
+
],
|
| 122 |
+
'expr': [
|
| 123 |
+
# raw f-strings
|
| 124 |
+
('(?i)(rf|fr)(""")',
|
| 125 |
+
bygroups(String.Affix, String.Double),
|
| 126 |
+
combined('rfstringescape', 'tdqf')),
|
| 127 |
+
("(?i)(rf|fr)(''')",
|
| 128 |
+
bygroups(String.Affix, String.Single),
|
| 129 |
+
combined('rfstringescape', 'tsqf')),
|
| 130 |
+
('(?i)(rf|fr)(")',
|
| 131 |
+
bygroups(String.Affix, String.Double),
|
| 132 |
+
combined('rfstringescape', 'dqf')),
|
| 133 |
+
("(?i)(rf|fr)(')",
|
| 134 |
+
bygroups(String.Affix, String.Single),
|
| 135 |
+
combined('rfstringescape', 'sqf')),
|
| 136 |
+
# non-raw f-strings
|
| 137 |
+
('([fF])(""")', bygroups(String.Affix, String.Double),
|
| 138 |
+
combined('fstringescape', 'tdqf')),
|
| 139 |
+
("([fF])(''')", bygroups(String.Affix, String.Single),
|
| 140 |
+
combined('fstringescape', 'tsqf')),
|
| 141 |
+
('([fF])(")', bygroups(String.Affix, String.Double),
|
| 142 |
+
combined('fstringescape', 'dqf')),
|
| 143 |
+
("([fF])(')", bygroups(String.Affix, String.Single),
|
| 144 |
+
combined('fstringescape', 'sqf')),
|
| 145 |
+
# raw bytes and strings
|
| 146 |
+
('(?i)(rb|br|r)(""")',
|
| 147 |
+
bygroups(String.Affix, String.Double), 'tdqs'),
|
| 148 |
+
("(?i)(rb|br|r)(''')",
|
| 149 |
+
bygroups(String.Affix, String.Single), 'tsqs'),
|
| 150 |
+
('(?i)(rb|br|r)(")',
|
| 151 |
+
bygroups(String.Affix, String.Double), 'dqs'),
|
| 152 |
+
("(?i)(rb|br|r)(')",
|
| 153 |
+
bygroups(String.Affix, String.Single), 'sqs'),
|
| 154 |
+
# non-raw strings
|
| 155 |
+
('([uU]?)(""")', bygroups(String.Affix, String.Double),
|
| 156 |
+
combined('stringescape', 'tdqs')),
|
| 157 |
+
("([uU]?)(''')", bygroups(String.Affix, String.Single),
|
| 158 |
+
combined('stringescape', 'tsqs')),
|
| 159 |
+
('([uU]?)(")', bygroups(String.Affix, String.Double),
|
| 160 |
+
combined('stringescape', 'dqs')),
|
| 161 |
+
("([uU]?)(')", bygroups(String.Affix, String.Single),
|
| 162 |
+
combined('stringescape', 'sqs')),
|
| 163 |
+
# non-raw bytes
|
| 164 |
+
('([bB])(""")', bygroups(String.Affix, String.Double),
|
| 165 |
+
combined('bytesescape', 'tdqs')),
|
| 166 |
+
("([bB])(''')", bygroups(String.Affix, String.Single),
|
| 167 |
+
combined('bytesescape', 'tsqs')),
|
| 168 |
+
('([bB])(")', bygroups(String.Affix, String.Double),
|
| 169 |
+
combined('bytesescape', 'dqs')),
|
| 170 |
+
("([bB])(')", bygroups(String.Affix, String.Single),
|
| 171 |
+
combined('bytesescape', 'sqs')),
|
| 172 |
+
|
| 173 |
+
(r'[^\S\n]+', Text),
|
| 174 |
+
include('numbers'),
|
| 175 |
+
(r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),
|
| 176 |
+
(r'[]{}:(),;[]', Punctuation),
|
| 177 |
+
(r'(in|is|and|or|not)\b', Operator.Word),
|
| 178 |
+
include('expr-keywords'),
|
| 179 |
+
include('builtins'),
|
| 180 |
+
include('magicfuncs'),
|
| 181 |
+
include('magicvars'),
|
| 182 |
+
include('name'),
|
| 183 |
+
],
|
| 184 |
+
'expr-inside-fstring': [
|
| 185 |
+
(r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
|
| 186 |
+
# without format specifier
|
| 187 |
+
(r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
|
| 188 |
+
r'(\![sraf])?' # conversion
|
| 189 |
+
r'\}', String.Interpol, '#pop'),
|
| 190 |
+
# with format specifier
|
| 191 |
+
# we'll catch the remaining '}' in the outer scope
|
| 192 |
+
(r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
|
| 193 |
+
r'(\![sraf])?' # conversion
|
| 194 |
+
r':', String.Interpol, '#pop'),
|
| 195 |
+
(r'\s+', Whitespace), # allow new lines
|
| 196 |
+
include('expr'),
|
| 197 |
+
],
|
| 198 |
+
'expr-inside-fstring-inner': [
|
| 199 |
+
(r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
|
| 200 |
+
(r'[])}]', Punctuation, '#pop'),
|
| 201 |
+
(r'\s+', Whitespace), # allow new lines
|
| 202 |
+
include('expr'),
|
| 203 |
+
],
|
| 204 |
+
'expr-keywords': [
|
| 205 |
+
# Based on https://docs.python.org/3/reference/expressions.html
|
| 206 |
+
(words((
|
| 207 |
+
'async for', 'await', 'else', 'for', 'if', 'lambda',
|
| 208 |
+
'yield', 'yield from'), suffix=r'\b'),
|
| 209 |
+
Keyword),
|
| 210 |
+
(words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
|
| 211 |
+
],
|
| 212 |
+
'keywords': [
|
| 213 |
+
(words((
|
| 214 |
+
'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
|
| 215 |
+
'else', 'except', 'finally', 'for', 'global', 'if', 'lambda',
|
| 216 |
+
'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield',
|
| 217 |
+
'yield from', 'as', 'with'), suffix=r'\b'),
|
| 218 |
+
Keyword),
|
| 219 |
+
(words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
|
| 220 |
+
],
|
| 221 |
+
'soft-keywords': [
|
| 222 |
+
# `match`, `case` and `_` soft keywords
|
| 223 |
+
(r'(^[ \t]*)' # at beginning of line + possible indentation
|
| 224 |
+
r'(match|case)\b' # a possible keyword
|
| 225 |
+
r'(?![ \t]*(?:' # not followed by...
|
| 226 |
+
r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't
|
| 227 |
+
r'|'.join(keyword.kwlist) + r')\b))', # pattern matching
|
| 228 |
+
bygroups(Text, Keyword), 'soft-keywords-inner'),
|
| 229 |
+
],
|
| 230 |
+
'soft-keywords-inner': [
|
| 231 |
+
# optional `_` keyword
|
| 232 |
+
(r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)),
|
| 233 |
+
default('#pop')
|
| 234 |
+
],
|
| 235 |
+
'builtins': [
|
| 236 |
+
(words((
|
| 237 |
+
'__import__', 'abs', 'aiter', 'all', 'any', 'bin', 'bool', 'bytearray',
|
| 238 |
+
'breakpoint', 'bytes', 'callable', 'chr', 'classmethod', 'compile',
|
| 239 |
+
'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval',
|
| 240 |
+
'filter', 'float', 'format', 'frozenset', 'getattr', 'globals',
|
| 241 |
+
'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'isinstance',
|
| 242 |
+
'issubclass', 'iter', 'len', 'list', 'locals', 'map', 'max',
|
| 243 |
+
'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow',
|
| 244 |
+
'print', 'property', 'range', 'repr', 'reversed', 'round', 'set',
|
| 245 |
+
'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super',
|
| 246 |
+
'tuple', 'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
|
| 247 |
+
Name.Builtin),
|
| 248 |
+
(r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo),
|
| 249 |
+
(words((
|
| 250 |
+
'ArithmeticError', 'AssertionError', 'AttributeError',
|
| 251 |
+
'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
|
| 252 |
+
'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError',
|
| 253 |
+
'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError',
|
| 254 |
+
'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
|
| 255 |
+
'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
|
| 256 |
+
'NotImplementedError', 'OSError', 'OverflowError',
|
| 257 |
+
'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning',
|
| 258 |
+
'RuntimeError', 'RuntimeWarning', 'StopIteration',
|
| 259 |
+
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
|
| 260 |
+
'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
|
| 261 |
+
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
|
| 262 |
+
'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError',
|
| 263 |
+
'Warning', 'WindowsError', 'ZeroDivisionError',
|
| 264 |
+
# new builtin exceptions from PEP 3151
|
| 265 |
+
'BlockingIOError', 'ChildProcessError', 'ConnectionError',
|
| 266 |
+
'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError',
|
| 267 |
+
'ConnectionResetError', 'FileExistsError', 'FileNotFoundError',
|
| 268 |
+
'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
|
| 269 |
+
'PermissionError', 'ProcessLookupError', 'TimeoutError',
|
| 270 |
+
# others new in Python 3
|
| 271 |
+
'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError',
|
| 272 |
+
'EncodingWarning'),
|
| 273 |
+
prefix=r'(?<!\.)', suffix=r'\b'),
|
| 274 |
+
Name.Exception),
|
| 275 |
+
],
|
| 276 |
+
'magicfuncs': [
|
| 277 |
+
(words((
|
| 278 |
+
'__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__',
|
| 279 |
+
'__and__', '__anext__', '__await__', '__bool__', '__bytes__',
|
| 280 |
+
'__call__', '__complex__', '__contains__', '__del__', '__delattr__',
|
| 281 |
+
'__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__',
|
| 282 |
+
'__eq__', '__exit__', '__float__', '__floordiv__', '__format__',
|
| 283 |
+
'__ge__', '__get__', '__getattr__', '__getattribute__',
|
| 284 |
+
'__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__',
|
| 285 |
+
'__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__',
|
| 286 |
+
'__imul__', '__index__', '__init__', '__instancecheck__',
|
| 287 |
+
'__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
|
| 288 |
+
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__',
|
| 289 |
+
'__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__',
|
| 290 |
+
'__missing__', '__mod__', '__mul__', '__ne__', '__neg__',
|
| 291 |
+
'__new__', '__next__', '__or__', '__pos__', '__pow__',
|
| 292 |
+
'__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__',
|
| 293 |
+
'__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__',
|
| 294 |
+
'__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__',
|
| 295 |
+
'__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
|
| 296 |
+
'__rxor__', '__set__', '__setattr__', '__setitem__', '__str__',
|
| 297 |
+
'__sub__', '__subclasscheck__', '__truediv__',
|
| 298 |
+
'__xor__'), suffix=r'\b'),
|
| 299 |
+
Name.Function.Magic),
|
| 300 |
+
],
|
| 301 |
+
'magicvars': [
|
| 302 |
+
(words((
|
| 303 |
+
'__annotations__', '__bases__', '__class__', '__closure__',
|
| 304 |
+
'__code__', '__defaults__', '__dict__', '__doc__', '__file__',
|
| 305 |
+
'__func__', '__globals__', '__kwdefaults__', '__module__',
|
| 306 |
+
'__mro__', '__name__', '__objclass__', '__qualname__',
|
| 307 |
+
'__self__', '__slots__', '__weakref__'), suffix=r'\b'),
|
| 308 |
+
Name.Variable.Magic),
|
| 309 |
+
],
|
| 310 |
+
'numbers': [
|
| 311 |
+
(r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)'
|
| 312 |
+
r'([eE][+-]?\d(?:_?\d)*)?', Number.Float),
|
| 313 |
+
(r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float),
|
| 314 |
+
(r'0[oO](?:_?[0-7])+', Number.Oct),
|
| 315 |
+
(r'0[bB](?:_?[01])+', Number.Bin),
|
| 316 |
+
(r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex),
|
| 317 |
+
(r'\d(?:_?\d)*', Number.Integer),
|
| 318 |
+
],
|
| 319 |
+
'name': [
|
| 320 |
+
(r'@' + uni_name, Name.Decorator),
|
| 321 |
+
(r'@', Operator), # new matrix multiplication operator
|
| 322 |
+
(uni_name, Name),
|
| 323 |
+
],
|
| 324 |
+
'funcname': [
|
| 325 |
+
include('magicfuncs'),
|
| 326 |
+
(uni_name, Name.Function, '#pop'),
|
| 327 |
+
default('#pop'),
|
| 328 |
+
],
|
| 329 |
+
'classname': [
|
| 330 |
+
(uni_name, Name.Class, '#pop'),
|
| 331 |
+
],
|
| 332 |
+
'import': [
|
| 333 |
+
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
|
| 334 |
+
(r'\.', Name.Namespace),
|
| 335 |
+
(uni_name, Name.Namespace),
|
| 336 |
+
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
|
| 337 |
+
default('#pop') # all else: go back
|
| 338 |
+
],
|
| 339 |
+
'fromimport': [
|
| 340 |
+
(r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
|
| 341 |
+
(r'\.', Name.Namespace),
|
| 342 |
+
# if None occurs here, it's "raise x from None", since None can
|
| 343 |
+
# never be a module name
|
| 344 |
+
(r'None\b', Keyword.Constant, '#pop'),
|
| 345 |
+
(uni_name, Name.Namespace),
|
| 346 |
+
default('#pop'),
|
| 347 |
+
],
|
| 348 |
+
'rfstringescape': [
|
| 349 |
+
(r'\{\{', String.Escape),
|
| 350 |
+
(r'\}\}', String.Escape),
|
| 351 |
+
],
|
| 352 |
+
'fstringescape': [
|
| 353 |
+
include('rfstringescape'),
|
| 354 |
+
include('stringescape'),
|
| 355 |
+
],
|
| 356 |
+
'bytesescape': [
|
| 357 |
+
(r'\\([\\abfnrtv"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
|
| 358 |
+
],
|
| 359 |
+
'stringescape': [
|
| 360 |
+
(r'\\(N\{.*?\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8})', String.Escape),
|
| 361 |
+
include('bytesescape')
|
| 362 |
+
],
|
| 363 |
+
'fstrings-single': fstring_rules(String.Single),
|
| 364 |
+
'fstrings-double': fstring_rules(String.Double),
|
| 365 |
+
'strings-single': innerstring_rules(String.Single),
|
| 366 |
+
'strings-double': innerstring_rules(String.Double),
|
| 367 |
+
'dqf': [
|
| 368 |
+
(r'"', String.Double, '#pop'),
|
| 369 |
+
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
|
| 370 |
+
include('fstrings-double')
|
| 371 |
+
],
|
| 372 |
+
'sqf': [
|
| 373 |
+
(r"'", String.Single, '#pop'),
|
| 374 |
+
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
|
| 375 |
+
include('fstrings-single')
|
| 376 |
+
],
|
| 377 |
+
'dqs': [
|
| 378 |
+
(r'"', String.Double, '#pop'),
|
| 379 |
+
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
|
| 380 |
+
include('strings-double')
|
| 381 |
+
],
|
| 382 |
+
'sqs': [
|
| 383 |
+
(r"'", String.Single, '#pop'),
|
| 384 |
+
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
|
| 385 |
+
include('strings-single')
|
| 386 |
+
],
|
| 387 |
+
'tdqf': [
|
| 388 |
+
(r'"""', String.Double, '#pop'),
|
| 389 |
+
include('fstrings-double'),
|
| 390 |
+
(r'\n', String.Double)
|
| 391 |
+
],
|
| 392 |
+
'tsqf': [
|
| 393 |
+
(r"'''", String.Single, '#pop'),
|
| 394 |
+
include('fstrings-single'),
|
| 395 |
+
(r'\n', String.Single)
|
| 396 |
+
],
|
| 397 |
+
'tdqs': [
|
| 398 |
+
(r'"""', String.Double, '#pop'),
|
| 399 |
+
include('strings-double'),
|
| 400 |
+
(r'\n', String.Double)
|
| 401 |
+
],
|
| 402 |
+
'tsqs': [
|
| 403 |
+
(r"'''", String.Single, '#pop'),
|
| 404 |
+
include('strings-single'),
|
| 405 |
+
(r'\n', String.Single)
|
| 406 |
+
],
|
| 407 |
+
}
|
| 408 |
+
|
| 409 |
+
def analyse_text(text):
|
| 410 |
+
return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \
|
| 411 |
+
'import ' in text[:1000]
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
# Backwards-compatible alias: since 2.5, ``PythonLexer`` is the Python 3 lexer.
Python3Lexer = PythonLexer
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
class Python2Lexer(RegexLexer):
    """
    For Python 2.x source code.

    .. versionchanged:: 2.5
       This class has been renamed from ``PythonLexer``. ``PythonLexer`` now
       refers to the Python 3 variant. File name patterns like ``*.py`` have
       been moved to Python 3 as well.
    """

    name = 'Python 2.x'
    url = 'http://www.python.org'
    aliases = ['python2', 'py2']
    filenames = []  # now taken over by PythonLexer (3.x)
    mimetypes = ['text/x-python2', 'application/x-python2']

    def innerstring_rules(ttype):
        """Return shared rules for string interiors of token type *ttype*."""
        return [
            # the old style '%s' % (...) string formatting
            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            # backslashes, quotes and formatting signs must be parsed one at a time
            (r'[^\\\'"%\n]+', ttype),
            (r'[\'"\\]', ttype),
            # unhandled string formatting sign
            (r'%', ttype),
            # newlines are an error (use "nl" state)
        ]

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r'[^\S\n]+', Text),
            (r'\A#!.+$', Comment.Hashbang),
            (r'#.*$', Comment.Single),
            (r'[]{}:(),;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
            include('keywords'),
            (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'fromimport'),
            (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'import'),
            include('builtins'),
            include('magicfuncs'),
            include('magicvars'),
            include('backtick'),
            ('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
             bygroups(String.Affix, String.Double), 'tdqs'),
            ("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
             bygroups(String.Affix, String.Single), 'tsqs'),
            ('([rR]|[uUbB][rR]|[rR][uUbB])(")',
             bygroups(String.Affix, String.Double), 'dqs'),
            ("([rR]|[uUbB][rR]|[rR][uUbB])(')",
             bygroups(String.Affix, String.Single), 'sqs'),
            ('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
             combined('stringescape', 'tdqs')),
            ("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
             combined('stringescape', 'tsqs')),
            ('([uUbB]?)(")', bygroups(String.Affix, String.Double),
             combined('stringescape', 'dqs')),
            ("([uUbB]?)(')", bygroups(String.Affix, String.Single),
             combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
        ],
        'keywords': [
            (words((
                'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
                'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
                'print', 'raise', 'return', 'try', 'while', 'yield',
                'yield from', 'as', 'with'), suffix=r'\b'),
             Keyword),
        ],
        'builtins': [
            (words((
                '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
                'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
                'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
                'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
                'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
                'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
                'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
                'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
                'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
                'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
                'unichr', 'unicode', 'vars', 'xrange', 'zip'),
                prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Builtin),
            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls'
             r')\b', Name.Builtin.Pseudo),
            (words((
                'ArithmeticError', 'AssertionError', 'AttributeError',
                'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
                'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
                'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
                'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
                'MemoryError', 'NameError',
                'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning',
                'PendingDeprecationWarning', 'ReferenceError',
                'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration',
                'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
                'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
                'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
                'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
                'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Exception),
        ],
        'magicfuncs': [
            (words((
                '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
                '__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
                '__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__',
                '__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__',
                '__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__',
                '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__',
                '__ilshift__', '__imod__', '__imul__', '__index__', '__init__',
                '__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__',
                '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__',
                '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__',
                '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__',
                '__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__',
                '__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__',
                '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
                '__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
                '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__',
                '__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
                '__unicode__', '__xor__'), suffix=r'\b'),
             Name.Function.Magic),
        ],
        'magicvars': [
            (words((
                '__bases__', '__class__', '__closure__', '__code__', '__defaults__',
                '__dict__', '__doc__', '__file__', '__func__', '__globals__',
                '__metaclass__', '__module__', '__mro__', '__name__', '__self__',
                '__slots__', '__weakref__'),
                suffix=r'\b'),
             Name.Variable.Magic),
        ],
        'numbers': [
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            (r'0[0-7]+j?', Number.Oct),
            (r'0[bB][01]+', Number.Bin),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+j?', Number.Integer)
        ],
        'backtick': [
            ('`.*?`', String.Backtick),
        ],
        'name': [
            (r'@[\w.]+', Name.Decorator),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'funcname': [
            include('magicfuncs'),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            default('#pop'),
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'(?:[ \t]|\\\n)+', Text),
            (r'as\b', Keyword.Namespace),
            (r',', Operator),
            (r'[a-zA-Z_][\w.]*', Name.Namespace),
            default('#pop')  # all else: go back
        ],
        'fromimport': [
            (r'(?:[ \t]|\\\n)+', Text),
            (r'import\b', Keyword.Namespace, '#pop'),
            # if None occurs here, it's "raise x from None", since None can
            # never be a module name
            (r'None\b', Name.Builtin.Pseudo, '#pop'),
            # sadly, in "raise x from y" y will be highlighted as namespace too
            (r'[a-zA-Z_.][\w.]*', Name.Namespace),
            # anything else here also means "raise x from y" and is therefore
            # not an error
            default('#pop'),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings-single': innerstring_rules(String.Single),
        'strings-double': innerstring_rules(String.Double),
        'dqs': [
            (r'"', String.Double, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
            include('strings-double')
        ],
        'sqs': [
            (r"'", String.Single, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
            include('strings-single')
        ],
        'tdqs': [
            (r'"""', String.Double, '#pop'),
            include('strings-double'),
            (r'\n', String.Double)
        ],
        'tsqs': [
            (r"'''", String.Single, '#pop'),
            include('strings-single'),
            (r'\n', String.Single)
        ],
    }

    def analyse_text(text):
        """Guess Python 2 from a python2-specific shebang."""
        return shebang_matches(text, r'pythonw?2(\.\d)?')
|
| 637 |
+
|
| 638 |
+
class _PythonConsoleLexerBase(RegexLexer):
    name = 'Python console session'
    aliases = ['pycon']
    mimetypes = ['text/x-python-doctest']

    # NOTE(review): this string sits after the attribute assignments, so it is
    # a plain expression statement, not the class docstring — kept as-is.
    """Auxiliary lexer for `PythonConsoleLexer`.

    Code tokens are output as ``Token.Other.Code``, traceback tokens as
    ``Token.Other.Traceback``.
    """
    tokens = {
        'root': [
            (r'(>>> )(.*\n)', bygroups(Generic.Prompt, Other.Code), 'continuations'),
            # This happens, e.g., when tracebacks are embedded in documentation;
            # trailing whitespaces are often stripped in such contexts.
            (r'(>>>)(\n)', bygroups(Generic.Prompt, Whitespace)),
            (r'(\^C)?Traceback \(most recent call last\):\n', Other.Traceback, 'traceback'),
            # SyntaxError starts with this
            (r'  File "[^"]+", line \d+', Other.Traceback, 'traceback'),
            (r'.*\n', Generic.Output),
        ],
        'continuations': [
            (r'(\.\.\. )(.*\n)', bygroups(Generic.Prompt, Other.Code)),
            # See above.
            (r'(\.\.\.)(\n)', bygroups(Generic.Prompt, Whitespace)),
            default('#pop'),
        ],
        'traceback': [
            # As soon as we see a traceback, consume everything until the next
            # >>> prompt.
            (r'(?=>>>( |$))', Text, '#pop'),
            (r'(KeyboardInterrupt)(\n)', bygroups(Name.Class, Whitespace)),
            (r'.*\n', Other.Traceback),
        ],
    }
|
| 673 |
+
|
| 674 |
+
class PythonConsoleLexer(DelegatingLexer):
    """
    For Python console output or doctests, such as:

    .. sourcecode:: pycon

        >>> a = 'foo'
        >>> print(a)
        foo
        >>> 1 / 0
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        ZeroDivisionError: integer division or modulo by zero

    Additional options:

    `python3`
        Use Python 3 lexer for code. Default is ``True``.

        .. versionadded:: 1.0
        .. versionchanged:: 2.5
           Now defaults to ``True``.
    """

    name = 'Python console session'
    aliases = ['pycon']
    mimetypes = ['text/x-python-doctest']

    def __init__(self, **options):
        # Select code/traceback lexers for the requested Python major version.
        python3 = get_bool_opt(options, 'python3', True)
        if python3:
            pylexer = PythonLexer
            tblexer = PythonTracebackLexer
        else:
            pylexer = Python2Lexer
            tblexer = Python2TracebackLexer
        # We have two auxiliary lexers. Use DelegatingLexer twice with
        # different tokens. TODO: DelegatingLexer should support this
        # directly, by accepting a tuplet of auxiliary lexers and a tuple of
        # distinguishing tokens. Then we wouldn't need this intermediary
        # class.
        class _ReplaceInnerCode(DelegatingLexer):
            def __init__(self, **options):
                super().__init__(pylexer, _PythonConsoleLexerBase, Other.Code, **options)
        super().__init__(tblexer, _ReplaceInnerCode, Other.Traceback, **options)
|
| 719 |
+
|
| 720 |
+
class PythonTracebackLexer(RegexLexer):
    """
    For Python 3.x tracebacks, with support for chained exceptions.

    .. versionadded:: 1.0

    .. versionchanged:: 2.5
       This is now the default ``PythonTracebackLexer``.  It is still available
       as the alias ``Python3TracebackLexer``.
    """

    name = 'Python Traceback'
    aliases = ['pytb', 'py3tb']
    filenames = ['*.pytb', '*.py3tb']
    mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']

    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'^(\^C)?Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
            (r'^During handling of the above exception, another '
             r'exception occurred:\n\n', Generic.Traceback),
            (r'^The above exception was the direct cause of the '
             r'following exception:\n\n', Generic.Traceback),
            (r'^(?=  File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
            (r'^.*\n', Other),
        ],
        'intb': [
            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
            (r'^(  File )("[^"]+")(, line )(\d+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
            (r'^(    )(.+)(\n)',
             bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'),
            (r'^([ \t]*)(\.\.\.)(\n)',
             bygroups(Whitespace, Comment, Whitespace)),  # for doctests...
            (r'^([^:]+)(: )(.+)(\n)',
             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
            (r'^([a-zA-Z_][\w.]*)(:?\n)',
             bygroups(Generic.Error, Whitespace), '#pop'),
            default('#pop'),
        ],
        'markers': [
            # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>`
            # error locations in Python 3.11+, or single-caret markers
            # for syntax errors before that.
            (r'^( {4,})([~^]+)(\n)',
             bygroups(Whitespace, Punctuation.Marker, Whitespace),
             '#pop'),
            default('#pop'),
        ],
    }
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
# Backwards-compatible alias: since 2.5, ``PythonTracebackLexer`` is the
# Python 3 traceback lexer.
Python3TracebackLexer = PythonTracebackLexer
|
| 775 |
+
|
| 776 |
+
|
| 777 |
+
class Python2TracebackLexer(RegexLexer):
    """
    For Python tracebacks.

    .. versionadded:: 0.7

    .. versionchanged:: 2.5
       This class has been renamed from ``PythonTracebackLexer``.
       ``PythonTracebackLexer`` now refers to the Python 3 variant.
    """

    name = 'Python 2.x Traceback'
    aliases = ['py2tb']
    filenames = ['*.py2tb']
    mimetypes = ['text/x-python2-traceback']

    tokens = {
        # 'root' waits for a traceback header, then hands off to 'intb'.
        'root': [
            # Cover both (most recent call last) and (innermost last)
            # The optional ^C allows us to catch keyboard interrupt signals.
            (r'^(\^C)?(Traceback.*\n)',
             bygroups(Text, Generic.Traceback), 'intb'),
            # SyntaxError starts with this.
            (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
            # Anything outside a traceback is passed through untouched.
            (r'^.*\n', Other),
        ],
        # 'intb' lexes the frames inside a traceback until the final
        # exception line pops back to 'root'.
        'intb': [
            # "  File "...", line N, in name" frame header.
            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
            # Frame header without a function name (e.g. SyntaxError).
            (r'^(  File )("[^"]+")(, line )(\d+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
            # Indented source line of the frame, highlighted as Python 2.
            # NOTE(review): indent width reconstructed as 4 spaces (the
            # dump collapsed runs of whitespace) — verify against upstream.
            (r'^(    )(.+)(\n)',
             bygroups(Text, using(Python2Lexer), Whitespace), 'marker'),
            (r'^([ \t]*)(\.\.\.)(\n)',
             bygroups(Text, Comment, Whitespace)),  # for doctests...
            # "ExceptionType: message" — end of the traceback.
            (r'^([^:]+)(: )(.+)(\n)',
             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
            # Bare exception name with no message.
            (r'^([a-zA-Z_]\w*)(:?\n)',
             bygroups(Generic.Error, Whitespace), '#pop')
        ],
        'marker': [
            # For syntax errors.
            (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'),
            default('#pop'),
        ],
    }
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
class CythonLexer(RegexLexer):
    """
    For Pyrex and Cython source code.

    .. versionadded:: 1.1
    """

    name = 'Cython'
    url = 'http://cython.org'
    aliases = ['cython', 'pyx', 'pyrex']
    filenames = ['*.pyx', '*.pxd', '*.pxi']
    mimetypes = ['text/x-cython', 'application/x-cython']

    tokens = {
        'root': [
            (r'\n', Whitespace),
            # Docstrings at the start of a (possibly indented) line.
            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)),
            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)),
            (r'[^\S\n]+', Text),
            (r'#.*$', Comment),
            (r'[]{}:(),;[]', Punctuation),
            # Backslash line continuation.
            (r'\\\n', Whitespace),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # Cython cast syntax, e.g. ``<int?>``.
            (r'(<)([a-zA-Z0-9.?]+)(>)',
             bygroups(Punctuation, Keyword.Type, Punctuation)),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
            # ``for ... from low <= i < high:`` integer loop bounds.
            (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
             bygroups(Keyword, Number.Integer, Operator, Name, Operator,
                      Name, Punctuation)),
            include('keywords'),
            (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
            # (should actually start a block with only cdefs)
            (r'(cdef)(:)', bygroups(Keyword, Punctuation)),
            (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
            (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
            include('builtins'),
            include('backtick'),
            # Raw string prefixes first, then plain/unicode strings with
            # escape handling combined in.
            ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
            ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
            ('[uU]?"""', String, combined('stringescape', 'tdqs')),
            ("[uU]?'''", String, combined('stringescape', 'tsqs')),
            ('[uU]?"', String, combined('stringescape', 'dqs')),
            ("[uU]?'", String, combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
        ],
        'keywords': [
            (words((
                'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
                'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
                'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
                'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
             Keyword),
            # Cython compile-time (preprocessor-like) directives.
            (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
        ],
        'builtins': [
            (words((
                '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint',
                'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
                'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
                'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit',
                'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
                'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
                'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
                'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t',
                'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
                'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod',
                'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned',
                'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Builtin),
            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
             r')\b', Name.Builtin.Pseudo),
            (words((
                'ArithmeticError', 'AssertionError', 'AttributeError',
                'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
                'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
                'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
                'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
                'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError',
                'OSError', 'OverflowError', 'OverflowWarning',
                'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError',
                'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError',
                'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
                'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
                'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
                'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
                'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Exception),
        ],
        'numbers': [
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'backtick': [
            ('`.*?`', String.Backtick),
        ],
        'name': [
            (r'@\w+', Name.Decorator),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'funcname': [
            (r'[a-zA-Z_]\w*', Name.Function, '#pop')
        ],
        # After ``cdef``/``cpdef``: modifiers, then a function or type name.
        'cdef': [
            (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
            (r'(struct|enum|union|class)\b', Keyword),
            # A name followed by '(' / ':' / '#' / '=' or EOL is the
            # declared function/variable name.
            (r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)',
             bygroups(Name.Function, Text), '#pop'),
            (r'([a-zA-Z_]\w*)(\s*)(,)',
             bygroups(Name.Function, Text, Punctuation)),
            (r'from\b', Keyword, '#pop'),
            (r'as\b', Keyword),
            (r':', Punctuation, '#pop'),
            (r'(?=["\'])', Text, '#pop'),
            # Everything else before the declared name is a type.
            (r'[a-zA-Z_]\w*', Keyword.Type),
            (r'.', Text),
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
            (r'[a-zA-Z_][\w.]*', Name.Namespace),
            (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
            default('#pop')  # all else: go back
        ],
        'fromimport': [
            (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
            (r'[a-zA-Z_.][\w.]*', Name.Namespace),
            # ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
            default('#pop'),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        # Shared string interior: %-interpolation plus literal runs.
        'strings': [
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here again for raw strings
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here again for raw strings
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
|
| 1003 |
+
|
| 1004 |
+
|
| 1005 |
+
class DgLexer(RegexLexer):
    """
    Lexer for dg,
    a functional and object-oriented programming language
    running on the CPython 3 VM.

    .. versionadded:: 1.6
    """
    name = 'dg'
    aliases = ['dg']
    filenames = ['*.dg']
    mimetypes = ['text/x-dg']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?$', Comment.Single),

            # Numeric literals (dg allows a sign and a trailing ``j``).
            (r'(?i)0b[01]+', Number.Bin),
            (r'(?i)0o[0-7]+', Number.Oct),
            (r'(?i)0x[0-9a-f]+', Number.Hex),
            (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float),
            (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float),
            (r'(?i)[+-]?[0-9]+j?', Number.Integer),

            # Strings: ``b``/``r`` prefixes, triple- and single-quoted.
            (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')),
            (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')),
            (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')),
            (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')),

            # Backtick-quoted names are used as infix operators in dg.
            (r"`\w+'*`", Operator),
            (r'\b(and|in|is|or|where)\b', Operator.Word),
            (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator),

            # Builtin types and constructors (dg allows prime characters
            # in identifiers, hence the quoted variants like ``dict'``).
            (words((
                'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'',
                'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object',
                'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str',
                'super', 'tuple', 'tuple\'', 'type'),
                prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
             Name.Builtin),
            (words((
                '__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile',
                'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate',
                'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst',
                'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init',
                'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len',
                'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow',
                'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd',
                'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'),
                prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
             Name.Builtin),
            (r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
             Name.Builtin.Pseudo),

            # CapWords names ending in Error/Exception/Warning.
            (r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])",
             Name.Exception),
            (r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|"
             r"SystemExit)(?!['\w])", Name.Exception),

            (r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|"
             r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved),

            # ALL_CAPS -> plain name; CapWords -> type; rest -> name.
            (r"[A-Z_]+'*(?!['\w])", Name),
            (r"[A-Z]\w+'*(?!['\w])", Keyword.Type),
            (r"\w+'*", Name),

            (r'[()]', Punctuation),
            (r'.', Error),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        # Shared string interior: %-interpolation plus literal runs.
        'string': [
            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String),
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop')
        ],
        'sqs': [
            (r"'", String, '#pop')
        ],
        'tdqs': [
            (r'"""', String, '#pop')
        ],
        'tsqs': [
            (r"'''", String, '#pop')
        ],
    }
|
| 1102 |
+
|
| 1103 |
+
|
| 1104 |
+
class NumPyLexer(PythonLexer):
    """
    A Python lexer recognizing Numerical Python builtins.

    .. versionadded:: 0.10
    """

    name = 'NumPy'
    url = 'https://numpy.org/'
    aliases = ['numpy']

    # override the mimetypes to not inherit them from python
    mimetypes = []
    filenames = []

    # NumPy API names that get highlighted as pseudo-keywords on top of
    # the normal Python lexing.
    EXTRA_KEYWORDS = {
        'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
        'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
        'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
        'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
        'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
        'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
        'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
        'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
        'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
        'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
        'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
        'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
        'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
        'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
        'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
        'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
        'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
        'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
        'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
        'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
        'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
        'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
        'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
        'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
        'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
        'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
        'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
        'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
        'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
        'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
        'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
        'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
        'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
        'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
        'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
        'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
        'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
        'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
        'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
        'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
        'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
        'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
        'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
        'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
        'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
        'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
        'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
        'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
        'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
        'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
        'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
        'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
        'set_numeric_ops', 'set_printoptions', 'set_string_function',
        'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
        'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
        'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
        'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
        'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
        'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
        'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
        'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
        'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
        'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
        'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
    }

    def get_tokens_unprocessed(self, text):
        """Post-process the Python token stream, re-tagging plain ``Name``
        tokens that are known NumPy builtins as ``Keyword.Pseudo``."""
        for index, token, value in \
                PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value

    def analyse_text(text):
        """Claim the file only if it looks like Python (shebang or an
        ``import``) *and* the first 1000 chars mention importing numpy."""
        ltext = text[:1000]
        return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or
                'import ' in ltext) \
            and ('import numpy' in ltext or 'from numpy import' in ltext)
|
.venv/Lib/site-packages/pip/_vendor/pygments/plugin.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.plugin
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Pygments plugin interface. By default, this tries to use
|
| 6 |
+
``importlib.metadata``, which is in the Python standard
|
| 7 |
+
library since Python 3.8, or its ``importlib_metadata``
|
| 8 |
+
backport for earlier versions of Python. It falls back on
|
| 9 |
+
``pkg_resources`` if not found. Finally, if ``pkg_resources``
|
| 10 |
+
is not found either, no plugins are loaded at all.
|
| 11 |
+
|
| 12 |
+
lexer plugins::
|
| 13 |
+
|
| 14 |
+
[pygments.lexers]
|
| 15 |
+
yourlexer = yourmodule:YourLexer
|
| 16 |
+
|
| 17 |
+
formatter plugins::
|
| 18 |
+
|
| 19 |
+
[pygments.formatters]
|
| 20 |
+
yourformatter = yourformatter:YourFormatter
|
| 21 |
+
/.ext = yourformatter:YourFormatter
|
| 22 |
+
|
| 23 |
+
As you can see, you can define extensions for the formatter
|
| 24 |
+
with a leading slash.
|
| 25 |
+
|
| 26 |
+
syntax plugins::
|
| 27 |
+
|
| 28 |
+
[pygments.styles]
|
| 29 |
+
yourstyle = yourstyle:YourStyle
|
| 30 |
+
|
| 31 |
+
filter plugin::
|
| 32 |
+
|
| 33 |
+
[pygments.filter]
|
| 34 |
+
yourfilter = yourfilter:YourFilter
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 38 |
+
:license: BSD, see LICENSE for details.
|
| 39 |
+
"""
|
| 40 |
+
|
| 41 |
+
# Entry-point group names under which third-party packages can register
# additional lexers, formatters, styles and filters.
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def iter_entry_points(group_name):
    """Return an iterable of installed entry points in *group_name*.

    Discovery backends are tried in order: ``importlib.metadata``
    (stdlib, Python 3.8+), the ``importlib_metadata`` backport, then
    pip's vendored ``pkg_resources``. If none is importable, an empty
    list is returned and no plugins are loaded.
    """
    try:
        from importlib.metadata import entry_points
    except ImportError:
        try:
            from importlib_metadata import entry_points
        except ImportError:
            try:
                from pip._vendor.pkg_resources import iter_entry_points
            except (ImportError, OSError):
                # No metadata backend available at all -> no plugins.
                return []
            else:
                # pkg_resources exposes the iteration API directly,
                # so delegate to it and return.
                return iter_entry_points(group_name)
    groups = entry_points()
    if hasattr(groups, 'select'):
        # New interface in Python 3.10 and newer versions of the
        # importlib_metadata backport.
        return groups.select(group=group_name)
    else:
        # Older interface, deprecated in Python 3.10 and recent
        # importlib_metadata, but we need it in Python 3.8 and 3.9.
        return groups.get(group_name, [])
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def find_plugin_lexers():
    """Yield the lexer classes registered by installed plugin packages."""
    yield from (entry.load() for entry in iter_entry_points(LEXER_ENTRY_POINT))
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def find_plugin_formatters():
    """Yield ``(name, formatter class)`` pairs from installed plugins."""
    yield from ((entry.name, entry.load())
                for entry in iter_entry_points(FORMATTER_ENTRY_POINT))
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def find_plugin_styles():
    """Yield ``(name, style class)`` pairs from installed plugins."""
    yield from ((entry.name, entry.load())
                for entry in iter_entry_points(STYLE_ENTRY_POINT))
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def find_plugin_filters():
    """Yield ``(name, filter class)`` pairs from installed plugins."""
    yield from ((entry.name, entry.load())
                for entry in iter_entry_points(FILTER_ENTRY_POINT))
|
.venv/Lib/site-packages/pip/_vendor/pygments/regexopt.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.regexopt
|
| 3 |
+
~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
An algorithm that generates optimized regexes for matching long lists of
|
| 6 |
+
literal strings.
|
| 7 |
+
|
| 8 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 9 |
+
:license: BSD, see LICENSE for details.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import re
|
| 13 |
+
from re import escape
|
| 14 |
+
from os.path import commonprefix
|
| 15 |
+
from itertools import groupby
|
| 16 |
+
from operator import itemgetter
|
| 17 |
+
|
| 18 |
+
# Characters that must be backslash-escaped inside a regex character class.
CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
# Key function extracting the first element of a sequence.
FIRST_ELEMENT = itemgetter(0)


def make_charset(letters):
    """Build a regex character class (``[...]``) matching exactly *letters*."""
    escaped = CS_ESCAPE.sub(lambda match: '\\' + match.group(), ''.join(letters))
    return '[%s]' % escaped
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings.

    *open_paren* is the opening group syntax (``'('``, ``'(?:'`` or ``''``)
    to wrap the result in; the matching ``')'`` is appended when non-empty.
    Recursively factors out common prefixes/suffixes and collapses
    single-character alternatives into character classes.
    """
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        # The empty string is a member, so the rest becomes optional.
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def regex_opt(strings, prefix='', suffix=''):
    """Return an optimized regex *pattern string* that matches any string
    in the given list.

    (The previous docstring claimed a *compiled* regex was returned; the
    function has always returned the pattern source, which callers pass
    to ``re.compile`` themselves.)

    The strings to match must be literal strings, not regexes. They will be
    regex-escaped.

    *prefix* and *suffix* are pre- and appended to the final regex.
    """
    # Sorting is required by regex_opt_inner's prefix/suffix factoring.
    strings = sorted(strings)
    return prefix + regex_opt_inner(strings, '(') + suffix
|
.venv/Lib/site-packages/pip/_vendor/pygments/scanner.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.scanner
|
| 3 |
+
~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This library implements a regex based scanner. Some languages
|
| 6 |
+
like Pascal are easy to parse but have some keywords that
|
| 7 |
+
depend on the context. Because of this it's impossible to lex
|
| 8 |
+
that just by using a regular expression lexer like the
|
| 9 |
+
`RegexLexer`.
|
| 10 |
+
|
| 11 |
+
Have a look at the `DelphiLexer` to get an idea of how to use
|
| 12 |
+
this scanner.
|
| 13 |
+
|
| 14 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 15 |
+
:license: BSD, see LICENSE for details.
|
| 16 |
+
"""
|
| 17 |
+
import re
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class EndOfText(RuntimeError):
    """
    Raise if end of text is reached and the user
    tried to call a match function.

    Raised by ``Scanner.check``/``Scanner.scan`` (and thus ``test``)
    when ``Scanner.eos`` is already true.
    """
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class Scanner:
    """
    Simple regex-based scanner.

    All method patterns are regular expression strings (not
    compiled expressions!); compiled patterns are cached per instance.
    """

    def __init__(self, text, flags=0):
        """
        :param text: The text which should be scanned
        :param flags: default regular expression flags
        """
        self.data = text
        self.data_length = len(text)
        self.start_pos = 0      # start position of the most recent match
        self.pos = 0            # current position (end of the last match)
        self.flags = flags
        self.last = None        # text matched by the scan before the last one
        self.match = None       # text matched by the last successful scan
        self._re_cache = {}     # pattern string -> compiled regex

    @property
    def eos(self):
        """`True` if the scanner reached the end of text."""
        # Fix: the original used ``eos = property(eos, eos.__doc__)``,
        # which passed the docstring in the *fset* slot, so assigning to
        # ``eos`` raised TypeError instead of AttributeError. The
        # decorator form has the intended read-only semantics.
        return self.pos >= self.data_length

    def _get_re(self, pattern):
        """Compile *pattern* with the default flags, caching the result."""
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        return self._re_cache[pattern]

    def check(self, pattern):
        """
        Apply `pattern` on the current position and return
        the match object. (Doesn't touch pos). Use this for
        lookahead.

        :raises EndOfText: if the end of the text was already reached.
        """
        if self.eos:
            raise EndOfText()
        return self._get_re(pattern).match(self.data, self.pos)

    def test(self, pattern):
        """Apply a pattern on the current position and check
        if it matches. Doesn't touch pos.
        """
        # (Fixed docstring typo: "patches" -> "matches".)
        return self.check(pattern) is not None

    def scan(self, pattern):
        """
        Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
        indicates if the pattern matched. The matched value is
        stored on the instance as ``match``, the last value is
        stored as ``last``. ``start_pos`` is the position of the
        pointer before the pattern was matched, ``pos`` is the
        end position.

        :raises EndOfText: if the end of the text was already reached.
        """
        if self.eos:
            raise EndOfText()
        # ``last`` is rotated even when the new pattern fails to match,
        # preserving the original behavior.
        self.last = self.match
        m = self._get_re(pattern).match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos = m.start()
        self.pos = m.end()
        self.match = m.group()
        return True

    def get_char(self):
        """Scan exactly one char."""
        self.scan('.')

    def __repr__(self):
        return '<%s %d/%d>' % (
            self.__class__.__name__,
            self.pos,
            self.data_length
        )
|
.venv/Lib/site-packages/pip/_vendor/pygments/sphinxext.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.sphinxext
|
| 3 |
+
~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Sphinx extension to generate automatic documentation of lexers,
|
| 6 |
+
formatters and filters.
|
| 7 |
+
|
| 8 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 9 |
+
:license: BSD, see LICENSE for details.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import sys
|
| 13 |
+
|
| 14 |
+
from docutils import nodes
|
| 15 |
+
from docutils.statemachine import ViewList
|
| 16 |
+
from docutils.parsers.rst import Directive
|
| 17 |
+
from sphinx.util.nodes import nested_parse_with_titles
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
MODULEDOC = '''
|
| 21 |
+
.. module:: %s
|
| 22 |
+
|
| 23 |
+
%s
|
| 24 |
+
%s
|
| 25 |
+
'''
|
| 26 |
+
|
| 27 |
+
LEXERDOC = '''
|
| 28 |
+
.. class:: %s
|
| 29 |
+
|
| 30 |
+
:Short names: %s
|
| 31 |
+
:Filenames: %s
|
| 32 |
+
:MIME types: %s
|
| 33 |
+
|
| 34 |
+
%s
|
| 35 |
+
|
| 36 |
+
'''
|
| 37 |
+
|
| 38 |
+
FMTERDOC = '''
|
| 39 |
+
.. class:: %s
|
| 40 |
+
|
| 41 |
+
:Short names: %s
|
| 42 |
+
:Filenames: %s
|
| 43 |
+
|
| 44 |
+
%s
|
| 45 |
+
|
| 46 |
+
'''
|
| 47 |
+
|
| 48 |
+
FILTERDOC = '''
|
| 49 |
+
.. class:: %s
|
| 50 |
+
|
| 51 |
+
:Name: %s
|
| 52 |
+
|
| 53 |
+
%s
|
| 54 |
+
|
| 55 |
+
'''
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class PygmentsDoc(Directive):
|
| 59 |
+
"""
|
| 60 |
+
A directive to collect all lexers/formatters/filters and generate
|
| 61 |
+
autoclass directives for them.
|
| 62 |
+
"""
|
| 63 |
+
has_content = False
|
| 64 |
+
required_arguments = 1
|
| 65 |
+
optional_arguments = 0
|
| 66 |
+
final_argument_whitespace = False
|
| 67 |
+
option_spec = {}
|
| 68 |
+
|
| 69 |
+
def run(self):
|
| 70 |
+
self.filenames = set()
|
| 71 |
+
if self.arguments[0] == 'lexers':
|
| 72 |
+
out = self.document_lexers()
|
| 73 |
+
elif self.arguments[0] == 'formatters':
|
| 74 |
+
out = self.document_formatters()
|
| 75 |
+
elif self.arguments[0] == 'filters':
|
| 76 |
+
out = self.document_filters()
|
| 77 |
+
elif self.arguments[0] == 'lexers_overview':
|
| 78 |
+
out = self.document_lexers_overview()
|
| 79 |
+
else:
|
| 80 |
+
raise Exception('invalid argument for "pygmentsdoc" directive')
|
| 81 |
+
node = nodes.compound()
|
| 82 |
+
vl = ViewList(out.split('\n'), source='')
|
| 83 |
+
nested_parse_with_titles(self.state, vl, node)
|
| 84 |
+
for fn in self.filenames:
|
| 85 |
+
self.state.document.settings.record_dependencies.add(fn)
|
| 86 |
+
return node.children
|
| 87 |
+
|
| 88 |
+
def document_lexers_overview(self):
|
| 89 |
+
"""Generate a tabular overview of all lexers.
|
| 90 |
+
|
| 91 |
+
The columns are the lexer name, the extensions handled by this lexer
|
| 92 |
+
(or "None"), the aliases and a link to the lexer class."""
|
| 93 |
+
from pip._vendor.pygments.lexers._mapping import LEXERS
|
| 94 |
+
from pip._vendor.pygments.lexers import find_lexer_class
|
| 95 |
+
out = []
|
| 96 |
+
|
| 97 |
+
table = []
|
| 98 |
+
|
| 99 |
+
def format_link(name, url):
|
| 100 |
+
if url:
|
| 101 |
+
return f'`{name} <{url}>`_'
|
| 102 |
+
return name
|
| 103 |
+
|
| 104 |
+
for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
|
| 105 |
+
lexer_cls = find_lexer_class(data[1])
|
| 106 |
+
extensions = lexer_cls.filenames + lexer_cls.alias_filenames
|
| 107 |
+
|
| 108 |
+
table.append({
|
| 109 |
+
'name': format_link(data[1], lexer_cls.url),
|
| 110 |
+
'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\') or 'None',
|
| 111 |
+
'aliases': ', '.join(data[2]),
|
| 112 |
+
'class': f'{data[0]}.{classname}'
|
| 113 |
+
})
|
| 114 |
+
|
| 115 |
+
column_names = ['name', 'extensions', 'aliases', 'class']
|
| 116 |
+
column_lengths = [max([len(row[column]) for row in table if row[column]])
|
| 117 |
+
for column in column_names]
|
| 118 |
+
|
| 119 |
+
def write_row(*columns):
|
| 120 |
+
"""Format a table row"""
|
| 121 |
+
out = []
|
| 122 |
+
for l, c in zip(column_lengths, columns):
|
| 123 |
+
if c:
|
| 124 |
+
out.append(c.ljust(l))
|
| 125 |
+
else:
|
| 126 |
+
out.append(' '*l)
|
| 127 |
+
|
| 128 |
+
return ' '.join(out)
|
| 129 |
+
|
| 130 |
+
def write_seperator():
|
| 131 |
+
"""Write a table separator row"""
|
| 132 |
+
sep = ['='*c for c in column_lengths]
|
| 133 |
+
return write_row(*sep)
|
| 134 |
+
|
| 135 |
+
out.append(write_seperator())
|
| 136 |
+
out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
|
| 137 |
+
out.append(write_seperator())
|
| 138 |
+
for row in table:
|
| 139 |
+
out.append(write_row(
|
| 140 |
+
row['name'],
|
| 141 |
+
row['extensions'],
|
| 142 |
+
row['aliases'],
|
| 143 |
+
f':class:`~{row["class"]}`'))
|
| 144 |
+
out.append(write_seperator())
|
| 145 |
+
|
| 146 |
+
return '\n'.join(out)
|
| 147 |
+
|
| 148 |
+
def document_lexers(self):
|
| 149 |
+
from pip._vendor.pygments.lexers._mapping import LEXERS
|
| 150 |
+
out = []
|
| 151 |
+
modules = {}
|
| 152 |
+
moduledocstrings = {}
|
| 153 |
+
for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
|
| 154 |
+
module = data[0]
|
| 155 |
+
mod = __import__(module, None, None, [classname])
|
| 156 |
+
self.filenames.add(mod.__file__)
|
| 157 |
+
cls = getattr(mod, classname)
|
| 158 |
+
if not cls.__doc__:
|
| 159 |
+
print("Warning: %s does not have a docstring." % classname)
|
| 160 |
+
docstring = cls.__doc__
|
| 161 |
+
if isinstance(docstring, bytes):
|
| 162 |
+
docstring = docstring.decode('utf8')
|
| 163 |
+
modules.setdefault(module, []).append((
|
| 164 |
+
classname,
|
| 165 |
+
', '.join(data[2]) or 'None',
|
| 166 |
+
', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
|
| 167 |
+
', '.join(data[4]) or 'None',
|
| 168 |
+
docstring))
|
| 169 |
+
if module not in moduledocstrings:
|
| 170 |
+
moddoc = mod.__doc__
|
| 171 |
+
if isinstance(moddoc, bytes):
|
| 172 |
+
moddoc = moddoc.decode('utf8')
|
| 173 |
+
moduledocstrings[module] = moddoc
|
| 174 |
+
|
| 175 |
+
for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
|
| 176 |
+
if moduledocstrings[module] is None:
|
| 177 |
+
raise Exception("Missing docstring for %s" % (module,))
|
| 178 |
+
heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
|
| 179 |
+
out.append(MODULEDOC % (module, heading, '-'*len(heading)))
|
| 180 |
+
for data in lexers:
|
| 181 |
+
out.append(LEXERDOC % data)
|
| 182 |
+
|
| 183 |
+
return ''.join(out)
|
| 184 |
+
|
| 185 |
+
def document_formatters(self):
|
| 186 |
+
from pip._vendor.pygments.formatters import FORMATTERS
|
| 187 |
+
|
| 188 |
+
out = []
|
| 189 |
+
for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
|
| 190 |
+
module = data[0]
|
| 191 |
+
mod = __import__(module, None, None, [classname])
|
| 192 |
+
self.filenames.add(mod.__file__)
|
| 193 |
+
cls = getattr(mod, classname)
|
| 194 |
+
docstring = cls.__doc__
|
| 195 |
+
if isinstance(docstring, bytes):
|
| 196 |
+
docstring = docstring.decode('utf8')
|
| 197 |
+
heading = cls.__name__
|
| 198 |
+
out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
|
| 199 |
+
', '.join(data[3]).replace('*', '\\*') or 'None',
|
| 200 |
+
docstring))
|
| 201 |
+
return ''.join(out)
|
| 202 |
+
|
| 203 |
+
def document_filters(self):
|
| 204 |
+
from pip._vendor.pygments.filters import FILTERS
|
| 205 |
+
|
| 206 |
+
out = []
|
| 207 |
+
for name, cls in FILTERS.items():
|
| 208 |
+
self.filenames.add(sys.modules[cls.__module__].__file__)
|
| 209 |
+
docstring = cls.__doc__
|
| 210 |
+
if isinstance(docstring, bytes):
|
| 211 |
+
docstring = docstring.decode('utf8')
|
| 212 |
+
out.append(FILTERDOC % (cls.__name__, name, docstring))
|
| 213 |
+
return ''.join(out)
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def setup(app):
|
| 217 |
+
app.add_directive('pygmentsdoc', PygmentsDoc)
|
.venv/Lib/site-packages/pip/_vendor/pygments/style.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.style
|
| 3 |
+
~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Basic style object.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.token import Token, STANDARD_TYPES
|
| 12 |
+
|
| 13 |
+
# Default mapping of ansixxx to RGB colors.
|
| 14 |
+
_ansimap = {
|
| 15 |
+
# dark
|
| 16 |
+
'ansiblack': '000000',
|
| 17 |
+
'ansired': '7f0000',
|
| 18 |
+
'ansigreen': '007f00',
|
| 19 |
+
'ansiyellow': '7f7fe0',
|
| 20 |
+
'ansiblue': '00007f',
|
| 21 |
+
'ansimagenta': '7f007f',
|
| 22 |
+
'ansicyan': '007f7f',
|
| 23 |
+
'ansigray': 'e5e5e5',
|
| 24 |
+
# normal
|
| 25 |
+
'ansibrightblack': '555555',
|
| 26 |
+
'ansibrightred': 'ff0000',
|
| 27 |
+
'ansibrightgreen': '00ff00',
|
| 28 |
+
'ansibrightyellow': 'ffff00',
|
| 29 |
+
'ansibrightblue': '0000ff',
|
| 30 |
+
'ansibrightmagenta': 'ff00ff',
|
| 31 |
+
'ansibrightcyan': '00ffff',
|
| 32 |
+
'ansiwhite': 'ffffff',
|
| 33 |
+
}
|
| 34 |
+
# mapping of deprecated #ansixxx colors to new color names
|
| 35 |
+
_deprecated_ansicolors = {
|
| 36 |
+
# dark
|
| 37 |
+
'#ansiblack': 'ansiblack',
|
| 38 |
+
'#ansidarkred': 'ansired',
|
| 39 |
+
'#ansidarkgreen': 'ansigreen',
|
| 40 |
+
'#ansibrown': 'ansiyellow',
|
| 41 |
+
'#ansidarkblue': 'ansiblue',
|
| 42 |
+
'#ansipurple': 'ansimagenta',
|
| 43 |
+
'#ansiteal': 'ansicyan',
|
| 44 |
+
'#ansilightgray': 'ansigray',
|
| 45 |
+
# normal
|
| 46 |
+
'#ansidarkgray': 'ansibrightblack',
|
| 47 |
+
'#ansired': 'ansibrightred',
|
| 48 |
+
'#ansigreen': 'ansibrightgreen',
|
| 49 |
+
'#ansiyellow': 'ansibrightyellow',
|
| 50 |
+
'#ansiblue': 'ansibrightblue',
|
| 51 |
+
'#ansifuchsia': 'ansibrightmagenta',
|
| 52 |
+
'#ansiturquoise': 'ansibrightcyan',
|
| 53 |
+
'#ansiwhite': 'ansiwhite',
|
| 54 |
+
}
|
| 55 |
+
ansicolors = set(_ansimap)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class StyleMeta(type):
|
| 59 |
+
|
| 60 |
+
def __new__(mcs, name, bases, dct):
|
| 61 |
+
obj = type.__new__(mcs, name, bases, dct)
|
| 62 |
+
for token in STANDARD_TYPES:
|
| 63 |
+
if token not in obj.styles:
|
| 64 |
+
obj.styles[token] = ''
|
| 65 |
+
|
| 66 |
+
def colorformat(text):
|
| 67 |
+
if text in ansicolors:
|
| 68 |
+
return text
|
| 69 |
+
if text[0:1] == '#':
|
| 70 |
+
col = text[1:]
|
| 71 |
+
if len(col) == 6:
|
| 72 |
+
return col
|
| 73 |
+
elif len(col) == 3:
|
| 74 |
+
return col[0] * 2 + col[1] * 2 + col[2] * 2
|
| 75 |
+
elif text == '':
|
| 76 |
+
return ''
|
| 77 |
+
elif text.startswith('var') or text.startswith('calc'):
|
| 78 |
+
return text
|
| 79 |
+
assert False, "wrong color format %r" % text
|
| 80 |
+
|
| 81 |
+
_styles = obj._styles = {}
|
| 82 |
+
|
| 83 |
+
for ttype in obj.styles:
|
| 84 |
+
for token in ttype.split():
|
| 85 |
+
if token in _styles:
|
| 86 |
+
continue
|
| 87 |
+
ndef = _styles.get(token.parent, None)
|
| 88 |
+
styledefs = obj.styles.get(token, '').split()
|
| 89 |
+
if not ndef or token is None:
|
| 90 |
+
ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
|
| 91 |
+
elif 'noinherit' in styledefs and token is not Token:
|
| 92 |
+
ndef = _styles[Token][:]
|
| 93 |
+
else:
|
| 94 |
+
ndef = ndef[:]
|
| 95 |
+
_styles[token] = ndef
|
| 96 |
+
for styledef in obj.styles.get(token, '').split():
|
| 97 |
+
if styledef == 'noinherit':
|
| 98 |
+
pass
|
| 99 |
+
elif styledef == 'bold':
|
| 100 |
+
ndef[1] = 1
|
| 101 |
+
elif styledef == 'nobold':
|
| 102 |
+
ndef[1] = 0
|
| 103 |
+
elif styledef == 'italic':
|
| 104 |
+
ndef[2] = 1
|
| 105 |
+
elif styledef == 'noitalic':
|
| 106 |
+
ndef[2] = 0
|
| 107 |
+
elif styledef == 'underline':
|
| 108 |
+
ndef[3] = 1
|
| 109 |
+
elif styledef == 'nounderline':
|
| 110 |
+
ndef[3] = 0
|
| 111 |
+
elif styledef[:3] == 'bg:':
|
| 112 |
+
ndef[4] = colorformat(styledef[3:])
|
| 113 |
+
elif styledef[:7] == 'border:':
|
| 114 |
+
ndef[5] = colorformat(styledef[7:])
|
| 115 |
+
elif styledef == 'roman':
|
| 116 |
+
ndef[6] = 1
|
| 117 |
+
elif styledef == 'sans':
|
| 118 |
+
ndef[7] = 1
|
| 119 |
+
elif styledef == 'mono':
|
| 120 |
+
ndef[8] = 1
|
| 121 |
+
else:
|
| 122 |
+
ndef[0] = colorformat(styledef)
|
| 123 |
+
|
| 124 |
+
return obj
|
| 125 |
+
|
| 126 |
+
def style_for_token(cls, token):
|
| 127 |
+
t = cls._styles[token]
|
| 128 |
+
ansicolor = bgansicolor = None
|
| 129 |
+
color = t[0]
|
| 130 |
+
if color in _deprecated_ansicolors:
|
| 131 |
+
color = _deprecated_ansicolors[color]
|
| 132 |
+
if color in ansicolors:
|
| 133 |
+
ansicolor = color
|
| 134 |
+
color = _ansimap[color]
|
| 135 |
+
bgcolor = t[4]
|
| 136 |
+
if bgcolor in _deprecated_ansicolors:
|
| 137 |
+
bgcolor = _deprecated_ansicolors[bgcolor]
|
| 138 |
+
if bgcolor in ansicolors:
|
| 139 |
+
bgansicolor = bgcolor
|
| 140 |
+
bgcolor = _ansimap[bgcolor]
|
| 141 |
+
|
| 142 |
+
return {
|
| 143 |
+
'color': color or None,
|
| 144 |
+
'bold': bool(t[1]),
|
| 145 |
+
'italic': bool(t[2]),
|
| 146 |
+
'underline': bool(t[3]),
|
| 147 |
+
'bgcolor': bgcolor or None,
|
| 148 |
+
'border': t[5] or None,
|
| 149 |
+
'roman': bool(t[6]) or None,
|
| 150 |
+
'sans': bool(t[7]) or None,
|
| 151 |
+
'mono': bool(t[8]) or None,
|
| 152 |
+
'ansicolor': ansicolor,
|
| 153 |
+
'bgansicolor': bgansicolor,
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
def list_styles(cls):
|
| 157 |
+
return list(cls)
|
| 158 |
+
|
| 159 |
+
def styles_token(cls, ttype):
|
| 160 |
+
return ttype in cls._styles
|
| 161 |
+
|
| 162 |
+
def __iter__(cls):
|
| 163 |
+
for token in cls._styles:
|
| 164 |
+
yield token, cls.style_for_token(token)
|
| 165 |
+
|
| 166 |
+
def __len__(cls):
|
| 167 |
+
return len(cls._styles)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class Style(metaclass=StyleMeta):
|
| 171 |
+
|
| 172 |
+
#: overall background color (``None`` means transparent)
|
| 173 |
+
background_color = '#ffffff'
|
| 174 |
+
|
| 175 |
+
#: highlight background color
|
| 176 |
+
highlight_color = '#ffffcc'
|
| 177 |
+
|
| 178 |
+
#: line number font color
|
| 179 |
+
line_number_color = 'inherit'
|
| 180 |
+
|
| 181 |
+
#: line number background color
|
| 182 |
+
line_number_background_color = 'transparent'
|
| 183 |
+
|
| 184 |
+
#: special line number font color
|
| 185 |
+
line_number_special_color = '#000000'
|
| 186 |
+
|
| 187 |
+
#: special line number background color
|
| 188 |
+
line_number_special_background_color = '#ffffc0'
|
| 189 |
+
|
| 190 |
+
#: Style definitions for individual token types.
|
| 191 |
+
styles = {}
|
| 192 |
+
|
| 193 |
+
# Attribute for lexers defined within Pygments. If set
|
| 194 |
+
# to True, the style is not shown in the style gallery
|
| 195 |
+
# on the website. This is intended for language-specific
|
| 196 |
+
# styles.
|
| 197 |
+
web_style_gallery_exclude = False
|
.venv/Lib/site-packages/pip/_vendor/pygments/styles/__init__.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.styles
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Contains built-in styles.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.plugin import find_plugin_styles
|
| 12 |
+
from pip._vendor.pygments.util import ClassNotFound
|
| 13 |
+
|
| 14 |
+
#: A dictionary of built-in styles, mapping style names to
|
| 15 |
+
#: ``'submodule::classname'`` strings.
|
| 16 |
+
STYLE_MAP = {
|
| 17 |
+
'default': 'default::DefaultStyle',
|
| 18 |
+
'emacs': 'emacs::EmacsStyle',
|
| 19 |
+
'friendly': 'friendly::FriendlyStyle',
|
| 20 |
+
'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle',
|
| 21 |
+
'colorful': 'colorful::ColorfulStyle',
|
| 22 |
+
'autumn': 'autumn::AutumnStyle',
|
| 23 |
+
'murphy': 'murphy::MurphyStyle',
|
| 24 |
+
'manni': 'manni::ManniStyle',
|
| 25 |
+
'material': 'material::MaterialStyle',
|
| 26 |
+
'monokai': 'monokai::MonokaiStyle',
|
| 27 |
+
'perldoc': 'perldoc::PerldocStyle',
|
| 28 |
+
'pastie': 'pastie::PastieStyle',
|
| 29 |
+
'borland': 'borland::BorlandStyle',
|
| 30 |
+
'trac': 'trac::TracStyle',
|
| 31 |
+
'native': 'native::NativeStyle',
|
| 32 |
+
'fruity': 'fruity::FruityStyle',
|
| 33 |
+
'bw': 'bw::BlackWhiteStyle',
|
| 34 |
+
'vim': 'vim::VimStyle',
|
| 35 |
+
'vs': 'vs::VisualStudioStyle',
|
| 36 |
+
'tango': 'tango::TangoStyle',
|
| 37 |
+
'rrt': 'rrt::RrtStyle',
|
| 38 |
+
'xcode': 'xcode::XcodeStyle',
|
| 39 |
+
'igor': 'igor::IgorStyle',
|
| 40 |
+
'paraiso-light': 'paraiso_light::ParaisoLightStyle',
|
| 41 |
+
'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle',
|
| 42 |
+
'lovelace': 'lovelace::LovelaceStyle',
|
| 43 |
+
'algol': 'algol::AlgolStyle',
|
| 44 |
+
'algol_nu': 'algol_nu::Algol_NuStyle',
|
| 45 |
+
'arduino': 'arduino::ArduinoStyle',
|
| 46 |
+
'rainbow_dash': 'rainbow_dash::RainbowDashStyle',
|
| 47 |
+
'abap': 'abap::AbapStyle',
|
| 48 |
+
'solarized-dark': 'solarized::SolarizedDarkStyle',
|
| 49 |
+
'solarized-light': 'solarized::SolarizedLightStyle',
|
| 50 |
+
'sas': 'sas::SasStyle',
|
| 51 |
+
'staroffice' : 'staroffice::StarofficeStyle',
|
| 52 |
+
'stata': 'stata_light::StataLightStyle',
|
| 53 |
+
'stata-light': 'stata_light::StataLightStyle',
|
| 54 |
+
'stata-dark': 'stata_dark::StataDarkStyle',
|
| 55 |
+
'inkpot': 'inkpot::InkPotStyle',
|
| 56 |
+
'zenburn': 'zenburn::ZenburnStyle',
|
| 57 |
+
'gruvbox-dark': 'gruvbox::GruvboxDarkStyle',
|
| 58 |
+
'gruvbox-light': 'gruvbox::GruvboxLightStyle',
|
| 59 |
+
'dracula': 'dracula::DraculaStyle',
|
| 60 |
+
'one-dark': 'onedark::OneDarkStyle',
|
| 61 |
+
'lilypond' : 'lilypond::LilyPondStyle',
|
| 62 |
+
'nord': 'nord::NordStyle',
|
| 63 |
+
'nord-darker': 'nord::NordDarkerStyle',
|
| 64 |
+
'github-dark': 'gh_dark::GhDarkStyle'
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def get_style_by_name(name):
|
| 69 |
+
"""
|
| 70 |
+
Return a style class by its short name. The names of the builtin styles
|
| 71 |
+
are listed in :data:`pygments.styles.STYLE_MAP`.
|
| 72 |
+
|
| 73 |
+
Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
|
| 74 |
+
found.
|
| 75 |
+
"""
|
| 76 |
+
if name in STYLE_MAP:
|
| 77 |
+
mod, cls = STYLE_MAP[name].split('::')
|
| 78 |
+
builtin = "yes"
|
| 79 |
+
else:
|
| 80 |
+
for found_name, style in find_plugin_styles():
|
| 81 |
+
if name == found_name:
|
| 82 |
+
return style
|
| 83 |
+
# perhaps it got dropped into our styles package
|
| 84 |
+
builtin = ""
|
| 85 |
+
mod = name
|
| 86 |
+
cls = name.title() + "Style"
|
| 87 |
+
|
| 88 |
+
try:
|
| 89 |
+
mod = __import__('pygments.styles.' + mod, None, None, [cls])
|
| 90 |
+
except ImportError:
|
| 91 |
+
raise ClassNotFound("Could not find style module %r" % mod +
|
| 92 |
+
(builtin and ", though it should be builtin") + ".")
|
| 93 |
+
try:
|
| 94 |
+
return getattr(mod, cls)
|
| 95 |
+
except AttributeError:
|
| 96 |
+
raise ClassNotFound("Could not find style class %r in style module." % cls)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def get_all_styles():
|
| 100 |
+
"""Return a generator for all styles by name, both builtin and plugin."""
|
| 101 |
+
yield from STYLE_MAP
|
| 102 |
+
for name, _ in find_plugin_styles():
|
| 103 |
+
yield name
|
.venv/Lib/site-packages/pip/_vendor/pygments/token.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.token
|
| 3 |
+
~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Basic token types and the standard tokens.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class _TokenType(tuple):
|
| 13 |
+
parent = None
|
| 14 |
+
|
| 15 |
+
def split(self):
|
| 16 |
+
buf = []
|
| 17 |
+
node = self
|
| 18 |
+
while node is not None:
|
| 19 |
+
buf.append(node)
|
| 20 |
+
node = node.parent
|
| 21 |
+
buf.reverse()
|
| 22 |
+
return buf
|
| 23 |
+
|
| 24 |
+
def __init__(self, *args):
|
| 25 |
+
# no need to call super.__init__
|
| 26 |
+
self.subtypes = set()
|
| 27 |
+
|
| 28 |
+
def __contains__(self, val):
|
| 29 |
+
return self is val or (
|
| 30 |
+
type(val) is self.__class__ and
|
| 31 |
+
val[:len(self)] == self
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
def __getattr__(self, val):
|
| 35 |
+
if not val or not val[0].isupper():
|
| 36 |
+
return tuple.__getattribute__(self, val)
|
| 37 |
+
new = _TokenType(self + (val,))
|
| 38 |
+
setattr(self, val, new)
|
| 39 |
+
self.subtypes.add(new)
|
| 40 |
+
new.parent = self
|
| 41 |
+
return new
|
| 42 |
+
|
| 43 |
+
def __repr__(self):
|
| 44 |
+
return 'Token' + (self and '.' or '') + '.'.join(self)
|
| 45 |
+
|
| 46 |
+
def __copy__(self):
|
| 47 |
+
# These instances are supposed to be singletons
|
| 48 |
+
return self
|
| 49 |
+
|
| 50 |
+
def __deepcopy__(self, memo):
|
| 51 |
+
# These instances are supposed to be singletons
|
| 52 |
+
return self
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
Token = _TokenType()
|
| 56 |
+
|
| 57 |
+
# Special token types
|
| 58 |
+
Text = Token.Text
|
| 59 |
+
Whitespace = Text.Whitespace
|
| 60 |
+
Escape = Token.Escape
|
| 61 |
+
Error = Token.Error
|
| 62 |
+
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
|
| 63 |
+
Other = Token.Other
|
| 64 |
+
|
| 65 |
+
# Common token types for source code
|
| 66 |
+
Keyword = Token.Keyword
|
| 67 |
+
Name = Token.Name
|
| 68 |
+
Literal = Token.Literal
|
| 69 |
+
String = Literal.String
|
| 70 |
+
Number = Literal.Number
|
| 71 |
+
Punctuation = Token.Punctuation
|
| 72 |
+
Operator = Token.Operator
|
| 73 |
+
Comment = Token.Comment
|
| 74 |
+
|
| 75 |
+
# Generic types for non-source code
|
| 76 |
+
Generic = Token.Generic
|
| 77 |
+
|
| 78 |
+
# String and some others are not direct children of Token.
|
| 79 |
+
# alias them:
|
| 80 |
+
Token.Token = Token
|
| 81 |
+
Token.String = String
|
| 82 |
+
Token.Number = Number
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def is_token_subtype(ttype, other):
|
| 86 |
+
"""
|
| 87 |
+
Return True if ``ttype`` is a subtype of ``other``.
|
| 88 |
+
|
| 89 |
+
exists for backwards compatibility. use ``ttype in other`` now.
|
| 90 |
+
"""
|
| 91 |
+
return ttype in other
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def string_to_tokentype(s):
|
| 95 |
+
"""
|
| 96 |
+
Convert a string into a token type::
|
| 97 |
+
|
| 98 |
+
>>> string_to_token('String.Double')
|
| 99 |
+
Token.Literal.String.Double
|
| 100 |
+
>>> string_to_token('Token.Literal.Number')
|
| 101 |
+
Token.Literal.Number
|
| 102 |
+
>>> string_to_token('')
|
| 103 |
+
Token
|
| 104 |
+
|
| 105 |
+
Tokens that are already tokens are returned unchanged:
|
| 106 |
+
|
| 107 |
+
>>> string_to_token(String)
|
| 108 |
+
Token.Literal.String
|
| 109 |
+
"""
|
| 110 |
+
if isinstance(s, _TokenType):
|
| 111 |
+
return s
|
| 112 |
+
if not s:
|
| 113 |
+
return Token
|
| 114 |
+
node = Token
|
| 115 |
+
for item in s.split('.'):
|
| 116 |
+
node = getattr(node, item)
|
| 117 |
+
return node
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# Map standard token types to short names, used in CSS class naming.
|
| 121 |
+
# If you add a new item, please be sure to run this file to perform
|
| 122 |
+
# a consistency check for duplicate values.
|
| 123 |
+
STANDARD_TYPES = {
|
| 124 |
+
Token: '',
|
| 125 |
+
|
| 126 |
+
Text: '',
|
| 127 |
+
Whitespace: 'w',
|
| 128 |
+
Escape: 'esc',
|
| 129 |
+
Error: 'err',
|
| 130 |
+
Other: 'x',
|
| 131 |
+
|
| 132 |
+
Keyword: 'k',
|
| 133 |
+
Keyword.Constant: 'kc',
|
| 134 |
+
Keyword.Declaration: 'kd',
|
| 135 |
+
Keyword.Namespace: 'kn',
|
| 136 |
+
Keyword.Pseudo: 'kp',
|
| 137 |
+
Keyword.Reserved: 'kr',
|
| 138 |
+
Keyword.Type: 'kt',
|
| 139 |
+
|
| 140 |
+
Name: 'n',
|
| 141 |
+
Name.Attribute: 'na',
|
| 142 |
+
Name.Builtin: 'nb',
|
| 143 |
+
Name.Builtin.Pseudo: 'bp',
|
| 144 |
+
Name.Class: 'nc',
|
| 145 |
+
Name.Constant: 'no',
|
| 146 |
+
Name.Decorator: 'nd',
|
| 147 |
+
Name.Entity: 'ni',
|
| 148 |
+
Name.Exception: 'ne',
|
| 149 |
+
Name.Function: 'nf',
|
| 150 |
+
Name.Function.Magic: 'fm',
|
| 151 |
+
Name.Property: 'py',
|
| 152 |
+
Name.Label: 'nl',
|
| 153 |
+
Name.Namespace: 'nn',
|
| 154 |
+
Name.Other: 'nx',
|
| 155 |
+
Name.Tag: 'nt',
|
| 156 |
+
Name.Variable: 'nv',
|
| 157 |
+
Name.Variable.Class: 'vc',
|
| 158 |
+
Name.Variable.Global: 'vg',
|
| 159 |
+
Name.Variable.Instance: 'vi',
|
| 160 |
+
Name.Variable.Magic: 'vm',
|
| 161 |
+
|
| 162 |
+
Literal: 'l',
|
| 163 |
+
Literal.Date: 'ld',
|
| 164 |
+
|
| 165 |
+
String: 's',
|
| 166 |
+
String.Affix: 'sa',
|
| 167 |
+
String.Backtick: 'sb',
|
| 168 |
+
String.Char: 'sc',
|
| 169 |
+
String.Delimiter: 'dl',
|
| 170 |
+
String.Doc: 'sd',
|
| 171 |
+
String.Double: 's2',
|
| 172 |
+
String.Escape: 'se',
|
| 173 |
+
String.Heredoc: 'sh',
|
| 174 |
+
String.Interpol: 'si',
|
| 175 |
+
String.Other: 'sx',
|
| 176 |
+
String.Regex: 'sr',
|
| 177 |
+
String.Single: 's1',
|
| 178 |
+
String.Symbol: 'ss',
|
| 179 |
+
|
| 180 |
+
Number: 'm',
|
| 181 |
+
Number.Bin: 'mb',
|
| 182 |
+
Number.Float: 'mf',
|
| 183 |
+
Number.Hex: 'mh',
|
| 184 |
+
Number.Integer: 'mi',
|
| 185 |
+
Number.Integer.Long: 'il',
|
| 186 |
+
Number.Oct: 'mo',
|
| 187 |
+
|
| 188 |
+
Operator: 'o',
|
| 189 |
+
Operator.Word: 'ow',
|
| 190 |
+
|
| 191 |
+
Punctuation: 'p',
|
| 192 |
+
Punctuation.Marker: 'pm',
|
| 193 |
+
|
| 194 |
+
Comment: 'c',
|
| 195 |
+
Comment.Hashbang: 'ch',
|
| 196 |
+
Comment.Multiline: 'cm',
|
| 197 |
+
Comment.Preproc: 'cp',
|
| 198 |
+
Comment.PreprocFile: 'cpf',
|
| 199 |
+
Comment.Single: 'c1',
|
| 200 |
+
Comment.Special: 'cs',
|
| 201 |
+
|
| 202 |
+
Generic: 'g',
|
| 203 |
+
Generic.Deleted: 'gd',
|
| 204 |
+
Generic.Emph: 'ge',
|
| 205 |
+
Generic.Error: 'gr',
|
| 206 |
+
Generic.Heading: 'gh',
|
| 207 |
+
Generic.Inserted: 'gi',
|
| 208 |
+
Generic.Output: 'go',
|
| 209 |
+
Generic.Prompt: 'gp',
|
| 210 |
+
Generic.Strong: 'gs',
|
| 211 |
+
Generic.Subheading: 'gu',
|
| 212 |
+
Generic.Traceback: 'gt',
|
| 213 |
+
}
|
.venv/Lib/site-packages/pip/_vendor/pygments/unistring.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.unistring
|
| 3 |
+
~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Strings of all Unicode characters of a certain category.
|
| 6 |
+
Used for matching in Unicode-aware languages. Run to regenerate.
|
| 7 |
+
|
| 8 |
+
Inspired by chartypes_create.py from the MoinMoin project.
|
| 9 |
+
|
| 10 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 11 |
+
:license: BSD, see LICENSE for details.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
Cc = '\x00-\x1f\x7f-\x9f'
|
| 15 |
+
|
| 16 |
+
Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f'
|
| 17 |
+
|
| 18 |
+
Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17f
a-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U000
1037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U0001189f\U000118f3-\U000118fe\U00011900-
\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143ff\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U0001d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73
\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff'
|
| 19 |
+
|
| 20 |
+
Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd'
|
| 21 |
+
|
| 22 |
+
Cs = '\ud800-\udbff\\\udc00\udc01-\udfff'
|
| 23 |
+
|
| 24 |
+
Ll = 'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\
u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7
\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943'
|
| 25 |
+
|
| 26 |
+
Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1'
|
| 27 |
+
|
| 28 |
+
Lo = '\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u18
78\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860
-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U00
01bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
|
| 29 |
+
|
| 30 |
+
Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc'
|
| 31 |
+
|
| 32 |
+
Lu = 'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e2
4\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d3
\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921'
|
| 33 |
+
|
| 34 |
+
Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\U
00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172'
|
| 35 |
+
|
| 36 |
+
Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672'
|
| 37 |
+
|
| 38 |
+
Mn = '\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31-
\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e
023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef'
|
| 39 |
+
|
| 40 |
+
Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959'
|
| 41 |
+
|
| 42 |
+
Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e'
|
| 43 |
+
|
| 44 |
+
No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c'
|
| 45 |
+
|
| 46 |
+
Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f'
|
| 47 |
+
|
| 48 |
+
Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d'
|
| 49 |
+
|
| 50 |
+
Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
|
| 51 |
+
|
| 52 |
+
Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
|
| 53 |
+
|
| 54 |
+
Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
|
| 55 |
+
|
| 56 |
+
Po = "!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\U
00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f"
|
| 57 |
+
|
| 58 |
+
Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
|
| 59 |
+
|
| 60 |
+
Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0'
|
| 61 |
+
|
| 62 |
+
Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff'
|
| 63 |
+
|
| 64 |
+
Sm = '+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1'
|
| 65 |
+
|
| 66 |
+
So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f265
\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d'
|
| 67 |
+
|
| 68 |
+
Zl = '\u2028'
|
| 69 |
+
|
| 70 |
+
Zp = '\u2029'
|
| 71 |
+
|
| 72 |
+
Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000'
|
| 73 |
+
|
| 74 |
+
xid_continue = '0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0
dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\
u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b
7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59\
U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U
0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef'
|
| 75 |
+
|
| 76 |
+
xid_start = 'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u
12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-
\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U0
0011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001e
e39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
|
| 77 |
+
|
| 78 |
+
cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
|
| 79 |
+
|
| 80 |
+
# Generated from unidata 11.0.0
|
| 81 |
+
|
| 82 |
+
def combine(*args):
    """Concatenate the character-class strings for the given category names.

    Each entry of *args* must be the name of one of this module's category
    constants (e.g. ``'Nd'``, ``'Lu'``); the lookup goes through ``globals()``,
    so an unknown name raises ``KeyError``.
    """
    return ''.join(map(globals().__getitem__, args))
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def allexcept(*args):
    """Concatenate the character-class strings of every category not in *args*.

    Each entry of *args* must be one of the names listed in ``cats``;
    an unknown name raises ``ValueError`` (via ``list.remove``).
    """
    remaining = list(cats)
    for excluded in args:
        remaining.remove(excluded)
    return ''.join(globals()[name] for name in remaining)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _handle_runs(char_list): # pragma: no cover
|
| 94 |
+
buf = []
|
| 95 |
+
for c in char_list:
|
| 96 |
+
if len(c) == 1:
|
| 97 |
+
if buf and buf[-1][1] == chr(ord(c)-1):
|
| 98 |
+
buf[-1] = (buf[-1][0], c)
|
| 99 |
+
else:
|
| 100 |
+
buf.append((c, c))
|
| 101 |
+
else:
|
| 102 |
+
buf.append((c, c))
|
| 103 |
+
for a, b in buf:
|
| 104 |
+
if a == b:
|
| 105 |
+
yield a
|
| 106 |
+
else:
|
| 107 |
+
yield '%s-%s' % (a, b)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# Regeneration script: rewrites this module's category tables in place.
# It recomputes every table from the stdlib unicodedata database and
# overwrites everything between the 'Cc =' marker and the 'def combine('
# marker, preserving the surrounding non-generated code verbatim.
if __name__ == '__main__':  # pragma: no cover
    import unicodedata

    # Seed the two identifier pseudo-categories so they exist even before
    # the first matching codepoint is encountered.
    categories = {'xid_start': [], 'xid_continue': []}

    # Read the current file so its non-generated header/footer can be kept.
    with open(__file__, encoding='utf-8') as fp:
        content = fp.read()

    # Everything before the first category constant, and everything from the
    # first function definition onward, is carried over unchanged.
    header = content[:content.find('Cc =')]
    footer = content[content.find("def combine("):]

    # Walk the entire Unicode codepoint space and bucket each codepoint by
    # its general category.
    for code in range(0x110000):
        c = chr(code)
        cat = unicodedata.category(c)
        if ord(c) == 0xdc00:
            # Hack to avoid combining this combining with the preceding high
            # surrogate, 0xdbff, when doing a repr.
            c = '\\' + c
        elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
            # Escape regex metachars.
            c = '\\' + c
        categories.setdefault(cat, []).append(c)
        # XID_START and XID_CONTINUE are special categories used for matching
        # identifiers in Python 3.
        if c.isidentifier():
            categories['xid_start'].append(c)
        if ('a' + c).isidentifier():
            categories['xid_continue'].append(c)

    # Rewrite the file: preserved header, regenerated tables, preserved code.
    with open(__file__, 'w', encoding='utf-8') as fp:
        fp.write(header)

        for cat in sorted(categories):
            # Collapse consecutive codepoints into 'a-z' style runs, then
            # emit each table as an ASCII-only literal (%a).
            val = ''.join(_handle_runs(categories[cat]))
            fp.write('%s = %a\n\n' % (cat, val))

        # 'cats' lists only the real Unicode general categories; the two
        # identifier pseudo-categories are excluded from it.
        cats = sorted(categories)
        cats.remove('xid_start')
        cats.remove('xid_continue')
        fp.write('cats = %r\n\n' % cats)

        fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))

        fp.write(footer)
|
.venv/Lib/site-packages/pip/_vendor/pygments/util.py
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.util
|
| 3 |
+
~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Utility functions.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import re
|
| 12 |
+
from io import TextIOWrapper
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
split_path_re = re.compile(r'[/\\ ]')
|
| 16 |
+
doctype_lookup_re = re.compile(r'''
|
| 17 |
+
<!DOCTYPE\s+(
|
| 18 |
+
[a-zA-Z_][a-zA-Z0-9]*
|
| 19 |
+
(?: \s+ # optional in HTML5
|
| 20 |
+
[a-zA-Z_][a-zA-Z0-9]*\s+
|
| 21 |
+
"[^"]*")?
|
| 22 |
+
)
|
| 23 |
+
[^>]*>
|
| 24 |
+
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
|
| 25 |
+
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
|
| 26 |
+
re.IGNORECASE | re.DOTALL | re.MULTILINE)
|
| 27 |
+
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class ClassNotFound(ValueError):
    # Subclasses ValueError so callers catching ValueError keep working.
    """Raised if one of the lookup functions didn't find a matching class."""
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class OptionError(Exception):
    """
    This exception will be raised by all option processing functions if
    the type or value of the argument is not correct.
    """
    # Raised by get_choice_opt / get_bool_opt / get_int_opt / get_list_opt.
|
| 39 |
+
|
| 40 |
+
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
    """
    Return ``options[optname]`` (or *default*) after validating it.

    The value must be one of the entries in *allowed*; otherwise an
    :exc:`OptionError` is raised.  When *normcase* is true the value is
    lower-cased before the check.
    """
    value = options.get(optname, default)
    if normcase:
        value = value.lower()
    if value in allowed:
        return value
    choices = ', '.join(map(str, allowed))
    raise OptionError('Value for option %s must be one of %s' %
                      (optname, choices))
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def get_bool_opt(options, optname, default=None):
    """
    Look up *optname* in *options* and coerce it to a bool.

    Accepts real booleans, ints (nonzero -> True) and the strings
    ``1``/``yes``/``true``/``on`` and ``0``/``no``/``false``/``off``
    (matched case-insensitively, so values coming from command-line
    arguments work).  Any other value raises :exc:`OptionError`; a
    missing key yields *default*.
    """
    value = options.get(optname, default)
    # bool first: bool is a subclass of int, so order matters here.
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return bool(value)
    if not isinstance(value, str):
        raise OptionError('Invalid type %r for option %s; use '
                          '1/0, yes/no, true/false, on/off' % (
                              value, optname))
    lowered = value.lower()
    if lowered in ('1', 'yes', 'true', 'on'):
        return True
    if lowered in ('0', 'no', 'false', 'off'):
        return False
    raise OptionError('Invalid value %r for option %s; use '
                      '1/0, yes/no, true/false, on/off' % (
                          value, optname))
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def get_int_opt(options, optname, default=None):
    """As :func:`get_bool_opt`, but interpret the value as an integer."""
    value = options.get(optname, default)
    try:
        return int(value)
    except (TypeError, ValueError) as err:
        # Keep the original wording: 'type' for TypeError, 'value' otherwise.
        kind = 'type' if isinstance(err, TypeError) else 'value'
        raise OptionError('Invalid %s %r for option %s; you '
                          'must give an integer value' % (
                              kind, value, optname))
|
| 98 |
+
|
| 99 |
+
def get_list_opt(options, optname, default=None):
    """
    Return ``options[optname]`` (or *default*) as a list.

    Strings are split at whitespace; lists and tuples are converted to a
    fresh list.  Anything else raises :exc:`OptionError`.
    """
    value = options.get(optname, default)
    if isinstance(value, str):
        return value.split()
    if isinstance(value, (list, tuple)):
        return list(value)
    raise OptionError('Invalid type %r for option %s; you '
                      'must give a list value' % (
                          value, optname))
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def docstring_headline(obj):
    """Return the first paragraph of *obj*'s docstring as one line.

    Lines are joined with single spaces; an empty string is returned when
    *obj* has no docstring.
    """
    doc = obj.__doc__
    if not doc:
        return ''
    pieces = []
    for raw in doc.strip().splitlines():
        stripped = raw.strip()
        if not stripped:
            # Blank line ends the first paragraph.
            break
        pieces.append(" " + stripped)
    return ''.join(pieces).lstrip()
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def make_analysator(f):
    """Return a static text analyser function that returns float values."""
    def text_analyse(text):
        # Any exception from the wrapped analyser means "no opinion".
        try:
            score = f(text)
        except Exception:
            return 0.0
        if not score:
            return 0.0
        # Clamp to the [0.0, 1.0] range; non-numeric results count as 0.
        try:
            return min(1.0, max(0.0, float(score)))
        except (ValueError, TypeError):
            return 0.0
    text_analyse.__doc__ = f.__doc__
    return staticmethod(text_analyse)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def shebang_matches(text, regex):
    r"""Check if the given regular expression matches the last part of the
    shebang if one exists.

    Only the first line of *text* is considered.  The command is the last
    ``/``-, ``\``- or space-separated component of the shebang with any
    ``-opt`` style parameters skipped, and the common Windows executable
    extensions .exe/.cmd/.bat/.bin are accepted.  Matching is
    case-insensitive and anchored at both ends.

    >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
    True
    >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
    False
    """
    newline_at = text.find('\n')
    first_line = (text[:newline_at] if newline_at >= 0 else text).lower()
    if not first_line.startswith('#!'):
        return False
    parts = re.split(r'[/\\ ]', first_line[2:].strip())
    candidates = [part for part in parts if part and not part.startswith('-')]
    if not candidates:
        return False
    command = candidates[-1]
    full_re = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
    return full_re.search(command) is not None
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def doctype_matches(text, regex):
    """Check if the doctype matches a regular expression (if present).

    Note that this method only checks the first part of a DOCTYPE.
    eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
    """
    found = doctype_lookup_re.search(text)
    if found is None:
        # No doctype at all.
        return False
    doctype = found.group(1).strip()
    return re.compile(regex, re.I).match(doctype) is not None
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def html_doctype_matches(text):
    """Check if the file looks like it has a html doctype."""
    # Delegates to doctype_matches(); the pattern matches any doctype whose
    # first word begins with 'html' (case-insensitively, via re.I there).
    return doctype_matches(text, r'html')
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
_looks_like_xml_cache = {}
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def looks_like_xml(text):
    """Check if a doctype exists or if we have some tags."""
    # An XML declaration settles it immediately (and is never cached).
    if xml_decl_re.match(text):
        return True
    cache_key = hash(text)
    cached = _looks_like_xml_cache.get(cache_key)
    if cached is not None:
        return cached
    # A DOCTYPE also counts; note this positive result is not cached,
    # matching the original control flow.
    if doctype_lookup_re.search(text) is not None:
        return True
    # Otherwise look for a matched open/close tag in the first 1000 chars.
    verdict = tag_re.search(text[:1000]) is not None
    _looks_like_xml_cache[cache_key] = verdict
    return verdict
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def surrogatepair(c):
    """Given a unicode character code with length greater than 16 bits,
    return the two 16 bit surrogate pair.
    """
    # From example D28 of:
    # http://www.unicode.org/book/ch03.pdf
    # 0xd7c0 == 0xd800 - (0x10000 >> 10), folding the usual
    # "subtract 0x10000" step into the high-surrogate base.
    high = 0xd7c0 + (c >> 10)
    low = 0xdc00 + (c & 0x3ff)
    return (high, low)
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def format_lines(var_name, seq, raw=False, indent_level=0):
    """Formats a sequence of strings for output."""
    outer = ' ' * (indent_level * 4)
    inner = ' ' * ((indent_level + 1) * 4)
    out = [outer + var_name + ' = (']
    if raw:
        # These should be preformatted reprs of, say, tuples.
        out.extend(inner + item + ',' for item in seq)
    else:
        for item in seq:
            # Appending a double quote forces repr() to single-quote the
            # string; the trailing '"' is then dropped again.
            quoted = repr(item + '"')
            out.append(inner + quoted[:-2] + quoted[-1] + ',')
    out.append(outer + ')')
    return '\n'.join(out)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def duplicates_removed(it, already_seen=()):
    """
    Returns a list with duplicates removed from the iterable `it`.

    Order is preserved.
    """
    seen = set()
    out = []
    for item in it:
        # Skip anything already emitted or pre-excluded by the caller.
        if item not in seen and item not in already_seen:
            out.append(item)
            seen.add(item)
    return out
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
class Future:
    """Generic class to defer some work.

    Handled specially in RegexLexerMeta, to support regex string construction at
    first use.
    """
    def get(self):
        # Subclasses must override this and return the deferred value.
        raise NotImplementedError
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def guess_decode(text):
    """Decode *text* (a bytes object) with a guessed encoding.

    First try UTF-8; this should fail for non-UTF-8 encodings.
    Then try the preferred locale encoding.
    Fall back to latin-1, which always works.

    Returns a ``(decoded_text, encoding_name)`` tuple.
    """
    try:
        text = text.decode('utf-8')
        return text, 'utf-8'
    except UnicodeDecodeError:
        try:
            import locale
            prefencoding = locale.getpreferredencoding()
            # Bug fix: the original called text.decode() with no argument,
            # which decodes as UTF-8 again (and therefore always failed
            # here, despite the docstring); the documented intent is to
            # try the preferred locale encoding.
            text = text.decode(prefencoding)
            return text, prefencoding
        except (UnicodeDecodeError, LookupError):
            text = text.decode('latin1')
            return text, 'latin1'
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def guess_decode_from_terminal(text, term):
    """Decode *text* coming from terminal *term*.

    First try the terminal encoding, if given.
    Then try UTF-8. Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    """
    encoding = getattr(term, 'encoding', None)
    if encoding:
        try:
            decoded = text.decode(encoding)
        except UnicodeDecodeError:
            # Terminal encoding did not fit; fall through to guessing.
            pass
        else:
            return decoded, encoding
    return guess_decode(text)
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def terminal_encoding(term):
    """Return our best guess of encoding for the given *term*."""
    encoding = getattr(term, 'encoding', None)
    if encoding:
        return encoding
    # No (truthy) encoding attribute: fall back to the locale default.
    import locale
    return locale.getpreferredencoding()
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class UnclosingTextIOWrapper(TextIOWrapper):
    """TextIOWrapper that flushes, but never closes, the wrapped buffer."""

    def close(self):
        # Don't close underlying buffer on destruction.
        self.flush()
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/__init__.py
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# module pyparsing.py
|
| 2 |
+
#
|
| 3 |
+
# Copyright (c) 2003-2022 Paul T. McGuire
|
| 4 |
+
#
|
| 5 |
+
# Permission is hereby granted, free of charge, to any person obtaining
|
| 6 |
+
# a copy of this software and associated documentation files (the
|
| 7 |
+
# "Software"), to deal in the Software without restriction, including
|
| 8 |
+
# without limitation the rights to use, copy, modify, merge, publish,
|
| 9 |
+
# distribute, sublicense, and/or sell copies of the Software, and to
|
| 10 |
+
# permit persons to whom the Software is furnished to do so, subject to
|
| 11 |
+
# the following conditions:
|
| 12 |
+
#
|
| 13 |
+
# The above copyright notice and this permission notice shall be
|
| 14 |
+
# included in all copies or substantial portions of the Software.
|
| 15 |
+
#
|
| 16 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 17 |
+
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 18 |
+
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
| 19 |
+
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
| 20 |
+
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
| 21 |
+
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
| 22 |
+
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 23 |
+
#
|
| 24 |
+
|
| 25 |
+
__doc__ = """
|
| 26 |
+
pyparsing module - Classes and methods to define and execute parsing grammars
|
| 27 |
+
=============================================================================
|
| 28 |
+
|
| 29 |
+
The pyparsing module is an alternative approach to creating and
|
| 30 |
+
executing simple grammars, vs. the traditional lex/yacc approach, or the
|
| 31 |
+
use of regular expressions. With pyparsing, you don't need to learn
|
| 32 |
+
a new syntax for defining grammars or matching expressions - the parsing
|
| 33 |
+
module provides a library of classes that you use to construct the
|
| 34 |
+
grammar directly in Python.
|
| 35 |
+
|
| 36 |
+
Here is a program to parse "Hello, World!" (or any greeting of the form
|
| 37 |
+
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
|
| 38 |
+
:class:`Literal`, and :class:`And` elements
|
| 39 |
+
(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
|
| 40 |
+
and the strings are auto-converted to :class:`Literal` expressions)::
|
| 41 |
+
|
| 42 |
+
from pip._vendor.pyparsing import Word, alphas
|
| 43 |
+
|
| 44 |
+
# define grammar of a greeting
|
| 45 |
+
greet = Word(alphas) + "," + Word(alphas) + "!"
|
| 46 |
+
|
| 47 |
+
hello = "Hello, World!"
|
| 48 |
+
print(hello, "->", greet.parse_string(hello))
|
| 49 |
+
|
| 50 |
+
The program outputs the following::
|
| 51 |
+
|
| 52 |
+
Hello, World! -> ['Hello', ',', 'World', '!']
|
| 53 |
+
|
| 54 |
+
The Python representation of the grammar is quite readable, owing to the
|
| 55 |
+
self-explanatory class names, and the use of :class:`'+'<And>`,
|
| 56 |
+
:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
|
| 57 |
+
|
| 58 |
+
The :class:`ParseResults` object returned from
|
| 59 |
+
:class:`ParserElement.parse_string` can be
|
| 60 |
+
accessed as a nested list, a dictionary, or an object with named
|
| 61 |
+
attributes.
|
| 62 |
+
|
| 63 |
+
The pyparsing module handles some of the problems that are typically
|
| 64 |
+
vexing when writing text parsers:
|
| 65 |
+
|
| 66 |
+
- extra or missing whitespace (the above program will also handle
|
| 67 |
+
"Hello,World!", "Hello , World !", etc.)
|
| 68 |
+
- quoted strings
|
| 69 |
+
- embedded comments
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
Getting Started -
|
| 73 |
+
-----------------
|
| 74 |
+
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
|
| 75 |
+
see the base classes that most other pyparsing
|
| 76 |
+
classes inherit from. Use the docstrings for examples of how to:
|
| 77 |
+
|
| 78 |
+
- construct literal match expressions from :class:`Literal` and
|
| 79 |
+
:class:`CaselessLiteral` classes
|
| 80 |
+
- construct character word-group expressions using the :class:`Word`
|
| 81 |
+
class
|
| 82 |
+
- see how to create repetitive expressions using :class:`ZeroOrMore`
|
| 83 |
+
and :class:`OneOrMore` classes
|
| 84 |
+
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
|
| 85 |
+
and :class:`'&'<Each>` operators to combine simple expressions into
|
| 86 |
+
more complex ones
|
| 87 |
+
- associate names with your parsed results using
|
| 88 |
+
:class:`ParserElement.set_results_name`
|
| 89 |
+
- access the parsed data, which is returned as a :class:`ParseResults`
|
| 90 |
+
object
|
| 91 |
+
- find some helpful expression short-cuts like :class:`DelimitedList`
|
| 92 |
+
and :class:`one_of`
|
| 93 |
+
- find more useful common expressions in the :class:`pyparsing_common`
|
| 94 |
+
namespace class
|
| 95 |
+
"""
|
| 96 |
+
from typing import NamedTuple
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class version_info(NamedTuple):
    """Structured pyparsing version number with PEP 440-style rendering."""

    major: int
    minor: int
    micro: int
    releaselevel: str
    serial: int

    @property
    def __version__(self):
        base = f"{self.major}.{self.minor}.{self.micro}"
        if self.releaselevel == "final":
            return base
        # Non-final releases append e.g. 'rc1' or 'b2': the first letter of
        # the release level plus the serial; a leading 'r' is added only for
        # 'c*' levels so 'candidate' renders as 'rc'.
        level = self.releaselevel[0]
        prefix = "r" if level == "c" else ""
        return f"{base}{prefix}{level}{self.serial}"

    def __str__(self):
        return f"{__name__} {self.__version__} / {__version_time__}"

    def __repr__(self):
        fields = ", ".join(
            "{}={!r}".format(name, value)
            for name, value in zip(self._fields, self)
        )
        return f"{__name__}.{type(self).__name__}({fields})"
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
__version_info__ = version_info(3, 1, 0, "final", 1)
|
| 124 |
+
__version_time__ = "18 Jun 2023 14:05 UTC"
|
| 125 |
+
__version__ = __version_info__.__version__
|
| 126 |
+
__versionTime__ = __version_time__
|
| 127 |
+
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
|
| 128 |
+
|
| 129 |
+
from .util import *
|
| 130 |
+
from .exceptions import *
|
| 131 |
+
from .actions import *
|
| 132 |
+
from .core import __diag__, __compat__
|
| 133 |
+
from .results import *
|
| 134 |
+
from .core import * # type: ignore[misc, assignment]
|
| 135 |
+
from .core import _builtin_exprs as core_builtin_exprs
|
| 136 |
+
from .helpers import * # type: ignore[misc, assignment]
|
| 137 |
+
from .helpers import _builtin_exprs as helper_builtin_exprs
|
| 138 |
+
|
| 139 |
+
from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
|
| 140 |
+
from .testing import pyparsing_test as testing
|
| 141 |
+
from .common import (
|
| 142 |
+
pyparsing_common as common,
|
| 143 |
+
_builtin_exprs as common_builtin_exprs,
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
# define backward compat synonyms
|
| 147 |
+
if "pyparsing_unicode" not in globals():
|
| 148 |
+
pyparsing_unicode = unicode # type: ignore[misc]
|
| 149 |
+
if "pyparsing_common" not in globals():
|
| 150 |
+
pyparsing_common = common # type: ignore[misc]
|
| 151 |
+
if "pyparsing_test" not in globals():
|
| 152 |
+
pyparsing_test = testing # type: ignore[misc]
|
| 153 |
+
|
| 154 |
+
core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
__all__ = [
|
| 158 |
+
"__version__",
|
| 159 |
+
"__version_time__",
|
| 160 |
+
"__author__",
|
| 161 |
+
"__compat__",
|
| 162 |
+
"__diag__",
|
| 163 |
+
"And",
|
| 164 |
+
"AtLineStart",
|
| 165 |
+
"AtStringStart",
|
| 166 |
+
"CaselessKeyword",
|
| 167 |
+
"CaselessLiteral",
|
| 168 |
+
"CharsNotIn",
|
| 169 |
+
"CloseMatch",
|
| 170 |
+
"Combine",
|
| 171 |
+
"DelimitedList",
|
| 172 |
+
"Dict",
|
| 173 |
+
"Each",
|
| 174 |
+
"Empty",
|
| 175 |
+
"FollowedBy",
|
| 176 |
+
"Forward",
|
| 177 |
+
"GoToColumn",
|
| 178 |
+
"Group",
|
| 179 |
+
"IndentedBlock",
|
| 180 |
+
"Keyword",
|
| 181 |
+
"LineEnd",
|
| 182 |
+
"LineStart",
|
| 183 |
+
"Literal",
|
| 184 |
+
"Located",
|
| 185 |
+
"PrecededBy",
|
| 186 |
+
"MatchFirst",
|
| 187 |
+
"NoMatch",
|
| 188 |
+
"NotAny",
|
| 189 |
+
"OneOrMore",
|
| 190 |
+
"OnlyOnce",
|
| 191 |
+
"OpAssoc",
|
| 192 |
+
"Opt",
|
| 193 |
+
"Optional",
|
| 194 |
+
"Or",
|
| 195 |
+
"ParseBaseException",
|
| 196 |
+
"ParseElementEnhance",
|
| 197 |
+
"ParseException",
|
| 198 |
+
"ParseExpression",
|
| 199 |
+
"ParseFatalException",
|
| 200 |
+
"ParseResults",
|
| 201 |
+
"ParseSyntaxException",
|
| 202 |
+
"ParserElement",
|
| 203 |
+
"PositionToken",
|
| 204 |
+
"QuotedString",
|
| 205 |
+
"RecursiveGrammarException",
|
| 206 |
+
"Regex",
|
| 207 |
+
"SkipTo",
|
| 208 |
+
"StringEnd",
|
| 209 |
+
"StringStart",
|
| 210 |
+
"Suppress",
|
| 211 |
+
"Token",
|
| 212 |
+
"TokenConverter",
|
| 213 |
+
"White",
|
| 214 |
+
"Word",
|
| 215 |
+
"WordEnd",
|
| 216 |
+
"WordStart",
|
| 217 |
+
"ZeroOrMore",
|
| 218 |
+
"Char",
|
| 219 |
+
"alphanums",
|
| 220 |
+
"alphas",
|
| 221 |
+
"alphas8bit",
|
| 222 |
+
"any_close_tag",
|
| 223 |
+
"any_open_tag",
|
| 224 |
+
"autoname_elements",
|
| 225 |
+
"c_style_comment",
|
| 226 |
+
"col",
|
| 227 |
+
"common_html_entity",
|
| 228 |
+
"condition_as_parse_action",
|
| 229 |
+
"counted_array",
|
| 230 |
+
"cpp_style_comment",
|
| 231 |
+
"dbl_quoted_string",
|
| 232 |
+
"dbl_slash_comment",
|
| 233 |
+
"delimited_list",
|
| 234 |
+
"dict_of",
|
| 235 |
+
"empty",
|
| 236 |
+
"hexnums",
|
| 237 |
+
"html_comment",
|
| 238 |
+
"identchars",
|
| 239 |
+
"identbodychars",
|
| 240 |
+
"infix_notation",
|
| 241 |
+
"java_style_comment",
|
| 242 |
+
"line",
|
| 243 |
+
"line_end",
|
| 244 |
+
"line_start",
|
| 245 |
+
"lineno",
|
| 246 |
+
"make_html_tags",
|
| 247 |
+
"make_xml_tags",
|
| 248 |
+
"match_only_at_col",
|
| 249 |
+
"match_previous_expr",
|
| 250 |
+
"match_previous_literal",
|
| 251 |
+
"nested_expr",
|
| 252 |
+
"null_debug_action",
|
| 253 |
+
"nums",
|
| 254 |
+
"one_of",
|
| 255 |
+
"original_text_for",
|
| 256 |
+
"printables",
|
| 257 |
+
"punc8bit",
|
| 258 |
+
"pyparsing_common",
|
| 259 |
+
"pyparsing_test",
|
| 260 |
+
"pyparsing_unicode",
|
| 261 |
+
"python_style_comment",
|
| 262 |
+
"quoted_string",
|
| 263 |
+
"remove_quotes",
|
| 264 |
+
"replace_with",
|
| 265 |
+
"replace_html_entity",
|
| 266 |
+
"rest_of_line",
|
| 267 |
+
"sgl_quoted_string",
|
| 268 |
+
"srange",
|
| 269 |
+
"string_end",
|
| 270 |
+
"string_start",
|
| 271 |
+
"token_map",
|
| 272 |
+
"trace_parse_action",
|
| 273 |
+
"ungroup",
|
| 274 |
+
"unicode_set",
|
| 275 |
+
"unicode_string",
|
| 276 |
+
"with_attribute",
|
| 277 |
+
"with_class",
|
| 278 |
+
# pre-PEP8 compatibility names
|
| 279 |
+
"__versionTime__",
|
| 280 |
+
"anyCloseTag",
|
| 281 |
+
"anyOpenTag",
|
| 282 |
+
"cStyleComment",
|
| 283 |
+
"commonHTMLEntity",
|
| 284 |
+
"conditionAsParseAction",
|
| 285 |
+
"countedArray",
|
| 286 |
+
"cppStyleComment",
|
| 287 |
+
"dblQuotedString",
|
| 288 |
+
"dblSlashComment",
|
| 289 |
+
"delimitedList",
|
| 290 |
+
"dictOf",
|
| 291 |
+
"htmlComment",
|
| 292 |
+
"indentedBlock",
|
| 293 |
+
"infixNotation",
|
| 294 |
+
"javaStyleComment",
|
| 295 |
+
"lineEnd",
|
| 296 |
+
"lineStart",
|
| 297 |
+
"locatedExpr",
|
| 298 |
+
"makeHTMLTags",
|
| 299 |
+
"makeXMLTags",
|
| 300 |
+
"matchOnlyAtCol",
|
| 301 |
+
"matchPreviousExpr",
|
| 302 |
+
"matchPreviousLiteral",
|
| 303 |
+
"nestedExpr",
|
| 304 |
+
"nullDebugAction",
|
| 305 |
+
"oneOf",
|
| 306 |
+
"opAssoc",
|
| 307 |
+
"originalTextFor",
|
| 308 |
+
"pythonStyleComment",
|
| 309 |
+
"quotedString",
|
| 310 |
+
"removeQuotes",
|
| 311 |
+
"replaceHTMLEntity",
|
| 312 |
+
"replaceWith",
|
| 313 |
+
"restOfLine",
|
| 314 |
+
"sglQuotedString",
|
| 315 |
+
"stringEnd",
|
| 316 |
+
"stringStart",
|
| 317 |
+
"tokenMap",
|
| 318 |
+
"traceParseAction",
|
| 319 |
+
"unicodeString",
|
| 320 |
+
"withAttribute",
|
| 321 |
+
"withClass",
|
| 322 |
+
]
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/actions.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# actions.py
|
| 2 |
+
|
| 3 |
+
from .exceptions import ParseException
|
| 4 |
+
from .util import col, replaced_by_pep8
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class OnlyOnce:
    """
    Wrapper for parse actions, to ensure they are only called once.
    """

    def __init__(self, method_call):
        from .core import _trim_arity

        self.callable = _trim_arity(method_call)
        self.called = False

    def __call__(self, s, l, t):
        if self.called:
            raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
        # Flag is flipped only after a successful call, so an action that
        # raises may be retried (matching the original behavior).
        result = self.callable(s, l, t)
        self.called = True
        return result

    def reset(self):
        """
        Allow the associated parse action to be called once more.
        """

        self.called = False
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def match_only_at_col(n):
|
| 34 |
+
"""
|
| 35 |
+
Helper method for defining parse actions that require matching at
|
| 36 |
+
a specific column in the input text.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
def verify_col(strg, locn, toks):
|
| 40 |
+
if col(locn, strg) != n:
|
| 41 |
+
raise ParseException(strg, locn, f"matched token not at column {n}")
|
| 42 |
+
|
| 43 |
+
return verify_col
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def replace_with(repl_str):
|
| 47 |
+
"""
|
| 48 |
+
Helper method for common parse actions that simply return
|
| 49 |
+
a literal value. Especially useful when used with
|
| 50 |
+
:class:`transform_string<ParserElement.transform_string>` ().
|
| 51 |
+
|
| 52 |
+
Example::
|
| 53 |
+
|
| 54 |
+
num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
|
| 55 |
+
na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
|
| 56 |
+
term = na | num
|
| 57 |
+
|
| 58 |
+
term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
|
| 59 |
+
"""
|
| 60 |
+
return lambda s, l, t: [repl_str]
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def remove_quotes(s, l, t):
|
| 64 |
+
"""
|
| 65 |
+
Helper parse action for removing quotation marks from parsed
|
| 66 |
+
quoted strings.
|
| 67 |
+
|
| 68 |
+
Example::
|
| 69 |
+
|
| 70 |
+
# by default, quotation marks are included in parsed results
|
| 71 |
+
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
|
| 72 |
+
|
| 73 |
+
# use remove_quotes to strip quotation marks from parsed results
|
| 74 |
+
quoted_string.set_parse_action(remove_quotes)
|
| 75 |
+
quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
|
| 76 |
+
"""
|
| 77 |
+
return t[0][1:-1]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def with_attribute(*args, **attr_dict):
|
| 81 |
+
"""
|
| 82 |
+
Helper to create a validating parse action to be used with start
|
| 83 |
+
tags created with :class:`make_xml_tags` or
|
| 84 |
+
:class:`make_html_tags`. Use ``with_attribute`` to qualify
|
| 85 |
+
a starting tag with a required attribute value, to avoid false
|
| 86 |
+
matches on common tags such as ``<TD>`` or ``<DIV>``.
|
| 87 |
+
|
| 88 |
+
Call ``with_attribute`` with a series of attribute names and
|
| 89 |
+
values. Specify the list of filter attributes names and values as:
|
| 90 |
+
|
| 91 |
+
- keyword arguments, as in ``(align="right")``, or
|
| 92 |
+
- as an explicit dict with ``**`` operator, when an attribute
|
| 93 |
+
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
|
| 94 |
+
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
|
| 95 |
+
|
| 96 |
+
For attribute names with a namespace prefix, you must use the second
|
| 97 |
+
form. Attribute names are matched insensitive to upper/lower case.
|
| 98 |
+
|
| 99 |
+
If just testing for ``class`` (with or without a namespace), use
|
| 100 |
+
:class:`with_class`.
|
| 101 |
+
|
| 102 |
+
To verify that the attribute exists, but without specifying a value,
|
| 103 |
+
pass ``with_attribute.ANY_VALUE`` as the value.
|
| 104 |
+
|
| 105 |
+
Example::
|
| 106 |
+
|
| 107 |
+
html = '''
|
| 108 |
+
<div>
|
| 109 |
+
Some text
|
| 110 |
+
<div type="grid">1 4 0 1 0</div>
|
| 111 |
+
<div type="graph">1,3 2,3 1,1</div>
|
| 112 |
+
<div>this has no type</div>
|
| 113 |
+
</div>
|
| 114 |
+
|
| 115 |
+
'''
|
| 116 |
+
div,div_end = make_html_tags("div")
|
| 117 |
+
|
| 118 |
+
# only match div tag having a type attribute with value "grid"
|
| 119 |
+
div_grid = div().set_parse_action(with_attribute(type="grid"))
|
| 120 |
+
grid_expr = div_grid + SkipTo(div | div_end)("body")
|
| 121 |
+
for grid_header in grid_expr.search_string(html):
|
| 122 |
+
print(grid_header.body)
|
| 123 |
+
|
| 124 |
+
# construct a match with any div tag having a type attribute, regardless of the value
|
| 125 |
+
div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
|
| 126 |
+
div_expr = div_any_type + SkipTo(div | div_end)("body")
|
| 127 |
+
for div_header in div_expr.search_string(html):
|
| 128 |
+
print(div_header.body)
|
| 129 |
+
|
| 130 |
+
prints::
|
| 131 |
+
|
| 132 |
+
1 4 0 1 0
|
| 133 |
+
|
| 134 |
+
1 4 0 1 0
|
| 135 |
+
1,3 2,3 1,1
|
| 136 |
+
"""
|
| 137 |
+
if args:
|
| 138 |
+
attrs = args[:]
|
| 139 |
+
else:
|
| 140 |
+
attrs = attr_dict.items()
|
| 141 |
+
attrs = [(k, v) for k, v in attrs]
|
| 142 |
+
|
| 143 |
+
def pa(s, l, tokens):
|
| 144 |
+
for attrName, attrValue in attrs:
|
| 145 |
+
if attrName not in tokens:
|
| 146 |
+
raise ParseException(s, l, "no matching attribute " + attrName)
|
| 147 |
+
if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
|
| 148 |
+
raise ParseException(
|
| 149 |
+
s,
|
| 150 |
+
l,
|
| 151 |
+
f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}",
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
return pa
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
with_attribute.ANY_VALUE = object() # type: ignore [attr-defined]
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def with_class(classname, namespace=""):
|
| 161 |
+
"""
|
| 162 |
+
Simplified version of :class:`with_attribute` when
|
| 163 |
+
matching on a div class - made difficult because ``class`` is
|
| 164 |
+
a reserved word in Python.
|
| 165 |
+
|
| 166 |
+
Example::
|
| 167 |
+
|
| 168 |
+
html = '''
|
| 169 |
+
<div>
|
| 170 |
+
Some text
|
| 171 |
+
<div class="grid">1 4 0 1 0</div>
|
| 172 |
+
<div class="graph">1,3 2,3 1,1</div>
|
| 173 |
+
<div>this <div> has no class</div>
|
| 174 |
+
</div>
|
| 175 |
+
|
| 176 |
+
'''
|
| 177 |
+
div,div_end = make_html_tags("div")
|
| 178 |
+
div_grid = div().set_parse_action(with_class("grid"))
|
| 179 |
+
|
| 180 |
+
grid_expr = div_grid + SkipTo(div | div_end)("body")
|
| 181 |
+
for grid_header in grid_expr.search_string(html):
|
| 182 |
+
print(grid_header.body)
|
| 183 |
+
|
| 184 |
+
div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE))
|
| 185 |
+
div_expr = div_any_type + SkipTo(div | div_end)("body")
|
| 186 |
+
for div_header in div_expr.search_string(html):
|
| 187 |
+
print(div_header.body)
|
| 188 |
+
|
| 189 |
+
prints::
|
| 190 |
+
|
| 191 |
+
1 4 0 1 0
|
| 192 |
+
|
| 193 |
+
1 4 0 1 0
|
| 194 |
+
1,3 2,3 1,1
|
| 195 |
+
"""
|
| 196 |
+
classattr = f"{namespace}:class" if namespace else "class"
|
| 197 |
+
return with_attribute(**{classattr: classname})
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
# pre-PEP8 compatibility symbols
|
| 201 |
+
# fmt: off
|
| 202 |
+
@replaced_by_pep8(replace_with)
|
| 203 |
+
def replaceWith(): ...
|
| 204 |
+
|
| 205 |
+
@replaced_by_pep8(remove_quotes)
|
| 206 |
+
def removeQuotes(): ...
|
| 207 |
+
|
| 208 |
+
@replaced_by_pep8(with_attribute)
|
| 209 |
+
def withAttribute(): ...
|
| 210 |
+
|
| 211 |
+
@replaced_by_pep8(with_class)
|
| 212 |
+
def withClass(): ...
|
| 213 |
+
|
| 214 |
+
@replaced_by_pep8(match_only_at_col)
|
| 215 |
+
def matchOnlyAtCol(): ...
|
| 216 |
+
|
| 217 |
+
# fmt: on
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/common.py
ADDED
|
@@ -0,0 +1,432 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# common.py
|
| 2 |
+
from .core import *
|
| 3 |
+
from .helpers import DelimitedList, any_open_tag, any_close_tag
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# some other useful expressions - using lower-case class name since we are really using this as a namespace
|
| 8 |
+
class pyparsing_common:
|
| 9 |
+
"""Here are some common low-level expressions that may be useful in
|
| 10 |
+
jump-starting parser development:
|
| 11 |
+
|
| 12 |
+
- numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
|
| 13 |
+
:class:`scientific notation<sci_real>`)
|
| 14 |
+
- common :class:`programming identifiers<identifier>`
|
| 15 |
+
- network addresses (:class:`MAC<mac_address>`,
|
| 16 |
+
:class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
|
| 17 |
+
- ISO8601 :class:`dates<iso8601_date>` and
|
| 18 |
+
:class:`datetime<iso8601_datetime>`
|
| 19 |
+
- :class:`UUID<uuid>`
|
| 20 |
+
- :class:`comma-separated list<comma_separated_list>`
|
| 21 |
+
- :class:`url`
|
| 22 |
+
|
| 23 |
+
Parse actions:
|
| 24 |
+
|
| 25 |
+
- :class:`convert_to_integer`
|
| 26 |
+
- :class:`convert_to_float`
|
| 27 |
+
- :class:`convert_to_date`
|
| 28 |
+
- :class:`convert_to_datetime`
|
| 29 |
+
- :class:`strip_html_tags`
|
| 30 |
+
- :class:`upcase_tokens`
|
| 31 |
+
- :class:`downcase_tokens`
|
| 32 |
+
|
| 33 |
+
Example::
|
| 34 |
+
|
| 35 |
+
pyparsing_common.number.run_tests('''
|
| 36 |
+
# any int or real number, returned as the appropriate type
|
| 37 |
+
100
|
| 38 |
+
-100
|
| 39 |
+
+100
|
| 40 |
+
3.14159
|
| 41 |
+
6.02e23
|
| 42 |
+
1e-12
|
| 43 |
+
''')
|
| 44 |
+
|
| 45 |
+
pyparsing_common.fnumber.run_tests('''
|
| 46 |
+
# any int or real number, returned as float
|
| 47 |
+
100
|
| 48 |
+
-100
|
| 49 |
+
+100
|
| 50 |
+
3.14159
|
| 51 |
+
6.02e23
|
| 52 |
+
1e-12
|
| 53 |
+
''')
|
| 54 |
+
|
| 55 |
+
pyparsing_common.hex_integer.run_tests('''
|
| 56 |
+
# hex numbers
|
| 57 |
+
100
|
| 58 |
+
FF
|
| 59 |
+
''')
|
| 60 |
+
|
| 61 |
+
pyparsing_common.fraction.run_tests('''
|
| 62 |
+
# fractions
|
| 63 |
+
1/2
|
| 64 |
+
-3/4
|
| 65 |
+
''')
|
| 66 |
+
|
| 67 |
+
pyparsing_common.mixed_integer.run_tests('''
|
| 68 |
+
# mixed fractions
|
| 69 |
+
1
|
| 70 |
+
1/2
|
| 71 |
+
-3/4
|
| 72 |
+
1-3/4
|
| 73 |
+
''')
|
| 74 |
+
|
| 75 |
+
import uuid
|
| 76 |
+
pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID))
|
| 77 |
+
pyparsing_common.uuid.run_tests('''
|
| 78 |
+
# uuid
|
| 79 |
+
12345678-1234-5678-1234-567812345678
|
| 80 |
+
''')
|
| 81 |
+
|
| 82 |
+
prints::
|
| 83 |
+
|
| 84 |
+
# any int or real number, returned as the appropriate type
|
| 85 |
+
100
|
| 86 |
+
[100]
|
| 87 |
+
|
| 88 |
+
-100
|
| 89 |
+
[-100]
|
| 90 |
+
|
| 91 |
+
+100
|
| 92 |
+
[100]
|
| 93 |
+
|
| 94 |
+
3.14159
|
| 95 |
+
[3.14159]
|
| 96 |
+
|
| 97 |
+
6.02e23
|
| 98 |
+
[6.02e+23]
|
| 99 |
+
|
| 100 |
+
1e-12
|
| 101 |
+
[1e-12]
|
| 102 |
+
|
| 103 |
+
# any int or real number, returned as float
|
| 104 |
+
100
|
| 105 |
+
[100.0]
|
| 106 |
+
|
| 107 |
+
-100
|
| 108 |
+
[-100.0]
|
| 109 |
+
|
| 110 |
+
+100
|
| 111 |
+
[100.0]
|
| 112 |
+
|
| 113 |
+
3.14159
|
| 114 |
+
[3.14159]
|
| 115 |
+
|
| 116 |
+
6.02e23
|
| 117 |
+
[6.02e+23]
|
| 118 |
+
|
| 119 |
+
1e-12
|
| 120 |
+
[1e-12]
|
| 121 |
+
|
| 122 |
+
# hex numbers
|
| 123 |
+
100
|
| 124 |
+
[256]
|
| 125 |
+
|
| 126 |
+
FF
|
| 127 |
+
[255]
|
| 128 |
+
|
| 129 |
+
# fractions
|
| 130 |
+
1/2
|
| 131 |
+
[0.5]
|
| 132 |
+
|
| 133 |
+
-3/4
|
| 134 |
+
[-0.75]
|
| 135 |
+
|
| 136 |
+
# mixed fractions
|
| 137 |
+
1
|
| 138 |
+
[1]
|
| 139 |
+
|
| 140 |
+
1/2
|
| 141 |
+
[0.5]
|
| 142 |
+
|
| 143 |
+
-3/4
|
| 144 |
+
[-0.75]
|
| 145 |
+
|
| 146 |
+
1-3/4
|
| 147 |
+
[1.75]
|
| 148 |
+
|
| 149 |
+
# uuid
|
| 150 |
+
12345678-1234-5678-1234-567812345678
|
| 151 |
+
[UUID('12345678-1234-5678-1234-567812345678')]
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
convert_to_integer = token_map(int)
|
| 155 |
+
"""
|
| 156 |
+
Parse action for converting parsed integers to Python int
|
| 157 |
+
"""
|
| 158 |
+
|
| 159 |
+
convert_to_float = token_map(float)
|
| 160 |
+
"""
|
| 161 |
+
Parse action for converting parsed numbers to Python float
|
| 162 |
+
"""
|
| 163 |
+
|
| 164 |
+
integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
|
| 165 |
+
"""expression that parses an unsigned integer, returns an int"""
|
| 166 |
+
|
| 167 |
+
hex_integer = (
|
| 168 |
+
Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
|
| 169 |
+
)
|
| 170 |
+
"""expression that parses a hexadecimal integer, returns an int"""
|
| 171 |
+
|
| 172 |
+
signed_integer = (
|
| 173 |
+
Regex(r"[+-]?\d+")
|
| 174 |
+
.set_name("signed integer")
|
| 175 |
+
.set_parse_action(convert_to_integer)
|
| 176 |
+
)
|
| 177 |
+
"""expression that parses an integer with optional leading sign, returns an int"""
|
| 178 |
+
|
| 179 |
+
fraction = (
|
| 180 |
+
signed_integer().set_parse_action(convert_to_float)
|
| 181 |
+
+ "/"
|
| 182 |
+
+ signed_integer().set_parse_action(convert_to_float)
|
| 183 |
+
).set_name("fraction")
|
| 184 |
+
"""fractional expression of an integer divided by an integer, returns a float"""
|
| 185 |
+
fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
|
| 186 |
+
|
| 187 |
+
mixed_integer = (
|
| 188 |
+
fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
|
| 189 |
+
).set_name("fraction or mixed integer-fraction")
|
| 190 |
+
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
|
| 191 |
+
mixed_integer.add_parse_action(sum)
|
| 192 |
+
|
| 193 |
+
real = (
|
| 194 |
+
Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
|
| 195 |
+
.set_name("real number")
|
| 196 |
+
.set_parse_action(convert_to_float)
|
| 197 |
+
)
|
| 198 |
+
"""expression that parses a floating point number and returns a float"""
|
| 199 |
+
|
| 200 |
+
sci_real = (
|
| 201 |
+
Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
|
| 202 |
+
.set_name("real number with scientific notation")
|
| 203 |
+
.set_parse_action(convert_to_float)
|
| 204 |
+
)
|
| 205 |
+
"""expression that parses a floating point number with optional
|
| 206 |
+
scientific notation and returns a float"""
|
| 207 |
+
|
| 208 |
+
# streamlining this expression makes the docs nicer-looking
|
| 209 |
+
number = (sci_real | real | signed_integer).setName("number").streamline()
|
| 210 |
+
"""any numeric expression, returns the corresponding Python type"""
|
| 211 |
+
|
| 212 |
+
fnumber = (
|
| 213 |
+
Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
|
| 214 |
+
.set_name("fnumber")
|
| 215 |
+
.set_parse_action(convert_to_float)
|
| 216 |
+
)
|
| 217 |
+
"""any int or real number, returned as float"""
|
| 218 |
+
|
| 219 |
+
identifier = Word(identchars, identbodychars).set_name("identifier")
|
| 220 |
+
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
|
| 221 |
+
|
| 222 |
+
ipv4_address = Regex(
|
| 223 |
+
r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
|
| 224 |
+
).set_name("IPv4 address")
|
| 225 |
+
"IPv4 address (``0.0.0.0 - 255.255.255.255``)"
|
| 226 |
+
|
| 227 |
+
_ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
|
| 228 |
+
_full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
|
| 229 |
+
"full IPv6 address"
|
| 230 |
+
)
|
| 231 |
+
_short_ipv6_address = (
|
| 232 |
+
Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
|
| 233 |
+
+ "::"
|
| 234 |
+
+ Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
|
| 235 |
+
).set_name("short IPv6 address")
|
| 236 |
+
_short_ipv6_address.add_condition(
|
| 237 |
+
lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
|
| 238 |
+
)
|
| 239 |
+
_mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
|
| 240 |
+
ipv6_address = Combine(
|
| 241 |
+
(_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
|
| 242 |
+
"IPv6 address"
|
| 243 |
+
)
|
| 244 |
+
).set_name("IPv6 address")
|
| 245 |
+
"IPv6 address (long, short, or mixed form)"
|
| 246 |
+
|
| 247 |
+
mac_address = Regex(
|
| 248 |
+
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
|
| 249 |
+
).set_name("MAC address")
|
| 250 |
+
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
|
| 251 |
+
|
| 252 |
+
@staticmethod
|
| 253 |
+
def convert_to_date(fmt: str = "%Y-%m-%d"):
|
| 254 |
+
"""
|
| 255 |
+
Helper to create a parse action for converting parsed date string to Python datetime.date
|
| 256 |
+
|
| 257 |
+
Params -
|
| 258 |
+
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
|
| 259 |
+
|
| 260 |
+
Example::
|
| 261 |
+
|
| 262 |
+
date_expr = pyparsing_common.iso8601_date.copy()
|
| 263 |
+
date_expr.set_parse_action(pyparsing_common.convert_to_date())
|
| 264 |
+
print(date_expr.parse_string("1999-12-31"))
|
| 265 |
+
|
| 266 |
+
prints::
|
| 267 |
+
|
| 268 |
+
[datetime.date(1999, 12, 31)]
|
| 269 |
+
"""
|
| 270 |
+
|
| 271 |
+
def cvt_fn(ss, ll, tt):
|
| 272 |
+
try:
|
| 273 |
+
return datetime.strptime(tt[0], fmt).date()
|
| 274 |
+
except ValueError as ve:
|
| 275 |
+
raise ParseException(ss, ll, str(ve))
|
| 276 |
+
|
| 277 |
+
return cvt_fn
|
| 278 |
+
|
| 279 |
+
@staticmethod
|
| 280 |
+
def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
|
| 281 |
+
"""Helper to create a parse action for converting parsed
|
| 282 |
+
datetime string to Python datetime.datetime
|
| 283 |
+
|
| 284 |
+
Params -
|
| 285 |
+
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
|
| 286 |
+
|
| 287 |
+
Example::
|
| 288 |
+
|
| 289 |
+
dt_expr = pyparsing_common.iso8601_datetime.copy()
|
| 290 |
+
dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())
|
| 291 |
+
print(dt_expr.parse_string("1999-12-31T23:59:59.999"))
|
| 292 |
+
|
| 293 |
+
prints::
|
| 294 |
+
|
| 295 |
+
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
|
| 296 |
+
"""
|
| 297 |
+
|
| 298 |
+
def cvt_fn(s, l, t):
|
| 299 |
+
try:
|
| 300 |
+
return datetime.strptime(t[0], fmt)
|
| 301 |
+
except ValueError as ve:
|
| 302 |
+
raise ParseException(s, l, str(ve))
|
| 303 |
+
|
| 304 |
+
return cvt_fn
|
| 305 |
+
|
| 306 |
+
iso8601_date = Regex(
|
| 307 |
+
r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
|
| 308 |
+
).set_name("ISO8601 date")
|
| 309 |
+
"ISO8601 date (``yyyy-mm-dd``)"
|
| 310 |
+
|
| 311 |
+
iso8601_datetime = Regex(
|
| 312 |
+
r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
|
| 313 |
+
).set_name("ISO8601 datetime")
|
| 314 |
+
"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
|
| 315 |
+
|
| 316 |
+
uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
|
| 317 |
+
"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
|
| 318 |
+
|
| 319 |
+
_html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
|
| 320 |
+
|
| 321 |
+
@staticmethod
|
| 322 |
+
def strip_html_tags(s: str, l: int, tokens: ParseResults):
|
| 323 |
+
"""Parse action to remove HTML tags from web page HTML source
|
| 324 |
+
|
| 325 |
+
Example::
|
| 326 |
+
|
| 327 |
+
# strip HTML links from normal text
|
| 328 |
+
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
|
| 329 |
+
td, td_end = make_html_tags("TD")
|
| 330 |
+
table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end
|
| 331 |
+
print(table_text.parse_string(text).body)
|
| 332 |
+
|
| 333 |
+
Prints::
|
| 334 |
+
|
| 335 |
+
More info at the pyparsing wiki page
|
| 336 |
+
"""
|
| 337 |
+
return pyparsing_common._html_stripper.transform_string(tokens[0])
|
| 338 |
+
|
| 339 |
+
_commasepitem = (
|
| 340 |
+
Combine(
|
| 341 |
+
OneOrMore(
|
| 342 |
+
~Literal(",")
|
| 343 |
+
+ ~LineEnd()
|
| 344 |
+
+ Word(printables, exclude_chars=",")
|
| 345 |
+
+ Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
|
| 346 |
+
)
|
| 347 |
+
)
|
| 348 |
+
.streamline()
|
| 349 |
+
.set_name("commaItem")
|
| 350 |
+
)
|
| 351 |
+
comma_separated_list = DelimitedList(
|
| 352 |
+
Opt(quoted_string.copy() | _commasepitem, default="")
|
| 353 |
+
).set_name("comma separated list")
|
| 354 |
+
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
|
| 355 |
+
|
| 356 |
+
upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
|
| 357 |
+
"""Parse action to convert tokens to upper case."""
|
| 358 |
+
|
| 359 |
+
downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
|
| 360 |
+
"""Parse action to convert tokens to lower case."""
|
| 361 |
+
|
| 362 |
+
# fmt: off
|
| 363 |
+
url = Regex(
|
| 364 |
+
# https://mathiasbynens.be/demo/url-regex
|
| 365 |
+
# https://gist.github.com/dperini/729294
|
| 366 |
+
r"(?P<url>" +
|
| 367 |
+
# protocol identifier (optional)
|
| 368 |
+
# short syntax // still required
|
| 369 |
+
r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
|
| 370 |
+
# user:pass BasicAuth (optional)
|
| 371 |
+
r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
|
| 372 |
+
r"(?P<host>" +
|
| 373 |
+
# IP address exclusion
|
| 374 |
+
# private & local networks
|
| 375 |
+
r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
|
| 376 |
+
r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
|
| 377 |
+
r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
|
| 378 |
+
# IP address dotted notation octets
|
| 379 |
+
# excludes loopback network 0.0.0.0
|
| 380 |
+
# excludes reserved space >= 224.0.0.0
|
| 381 |
+
# excludes network & broadcast addresses
|
| 382 |
+
# (first & last IP address of each class)
|
| 383 |
+
r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
|
| 384 |
+
r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
|
| 385 |
+
r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
|
| 386 |
+
r"|" +
|
| 387 |
+
# host & domain names, may end with dot
|
| 388 |
+
# can be replaced by a shortest alternative
|
| 389 |
+
# (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
|
| 390 |
+
r"(?:" +
|
| 391 |
+
r"(?:" +
|
| 392 |
+
r"[a-z0-9\u00a1-\uffff]" +
|
| 393 |
+
r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
|
| 394 |
+
r")?" +
|
| 395 |
+
r"[a-z0-9\u00a1-\uffff]\." +
|
| 396 |
+
r")+" +
|
| 397 |
+
# TLD identifier name, may end with dot
|
| 398 |
+
r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
|
| 399 |
+
r")" +
|
| 400 |
+
# port number (optional)
|
| 401 |
+
r"(:(?P<port>\d{2,5}))?" +
|
| 402 |
+
# resource path (optional)
|
| 403 |
+
r"(?P<path>\/[^?# ]*)?" +
|
| 404 |
+
# query string (optional)
|
| 405 |
+
r"(\?(?P<query>[^#]*))?" +
|
| 406 |
+
# fragment (optional)
|
| 407 |
+
r"(#(?P<fragment>\S*))?" +
|
| 408 |
+
r")"
|
| 409 |
+
).set_name("url")
|
| 410 |
+
"""URL (http/https/ftp scheme)"""
|
| 411 |
+
# fmt: on
|
| 412 |
+
|
| 413 |
+
# pre-PEP8 compatibility names
|
| 414 |
+
convertToInteger = convert_to_integer
|
| 415 |
+
"""Deprecated - use :class:`convert_to_integer`"""
|
| 416 |
+
convertToFloat = convert_to_float
|
| 417 |
+
"""Deprecated - use :class:`convert_to_float`"""
|
| 418 |
+
convertToDate = convert_to_date
|
| 419 |
+
"""Deprecated - use :class:`convert_to_date`"""
|
| 420 |
+
convertToDatetime = convert_to_datetime
|
| 421 |
+
"""Deprecated - use :class:`convert_to_datetime`"""
|
| 422 |
+
stripHTMLTags = strip_html_tags
|
| 423 |
+
"""Deprecated - use :class:`strip_html_tags`"""
|
| 424 |
+
upcaseTokens = upcase_tokens
|
| 425 |
+
"""Deprecated - use :class:`upcase_tokens`"""
|
| 426 |
+
downcaseTokens = downcase_tokens
|
| 427 |
+
"""Deprecated - use :class:`downcase_tokens`"""
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
_builtin_exprs = [
|
| 431 |
+
v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
|
| 432 |
+
]
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/core.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/diagram/__init__.py
ADDED
|
@@ -0,0 +1,656 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
import railroad
|
| 3 |
+
from pip._vendor import pyparsing
|
| 4 |
+
import typing
|
| 5 |
+
from typing import (
|
| 6 |
+
List,
|
| 7 |
+
NamedTuple,
|
| 8 |
+
Generic,
|
| 9 |
+
TypeVar,
|
| 10 |
+
Dict,
|
| 11 |
+
Callable,
|
| 12 |
+
Set,
|
| 13 |
+
Iterable,
|
| 14 |
+
)
|
| 15 |
+
from jinja2 import Template
|
| 16 |
+
from io import StringIO
|
| 17 |
+
import inspect
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
jinja2_template_source = """\
|
| 21 |
+
{% if not embed %}
|
| 22 |
+
<!DOCTYPE html>
|
| 23 |
+
<html>
|
| 24 |
+
<head>
|
| 25 |
+
{% endif %}
|
| 26 |
+
{% if not head %}
|
| 27 |
+
<style>
|
| 28 |
+
.railroad-heading {
|
| 29 |
+
font-family: monospace;
|
| 30 |
+
}
|
| 31 |
+
</style>
|
| 32 |
+
{% else %}
|
| 33 |
+
{{ head | safe }}
|
| 34 |
+
{% endif %}
|
| 35 |
+
{% if not embed %}
|
| 36 |
+
</head>
|
| 37 |
+
<body>
|
| 38 |
+
{% endif %}
|
| 39 |
+
{{ body | safe }}
|
| 40 |
+
{% for diagram in diagrams %}
|
| 41 |
+
<div class="railroad-group">
|
| 42 |
+
<h1 class="railroad-heading">{{ diagram.title }}</h1>
|
| 43 |
+
<div class="railroad-description">{{ diagram.text }}</div>
|
| 44 |
+
<div class="railroad-svg">
|
| 45 |
+
{{ diagram.svg }}
|
| 46 |
+
</div>
|
| 47 |
+
</div>
|
| 48 |
+
{% endfor %}
|
| 49 |
+
{% if not embed %}
|
| 50 |
+
</body>
|
| 51 |
+
</html>
|
| 52 |
+
{% endif %}
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
template = Template(jinja2_template_source)
|
| 56 |
+
|
| 57 |
+
# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
|
| 58 |
+
NamedDiagram = NamedTuple(
|
| 59 |
+
"NamedDiagram",
|
| 60 |
+
[("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
|
| 61 |
+
)
|
| 62 |
+
"""
|
| 63 |
+
A simple structure for associating a name with a railroad diagram
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
T = TypeVar("T")
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class EachItem(railroad.Group):
|
| 70 |
+
"""
|
| 71 |
+
Custom railroad item to compose a:
|
| 72 |
+
- Group containing a
|
| 73 |
+
- OneOrMore containing a
|
| 74 |
+
- Choice of the elements in the Each
|
| 75 |
+
with the group label indicating that all must be matched
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
all_label = "[ALL]"
|
| 79 |
+
|
| 80 |
+
def __init__(self, *items):
|
| 81 |
+
choice_item = railroad.Choice(len(items) - 1, *items)
|
| 82 |
+
one_or_more_item = railroad.OneOrMore(item=choice_item)
|
| 83 |
+
super().__init__(one_or_more_item, label=self.all_label)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class AnnotatedItem(railroad.Group):
|
| 87 |
+
"""
|
| 88 |
+
Simple subclass of Group that creates an annotation label
|
| 89 |
+
"""
|
| 90 |
+
|
| 91 |
+
def __init__(self, label: str, item):
|
| 92 |
+
super().__init__(item=item, label="[{}]".format(label) if label else label)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class EditablePartial(Generic[T]):
|
| 96 |
+
"""
|
| 97 |
+
Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
|
| 98 |
+
constructed.
|
| 99 |
+
"""
|
| 100 |
+
|
| 101 |
+
# We need this here because the railroad constructors actually transform the data, so can't be called until the
|
| 102 |
+
# entire tree is assembled
|
| 103 |
+
|
| 104 |
+
def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
|
| 105 |
+
self.func = func
|
| 106 |
+
self.args = args
|
| 107 |
+
self.kwargs = kwargs
|
| 108 |
+
|
| 109 |
+
@classmethod
|
| 110 |
+
def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
|
| 111 |
+
"""
|
| 112 |
+
If you call this function in the same way that you would call the constructor, it will store the arguments
|
| 113 |
+
as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
|
| 114 |
+
"""
|
| 115 |
+
return EditablePartial(func=func, args=list(args), kwargs=kwargs)
|
| 116 |
+
|
| 117 |
+
@property
|
| 118 |
+
def name(self):
|
| 119 |
+
return self.kwargs["name"]
|
| 120 |
+
|
| 121 |
+
def __call__(self) -> T:
|
| 122 |
+
"""
|
| 123 |
+
Evaluate the partial and return the result
|
| 124 |
+
"""
|
| 125 |
+
args = self.args.copy()
|
| 126 |
+
kwargs = self.kwargs.copy()
|
| 127 |
+
|
| 128 |
+
# This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
|
| 129 |
+
# args=['list', 'of', 'things'])
|
| 130 |
+
arg_spec = inspect.getfullargspec(self.func)
|
| 131 |
+
if arg_spec.varargs in self.kwargs:
|
| 132 |
+
args += kwargs.pop(arg_spec.varargs)
|
| 133 |
+
|
| 134 |
+
return self.func(*args, **kwargs)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def railroad_to_html(diagrams: List[NamedDiagram], embed=False, **kwargs) -> str:
|
| 138 |
+
"""
|
| 139 |
+
Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
|
| 140 |
+
:params kwargs: kwargs to be passed in to the template
|
| 141 |
+
"""
|
| 142 |
+
data = []
|
| 143 |
+
for diagram in diagrams:
|
| 144 |
+
if diagram.diagram is None:
|
| 145 |
+
continue
|
| 146 |
+
io = StringIO()
|
| 147 |
+
try:
|
| 148 |
+
css = kwargs.get('css')
|
| 149 |
+
diagram.diagram.writeStandalone(io.write, css=css)
|
| 150 |
+
except AttributeError:
|
| 151 |
+
diagram.diagram.writeSvg(io.write)
|
| 152 |
+
title = diagram.name
|
| 153 |
+
if diagram.index == 0:
|
| 154 |
+
title += " (root)"
|
| 155 |
+
data.append({"title": title, "text": "", "svg": io.getvalue()})
|
| 156 |
+
|
| 157 |
+
return template.render(diagrams=data, embed=embed, **kwargs)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def resolve_partial(partial: "EditablePartial[T]") -> T:
|
| 161 |
+
"""
|
| 162 |
+
Recursively resolves a collection of Partials into whatever type they are
|
| 163 |
+
"""
|
| 164 |
+
if isinstance(partial, EditablePartial):
|
| 165 |
+
partial.args = resolve_partial(partial.args)
|
| 166 |
+
partial.kwargs = resolve_partial(partial.kwargs)
|
| 167 |
+
return partial()
|
| 168 |
+
elif isinstance(partial, list):
|
| 169 |
+
return [resolve_partial(x) for x in partial]
|
| 170 |
+
elif isinstance(partial, dict):
|
| 171 |
+
return {key: resolve_partial(x) for key, x in partial.items()}
|
| 172 |
+
else:
|
| 173 |
+
return partial
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def to_railroad(
|
| 177 |
+
element: pyparsing.ParserElement,
|
| 178 |
+
diagram_kwargs: typing.Optional[dict] = None,
|
| 179 |
+
vertical: int = 3,
|
| 180 |
+
show_results_names: bool = False,
|
| 181 |
+
show_groups: bool = False,
|
| 182 |
+
) -> List[NamedDiagram]:
|
| 183 |
+
"""
|
| 184 |
+
Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
|
| 185 |
+
creation if you want to access the Railroad tree before it is converted to HTML
|
| 186 |
+
:param element: base element of the parser being diagrammed
|
| 187 |
+
:param diagram_kwargs: kwargs to pass to the Diagram() constructor
|
| 188 |
+
:param vertical: (optional) - int - limit at which number of alternatives should be
|
| 189 |
+
shown vertically instead of horizontally
|
| 190 |
+
:param show_results_names - bool to indicate whether results name annotations should be
|
| 191 |
+
included in the diagram
|
| 192 |
+
:param show_groups - bool to indicate whether groups should be highlighted with an unlabeled
|
| 193 |
+
surrounding box
|
| 194 |
+
"""
|
| 195 |
+
# Convert the whole tree underneath the root
|
| 196 |
+
lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
|
| 197 |
+
_to_diagram_element(
|
| 198 |
+
element,
|
| 199 |
+
lookup=lookup,
|
| 200 |
+
parent=None,
|
| 201 |
+
vertical=vertical,
|
| 202 |
+
show_results_names=show_results_names,
|
| 203 |
+
show_groups=show_groups,
|
| 204 |
+
)
|
| 205 |
+
|
| 206 |
+
root_id = id(element)
|
| 207 |
+
# Convert the root if it hasn't been already
|
| 208 |
+
if root_id in lookup:
|
| 209 |
+
if not element.customName:
|
| 210 |
+
lookup[root_id].name = ""
|
| 211 |
+
lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
|
| 212 |
+
|
| 213 |
+
# Now that we're finished, we can convert from intermediate structures into Railroad elements
|
| 214 |
+
diags = list(lookup.diagrams.values())
|
| 215 |
+
if len(diags) > 1:
|
| 216 |
+
# collapse out duplicate diags with the same name
|
| 217 |
+
seen = set()
|
| 218 |
+
deduped_diags = []
|
| 219 |
+
for d in diags:
|
| 220 |
+
# don't extract SkipTo elements, they are uninformative as subdiagrams
|
| 221 |
+
if d.name == "...":
|
| 222 |
+
continue
|
| 223 |
+
if d.name is not None and d.name not in seen:
|
| 224 |
+
seen.add(d.name)
|
| 225 |
+
deduped_diags.append(d)
|
| 226 |
+
resolved = [resolve_partial(partial) for partial in deduped_diags]
|
| 227 |
+
else:
|
| 228 |
+
# special case - if just one diagram, always display it, even if
|
| 229 |
+
# it has no name
|
| 230 |
+
resolved = [resolve_partial(partial) for partial in diags]
|
| 231 |
+
return sorted(resolved, key=lambda diag: diag.index)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def _should_vertical(
|
| 235 |
+
specification: int, exprs: Iterable[pyparsing.ParserElement]
|
| 236 |
+
) -> bool:
|
| 237 |
+
"""
|
| 238 |
+
Returns true if we should return a vertical list of elements
|
| 239 |
+
"""
|
| 240 |
+
if specification is None:
|
| 241 |
+
return False
|
| 242 |
+
else:
|
| 243 |
+
return len(_visible_exprs(exprs)) >= specification
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
class ElementState:
|
| 247 |
+
"""
|
| 248 |
+
State recorded for an individual pyparsing Element
|
| 249 |
+
"""
|
| 250 |
+
|
| 251 |
+
# Note: this should be a dataclass, but we have to support Python 3.5
|
| 252 |
+
def __init__(
|
| 253 |
+
self,
|
| 254 |
+
element: pyparsing.ParserElement,
|
| 255 |
+
converted: EditablePartial,
|
| 256 |
+
parent: EditablePartial,
|
| 257 |
+
number: int,
|
| 258 |
+
name: str = None,
|
| 259 |
+
parent_index: typing.Optional[int] = None,
|
| 260 |
+
):
|
| 261 |
+
#: The pyparsing element that this represents
|
| 262 |
+
self.element: pyparsing.ParserElement = element
|
| 263 |
+
#: The name of the element
|
| 264 |
+
self.name: typing.Optional[str] = name
|
| 265 |
+
#: The output Railroad element in an unconverted state
|
| 266 |
+
self.converted: EditablePartial = converted
|
| 267 |
+
#: The parent Railroad element, which we store so that we can extract this if it's duplicated
|
| 268 |
+
self.parent: EditablePartial = parent
|
| 269 |
+
#: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
|
| 270 |
+
self.number: int = number
|
| 271 |
+
#: The index of this inside its parent
|
| 272 |
+
self.parent_index: typing.Optional[int] = parent_index
|
| 273 |
+
#: If true, we should extract this out into a subdiagram
|
| 274 |
+
self.extract: bool = False
|
| 275 |
+
#: If true, all of this element's children have been filled out
|
| 276 |
+
self.complete: bool = False
|
| 277 |
+
|
| 278 |
+
def mark_for_extraction(
|
| 279 |
+
self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
|
| 280 |
+
):
|
| 281 |
+
"""
|
| 282 |
+
Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
|
| 283 |
+
:param el_id: id of the element
|
| 284 |
+
:param state: element/diagram state tracker
|
| 285 |
+
:param name: name to use for this element's text
|
| 286 |
+
:param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
|
| 287 |
+
root element when we know we're finished
|
| 288 |
+
"""
|
| 289 |
+
self.extract = True
|
| 290 |
+
|
| 291 |
+
# Set the name
|
| 292 |
+
if not self.name:
|
| 293 |
+
if name:
|
| 294 |
+
# Allow forcing a custom name
|
| 295 |
+
self.name = name
|
| 296 |
+
elif self.element.customName:
|
| 297 |
+
self.name = self.element.customName
|
| 298 |
+
else:
|
| 299 |
+
self.name = ""
|
| 300 |
+
|
| 301 |
+
# Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
|
| 302 |
+
# to be added
|
| 303 |
+
# Also, if this is just a string literal etc, don't bother extracting it
|
| 304 |
+
if force or (self.complete and _worth_extracting(self.element)):
|
| 305 |
+
state.extract_into_diagram(el_id)
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
class ConverterState:
|
| 309 |
+
"""
|
| 310 |
+
Stores some state that persists between recursions into the element tree
|
| 311 |
+
"""
|
| 312 |
+
|
| 313 |
+
def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
|
| 314 |
+
#: A dictionary mapping ParserElements to state relating to them
|
| 315 |
+
self._element_diagram_states: Dict[int, ElementState] = {}
|
| 316 |
+
#: A dictionary mapping ParserElement IDs to subdiagrams generated from them
|
| 317 |
+
self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
|
| 318 |
+
#: The index of the next unnamed element
|
| 319 |
+
self.unnamed_index: int = 1
|
| 320 |
+
#: The index of the next element. This is used for sorting
|
| 321 |
+
self.index: int = 0
|
| 322 |
+
#: Shared kwargs that are used to customize the construction of diagrams
|
| 323 |
+
self.diagram_kwargs: dict = diagram_kwargs or {}
|
| 324 |
+
self.extracted_diagram_names: Set[str] = set()
|
| 325 |
+
|
| 326 |
+
def __setitem__(self, key: int, value: ElementState):
|
| 327 |
+
self._element_diagram_states[key] = value
|
| 328 |
+
|
| 329 |
+
def __getitem__(self, key: int) -> ElementState:
|
| 330 |
+
return self._element_diagram_states[key]
|
| 331 |
+
|
| 332 |
+
def __delitem__(self, key: int):
|
| 333 |
+
del self._element_diagram_states[key]
|
| 334 |
+
|
| 335 |
+
def __contains__(self, key: int):
|
| 336 |
+
return key in self._element_diagram_states
|
| 337 |
+
|
| 338 |
+
def generate_unnamed(self) -> int:
|
| 339 |
+
"""
|
| 340 |
+
Generate a number used in the name of an otherwise unnamed diagram
|
| 341 |
+
"""
|
| 342 |
+
self.unnamed_index += 1
|
| 343 |
+
return self.unnamed_index
|
| 344 |
+
|
| 345 |
+
def generate_index(self) -> int:
|
| 346 |
+
"""
|
| 347 |
+
Generate a number used to index a diagram
|
| 348 |
+
"""
|
| 349 |
+
self.index += 1
|
| 350 |
+
return self.index
|
| 351 |
+
|
| 352 |
+
def extract_into_diagram(self, el_id: int):
|
| 353 |
+
"""
|
| 354 |
+
Used when we encounter the same token twice in the same tree. When this
|
| 355 |
+
happens, we replace all instances of that token with a terminal, and
|
| 356 |
+
create a new subdiagram for the token
|
| 357 |
+
"""
|
| 358 |
+
position = self[el_id]
|
| 359 |
+
|
| 360 |
+
# Replace the original definition of this element with a regular block
|
| 361 |
+
if position.parent:
|
| 362 |
+
ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
|
| 363 |
+
if "item" in position.parent.kwargs:
|
| 364 |
+
position.parent.kwargs["item"] = ret
|
| 365 |
+
elif "items" in position.parent.kwargs:
|
| 366 |
+
position.parent.kwargs["items"][position.parent_index] = ret
|
| 367 |
+
|
| 368 |
+
# If the element we're extracting is a group, skip to its content but keep the title
|
| 369 |
+
if position.converted.func == railroad.Group:
|
| 370 |
+
content = position.converted.kwargs["item"]
|
| 371 |
+
else:
|
| 372 |
+
content = position.converted
|
| 373 |
+
|
| 374 |
+
self.diagrams[el_id] = EditablePartial.from_call(
|
| 375 |
+
NamedDiagram,
|
| 376 |
+
name=position.name,
|
| 377 |
+
diagram=EditablePartial.from_call(
|
| 378 |
+
railroad.Diagram, content, **self.diagram_kwargs
|
| 379 |
+
),
|
| 380 |
+
index=position.number,
|
| 381 |
+
)
|
| 382 |
+
|
| 383 |
+
del self[el_id]
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def _worth_extracting(element: pyparsing.ParserElement) -> bool:
|
| 387 |
+
"""
|
| 388 |
+
Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
|
| 389 |
+
themselves have children, then its complex enough to extract
|
| 390 |
+
"""
|
| 391 |
+
children = element.recurse()
|
| 392 |
+
return any(child.recurse() for child in children)
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
def _apply_diagram_item_enhancements(fn):
|
| 396 |
+
"""
|
| 397 |
+
decorator to ensure enhancements to a diagram item (such as results name annotations)
|
| 398 |
+
get applied on return from _to_diagram_element (we do this since there are several
|
| 399 |
+
returns in _to_diagram_element)
|
| 400 |
+
"""
|
| 401 |
+
|
| 402 |
+
def _inner(
|
| 403 |
+
element: pyparsing.ParserElement,
|
| 404 |
+
parent: typing.Optional[EditablePartial],
|
| 405 |
+
lookup: ConverterState = None,
|
| 406 |
+
vertical: int = None,
|
| 407 |
+
index: int = 0,
|
| 408 |
+
name_hint: str = None,
|
| 409 |
+
show_results_names: bool = False,
|
| 410 |
+
show_groups: bool = False,
|
| 411 |
+
) -> typing.Optional[EditablePartial]:
|
| 412 |
+
ret = fn(
|
| 413 |
+
element,
|
| 414 |
+
parent,
|
| 415 |
+
lookup,
|
| 416 |
+
vertical,
|
| 417 |
+
index,
|
| 418 |
+
name_hint,
|
| 419 |
+
show_results_names,
|
| 420 |
+
show_groups,
|
| 421 |
+
)
|
| 422 |
+
|
| 423 |
+
# apply annotation for results name, if present
|
| 424 |
+
if show_results_names and ret is not None:
|
| 425 |
+
element_results_name = element.resultsName
|
| 426 |
+
if element_results_name:
|
| 427 |
+
# add "*" to indicate if this is a "list all results" name
|
| 428 |
+
element_results_name += "" if element.modalResults else "*"
|
| 429 |
+
ret = EditablePartial.from_call(
|
| 430 |
+
railroad.Group, item=ret, label=element_results_name
|
| 431 |
+
)
|
| 432 |
+
|
| 433 |
+
return ret
|
| 434 |
+
|
| 435 |
+
return _inner
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
|
| 439 |
+
non_diagramming_exprs = (
|
| 440 |
+
pyparsing.ParseElementEnhance,
|
| 441 |
+
pyparsing.PositionToken,
|
| 442 |
+
pyparsing.And._ErrorStop,
|
| 443 |
+
)
|
| 444 |
+
return [
|
| 445 |
+
e
|
| 446 |
+
for e in exprs
|
| 447 |
+
if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
|
| 448 |
+
]
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
@_apply_diagram_item_enhancements
|
| 452 |
+
def _to_diagram_element(
|
| 453 |
+
element: pyparsing.ParserElement,
|
| 454 |
+
parent: typing.Optional[EditablePartial],
|
| 455 |
+
lookup: ConverterState = None,
|
| 456 |
+
vertical: int = None,
|
| 457 |
+
index: int = 0,
|
| 458 |
+
name_hint: str = None,
|
| 459 |
+
show_results_names: bool = False,
|
| 460 |
+
show_groups: bool = False,
|
| 461 |
+
) -> typing.Optional[EditablePartial]:
|
| 462 |
+
"""
|
| 463 |
+
Recursively converts a PyParsing Element to a railroad Element
|
| 464 |
+
:param lookup: The shared converter state that keeps track of useful things
|
| 465 |
+
:param index: The index of this element within the parent
|
| 466 |
+
:param parent: The parent of this element in the output tree
|
| 467 |
+
:param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
|
| 468 |
+
it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
|
| 469 |
+
do so
|
| 470 |
+
:param name_hint: If provided, this will override the generated name
|
| 471 |
+
:param show_results_names: bool flag indicating whether to add annotations for results names
|
| 472 |
+
:returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
|
| 473 |
+
:param show_groups: bool flag indicating whether to show groups using bounding box
|
| 474 |
+
"""
|
| 475 |
+
exprs = element.recurse()
|
| 476 |
+
name = name_hint or element.customName or element.__class__.__name__
|
| 477 |
+
|
| 478 |
+
# Python's id() is used to provide a unique identifier for elements
|
| 479 |
+
el_id = id(element)
|
| 480 |
+
|
| 481 |
+
element_results_name = element.resultsName
|
| 482 |
+
|
| 483 |
+
# Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
|
| 484 |
+
if not element.customName:
|
| 485 |
+
if isinstance(
|
| 486 |
+
element,
|
| 487 |
+
(
|
| 488 |
+
# pyparsing.TokenConverter,
|
| 489 |
+
# pyparsing.Forward,
|
| 490 |
+
pyparsing.Located,
|
| 491 |
+
),
|
| 492 |
+
):
|
| 493 |
+
# However, if this element has a useful custom name, and its child does not, we can pass it on to the child
|
| 494 |
+
if exprs:
|
| 495 |
+
if not exprs[0].customName:
|
| 496 |
+
propagated_name = name
|
| 497 |
+
else:
|
| 498 |
+
propagated_name = None
|
| 499 |
+
|
| 500 |
+
return _to_diagram_element(
|
| 501 |
+
element.expr,
|
| 502 |
+
parent=parent,
|
| 503 |
+
lookup=lookup,
|
| 504 |
+
vertical=vertical,
|
| 505 |
+
index=index,
|
| 506 |
+
name_hint=propagated_name,
|
| 507 |
+
show_results_names=show_results_names,
|
| 508 |
+
show_groups=show_groups,
|
| 509 |
+
)
|
| 510 |
+
|
| 511 |
+
# If the element isn't worth extracting, we always treat it as the first time we say it
|
| 512 |
+
if _worth_extracting(element):
|
| 513 |
+
if el_id in lookup:
|
| 514 |
+
# If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
|
| 515 |
+
# so we have to extract it into a new diagram.
|
| 516 |
+
looked_up = lookup[el_id]
|
| 517 |
+
looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
|
| 518 |
+
ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
|
| 519 |
+
return ret
|
| 520 |
+
|
| 521 |
+
elif el_id in lookup.diagrams:
|
| 522 |
+
# If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
|
| 523 |
+
# just put in a marker element that refers to the sub-diagram
|
| 524 |
+
ret = EditablePartial.from_call(
|
| 525 |
+
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
|
| 526 |
+
)
|
| 527 |
+
return ret
|
| 528 |
+
|
| 529 |
+
# Recursively convert child elements
|
| 530 |
+
# Here we find the most relevant Railroad element for matching pyparsing Element
|
| 531 |
+
# We use ``items=[]`` here to hold the place for where the child elements will go once created
|
| 532 |
+
if isinstance(element, pyparsing.And):
|
| 533 |
+
# detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
|
| 534 |
+
# (all will have the same name, and resultsName)
|
| 535 |
+
if not exprs:
|
| 536 |
+
return None
|
| 537 |
+
if len(set((e.name, e.resultsName) for e in exprs)) == 1:
|
| 538 |
+
ret = EditablePartial.from_call(
|
| 539 |
+
railroad.OneOrMore, item="", repeat=str(len(exprs))
|
| 540 |
+
)
|
| 541 |
+
elif _should_vertical(vertical, exprs):
|
| 542 |
+
ret = EditablePartial.from_call(railroad.Stack, items=[])
|
| 543 |
+
else:
|
| 544 |
+
ret = EditablePartial.from_call(railroad.Sequence, items=[])
|
| 545 |
+
elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
|
| 546 |
+
if not exprs:
|
| 547 |
+
return None
|
| 548 |
+
if _should_vertical(vertical, exprs):
|
| 549 |
+
ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
|
| 550 |
+
else:
|
| 551 |
+
ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
|
| 552 |
+
elif isinstance(element, pyparsing.Each):
|
| 553 |
+
if not exprs:
|
| 554 |
+
return None
|
| 555 |
+
ret = EditablePartial.from_call(EachItem, items=[])
|
| 556 |
+
elif isinstance(element, pyparsing.NotAny):
|
| 557 |
+
ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
|
| 558 |
+
elif isinstance(element, pyparsing.FollowedBy):
|
| 559 |
+
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
|
| 560 |
+
elif isinstance(element, pyparsing.PrecededBy):
|
| 561 |
+
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
|
| 562 |
+
elif isinstance(element, pyparsing.Group):
|
| 563 |
+
if show_groups:
|
| 564 |
+
ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
|
| 565 |
+
else:
|
| 566 |
+
ret = EditablePartial.from_call(railroad.Group, label="", item="")
|
| 567 |
+
elif isinstance(element, pyparsing.TokenConverter):
|
| 568 |
+
label = type(element).__name__.lower()
|
| 569 |
+
if label == "tokenconverter":
|
| 570 |
+
ret = EditablePartial.from_call(railroad.Sequence, items=[])
|
| 571 |
+
else:
|
| 572 |
+
ret = EditablePartial.from_call(AnnotatedItem, label=label, item="")
|
| 573 |
+
elif isinstance(element, pyparsing.Opt):
|
| 574 |
+
ret = EditablePartial.from_call(railroad.Optional, item="")
|
| 575 |
+
elif isinstance(element, pyparsing.OneOrMore):
|
| 576 |
+
ret = EditablePartial.from_call(railroad.OneOrMore, item="")
|
| 577 |
+
elif isinstance(element, pyparsing.ZeroOrMore):
|
| 578 |
+
ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
|
| 579 |
+
elif isinstance(element, pyparsing.Group):
|
| 580 |
+
ret = EditablePartial.from_call(
|
| 581 |
+
railroad.Group, item=None, label=element_results_name
|
| 582 |
+
)
|
| 583 |
+
elif isinstance(element, pyparsing.Empty) and not element.customName:
|
| 584 |
+
# Skip unnamed "Empty" elements
|
| 585 |
+
ret = None
|
| 586 |
+
elif isinstance(element, pyparsing.ParseElementEnhance):
|
| 587 |
+
ret = EditablePartial.from_call(railroad.Sequence, items=[])
|
| 588 |
+
elif len(exprs) > 0 and not element_results_name:
|
| 589 |
+
ret = EditablePartial.from_call(railroad.Group, item="", label=name)
|
| 590 |
+
elif len(exprs) > 0:
|
| 591 |
+
ret = EditablePartial.from_call(railroad.Sequence, items=[])
|
| 592 |
+
else:
|
| 593 |
+
terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
|
| 594 |
+
ret = terminal
|
| 595 |
+
|
| 596 |
+
if ret is None:
|
| 597 |
+
return
|
| 598 |
+
|
| 599 |
+
# Indicate this element's position in the tree so we can extract it if necessary
|
| 600 |
+
lookup[el_id] = ElementState(
|
| 601 |
+
element=element,
|
| 602 |
+
converted=ret,
|
| 603 |
+
parent=parent,
|
| 604 |
+
parent_index=index,
|
| 605 |
+
number=lookup.generate_index(),
|
| 606 |
+
)
|
| 607 |
+
if element.customName:
|
| 608 |
+
lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
|
| 609 |
+
|
| 610 |
+
i = 0
|
| 611 |
+
for expr in exprs:
|
| 612 |
+
# Add a placeholder index in case we have to extract the child before we even add it to the parent
|
| 613 |
+
if "items" in ret.kwargs:
|
| 614 |
+
ret.kwargs["items"].insert(i, None)
|
| 615 |
+
|
| 616 |
+
item = _to_diagram_element(
|
| 617 |
+
expr,
|
| 618 |
+
parent=ret,
|
| 619 |
+
lookup=lookup,
|
| 620 |
+
vertical=vertical,
|
| 621 |
+
index=i,
|
| 622 |
+
show_results_names=show_results_names,
|
| 623 |
+
show_groups=show_groups,
|
| 624 |
+
)
|
| 625 |
+
|
| 626 |
+
# Some elements don't need to be shown in the diagram
|
| 627 |
+
if item is not None:
|
| 628 |
+
if "item" in ret.kwargs:
|
| 629 |
+
ret.kwargs["item"] = item
|
| 630 |
+
elif "items" in ret.kwargs:
|
| 631 |
+
# If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
|
| 632 |
+
ret.kwargs["items"][i] = item
|
| 633 |
+
i += 1
|
| 634 |
+
elif "items" in ret.kwargs:
|
| 635 |
+
# If we're supposed to skip this element, remove it from the parent
|
| 636 |
+
del ret.kwargs["items"][i]
|
| 637 |
+
|
| 638 |
+
# If all this items children are none, skip this item
|
| 639 |
+
if ret and (
|
| 640 |
+
("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
|
| 641 |
+
or ("item" in ret.kwargs and ret.kwargs["item"] is None)
|
| 642 |
+
):
|
| 643 |
+
ret = EditablePartial.from_call(railroad.Terminal, name)
|
| 644 |
+
|
| 645 |
+
# Mark this element as "complete", ie it has all of its children
|
| 646 |
+
if el_id in lookup:
|
| 647 |
+
lookup[el_id].complete = True
|
| 648 |
+
|
| 649 |
+
if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
|
| 650 |
+
lookup.extract_into_diagram(el_id)
|
| 651 |
+
if ret is not None:
|
| 652 |
+
ret = EditablePartial.from_call(
|
| 653 |
+
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
|
| 654 |
+
)
|
| 655 |
+
|
| 656 |
+
return ret
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/exceptions.py
ADDED
|
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# exceptions.py
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
import typing
|
| 6 |
+
|
| 7 |
+
from .util import (
|
| 8 |
+
col,
|
| 9 |
+
line,
|
| 10 |
+
lineno,
|
| 11 |
+
_collapse_string_to_ranges,
|
| 12 |
+
replaced_by_pep8,
|
| 13 |
+
)
|
| 14 |
+
from .unicode import pyparsing_unicode as ppu
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
|
| 18 |
+
pass
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
|
| 22 |
+
_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class ParseBaseException(Exception):
|
| 26 |
+
"""base exception class for all parsing runtime exceptions"""
|
| 27 |
+
|
| 28 |
+
loc: int
|
| 29 |
+
msg: str
|
| 30 |
+
pstr: str
|
| 31 |
+
parser_element: typing.Any # "ParserElement"
|
| 32 |
+
args: typing.Tuple[str, int, typing.Optional[str]]
|
| 33 |
+
|
| 34 |
+
__slots__ = (
|
| 35 |
+
"loc",
|
| 36 |
+
"msg",
|
| 37 |
+
"pstr",
|
| 38 |
+
"parser_element",
|
| 39 |
+
"args",
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
# Performance tuning: we construct a *lot* of these, so keep this
|
| 43 |
+
# constructor as small and fast as possible
|
| 44 |
+
def __init__(
|
| 45 |
+
self,
|
| 46 |
+
pstr: str,
|
| 47 |
+
loc: int = 0,
|
| 48 |
+
msg: typing.Optional[str] = None,
|
| 49 |
+
elem=None,
|
| 50 |
+
):
|
| 51 |
+
self.loc = loc
|
| 52 |
+
if msg is None:
|
| 53 |
+
self.msg = pstr
|
| 54 |
+
self.pstr = ""
|
| 55 |
+
else:
|
| 56 |
+
self.msg = msg
|
| 57 |
+
self.pstr = pstr
|
| 58 |
+
self.parser_element = elem
|
| 59 |
+
self.args = (pstr, loc, msg)
|
| 60 |
+
|
| 61 |
+
@staticmethod
|
| 62 |
+
def explain_exception(exc, depth=16):
|
| 63 |
+
"""
|
| 64 |
+
Method to take an exception and translate the Python internal traceback into a list
|
| 65 |
+
of the pyparsing expressions that caused the exception to be raised.
|
| 66 |
+
|
| 67 |
+
Parameters:
|
| 68 |
+
|
| 69 |
+
- exc - exception raised during parsing (need not be a ParseException, in support
|
| 70 |
+
of Python exceptions that might be raised in a parse action)
|
| 71 |
+
- depth (default=16) - number of levels back in the stack trace to list expression
|
| 72 |
+
and function names; if None, the full stack trace names will be listed; if 0, only
|
| 73 |
+
the failing input line, marker, and exception string will be shown
|
| 74 |
+
|
| 75 |
+
Returns a multi-line string listing the ParserElements and/or function names in the
|
| 76 |
+
exception's stack trace.
|
| 77 |
+
"""
|
| 78 |
+
import inspect
|
| 79 |
+
from .core import ParserElement
|
| 80 |
+
|
| 81 |
+
if depth is None:
|
| 82 |
+
depth = sys.getrecursionlimit()
|
| 83 |
+
ret = []
|
| 84 |
+
if isinstance(exc, ParseBaseException):
|
| 85 |
+
ret.append(exc.line)
|
| 86 |
+
ret.append(" " * (exc.column - 1) + "^")
|
| 87 |
+
ret.append(f"{type(exc).__name__}: {exc}")
|
| 88 |
+
|
| 89 |
+
if depth > 0:
|
| 90 |
+
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
|
| 91 |
+
seen = set()
|
| 92 |
+
for i, ff in enumerate(callers[-depth:]):
|
| 93 |
+
frm = ff[0]
|
| 94 |
+
|
| 95 |
+
f_self = frm.f_locals.get("self", None)
|
| 96 |
+
if isinstance(f_self, ParserElement):
|
| 97 |
+
if not frm.f_code.co_name.startswith(
|
| 98 |
+
("parseImpl", "_parseNoCache")
|
| 99 |
+
):
|
| 100 |
+
continue
|
| 101 |
+
if id(f_self) in seen:
|
| 102 |
+
continue
|
| 103 |
+
seen.add(id(f_self))
|
| 104 |
+
|
| 105 |
+
self_type = type(f_self)
|
| 106 |
+
ret.append(
|
| 107 |
+
f"{self_type.__module__}.{self_type.__name__} - {f_self}"
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
elif f_self is not None:
|
| 111 |
+
self_type = type(f_self)
|
| 112 |
+
ret.append(f"{self_type.__module__}.{self_type.__name__}")
|
| 113 |
+
|
| 114 |
+
else:
|
| 115 |
+
code = frm.f_code
|
| 116 |
+
if code.co_name in ("wrapper", "<module>"):
|
| 117 |
+
continue
|
| 118 |
+
|
| 119 |
+
ret.append(code.co_name)
|
| 120 |
+
|
| 121 |
+
depth -= 1
|
| 122 |
+
if not depth:
|
| 123 |
+
break
|
| 124 |
+
|
| 125 |
+
return "\n".join(ret)
|
| 126 |
+
|
| 127 |
+
@classmethod
|
| 128 |
+
def _from_exception(cls, pe):
|
| 129 |
+
"""
|
| 130 |
+
internal factory method to simplify creating one type of ParseException
|
| 131 |
+
from another - avoids having __init__ signature conflicts among subclasses
|
| 132 |
+
"""
|
| 133 |
+
return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element)
|
| 134 |
+
|
| 135 |
+
@property
|
| 136 |
+
def line(self) -> str:
|
| 137 |
+
"""
|
| 138 |
+
Return the line of text where the exception occurred.
|
| 139 |
+
"""
|
| 140 |
+
return line(self.loc, self.pstr)
|
| 141 |
+
|
| 142 |
+
@property
|
| 143 |
+
def lineno(self) -> int:
|
| 144 |
+
"""
|
| 145 |
+
Return the 1-based line number of text where the exception occurred.
|
| 146 |
+
"""
|
| 147 |
+
return lineno(self.loc, self.pstr)
|
| 148 |
+
|
| 149 |
+
@property
|
| 150 |
+
def col(self) -> int:
|
| 151 |
+
"""
|
| 152 |
+
Return the 1-based column on the line of text where the exception occurred.
|
| 153 |
+
"""
|
| 154 |
+
return col(self.loc, self.pstr)
|
| 155 |
+
|
| 156 |
+
@property
|
| 157 |
+
def column(self) -> int:
|
| 158 |
+
"""
|
| 159 |
+
Return the 1-based column on the line of text where the exception occurred.
|
| 160 |
+
"""
|
| 161 |
+
return col(self.loc, self.pstr)
|
| 162 |
+
|
| 163 |
+
# pre-PEP8 compatibility
|
| 164 |
+
@property
|
| 165 |
+
def parserElement(self):
|
| 166 |
+
return self.parser_element
|
| 167 |
+
|
| 168 |
+
@parserElement.setter
|
| 169 |
+
def parserElement(self, elem):
|
| 170 |
+
self.parser_element = elem
|
| 171 |
+
|
| 172 |
+
def __str__(self) -> str:
|
| 173 |
+
if self.pstr:
|
| 174 |
+
if self.loc >= len(self.pstr):
|
| 175 |
+
foundstr = ", found end of text"
|
| 176 |
+
else:
|
| 177 |
+
# pull out next word at error location
|
| 178 |
+
found_match = _exception_word_extractor.match(self.pstr, self.loc)
|
| 179 |
+
if found_match is not None:
|
| 180 |
+
found = found_match.group(0)
|
| 181 |
+
else:
|
| 182 |
+
found = self.pstr[self.loc : self.loc + 1]
|
| 183 |
+
foundstr = (", found %r" % found).replace(r"\\", "\\")
|
| 184 |
+
else:
|
| 185 |
+
foundstr = ""
|
| 186 |
+
return f"{self.msg}{foundstr} (at char {self.loc}), (line:{self.lineno}, col:{self.column})"
|
| 187 |
+
|
| 188 |
+
def __repr__(self):
|
| 189 |
+
return str(self)
|
| 190 |
+
|
| 191 |
+
def mark_input_line(
|
| 192 |
+
self, marker_string: typing.Optional[str] = None, *, markerString: str = ">!<"
|
| 193 |
+
) -> str:
|
| 194 |
+
"""
|
| 195 |
+
Extracts the exception line from the input string, and marks
|
| 196 |
+
the location of the exception with a special symbol.
|
| 197 |
+
"""
|
| 198 |
+
markerString = marker_string if marker_string is not None else markerString
|
| 199 |
+
line_str = self.line
|
| 200 |
+
line_column = self.column - 1
|
| 201 |
+
if markerString:
|
| 202 |
+
line_str = "".join(
|
| 203 |
+
(line_str[:line_column], markerString, line_str[line_column:])
|
| 204 |
+
)
|
| 205 |
+
return line_str.strip()
|
| 206 |
+
|
| 207 |
+
def explain(self, depth=16) -> str:
|
| 208 |
+
"""
|
| 209 |
+
Method to translate the Python internal traceback into a list
|
| 210 |
+
of the pyparsing expressions that caused the exception to be raised.
|
| 211 |
+
|
| 212 |
+
Parameters:
|
| 213 |
+
|
| 214 |
+
- depth (default=16) - number of levels back in the stack trace to list expression
|
| 215 |
+
and function names; if None, the full stack trace names will be listed; if 0, only
|
| 216 |
+
the failing input line, marker, and exception string will be shown
|
| 217 |
+
|
| 218 |
+
Returns a multi-line string listing the ParserElements and/or function names in the
|
| 219 |
+
exception's stack trace.
|
| 220 |
+
|
| 221 |
+
Example::
|
| 222 |
+
|
| 223 |
+
expr = pp.Word(pp.nums) * 3
|
| 224 |
+
try:
|
| 225 |
+
expr.parse_string("123 456 A789")
|
| 226 |
+
except pp.ParseException as pe:
|
| 227 |
+
print(pe.explain(depth=0))
|
| 228 |
+
|
| 229 |
+
prints::
|
| 230 |
+
|
| 231 |
+
123 456 A789
|
| 232 |
+
^
|
| 233 |
+
ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)
|
| 234 |
+
|
| 235 |
+
Note: the diagnostic output will include string representations of the expressions
|
| 236 |
+
that failed to parse. These representations will be more helpful if you use `set_name` to
|
| 237 |
+
give identifiable names to your expressions. Otherwise they will use the default string
|
| 238 |
+
forms, which may be cryptic to read.
|
| 239 |
+
|
| 240 |
+
Note: pyparsing's default truncation of exception tracebacks may also truncate the
|
| 241 |
+
stack of expressions that are displayed in the ``explain`` output. To get the full listing
|
| 242 |
+
of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
|
| 243 |
+
"""
|
| 244 |
+
return self.explain_exception(self, depth)
|
| 245 |
+
|
| 246 |
+
# fmt: off
|
| 247 |
+
@replaced_by_pep8(mark_input_line)
|
| 248 |
+
def markInputline(self): ...
|
| 249 |
+
# fmt: on
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
class ParseException(ParseBaseException):
|
| 253 |
+
"""
|
| 254 |
+
Exception thrown when a parse expression doesn't match the input string
|
| 255 |
+
|
| 256 |
+
Example::
|
| 257 |
+
|
| 258 |
+
try:
|
| 259 |
+
Word(nums).set_name("integer").parse_string("ABC")
|
| 260 |
+
except ParseException as pe:
|
| 261 |
+
print(pe)
|
| 262 |
+
print("column: {}".format(pe.column))
|
| 263 |
+
|
| 264 |
+
prints::
|
| 265 |
+
|
| 266 |
+
Expected integer (at char 0), (line:1, col:1)
|
| 267 |
+
column: 1
|
| 268 |
+
|
| 269 |
+
"""
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
class ParseFatalException(ParseBaseException):
|
| 273 |
+
"""
|
| 274 |
+
User-throwable exception thrown when inconsistent parse content
|
| 275 |
+
is found; stops all parsing immediately
|
| 276 |
+
"""
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class ParseSyntaxException(ParseFatalException):
|
| 280 |
+
"""
|
| 281 |
+
Just like :class:`ParseFatalException`, but thrown internally
|
| 282 |
+
when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
|
| 283 |
+
that parsing is to stop immediately because an unbacktrackable
|
| 284 |
+
syntax error has been found.
|
| 285 |
+
"""
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
class RecursiveGrammarException(Exception):
|
| 289 |
+
"""
|
| 290 |
+
Exception thrown by :class:`ParserElement.validate` if the
|
| 291 |
+
grammar could be left-recursive; parser may need to enable
|
| 292 |
+
left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
|
| 293 |
+
"""
|
| 294 |
+
|
| 295 |
+
def __init__(self, parseElementList):
|
| 296 |
+
self.parseElementTrace = parseElementList
|
| 297 |
+
|
| 298 |
+
def __str__(self) -> str:
|
| 299 |
+
return f"RecursiveGrammarException: {self.parseElementTrace}"
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/helpers.py
ADDED
|
@@ -0,0 +1,1100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# helpers.py
|
| 2 |
+
import html.entities
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
import typing
|
| 6 |
+
|
| 7 |
+
from . import __diag__
|
| 8 |
+
from .core import *
|
| 9 |
+
from .util import (
|
| 10 |
+
_bslash,
|
| 11 |
+
_flatten,
|
| 12 |
+
_escape_regex_range_chars,
|
| 13 |
+
replaced_by_pep8,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
#
|
| 18 |
+
# global helpers
|
| 19 |
+
#
|
| 20 |
+
def counted_array(
|
| 21 |
+
expr: ParserElement,
|
| 22 |
+
int_expr: typing.Optional[ParserElement] = None,
|
| 23 |
+
*,
|
| 24 |
+
intExpr: typing.Optional[ParserElement] = None,
|
| 25 |
+
) -> ParserElement:
|
| 26 |
+
"""Helper to define a counted list of expressions.
|
| 27 |
+
|
| 28 |
+
This helper defines a pattern of the form::
|
| 29 |
+
|
| 30 |
+
integer expr expr expr...
|
| 31 |
+
|
| 32 |
+
where the leading integer tells how many expr expressions follow.
|
| 33 |
+
The matched tokens returns the array of expr tokens as a list - the
|
| 34 |
+
leading count token is suppressed.
|
| 35 |
+
|
| 36 |
+
If ``int_expr`` is specified, it should be a pyparsing expression
|
| 37 |
+
that produces an integer value.
|
| 38 |
+
|
| 39 |
+
Example::
|
| 40 |
+
|
| 41 |
+
counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
|
| 42 |
+
|
| 43 |
+
# in this parser, the leading integer value is given in binary,
|
| 44 |
+
# '10' indicating that 2 values are in the array
|
| 45 |
+
binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
|
| 46 |
+
counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
|
| 47 |
+
|
| 48 |
+
# if other fields must be parsed after the count but before the
|
| 49 |
+
# list items, give the fields results names and they will
|
| 50 |
+
# be preserved in the returned ParseResults:
|
| 51 |
+
count_with_metadata = integer + Word(alphas)("type")
|
| 52 |
+
typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
|
| 53 |
+
result = typed_array.parse_string("3 bool True True False")
|
| 54 |
+
print(result.dump())
|
| 55 |
+
|
| 56 |
+
# prints
|
| 57 |
+
# ['True', 'True', 'False']
|
| 58 |
+
# - items: ['True', 'True', 'False']
|
| 59 |
+
# - type: 'bool'
|
| 60 |
+
"""
|
| 61 |
+
intExpr = intExpr or int_expr
|
| 62 |
+
array_expr = Forward()
|
| 63 |
+
|
| 64 |
+
def count_field_parse_action(s, l, t):
|
| 65 |
+
nonlocal array_expr
|
| 66 |
+
n = t[0]
|
| 67 |
+
array_expr <<= (expr * n) if n else Empty()
|
| 68 |
+
# clear list contents, but keep any named results
|
| 69 |
+
del t[:]
|
| 70 |
+
|
| 71 |
+
if intExpr is None:
|
| 72 |
+
intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
|
| 73 |
+
else:
|
| 74 |
+
intExpr = intExpr.copy()
|
| 75 |
+
intExpr.set_name("arrayLen")
|
| 76 |
+
intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
|
| 77 |
+
return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def match_previous_literal(expr: ParserElement) -> ParserElement:
|
| 81 |
+
"""Helper to define an expression that is indirectly defined from
|
| 82 |
+
the tokens matched in a previous expression, that is, it looks for
|
| 83 |
+
a 'repeat' of a previous expression. For example::
|
| 84 |
+
|
| 85 |
+
first = Word(nums)
|
| 86 |
+
second = match_previous_literal(first)
|
| 87 |
+
match_expr = first + ":" + second
|
| 88 |
+
|
| 89 |
+
will match ``"1:1"``, but not ``"1:2"``. Because this
|
| 90 |
+
matches a previous literal, will also match the leading
|
| 91 |
+
``"1:1"`` in ``"1:10"``. If this is not desired, use
|
| 92 |
+
:class:`match_previous_expr`. Do *not* use with packrat parsing
|
| 93 |
+
enabled.
|
| 94 |
+
"""
|
| 95 |
+
rep = Forward()
|
| 96 |
+
|
| 97 |
+
def copy_token_to_repeater(s, l, t):
|
| 98 |
+
if t:
|
| 99 |
+
if len(t) == 1:
|
| 100 |
+
rep << t[0]
|
| 101 |
+
else:
|
| 102 |
+
# flatten t tokens
|
| 103 |
+
tflat = _flatten(t.as_list())
|
| 104 |
+
rep << And(Literal(tt) for tt in tflat)
|
| 105 |
+
else:
|
| 106 |
+
rep << Empty()
|
| 107 |
+
|
| 108 |
+
expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
|
| 109 |
+
rep.set_name("(prev) " + str(expr))
|
| 110 |
+
return rep
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def match_previous_expr(expr: ParserElement) -> ParserElement:
|
| 114 |
+
"""Helper to define an expression that is indirectly defined from
|
| 115 |
+
the tokens matched in a previous expression, that is, it looks for
|
| 116 |
+
a 'repeat' of a previous expression. For example::
|
| 117 |
+
|
| 118 |
+
first = Word(nums)
|
| 119 |
+
second = match_previous_expr(first)
|
| 120 |
+
match_expr = first + ":" + second
|
| 121 |
+
|
| 122 |
+
will match ``"1:1"``, but not ``"1:2"``. Because this
|
| 123 |
+
matches by expressions, will *not* match the leading ``"1:1"``
|
| 124 |
+
in ``"1:10"``; the expressions are evaluated first, and then
|
| 125 |
+
compared, so ``"1"`` is compared with ``"10"``. Do *not* use
|
| 126 |
+
with packrat parsing enabled.
|
| 127 |
+
"""
|
| 128 |
+
rep = Forward()
|
| 129 |
+
e2 = expr.copy()
|
| 130 |
+
rep <<= e2
|
| 131 |
+
|
| 132 |
+
def copy_token_to_repeater(s, l, t):
|
| 133 |
+
matchTokens = _flatten(t.as_list())
|
| 134 |
+
|
| 135 |
+
def must_match_these_tokens(s, l, t):
|
| 136 |
+
theseTokens = _flatten(t.as_list())
|
| 137 |
+
if theseTokens != matchTokens:
|
| 138 |
+
raise ParseException(
|
| 139 |
+
s, l, f"Expected {matchTokens}, found{theseTokens}"
|
| 140 |
+
)
|
| 141 |
+
|
| 142 |
+
rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
|
| 143 |
+
|
| 144 |
+
expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
|
| 145 |
+
rep.set_name("(prev) " + str(expr))
|
| 146 |
+
return rep
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def one_of(
|
| 150 |
+
strs: Union[typing.Iterable[str], str],
|
| 151 |
+
caseless: bool = False,
|
| 152 |
+
use_regex: bool = True,
|
| 153 |
+
as_keyword: bool = False,
|
| 154 |
+
*,
|
| 155 |
+
useRegex: bool = True,
|
| 156 |
+
asKeyword: bool = False,
|
| 157 |
+
) -> ParserElement:
|
| 158 |
+
"""Helper to quickly define a set of alternative :class:`Literal` s,
|
| 159 |
+
and makes sure to do longest-first testing when there is a conflict,
|
| 160 |
+
regardless of the input order, but returns
|
| 161 |
+
a :class:`MatchFirst` for best performance.
|
| 162 |
+
|
| 163 |
+
Parameters:
|
| 164 |
+
|
| 165 |
+
- ``strs`` - a string of space-delimited literals, or a collection of
|
| 166 |
+
string literals
|
| 167 |
+
- ``caseless`` - treat all literals as caseless - (default= ``False``)
|
| 168 |
+
- ``use_regex`` - as an optimization, will
|
| 169 |
+
generate a :class:`Regex` object; otherwise, will generate
|
| 170 |
+
a :class:`MatchFirst` object (if ``caseless=True`` or ``as_keyword=True``, or if
|
| 171 |
+
creating a :class:`Regex` raises an exception) - (default= ``True``)
|
| 172 |
+
- ``as_keyword`` - enforce :class:`Keyword`-style matching on the
|
| 173 |
+
generated expressions - (default= ``False``)
|
| 174 |
+
- ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
|
| 175 |
+
but will be removed in a future release
|
| 176 |
+
|
| 177 |
+
Example::
|
| 178 |
+
|
| 179 |
+
comp_oper = one_of("< = > <= >= !=")
|
| 180 |
+
var = Word(alphas)
|
| 181 |
+
number = Word(nums)
|
| 182 |
+
term = var | number
|
| 183 |
+
comparison_expr = term + comp_oper + term
|
| 184 |
+
print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))
|
| 185 |
+
|
| 186 |
+
prints::
|
| 187 |
+
|
| 188 |
+
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
|
| 189 |
+
"""
|
| 190 |
+
asKeyword = asKeyword or as_keyword
|
| 191 |
+
useRegex = useRegex and use_regex
|
| 192 |
+
|
| 193 |
+
if (
|
| 194 |
+
isinstance(caseless, str_type)
|
| 195 |
+
and __diag__.warn_on_multiple_string_args_to_oneof
|
| 196 |
+
):
|
| 197 |
+
warnings.warn(
|
| 198 |
+
"More than one string argument passed to one_of, pass"
|
| 199 |
+
" choices as a list or space-delimited string",
|
| 200 |
+
stacklevel=2,
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
if caseless:
|
| 204 |
+
isequal = lambda a, b: a.upper() == b.upper()
|
| 205 |
+
masks = lambda a, b: b.upper().startswith(a.upper())
|
| 206 |
+
parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
|
| 207 |
+
else:
|
| 208 |
+
isequal = lambda a, b: a == b
|
| 209 |
+
masks = lambda a, b: b.startswith(a)
|
| 210 |
+
parseElementClass = Keyword if asKeyword else Literal
|
| 211 |
+
|
| 212 |
+
symbols: List[str] = []
|
| 213 |
+
if isinstance(strs, str_type):
|
| 214 |
+
strs = typing.cast(str, strs)
|
| 215 |
+
symbols = strs.split()
|
| 216 |
+
elif isinstance(strs, Iterable):
|
| 217 |
+
symbols = list(strs)
|
| 218 |
+
else:
|
| 219 |
+
raise TypeError("Invalid argument to one_of, expected string or iterable")
|
| 220 |
+
if not symbols:
|
| 221 |
+
return NoMatch()
|
| 222 |
+
|
| 223 |
+
# reorder given symbols to take care to avoid masking longer choices with shorter ones
|
| 224 |
+
# (but only if the given symbols are not just single characters)
|
| 225 |
+
if any(len(sym) > 1 for sym in symbols):
|
| 226 |
+
i = 0
|
| 227 |
+
while i < len(symbols) - 1:
|
| 228 |
+
cur = symbols[i]
|
| 229 |
+
for j, other in enumerate(symbols[i + 1 :]):
|
| 230 |
+
if isequal(other, cur):
|
| 231 |
+
del symbols[i + j + 1]
|
| 232 |
+
break
|
| 233 |
+
elif masks(cur, other):
|
| 234 |
+
del symbols[i + j + 1]
|
| 235 |
+
symbols.insert(i, other)
|
| 236 |
+
break
|
| 237 |
+
else:
|
| 238 |
+
i += 1
|
| 239 |
+
|
| 240 |
+
if useRegex:
|
| 241 |
+
re_flags: int = re.IGNORECASE if caseless else 0
|
| 242 |
+
|
| 243 |
+
try:
|
| 244 |
+
if all(len(sym) == 1 for sym in symbols):
|
| 245 |
+
# symbols are just single characters, create range regex pattern
|
| 246 |
+
patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]"
|
| 247 |
+
else:
|
| 248 |
+
patt = "|".join(re.escape(sym) for sym in symbols)
|
| 249 |
+
|
| 250 |
+
# wrap with \b word break markers if defining as keywords
|
| 251 |
+
if asKeyword:
|
| 252 |
+
patt = rf"\b(?:{patt})\b"
|
| 253 |
+
|
| 254 |
+
ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
|
| 255 |
+
|
| 256 |
+
if caseless:
|
| 257 |
+
# add parse action to return symbols as specified, not in random
|
| 258 |
+
# casing as found in input string
|
| 259 |
+
symbol_map = {sym.lower(): sym for sym in symbols}
|
| 260 |
+
ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
|
| 261 |
+
|
| 262 |
+
return ret
|
| 263 |
+
|
| 264 |
+
except re.error:
|
| 265 |
+
warnings.warn(
|
| 266 |
+
"Exception creating Regex for one_of, building MatchFirst", stacklevel=2
|
| 267 |
+
)
|
| 268 |
+
|
| 269 |
+
# last resort, just use MatchFirst
|
| 270 |
+
return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
|
| 271 |
+
" | ".join(symbols)
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
|
| 276 |
+
"""Helper to easily and clearly define a dictionary by specifying
|
| 277 |
+
the respective patterns for the key and value. Takes care of
|
| 278 |
+
defining the :class:`Dict`, :class:`ZeroOrMore`, and
|
| 279 |
+
:class:`Group` tokens in the proper order. The key pattern
|
| 280 |
+
can include delimiting markers or punctuation, as long as they are
|
| 281 |
+
suppressed, thereby leaving the significant key text. The value
|
| 282 |
+
pattern can include named results, so that the :class:`Dict` results
|
| 283 |
+
can include named token fields.
|
| 284 |
+
|
| 285 |
+
Example::
|
| 286 |
+
|
| 287 |
+
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
|
| 288 |
+
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
|
| 289 |
+
print(attr_expr[1, ...].parse_string(text).dump())
|
| 290 |
+
|
| 291 |
+
attr_label = label
|
| 292 |
+
attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
|
| 293 |
+
|
| 294 |
+
# similar to Dict, but simpler call format
|
| 295 |
+
result = dict_of(attr_label, attr_value).parse_string(text)
|
| 296 |
+
print(result.dump())
|
| 297 |
+
print(result['shape'])
|
| 298 |
+
print(result.shape) # object attribute access works too
|
| 299 |
+
print(result.as_dict())
|
| 300 |
+
|
| 301 |
+
prints::
|
| 302 |
+
|
| 303 |
+
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
|
| 304 |
+
- color: 'light blue'
|
| 305 |
+
- posn: 'upper left'
|
| 306 |
+
- shape: 'SQUARE'
|
| 307 |
+
- texture: 'burlap'
|
| 308 |
+
SQUARE
|
| 309 |
+
SQUARE
|
| 310 |
+
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
|
| 311 |
+
"""
|
| 312 |
+
return Dict(OneOrMore(Group(key + value)))
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def original_text_for(
|
| 316 |
+
expr: ParserElement, as_string: bool = True, *, asString: bool = True
|
| 317 |
+
) -> ParserElement:
|
| 318 |
+
"""Helper to return the original, untokenized text for a given
|
| 319 |
+
expression. Useful to restore the parsed fields of an HTML start
|
| 320 |
+
tag into the raw tag text itself, or to revert separate tokens with
|
| 321 |
+
intervening whitespace back to the original matching input text. By
|
| 322 |
+
default, returns a string containing the original parsed text.
|
| 323 |
+
|
| 324 |
+
If the optional ``as_string`` argument is passed as
|
| 325 |
+
``False``, then the return value is
|
| 326 |
+
a :class:`ParseResults` containing any results names that
|
| 327 |
+
were originally matched, and a single token containing the original
|
| 328 |
+
matched text from the input string. So if the expression passed to
|
| 329 |
+
:class:`original_text_for` contains expressions with defined
|
| 330 |
+
results names, you must set ``as_string`` to ``False`` if you
|
| 331 |
+
want to preserve those results name values.
|
| 332 |
+
|
| 333 |
+
The ``asString`` pre-PEP8 argument is retained for compatibility,
|
| 334 |
+
but will be removed in a future release.
|
| 335 |
+
|
| 336 |
+
Example::
|
| 337 |
+
|
| 338 |
+
src = "this is test <b> bold <i>text</i> </b> normal text "
|
| 339 |
+
for tag in ("b", "i"):
|
| 340 |
+
opener, closer = make_html_tags(tag)
|
| 341 |
+
patt = original_text_for(opener + ... + closer)
|
| 342 |
+
print(patt.search_string(src)[0])
|
| 343 |
+
|
| 344 |
+
prints::
|
| 345 |
+
|
| 346 |
+
['<b> bold <i>text</i> </b>']
|
| 347 |
+
['<i>text</i>']
|
| 348 |
+
"""
|
| 349 |
+
asString = asString and as_string
|
| 350 |
+
|
| 351 |
+
locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
|
| 352 |
+
endlocMarker = locMarker.copy()
|
| 353 |
+
endlocMarker.callPreparse = False
|
| 354 |
+
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
|
| 355 |
+
if asString:
|
| 356 |
+
extractText = lambda s, l, t: s[t._original_start : t._original_end]
|
| 357 |
+
else:
|
| 358 |
+
|
| 359 |
+
def extractText(s, l, t):
|
| 360 |
+
t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
|
| 361 |
+
|
| 362 |
+
matchExpr.set_parse_action(extractText)
|
| 363 |
+
matchExpr.ignoreExprs = expr.ignoreExprs
|
| 364 |
+
matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
|
| 365 |
+
return matchExpr
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def ungroup(expr: ParserElement) -> ParserElement:
|
| 369 |
+
"""Helper to undo pyparsing's default grouping of And expressions,
|
| 370 |
+
even if all but one are non-empty.
|
| 371 |
+
"""
|
| 372 |
+
return TokenConverter(expr).add_parse_action(lambda t: t[0])
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def locatedExpr(expr: ParserElement) -> ParserElement:
|
| 376 |
+
"""
|
| 377 |
+
(DEPRECATED - future code should use the :class:`Located` class)
|
| 378 |
+
Helper to decorate a returned token with its starting and ending
|
| 379 |
+
locations in the input string.
|
| 380 |
+
|
| 381 |
+
This helper adds the following results names:
|
| 382 |
+
|
| 383 |
+
- ``locn_start`` - location where matched expression begins
|
| 384 |
+
- ``locn_end`` - location where matched expression ends
|
| 385 |
+
- ``value`` - the actual parsed results
|
| 386 |
+
|
| 387 |
+
Be careful if the input text contains ``<TAB>`` characters, you
|
| 388 |
+
may want to call :class:`ParserElement.parse_with_tabs`
|
| 389 |
+
|
| 390 |
+
Example::
|
| 391 |
+
|
| 392 |
+
wd = Word(alphas)
|
| 393 |
+
for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
|
| 394 |
+
print(match)
|
| 395 |
+
|
| 396 |
+
prints::
|
| 397 |
+
|
| 398 |
+
[[0, 'ljsdf', 5]]
|
| 399 |
+
[[8, 'lksdjjf', 15]]
|
| 400 |
+
[[18, 'lkkjj', 23]]
|
| 401 |
+
"""
|
| 402 |
+
locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
|
| 403 |
+
return Group(
|
| 404 |
+
locator("locn_start")
|
| 405 |
+
+ expr("value")
|
| 406 |
+
+ locator.copy().leaveWhitespace()("locn_end")
|
| 407 |
+
)
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
def nested_expr(
|
| 411 |
+
opener: Union[str, ParserElement] = "(",
|
| 412 |
+
closer: Union[str, ParserElement] = ")",
|
| 413 |
+
content: typing.Optional[ParserElement] = None,
|
| 414 |
+
ignore_expr: ParserElement = quoted_string(),
|
| 415 |
+
*,
|
| 416 |
+
ignoreExpr: ParserElement = quoted_string(),
|
| 417 |
+
) -> ParserElement:
|
| 418 |
+
"""Helper method for defining nested lists enclosed in opening and
|
| 419 |
+
closing delimiters (``"("`` and ``")"`` are the default).
|
| 420 |
+
|
| 421 |
+
Parameters:
|
| 422 |
+
|
| 423 |
+
- ``opener`` - opening character for a nested list
|
| 424 |
+
(default= ``"("``); can also be a pyparsing expression
|
| 425 |
+
- ``closer`` - closing character for a nested list
|
| 426 |
+
(default= ``")"``); can also be a pyparsing expression
|
| 427 |
+
- ``content`` - expression for items within the nested lists
|
| 428 |
+
(default= ``None``)
|
| 429 |
+
- ``ignore_expr`` - expression for ignoring opening and closing delimiters
|
| 430 |
+
(default= :class:`quoted_string`)
|
| 431 |
+
- ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
|
| 432 |
+
but will be removed in a future release
|
| 433 |
+
|
| 434 |
+
If an expression is not provided for the content argument, the
|
| 435 |
+
nested expression will capture all whitespace-delimited content
|
| 436 |
+
between delimiters as a list of separate values.
|
| 437 |
+
|
| 438 |
+
Use the ``ignore_expr`` argument to define expressions that may
|
| 439 |
+
contain opening or closing characters that should not be treated as
|
| 440 |
+
opening or closing characters for nesting, such as quoted_string or
|
| 441 |
+
a comment expression. Specify multiple expressions using an
|
| 442 |
+
:class:`Or` or :class:`MatchFirst`. The default is
|
| 443 |
+
:class:`quoted_string`, but if no expressions are to be ignored, then
|
| 444 |
+
pass ``None`` for this argument.
|
| 445 |
+
|
| 446 |
+
Example::
|
| 447 |
+
|
| 448 |
+
data_type = one_of("void int short long char float double")
|
| 449 |
+
decl_data_type = Combine(data_type + Opt(Word('*')))
|
| 450 |
+
ident = Word(alphas+'_', alphanums+'_')
|
| 451 |
+
number = pyparsing_common.number
|
| 452 |
+
arg = Group(decl_data_type + ident)
|
| 453 |
+
LPAR, RPAR = map(Suppress, "()")
|
| 454 |
+
|
| 455 |
+
code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
|
| 456 |
+
|
| 457 |
+
c_function = (decl_data_type("type")
|
| 458 |
+
+ ident("name")
|
| 459 |
+
+ LPAR + Opt(DelimitedList(arg), [])("args") + RPAR
|
| 460 |
+
+ code_body("body"))
|
| 461 |
+
c_function.ignore(c_style_comment)
|
| 462 |
+
|
| 463 |
+
source_code = '''
|
| 464 |
+
int is_odd(int x) {
|
| 465 |
+
return (x%2);
|
| 466 |
+
}
|
| 467 |
+
|
| 468 |
+
int dec_to_hex(char hchar) {
|
| 469 |
+
if (hchar >= '0' && hchar <= '9') {
|
| 470 |
+
return (ord(hchar)-ord('0'));
|
| 471 |
+
} else {
|
| 472 |
+
return (10+ord(hchar)-ord('A'));
|
| 473 |
+
}
|
| 474 |
+
}
|
| 475 |
+
'''
|
| 476 |
+
for func in c_function.search_string(source_code):
|
| 477 |
+
print("%(name)s (%(type)s) args: %(args)s" % func)
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
prints::
|
| 481 |
+
|
| 482 |
+
is_odd (int) args: [['int', 'x']]
|
| 483 |
+
dec_to_hex (int) args: [['char', 'hchar']]
|
| 484 |
+
"""
|
| 485 |
+
if ignoreExpr != ignore_expr:
|
| 486 |
+
ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
|
| 487 |
+
if opener == closer:
|
| 488 |
+
raise ValueError("opening and closing strings cannot be the same")
|
| 489 |
+
if content is None:
|
| 490 |
+
if isinstance(opener, str_type) and isinstance(closer, str_type):
|
| 491 |
+
opener = typing.cast(str, opener)
|
| 492 |
+
closer = typing.cast(str, closer)
|
| 493 |
+
if len(opener) == 1 and len(closer) == 1:
|
| 494 |
+
if ignoreExpr is not None:
|
| 495 |
+
content = Combine(
|
| 496 |
+
OneOrMore(
|
| 497 |
+
~ignoreExpr
|
| 498 |
+
+ CharsNotIn(
|
| 499 |
+
opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
|
| 500 |
+
exact=1,
|
| 501 |
+
)
|
| 502 |
+
)
|
| 503 |
+
).set_parse_action(lambda t: t[0].strip())
|
| 504 |
+
else:
|
| 505 |
+
content = empty.copy() + CharsNotIn(
|
| 506 |
+
opener + closer + ParserElement.DEFAULT_WHITE_CHARS
|
| 507 |
+
).set_parse_action(lambda t: t[0].strip())
|
| 508 |
+
else:
|
| 509 |
+
if ignoreExpr is not None:
|
| 510 |
+
content = Combine(
|
| 511 |
+
OneOrMore(
|
| 512 |
+
~ignoreExpr
|
| 513 |
+
+ ~Literal(opener)
|
| 514 |
+
+ ~Literal(closer)
|
| 515 |
+
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
|
| 516 |
+
)
|
| 517 |
+
).set_parse_action(lambda t: t[0].strip())
|
| 518 |
+
else:
|
| 519 |
+
content = Combine(
|
| 520 |
+
OneOrMore(
|
| 521 |
+
~Literal(opener)
|
| 522 |
+
+ ~Literal(closer)
|
| 523 |
+
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
|
| 524 |
+
)
|
| 525 |
+
).set_parse_action(lambda t: t[0].strip())
|
| 526 |
+
else:
|
| 527 |
+
raise ValueError(
|
| 528 |
+
"opening and closing arguments must be strings if no content expression is given"
|
| 529 |
+
)
|
| 530 |
+
ret = Forward()
|
| 531 |
+
if ignoreExpr is not None:
|
| 532 |
+
ret <<= Group(
|
| 533 |
+
Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
|
| 534 |
+
)
|
| 535 |
+
else:
|
| 536 |
+
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
|
| 537 |
+
ret.set_name("nested %s%s expression" % (opener, closer))
|
| 538 |
+
return ret
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr, str_type):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    attr_name = Word(alphas, alphanums + "_-:")
    # a trailing "/" marks a self-closing element; report it as a bool under "empty"
    empty_flag = Opt("/", default=[False])("empty").set_parse_action(
        lambda s, l, t: t[0] == "/"
    )

    if xml:
        # XML mode: attribute values must be double-quoted and "=" is required
        attr_value = dbl_quoted_string.copy().set_parse_action(remove_quotes)
        attrs = Dict(ZeroOrMore(Group(attr_name + Suppress("=") + attr_value)))
    else:
        # HTML mode: values may be quoted or bare; attribute names are
        # lower-cased, and the "=value" portion is optional
        attr_value = quoted_string.copy().set_parse_action(remove_quotes) | Word(
            printables, exclude_chars=">"
        )
        attrs = Dict(
            ZeroOrMore(
                Group(
                    attr_name.set_parse_action(lambda t: t[0].lower())
                    + Opt(Suppress("=") + attr_value)
                )
            )
        )

    openTag = suppress_LT + tagStr("tag") + attrs + empty_flag + suppress_GT
    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)

    # e.g. resname "a:b" -> "AB", used for start/end results names
    cap_name = "".join(resname.replace(":", " ").title().split())

    openTag.set_name("<%s>" % resname)
    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
    openTag.add_parse_action(lambda t: t.__setitem__("start" + cap_name, t.copy()))
    closeTag = closeTag("end" + cap_name).set_name("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    openTag.tag_body = SkipTo(closeTag())
    return openTag, closeTag
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
def make_html_tags(
    tag_str: Union[str, ParserElement]
) -> Tuple[ParserElement, ParserElement]:
    """Helper to construct opening and closing tag expressions for HTML,
    given a tag name. Matches tags in either upper or lower case,
    attributes with namespaces and with quoted or unquoted values.

    Example::

        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
        # make_html_tags returns pyparsing expressions for the opening and
        # closing tags as a 2-tuple
        a, a_end = make_html_tags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end

        for link in link_expr.search_string(text):
            # attributes in the <A> tag (like "href" shown here) are
            # also accessible as named results
            print(link.link_text, '->', link.href)

    prints::

        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
    """
    # HTML tag names are case-insensitive, hence xml=False
    return _makeTags(tag_str, xml=False)
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
def make_xml_tags(
    tag_str: Union[str, ParserElement]
) -> Tuple[ParserElement, ParserElement]:
    """Helper to construct opening and closing tag expressions for XML,
    given a tag name. Matches tags only in the given upper/lower case.

    Example: similar to :class:`make_html_tags`
    """
    # XML tag names are case-sensitive, hence xml=True
    return _makeTags(tag_str, xml=True)
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
any_open_tag: ParserElement
|
| 638 |
+
any_close_tag: ParserElement
|
| 639 |
+
any_open_tag, any_close_tag = make_html_tags(
|
| 640 |
+
Word(alphas, alphanums + "_:").set_name("any tag")
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
|
| 644 |
+
common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
|
| 645 |
+
"common HTML entity"
|
| 646 |
+
)
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
def replace_html_entity(s, l, t):
    """Helper parser action to replace common HTML entities with their special characters"""
    # unknown entity names map to None, which leaves the matched text unchanged
    return _htmlEntityMap.get(t.entity)
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
class OpAssoc(Enum):
    """Enumeration of operator associativity
    - used in constructing InfixNotationOperatorSpec for :class:`infix_notation`"""

    LEFT = 1
    RIGHT = 2
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
InfixNotationOperatorArgType = Union[
|
| 663 |
+
ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
|
| 664 |
+
]
|
| 665 |
+
InfixNotationOperatorSpec = Union[
|
| 666 |
+
Tuple[
|
| 667 |
+
InfixNotationOperatorArgType,
|
| 668 |
+
int,
|
| 669 |
+
OpAssoc,
|
| 670 |
+
typing.Optional[ParseAction],
|
| 671 |
+
],
|
| 672 |
+
Tuple[
|
| 673 |
+
InfixNotationOperatorArgType,
|
| 674 |
+
int,
|
| 675 |
+
OpAssoc,
|
| 676 |
+
],
|
| 677 |
+
]
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
def infix_notation(
    base_expr: ParserElement,
    op_list: List[InfixNotationOperatorSpec],
    lpar: Union[str, ParserElement] = Suppress("("),
    rpar: Union[str, ParserElement] = Suppress(")"),
) -> ParserElement:
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy.  Operators may be unary
    or binary, left- or right-associative.  Parse actions can also be
    attached to operator expressions. The generated parser will also
    recognize the use of parentheses to override operator precedences
    (see example below).

    Note: if you define a deep operator list, you may see performance
    issues when using infix_notation. See
    :class:`ParserElement.enable_packrat` for a mechanism to potentially
    improve your parser performance.

    Parameters:

    - ``base_expr`` - expression representing the most basic operand to
      be used in the expression
    - ``op_list`` - list of tuples, one for each operator precedence level
      in the expression grammar; each tuple is of the form ``(op_expr,
      num_operands, right_left_assoc, (optional)parse_action)``, where:

      - ``op_expr`` is the pyparsing expression for the operator; may also
        be a string, which will be converted to a Literal; if ``num_operands``
        is 3, ``op_expr`` is a tuple of two expressions, for the two
        operators separating the 3 terms
      - ``num_operands`` is the number of terms for this operator (must be 1,
        2, or 3)
      - ``right_left_assoc`` is the indicator whether the operator is right
        or left associative, using the pyparsing-defined constants
        ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
      - ``parse_action`` is the parse action to be associated with
        expressions matching this operator expression (the parse action
        tuple member may be omitted); if the parse action is passed
        a tuple or list of functions, this is equivalent to calling
        ``set_parse_action(*fn)``
        (:class:`ParserElement.set_parse_action`)

    - ``lpar`` - expression for matching left-parentheses; if passed as a
      str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as
      an expression (such as ``Literal('(')``), then it will be kept in
      the parsed results, and grouped with them. (default= ``Suppress('(')``)
    - ``rpar`` - expression for matching right-parentheses; if passed as a
      str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as
      an expression (such as ``Literal(')')``), then it will be kept in
      the parsed results, and grouped with them. (default= ``Suppress(')')``)

    Example::

        # simple example of four-function arithmetic with ints and
        # variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier

        arith_expr = infix_notation(integer | varname,
            [
            ('-', 1, OpAssoc.RIGHT),
            (one_of('* /'), 2, OpAssoc.LEFT),
            (one_of('+ -'), 2, OpAssoc.LEFT),
            ])

        arith_expr.run_tests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', full_dump=False)

    prints::

        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        (5+x)*y
        [[[5, '+', 'x'], '*', 'y']]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """

    # captive version of FollowedBy that does not do parse actions or capture results names
    class _FB(FollowedBy):
        def parseImpl(self, instring, loc, doActions=True):
            self.expr.try_parse(instring, loc)
            return loc, []

    _FB.__name__ = "FollowedBy>"

    ret = Forward()
    if isinstance(lpar, str):
        lpar = Suppress(lpar)
    if isinstance(rpar, str):
        rpar = Suppress(rpar)

    # if lpar and rpar are not suppressed, wrap in group
    # BUG FIX: the original condition tested ``isinstance(rpar, Suppress)``
    # twice, so a non-suppressing lpar (e.g. Literal('(')) combined with a
    # suppressing rpar was never grouped as documented above.
    if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
        lastExpr = base_expr | Group(lpar + ret + rpar)
    else:
        lastExpr = base_expr | (lpar + ret + rpar)

    arity: int
    rightLeftAssoc: OpAssoc
    pa: typing.Optional[ParseAction]
    opExpr1: ParserElement
    opExpr2: ParserElement
    for i, operDef in enumerate(op_list):
        # pad the spec tuple so a 3-tuple (no parse action) unpacks cleanly
        opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]  # type: ignore[assignment]
        if isinstance(opExpr, str_type):
            opExpr = ParserElement._literalStringClass(opExpr)
        opExpr = typing.cast(ParserElement, opExpr)
        if arity == 3:
            if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
                raise ValueError(
                    "if numterms=3, opExpr must be a tuple or list of two expressions"
                )
            opExpr1, opExpr2 = opExpr
            term_name = f"{opExpr1}{opExpr2} term"
        else:
            term_name = f"{opExpr} term"

        if not 1 <= arity <= 3:
            raise ValueError("operator must be unary (1), binary (2), or ternary (3)")

        if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
            raise ValueError("operator must indicate right or left associativity")

        thisExpr: ParserElement = Forward().set_name(term_name)
        thisExpr = typing.cast(Forward, thisExpr)
        if rightLeftAssoc is OpAssoc.LEFT:
            if arity == 1:
                matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
                        lastExpr + (opExpr + lastExpr)[1, ...]
                    )
                else:
                    matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
            elif arity == 3:
                matchExpr = _FB(
                    lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
                ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
        elif rightLeftAssoc is OpAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Opt):
                    opExpr = Opt(opExpr)
                matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
                        lastExpr + (opExpr + thisExpr)[1, ...]
                    )
                else:
                    matchExpr = _FB(lastExpr + thisExpr) + Group(
                        lastExpr + thisExpr[1, ...]
                    )
            elif arity == 3:
                matchExpr = _FB(
                    lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
                ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
        if pa:
            if isinstance(pa, (tuple, list)):
                matchExpr.set_parse_action(*pa)
            else:
                matchExpr.set_parse_action(pa)
        # consistency: use PEP8 set_name (setName is the legacy synonym)
        thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
    """
    (DEPRECATED - use :class:`IndentedBlock` class instead)
    Helper method for defining space-delimited indentation blocks,
    such as those used to define block statements in Python source code.

    Parameters:

    - ``blockStatementExpr`` - expression defining syntax of statement that
      is repeated within the indented block
    - ``indentStack`` - list created by caller to manage indentation stack
      (multiple ``statementWithIndentedBlock`` expressions within a single
      grammar should share a common ``indentStack``)
    - ``indent`` - boolean indicating whether block must be indented beyond
      the current level; set to ``False`` for block of left-most statements
      (default= ``True``)

    A valid block must contain at least one ``blockStatement``.

    (Note that indentedBlock uses internal parse actions which make it
    incompatible with packrat parsing.)
    """
    # NOTE(review): the mutable default for ``backup_stacks`` appears
    # deliberate - it is shared across calls so failed parses can restore the
    # caller's indentStack; do not "fix" it to a None default.
    backup_stacks.append(indentStack[:])

    def restore_checkpoint():
        indentStack[:] = backup_stacks[-1]

    def peer_indent_check(s, l, t):
        # end of input is always acceptable as a peer
        if l >= len(s):
            return
        cur_col = col(l, s)
        if cur_col != indentStack[-1]:
            if cur_col > indentStack[-1]:
                raise ParseException(s, l, "illegal nesting")
            raise ParseException(s, l, "not a peer entry")

    def sub_indent_check(s, l, t):
        cur_col = col(l, s)
        if cur_col > indentStack[-1]:
            indentStack.append(cur_col)
        else:
            raise ParseException(s, l, "not a subentry")

    def unindent_check(s, l, t):
        # end of input counts as a valid unindent
        if l >= len(s):
            return
        cur_col = col(l, s)
        if not (indentStack and cur_col in indentStack):
            raise ParseException(s, l, "not an unindent")
        if cur_col < indentStack[-1]:
            indentStack.pop()

    NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
    INDENT = (Empty() + Empty().set_parse_action(sub_indent_check)).set_name("INDENT")
    PEER = Empty().set_parse_action(peer_indent_check).set_name("")
    UNDENT = Empty().set_parse_action(unindent_check).set_name("UNINDENT")

    # one statement at the current indentation level, with optional trailing newlines
    stmt_group = PEER + Group(blockStatementExpr) + Opt(NL)
    if indent:
        block_expr = Group(Opt(NL) + INDENT + OneOrMore(stmt_group) + UNDENT)
    else:
        block_expr = Group(Opt(NL) + OneOrMore(stmt_group) + Opt(UNDENT))

    # add a parse action to remove backup_stack from list of backups
    block_expr.add_parse_action(
        lambda: backup_stacks.pop(-1) and None if backup_stacks else None
    )
    block_expr.set_fail_action(lambda a, b, c, d: restore_checkpoint())
    blockStatementExpr.ignore(_bslash + LineEnd())
    return block_expr.set_name("indented block")
|
| 998 |
+
|
| 999 |
+
|
| 1000 |
+
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
|
| 1001 |
+
c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
|
| 1002 |
+
"C style comment"
|
| 1003 |
+
)
|
| 1004 |
+
"Comment of the form ``/* ... */``"
|
| 1005 |
+
|
| 1006 |
+
html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
|
| 1007 |
+
"Comment of the form ``<!-- ... -->``"
|
| 1008 |
+
|
| 1009 |
+
rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
|
| 1010 |
+
dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
|
| 1011 |
+
"Comment of the form ``// ... (to end of line)``"
|
| 1012 |
+
|
| 1013 |
+
cpp_style_comment = Combine(
|
| 1014 |
+
Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
|
| 1015 |
+
).set_name("C++ style comment")
|
| 1016 |
+
"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
|
| 1017 |
+
|
| 1018 |
+
java_style_comment = cpp_style_comment
|
| 1019 |
+
"Same as :class:`cpp_style_comment`"
|
| 1020 |
+
|
| 1021 |
+
python_style_comment = Regex(r"#.*").set_name("Python style comment")
|
| 1022 |
+
"Comment of the form ``# ... (to end of line)``"
|
| 1023 |
+
|
| 1024 |
+
|
| 1025 |
+
# build list of built-in expressions, for future reference if a global default value
|
| 1026 |
+
# gets updated
|
| 1027 |
+
_builtin_exprs: List[ParserElement] = [
|
| 1028 |
+
v for v in vars().values() if isinstance(v, ParserElement)
|
| 1029 |
+
]
|
| 1030 |
+
|
| 1031 |
+
|
| 1032 |
+
# compatibility function, superseded by DelimitedList class
|
| 1033 |
+
# NOTE(review): this definition is shadowed later in this module by a
# @replaced_by_pep8 stub of the same name, so this body is effectively dead
# code; it is kept byte-compatible here for source stability.
def delimited_list(
    expr: Union[str, ParserElement],
    delim: Union[str, ParserElement] = ",",
    combine: bool = False,
    min: typing.Optional[int] = None,
    max: typing.Optional[int] = None,
    *,
    allow_trailing_delim: bool = False,
) -> ParserElement:
    """(DEPRECATED - use :class:`DelimitedList` class)"""
    return DelimitedList(
        expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim
    )
|
| 1046 |
+
|
| 1047 |
+
|
| 1048 |
+
# pre-PEP8 compatible names
|
| 1049 |
+
# fmt: off
|
| 1050 |
+
opAssoc = OpAssoc
|
| 1051 |
+
anyOpenTag = any_open_tag
|
| 1052 |
+
anyCloseTag = any_close_tag
|
| 1053 |
+
commonHTMLEntity = common_html_entity
|
| 1054 |
+
cStyleComment = c_style_comment
|
| 1055 |
+
htmlComment = html_comment
|
| 1056 |
+
restOfLine = rest_of_line
|
| 1057 |
+
dblSlashComment = dbl_slash_comment
|
| 1058 |
+
cppStyleComment = cpp_style_comment
|
| 1059 |
+
javaStyleComment = java_style_comment
|
| 1060 |
+
pythonStyleComment = python_style_comment
|
| 1061 |
+
|
| 1062 |
+
@replaced_by_pep8(DelimitedList)
|
| 1063 |
+
def delimitedList(): ...
|
| 1064 |
+
|
| 1065 |
+
@replaced_by_pep8(DelimitedList)
|
| 1066 |
+
def delimited_list(): ...
|
| 1067 |
+
|
| 1068 |
+
@replaced_by_pep8(counted_array)
|
| 1069 |
+
def countedArray(): ...
|
| 1070 |
+
|
| 1071 |
+
@replaced_by_pep8(match_previous_literal)
|
| 1072 |
+
def matchPreviousLiteral(): ...
|
| 1073 |
+
|
| 1074 |
+
@replaced_by_pep8(match_previous_expr)
|
| 1075 |
+
def matchPreviousExpr(): ...
|
| 1076 |
+
|
| 1077 |
+
@replaced_by_pep8(one_of)
|
| 1078 |
+
def oneOf(): ...
|
| 1079 |
+
|
| 1080 |
+
@replaced_by_pep8(dict_of)
|
| 1081 |
+
def dictOf(): ...
|
| 1082 |
+
|
| 1083 |
+
@replaced_by_pep8(original_text_for)
|
| 1084 |
+
def originalTextFor(): ...
|
| 1085 |
+
|
| 1086 |
+
@replaced_by_pep8(nested_expr)
|
| 1087 |
+
def nestedExpr(): ...
|
| 1088 |
+
|
| 1089 |
+
@replaced_by_pep8(make_html_tags)
|
| 1090 |
+
def makeHTMLTags(): ...
|
| 1091 |
+
|
| 1092 |
+
@replaced_by_pep8(make_xml_tags)
|
| 1093 |
+
def makeXMLTags(): ...
|
| 1094 |
+
|
| 1095 |
+
@replaced_by_pep8(replace_html_entity)
|
| 1096 |
+
def replaceHTMLEntity(): ...
|
| 1097 |
+
|
| 1098 |
+
@replaced_by_pep8(infix_notation)
|
| 1099 |
+
def infixNotation(): ...
|
| 1100 |
+
# fmt: on
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/results.py
ADDED
|
@@ -0,0 +1,796 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# results.py
|
| 2 |
+
from collections.abc import (
|
| 3 |
+
MutableMapping,
|
| 4 |
+
Mapping,
|
| 5 |
+
MutableSequence,
|
| 6 |
+
Iterator,
|
| 7 |
+
Sequence,
|
| 8 |
+
Container,
|
| 9 |
+
)
|
| 10 |
+
import pprint
|
| 11 |
+
from typing import Tuple, Any, Dict, Set, List
|
| 12 |
+
|
| 13 |
+
str_type: Tuple[type, ...] = (str, bytes)
|
| 14 |
+
_generator_type = type((_ for _ in ()))
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _ParseResultsWithOffset:
    # Minimal (ParseResults, offset) pair; behaves like a 2-tuple for
    # indexing and pickles as the bare tuple.
    tup: Tuple["ParseResults", int]
    __slots__ = ["tup"]

    def __init__(self, p1: "ParseResults", p2: int):
        self.tup: Tuple[ParseResults, int] = (p1, p2)

    def __getitem__(self, i):
        # delegate indexing directly to the underlying tuple
        return self.tup[i]

    def __getstate__(self):
        return self.tup

    def __setstate__(self, *args):
        # state arrives as a single positional argument: the saved tuple
        self.tup = args[0]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ParseResults:
|
| 35 |
+
"""Structured parse results, to provide multiple means of access to
|
| 36 |
+
the parsed data:
|
| 37 |
+
|
| 38 |
+
- as a list (``len(results)``)
|
| 39 |
+
- by list index (``results[0], results[1]``, etc.)
|
| 40 |
+
- by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
|
| 41 |
+
|
| 42 |
+
Example::
|
| 43 |
+
|
| 44 |
+
integer = Word(nums)
|
| 45 |
+
date_str = (integer.set_results_name("year") + '/'
|
| 46 |
+
+ integer.set_results_name("month") + '/'
|
| 47 |
+
+ integer.set_results_name("day"))
|
| 48 |
+
# equivalent form:
|
| 49 |
+
# date_str = (integer("year") + '/'
|
| 50 |
+
# + integer("month") + '/'
|
| 51 |
+
# + integer("day"))
|
| 52 |
+
|
| 53 |
+
# parse_string returns a ParseResults object
|
| 54 |
+
result = date_str.parse_string("1999/12/31")
|
| 55 |
+
|
| 56 |
+
def test(s, fn=repr):
|
| 57 |
+
print(f"{s} -> {fn(eval(s))}")
|
| 58 |
+
test("list(result)")
|
| 59 |
+
test("result[0]")
|
| 60 |
+
test("result['month']")
|
| 61 |
+
test("result.day")
|
| 62 |
+
test("'month' in result")
|
| 63 |
+
test("'minutes' in result")
|
| 64 |
+
test("result.dump()", str)
|
| 65 |
+
|
| 66 |
+
prints::
|
| 67 |
+
|
| 68 |
+
list(result) -> ['1999', '/', '12', '/', '31']
|
| 69 |
+
result[0] -> '1999'
|
| 70 |
+
result['month'] -> '12'
|
| 71 |
+
result.day -> '31'
|
| 72 |
+
'month' in result -> True
|
| 73 |
+
'minutes' in result -> False
|
| 74 |
+
result.dump() -> ['1999', '/', '12', '/', '31']
|
| 75 |
+
- day: '31'
|
| 76 |
+
- month: '12'
|
| 77 |
+
- year: '1999'
|
| 78 |
+
"""
|
| 79 |
+
|
| 80 |
+
_null_values: Tuple[Any, ...] = (None, [], ())
|
| 81 |
+
|
| 82 |
+
_name: str
|
| 83 |
+
_parent: "ParseResults"
|
| 84 |
+
_all_names: Set[str]
|
| 85 |
+
_modal: bool
|
| 86 |
+
_toklist: List[Any]
|
| 87 |
+
_tokdict: Dict[str, Any]
|
| 88 |
+
|
| 89 |
+
__slots__ = (
|
| 90 |
+
"_name",
|
| 91 |
+
"_parent",
|
| 92 |
+
"_all_names",
|
| 93 |
+
"_modal",
|
| 94 |
+
"_toklist",
|
| 95 |
+
"_tokdict",
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
class List(list):
|
| 99 |
+
"""
|
| 100 |
+
Simple wrapper class to distinguish parsed list results that should be preserved
|
| 101 |
+
as actual Python lists, instead of being converted to :class:`ParseResults`::
|
| 102 |
+
|
| 103 |
+
LBRACK, RBRACK = map(pp.Suppress, "[]")
|
| 104 |
+
element = pp.Forward()
|
| 105 |
+
item = ppc.integer
|
| 106 |
+
element_list = LBRACK + pp.DelimitedList(element) + RBRACK
|
| 107 |
+
|
| 108 |
+
# add parse actions to convert from ParseResults to actual Python collection types
|
| 109 |
+
def as_python_list(t):
|
| 110 |
+
return pp.ParseResults.List(t.as_list())
|
| 111 |
+
element_list.add_parse_action(as_python_list)
|
| 112 |
+
|
| 113 |
+
element <<= item | element_list
|
| 114 |
+
|
| 115 |
+
element.run_tests('''
|
| 116 |
+
100
|
| 117 |
+
[2,3,4]
|
| 118 |
+
[[2, 1],3,4]
|
| 119 |
+
[(2, 1),3,4]
|
| 120 |
+
(2,3,4)
|
| 121 |
+
''', post_parse=lambda s, r: (r[0], type(r[0])))
|
| 122 |
+
|
| 123 |
+
prints::
|
| 124 |
+
|
| 125 |
+
100
|
| 126 |
+
(100, <class 'int'>)
|
| 127 |
+
|
| 128 |
+
[2,3,4]
|
| 129 |
+
([2, 3, 4], <class 'list'>)
|
| 130 |
+
|
| 131 |
+
[[2, 1],3,4]
|
| 132 |
+
([[2, 1], 3, 4], <class 'list'>)
|
| 133 |
+
|
| 134 |
+
(Used internally by :class:`Group` when `aslist=True`.)
|
| 135 |
+
"""
|
| 136 |
+
|
| 137 |
+
def __new__(cls, contained=None):
|
| 138 |
+
if contained is None:
|
| 139 |
+
contained = []
|
| 140 |
+
|
| 141 |
+
if not isinstance(contained, list):
|
| 142 |
+
raise TypeError(
|
| 143 |
+
f"{cls.__name__} may only be constructed with a list, not {type(contained).__name__}"
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
return list.__new__(cls)
|
| 147 |
+
|
| 148 |
+
def __new__(cls, toklist=None, name=None, **kwargs):
|
| 149 |
+
if isinstance(toklist, ParseResults):
|
| 150 |
+
return toklist
|
| 151 |
+
self = object.__new__(cls)
|
| 152 |
+
self._name = None
|
| 153 |
+
self._parent = None
|
| 154 |
+
self._all_names = set()
|
| 155 |
+
|
| 156 |
+
if toklist is None:
|
| 157 |
+
self._toklist = []
|
| 158 |
+
elif isinstance(toklist, (list, _generator_type)):
|
| 159 |
+
self._toklist = (
|
| 160 |
+
[toklist[:]]
|
| 161 |
+
if isinstance(toklist, ParseResults.List)
|
| 162 |
+
else list(toklist)
|
| 163 |
+
)
|
| 164 |
+
else:
|
| 165 |
+
self._toklist = [toklist]
|
| 166 |
+
self._tokdict = dict()
|
| 167 |
+
return self
|
| 168 |
+
|
| 169 |
+
# Performance tuning: we construct a *lot* of these, so keep this
|
| 170 |
+
# constructor as small and fast as possible
|
| 171 |
+
def __init__(
|
| 172 |
+
self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
|
| 173 |
+
):
|
| 174 |
+
self._tokdict: Dict[str, _ParseResultsWithOffset]
|
| 175 |
+
self._modal = modal
|
| 176 |
+
if name is not None and name != "":
|
| 177 |
+
if isinstance(name, int):
|
| 178 |
+
name = str(name)
|
| 179 |
+
if not modal:
|
| 180 |
+
self._all_names = {name}
|
| 181 |
+
self._name = name
|
| 182 |
+
if toklist not in self._null_values:
|
| 183 |
+
if isinstance(toklist, (str_type, type)):
|
| 184 |
+
toklist = [toklist]
|
| 185 |
+
if asList:
|
| 186 |
+
if isinstance(toklist, ParseResults):
|
| 187 |
+
self[name] = _ParseResultsWithOffset(
|
| 188 |
+
ParseResults(toklist._toklist), 0
|
| 189 |
+
)
|
| 190 |
+
else:
|
| 191 |
+
self[name] = _ParseResultsWithOffset(
|
| 192 |
+
ParseResults(toklist[0]), 0
|
| 193 |
+
)
|
| 194 |
+
self[name]._name = name
|
| 195 |
+
else:
|
| 196 |
+
try:
|
| 197 |
+
self[name] = toklist[0]
|
| 198 |
+
except (KeyError, TypeError, IndexError):
|
| 199 |
+
if toklist is not self:
|
| 200 |
+
self[name] = toklist
|
| 201 |
+
else:
|
| 202 |
+
self._name = name
|
| 203 |
+
|
| 204 |
+
def __getitem__(self, i):
|
| 205 |
+
if isinstance(i, (int, slice)):
|
| 206 |
+
return self._toklist[i]
|
| 207 |
+
else:
|
| 208 |
+
if i not in self._all_names:
|
| 209 |
+
return self._tokdict[i][-1][0]
|
| 210 |
+
else:
|
| 211 |
+
return ParseResults([v[0] for v in self._tokdict[i]])
|
| 212 |
+
|
| 213 |
+
def __setitem__(self, k, v, isinstance=isinstance):
|
| 214 |
+
if isinstance(v, _ParseResultsWithOffset):
|
| 215 |
+
self._tokdict[k] = self._tokdict.get(k, list()) + [v]
|
| 216 |
+
sub = v[0]
|
| 217 |
+
elif isinstance(k, (int, slice)):
|
| 218 |
+
self._toklist[k] = v
|
| 219 |
+
sub = v
|
| 220 |
+
else:
|
| 221 |
+
self._tokdict[k] = self._tokdict.get(k, list()) + [
|
| 222 |
+
_ParseResultsWithOffset(v, 0)
|
| 223 |
+
]
|
| 224 |
+
sub = v
|
| 225 |
+
if isinstance(sub, ParseResults):
|
| 226 |
+
sub._parent = self
|
| 227 |
+
|
| 228 |
+
def __delitem__(self, i):
|
| 229 |
+
if isinstance(i, (int, slice)):
|
| 230 |
+
mylen = len(self._toklist)
|
| 231 |
+
del self._toklist[i]
|
| 232 |
+
|
| 233 |
+
# convert int to slice
|
| 234 |
+
if isinstance(i, int):
|
| 235 |
+
if i < 0:
|
| 236 |
+
i += mylen
|
| 237 |
+
i = slice(i, i + 1)
|
| 238 |
+
# get removed indices
|
| 239 |
+
removed = list(range(*i.indices(mylen)))
|
| 240 |
+
removed.reverse()
|
| 241 |
+
# fixup indices in token dictionary
|
| 242 |
+
for name, occurrences in self._tokdict.items():
|
| 243 |
+
for j in removed:
|
| 244 |
+
for k, (value, position) in enumerate(occurrences):
|
| 245 |
+
occurrences[k] = _ParseResultsWithOffset(
|
| 246 |
+
value, position - (position > j)
|
| 247 |
+
)
|
| 248 |
+
else:
|
| 249 |
+
del self._tokdict[i]
|
| 250 |
+
|
| 251 |
+
def __contains__(self, k) -> bool:
|
| 252 |
+
return k in self._tokdict
|
| 253 |
+
|
| 254 |
+
def __len__(self) -> int:
|
| 255 |
+
return len(self._toklist)
|
| 256 |
+
|
| 257 |
+
def __bool__(self) -> bool:
|
| 258 |
+
return not not (self._toklist or self._tokdict)
|
| 259 |
+
|
| 260 |
+
def __iter__(self) -> Iterator:
|
| 261 |
+
return iter(self._toklist)
|
| 262 |
+
|
| 263 |
+
def __reversed__(self) -> Iterator:
|
| 264 |
+
return iter(self._toklist[::-1])
|
| 265 |
+
|
| 266 |
+
def keys(self):
|
| 267 |
+
return iter(self._tokdict)
|
| 268 |
+
|
| 269 |
+
def values(self):
|
| 270 |
+
return (self[k] for k in self.keys())
|
| 271 |
+
|
| 272 |
+
def items(self):
|
| 273 |
+
return ((k, self[k]) for k in self.keys())
|
| 274 |
+
|
| 275 |
+
def haskeys(self) -> bool:
|
| 276 |
+
"""
|
| 277 |
+
Since ``keys()`` returns an iterator, this method is helpful in bypassing
|
| 278 |
+
code that looks for the existence of any defined results names."""
|
| 279 |
+
return not not self._tokdict
|
| 280 |
+
|
| 281 |
+
def pop(self, *args, **kwargs):
|
| 282 |
+
"""
|
| 283 |
+
Removes and returns item at specified index (default= ``last``).
|
| 284 |
+
Supports both ``list`` and ``dict`` semantics for ``pop()``. If
|
| 285 |
+
passed no argument or an integer argument, it will use ``list``
|
| 286 |
+
semantics and pop tokens from the list of parsed tokens. If passed
|
| 287 |
+
a non-integer argument (most likely a string), it will use ``dict``
|
| 288 |
+
semantics and pop the corresponding value from any defined results
|
| 289 |
+
names. A second default return value argument is supported, just as in
|
| 290 |
+
``dict.pop()``.
|
| 291 |
+
|
| 292 |
+
Example::
|
| 293 |
+
|
| 294 |
+
numlist = Word(nums)[...]
|
| 295 |
+
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
| 296 |
+
|
| 297 |
+
def remove_first(tokens):
|
| 298 |
+
tokens.pop(0)
|
| 299 |
+
numlist.add_parse_action(remove_first)
|
| 300 |
+
print(numlist.parse_string("0 123 321")) # -> ['123', '321']
|
| 301 |
+
|
| 302 |
+
label = Word(alphas)
|
| 303 |
+
patt = label("LABEL") + Word(nums)[1, ...]
|
| 304 |
+
print(patt.parse_string("AAB 123 321").dump())
|
| 305 |
+
|
| 306 |
+
# Use pop() in a parse action to remove named result (note that corresponding value is not
|
| 307 |
+
# removed from list form of results)
|
| 308 |
+
def remove_LABEL(tokens):
|
| 309 |
+
tokens.pop("LABEL")
|
| 310 |
+
return tokens
|
| 311 |
+
patt.add_parse_action(remove_LABEL)
|
| 312 |
+
print(patt.parse_string("AAB 123 321").dump())
|
| 313 |
+
|
| 314 |
+
prints::
|
| 315 |
+
|
| 316 |
+
['AAB', '123', '321']
|
| 317 |
+
- LABEL: 'AAB'
|
| 318 |
+
|
| 319 |
+
['AAB', '123', '321']
|
| 320 |
+
"""
|
| 321 |
+
if not args:
|
| 322 |
+
args = [-1]
|
| 323 |
+
for k, v in kwargs.items():
|
| 324 |
+
if k == "default":
|
| 325 |
+
args = (args[0], v)
|
| 326 |
+
else:
|
| 327 |
+
raise TypeError(f"pop() got an unexpected keyword argument {k!r}")
|
| 328 |
+
if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
|
| 329 |
+
index = args[0]
|
| 330 |
+
ret = self[index]
|
| 331 |
+
del self[index]
|
| 332 |
+
return ret
|
| 333 |
+
else:
|
| 334 |
+
defaultvalue = args[1]
|
| 335 |
+
return defaultvalue
|
| 336 |
+
|
| 337 |
+
def get(self, key, default_value=None):
|
| 338 |
+
"""
|
| 339 |
+
Returns named result matching the given key, or if there is no
|
| 340 |
+
such name, then returns the given ``default_value`` or ``None`` if no
|
| 341 |
+
``default_value`` is specified.
|
| 342 |
+
|
| 343 |
+
Similar to ``dict.get()``.
|
| 344 |
+
|
| 345 |
+
Example::
|
| 346 |
+
|
| 347 |
+
integer = Word(nums)
|
| 348 |
+
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
|
| 349 |
+
|
| 350 |
+
result = date_str.parse_string("1999/12/31")
|
| 351 |
+
print(result.get("year")) # -> '1999'
|
| 352 |
+
print(result.get("hour", "not specified")) # -> 'not specified'
|
| 353 |
+
print(result.get("hour")) # -> None
|
| 354 |
+
"""
|
| 355 |
+
if key in self:
|
| 356 |
+
return self[key]
|
| 357 |
+
else:
|
| 358 |
+
return default_value
|
| 359 |
+
|
| 360 |
+
def insert(self, index, ins_string):
|
| 361 |
+
"""
|
| 362 |
+
Inserts new element at location index in the list of parsed tokens.
|
| 363 |
+
|
| 364 |
+
Similar to ``list.insert()``.
|
| 365 |
+
|
| 366 |
+
Example::
|
| 367 |
+
|
| 368 |
+
numlist = Word(nums)[...]
|
| 369 |
+
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
| 370 |
+
|
| 371 |
+
# use a parse action to insert the parse location in the front of the parsed results
|
| 372 |
+
def insert_locn(locn, tokens):
|
| 373 |
+
tokens.insert(0, locn)
|
| 374 |
+
numlist.add_parse_action(insert_locn)
|
| 375 |
+
print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
|
| 376 |
+
"""
|
| 377 |
+
self._toklist.insert(index, ins_string)
|
| 378 |
+
# fixup indices in token dictionary
|
| 379 |
+
for name, occurrences in self._tokdict.items():
|
| 380 |
+
for k, (value, position) in enumerate(occurrences):
|
| 381 |
+
occurrences[k] = _ParseResultsWithOffset(
|
| 382 |
+
value, position + (position > index)
|
| 383 |
+
)
|
| 384 |
+
|
| 385 |
+
def append(self, item):
|
| 386 |
+
"""
|
| 387 |
+
Add single element to end of ``ParseResults`` list of elements.
|
| 388 |
+
|
| 389 |
+
Example::
|
| 390 |
+
|
| 391 |
+
numlist = Word(nums)[...]
|
| 392 |
+
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
|
| 393 |
+
|
| 394 |
+
# use a parse action to compute the sum of the parsed integers, and add it to the end
|
| 395 |
+
def append_sum(tokens):
|
| 396 |
+
tokens.append(sum(map(int, tokens)))
|
| 397 |
+
numlist.add_parse_action(append_sum)
|
| 398 |
+
print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
|
| 399 |
+
"""
|
| 400 |
+
self._toklist.append(item)
|
| 401 |
+
|
| 402 |
+
def extend(self, itemseq):
|
| 403 |
+
"""
|
| 404 |
+
Add sequence of elements to end of ``ParseResults`` list of elements.
|
| 405 |
+
|
| 406 |
+
Example::
|
| 407 |
+
|
| 408 |
+
patt = Word(alphas)[1, ...]
|
| 409 |
+
|
| 410 |
+
# use a parse action to append the reverse of the matched strings, to make a palindrome
|
| 411 |
+
def make_palindrome(tokens):
|
| 412 |
+
tokens.extend(reversed([t[::-1] for t in tokens]))
|
| 413 |
+
return ''.join(tokens)
|
| 414 |
+
patt.add_parse_action(make_palindrome)
|
| 415 |
+
print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
|
| 416 |
+
"""
|
| 417 |
+
if isinstance(itemseq, ParseResults):
|
| 418 |
+
self.__iadd__(itemseq)
|
| 419 |
+
else:
|
| 420 |
+
self._toklist.extend(itemseq)
|
| 421 |
+
|
| 422 |
+
def clear(self):
|
| 423 |
+
"""
|
| 424 |
+
Clear all elements and results names.
|
| 425 |
+
"""
|
| 426 |
+
del self._toklist[:]
|
| 427 |
+
self._tokdict.clear()
|
| 428 |
+
|
| 429 |
+
def __getattr__(self, name):
|
| 430 |
+
try:
|
| 431 |
+
return self[name]
|
| 432 |
+
except KeyError:
|
| 433 |
+
if name.startswith("__"):
|
| 434 |
+
raise AttributeError(name)
|
| 435 |
+
return ""
|
| 436 |
+
|
| 437 |
+
def __add__(self, other: "ParseResults") -> "ParseResults":
|
| 438 |
+
ret = self.copy()
|
| 439 |
+
ret += other
|
| 440 |
+
return ret
|
| 441 |
+
|
| 442 |
+
def __iadd__(self, other: "ParseResults") -> "ParseResults":
|
| 443 |
+
if not other:
|
| 444 |
+
return self
|
| 445 |
+
|
| 446 |
+
if other._tokdict:
|
| 447 |
+
offset = len(self._toklist)
|
| 448 |
+
addoffset = lambda a: offset if a < 0 else a + offset
|
| 449 |
+
otheritems = other._tokdict.items()
|
| 450 |
+
otherdictitems = [
|
| 451 |
+
(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
|
| 452 |
+
for k, vlist in otheritems
|
| 453 |
+
for v in vlist
|
| 454 |
+
]
|
| 455 |
+
for k, v in otherdictitems:
|
| 456 |
+
self[k] = v
|
| 457 |
+
if isinstance(v[0], ParseResults):
|
| 458 |
+
v[0]._parent = self
|
| 459 |
+
|
| 460 |
+
self._toklist += other._toklist
|
| 461 |
+
self._all_names |= other._all_names
|
| 462 |
+
return self
|
| 463 |
+
|
| 464 |
+
def __radd__(self, other) -> "ParseResults":
|
| 465 |
+
if isinstance(other, int) and other == 0:
|
| 466 |
+
# useful for merging many ParseResults using sum() builtin
|
| 467 |
+
return self.copy()
|
| 468 |
+
else:
|
| 469 |
+
# this may raise a TypeError - so be it
|
| 470 |
+
return other + self
|
| 471 |
+
|
| 472 |
+
def __repr__(self) -> str:
|
| 473 |
+
return f"{type(self).__name__}({self._toklist!r}, {self.as_dict()})"
|
| 474 |
+
|
| 475 |
+
def __str__(self) -> str:
|
| 476 |
+
return (
|
| 477 |
+
"["
|
| 478 |
+
+ ", ".join(
|
| 479 |
+
[
|
| 480 |
+
str(i) if isinstance(i, ParseResults) else repr(i)
|
| 481 |
+
for i in self._toklist
|
| 482 |
+
]
|
| 483 |
+
)
|
| 484 |
+
+ "]"
|
| 485 |
+
)
|
| 486 |
+
|
| 487 |
+
def _asStringList(self, sep=""):
|
| 488 |
+
out = []
|
| 489 |
+
for item in self._toklist:
|
| 490 |
+
if out and sep:
|
| 491 |
+
out.append(sep)
|
| 492 |
+
if isinstance(item, ParseResults):
|
| 493 |
+
out += item._asStringList()
|
| 494 |
+
else:
|
| 495 |
+
out.append(str(item))
|
| 496 |
+
return out
|
| 497 |
+
|
| 498 |
+
def as_list(self) -> list:
|
| 499 |
+
"""
|
| 500 |
+
Returns the parse results as a nested list of matching tokens, all converted to strings.
|
| 501 |
+
|
| 502 |
+
Example::
|
| 503 |
+
|
| 504 |
+
patt = Word(alphas)[1, ...]
|
| 505 |
+
result = patt.parse_string("sldkj lsdkj sldkj")
|
| 506 |
+
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
|
| 507 |
+
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
|
| 508 |
+
|
| 509 |
+
# Use as_list() to create an actual list
|
| 510 |
+
result_list = result.as_list()
|
| 511 |
+
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
|
| 512 |
+
"""
|
| 513 |
+
return [
|
| 514 |
+
res.as_list() if isinstance(res, ParseResults) else res
|
| 515 |
+
for res in self._toklist
|
| 516 |
+
]
|
| 517 |
+
|
| 518 |
+
def as_dict(self) -> dict:
|
| 519 |
+
"""
|
| 520 |
+
Returns the named parse results as a nested dictionary.
|
| 521 |
+
|
| 522 |
+
Example::
|
| 523 |
+
|
| 524 |
+
integer = Word(nums)
|
| 525 |
+
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
|
| 526 |
+
|
| 527 |
+
result = date_str.parse_string('12/31/1999')
|
| 528 |
+
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
|
| 529 |
+
|
| 530 |
+
result_dict = result.as_dict()
|
| 531 |
+
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
|
| 532 |
+
|
| 533 |
+
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
|
| 534 |
+
import json
|
| 535 |
+
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
|
| 536 |
+
print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
|
| 537 |
+
"""
|
| 538 |
+
|
| 539 |
+
def to_item(obj):
|
| 540 |
+
if isinstance(obj, ParseResults):
|
| 541 |
+
return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
|
| 542 |
+
else:
|
| 543 |
+
return obj
|
| 544 |
+
|
| 545 |
+
return dict((k, to_item(v)) for k, v in self.items())
|
| 546 |
+
|
| 547 |
+
def copy(self) -> "ParseResults":
|
| 548 |
+
"""
|
| 549 |
+
Returns a new shallow copy of a :class:`ParseResults` object. `ParseResults`
|
| 550 |
+
items contained within the source are shared with the copy. Use
|
| 551 |
+
:class:`ParseResults.deepcopy()` to create a copy with its own separate
|
| 552 |
+
content values.
|
| 553 |
+
"""
|
| 554 |
+
ret = ParseResults(self._toklist)
|
| 555 |
+
ret._tokdict = self._tokdict.copy()
|
| 556 |
+
ret._parent = self._parent
|
| 557 |
+
ret._all_names |= self._all_names
|
| 558 |
+
ret._name = self._name
|
| 559 |
+
return ret
|
| 560 |
+
|
| 561 |
+
def deepcopy(self) -> "ParseResults":
|
| 562 |
+
"""
|
| 563 |
+
Returns a new deep copy of a :class:`ParseResults` object.
|
| 564 |
+
"""
|
| 565 |
+
ret = self.copy()
|
| 566 |
+
# replace values with copies if they are of known mutable types
|
| 567 |
+
for i, obj in enumerate(self._toklist):
|
| 568 |
+
if isinstance(obj, ParseResults):
|
| 569 |
+
self._toklist[i] = obj.deepcopy()
|
| 570 |
+
elif isinstance(obj, (str, bytes)):
|
| 571 |
+
pass
|
| 572 |
+
elif isinstance(obj, MutableMapping):
|
| 573 |
+
self._toklist[i] = dest = type(obj)()
|
| 574 |
+
for k, v in obj.items():
|
| 575 |
+
dest[k] = v.deepcopy() if isinstance(v, ParseResults) else v
|
| 576 |
+
elif isinstance(obj, Container):
|
| 577 |
+
self._toklist[i] = type(obj)(
|
| 578 |
+
v.deepcopy() if isinstance(v, ParseResults) else v for v in obj
|
| 579 |
+
)
|
| 580 |
+
return ret
|
| 581 |
+
|
| 582 |
+
def get_name(self):
|
| 583 |
+
r"""
|
| 584 |
+
Returns the results name for this token expression. Useful when several
|
| 585 |
+
different expressions might match at a particular location.
|
| 586 |
+
|
| 587 |
+
Example::
|
| 588 |
+
|
| 589 |
+
integer = Word(nums)
|
| 590 |
+
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
|
| 591 |
+
house_number_expr = Suppress('#') + Word(nums, alphanums)
|
| 592 |
+
user_data = (Group(house_number_expr)("house_number")
|
| 593 |
+
| Group(ssn_expr)("ssn")
|
| 594 |
+
| Group(integer)("age"))
|
| 595 |
+
user_info = user_data[1, ...]
|
| 596 |
+
|
| 597 |
+
result = user_info.parse_string("22 111-22-3333 #221B")
|
| 598 |
+
for item in result:
|
| 599 |
+
print(item.get_name(), ':', item[0])
|
| 600 |
+
|
| 601 |
+
prints::
|
| 602 |
+
|
| 603 |
+
age : 22
|
| 604 |
+
ssn : 111-22-3333
|
| 605 |
+
house_number : 221B
|
| 606 |
+
"""
|
| 607 |
+
if self._name:
|
| 608 |
+
return self._name
|
| 609 |
+
elif self._parent:
|
| 610 |
+
par: "ParseResults" = self._parent
|
| 611 |
+
parent_tokdict_items = par._tokdict.items()
|
| 612 |
+
return next(
|
| 613 |
+
(
|
| 614 |
+
k
|
| 615 |
+
for k, vlist in parent_tokdict_items
|
| 616 |
+
for v, loc in vlist
|
| 617 |
+
if v is self
|
| 618 |
+
),
|
| 619 |
+
None,
|
| 620 |
+
)
|
| 621 |
+
elif (
|
| 622 |
+
len(self) == 1
|
| 623 |
+
and len(self._tokdict) == 1
|
| 624 |
+
and next(iter(self._tokdict.values()))[0][1] in (0, -1)
|
| 625 |
+
):
|
| 626 |
+
return next(iter(self._tokdict.keys()))
|
| 627 |
+
else:
|
| 628 |
+
return None
|
| 629 |
+
|
| 630 |
+
def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
|
| 631 |
+
"""
|
| 632 |
+
Diagnostic method for listing out the contents of
|
| 633 |
+
a :class:`ParseResults`. Accepts an optional ``indent`` argument so
|
| 634 |
+
that this string can be embedded in a nested display of other data.
|
| 635 |
+
|
| 636 |
+
Example::
|
| 637 |
+
|
| 638 |
+
integer = Word(nums)
|
| 639 |
+
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
|
| 640 |
+
|
| 641 |
+
result = date_str.parse_string('1999/12/31')
|
| 642 |
+
print(result.dump())
|
| 643 |
+
|
| 644 |
+
prints::
|
| 645 |
+
|
| 646 |
+
['1999', '/', '12', '/', '31']
|
| 647 |
+
- day: '31'
|
| 648 |
+
- month: '12'
|
| 649 |
+
- year: '1999'
|
| 650 |
+
"""
|
| 651 |
+
out = []
|
| 652 |
+
NL = "\n"
|
| 653 |
+
out.append(indent + str(self.as_list()) if include_list else "")
|
| 654 |
+
|
| 655 |
+
if full:
|
| 656 |
+
if self.haskeys():
|
| 657 |
+
items = sorted((str(k), v) for k, v in self.items())
|
| 658 |
+
for k, v in items:
|
| 659 |
+
if out:
|
| 660 |
+
out.append(NL)
|
| 661 |
+
out.append(f"{indent}{(' ' * _depth)}- {k}: ")
|
| 662 |
+
if isinstance(v, ParseResults):
|
| 663 |
+
if v:
|
| 664 |
+
out.append(
|
| 665 |
+
v.dump(
|
| 666 |
+
indent=indent,
|
| 667 |
+
full=full,
|
| 668 |
+
include_list=include_list,
|
| 669 |
+
_depth=_depth + 1,
|
| 670 |
+
)
|
| 671 |
+
)
|
| 672 |
+
else:
|
| 673 |
+
out.append(str(v))
|
| 674 |
+
else:
|
| 675 |
+
out.append(repr(v))
|
| 676 |
+
if any(isinstance(vv, ParseResults) for vv in self):
|
| 677 |
+
v = self
|
| 678 |
+
for i, vv in enumerate(v):
|
| 679 |
+
if isinstance(vv, ParseResults):
|
| 680 |
+
out.append(
|
| 681 |
+
"\n{}{}[{}]:\n{}{}{}".format(
|
| 682 |
+
indent,
|
| 683 |
+
(" " * (_depth)),
|
| 684 |
+
i,
|
| 685 |
+
indent,
|
| 686 |
+
(" " * (_depth + 1)),
|
| 687 |
+
vv.dump(
|
| 688 |
+
indent=indent,
|
| 689 |
+
full=full,
|
| 690 |
+
include_list=include_list,
|
| 691 |
+
_depth=_depth + 1,
|
| 692 |
+
),
|
| 693 |
+
)
|
| 694 |
+
)
|
| 695 |
+
else:
|
| 696 |
+
out.append(
|
| 697 |
+
"\n%s%s[%d]:\n%s%s%s"
|
| 698 |
+
% (
|
| 699 |
+
indent,
|
| 700 |
+
(" " * (_depth)),
|
| 701 |
+
i,
|
| 702 |
+
indent,
|
| 703 |
+
(" " * (_depth + 1)),
|
| 704 |
+
str(vv),
|
| 705 |
+
)
|
| 706 |
+
)
|
| 707 |
+
|
| 708 |
+
return "".join(out)
|
| 709 |
+
|
| 710 |
+
def pprint(self, *args, **kwargs):
|
| 711 |
+
"""
|
| 712 |
+
Pretty-printer for parsed results as a list, using the
|
| 713 |
+
`pprint <https://docs.python.org/3/library/pprint.html>`_ module.
|
| 714 |
+
Accepts additional positional or keyword args as defined for
|
| 715 |
+
`pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
|
| 716 |
+
|
| 717 |
+
Example::
|
| 718 |
+
|
| 719 |
+
ident = Word(alphas, alphanums)
|
| 720 |
+
num = Word(nums)
|
| 721 |
+
func = Forward()
|
| 722 |
+
term = ident | num | Group('(' + func + ')')
|
| 723 |
+
func <<= ident + Group(Optional(DelimitedList(term)))
|
| 724 |
+
result = func.parse_string("fna a,b,(fnb c,d,200),100")
|
| 725 |
+
result.pprint(width=40)
|
| 726 |
+
|
| 727 |
+
prints::
|
| 728 |
+
|
| 729 |
+
['fna',
|
| 730 |
+
['a',
|
| 731 |
+
'b',
|
| 732 |
+
['(', 'fnb', ['c', 'd', '200'], ')'],
|
| 733 |
+
'100']]
|
| 734 |
+
"""
|
| 735 |
+
pprint.pprint(self.as_list(), *args, **kwargs)
|
| 736 |
+
|
| 737 |
+
# add support for pickle protocol
|
| 738 |
+
def __getstate__(self):
|
| 739 |
+
return (
|
| 740 |
+
self._toklist,
|
| 741 |
+
(
|
| 742 |
+
self._tokdict.copy(),
|
| 743 |
+
None,
|
| 744 |
+
self._all_names,
|
| 745 |
+
self._name,
|
| 746 |
+
),
|
| 747 |
+
)
|
| 748 |
+
|
| 749 |
+
def __setstate__(self, state):
|
| 750 |
+
self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
|
| 751 |
+
self._all_names = set(inAccumNames)
|
| 752 |
+
self._parent = None
|
| 753 |
+
|
| 754 |
+
def __getnewargs__(self):
|
| 755 |
+
return self._toklist, self._name
|
| 756 |
+
|
| 757 |
+
def __dir__(self):
|
| 758 |
+
return dir(type(self)) + list(self.keys())
|
| 759 |
+
|
| 760 |
+
@classmethod
|
| 761 |
+
def from_dict(cls, other, name=None) -> "ParseResults":
|
| 762 |
+
"""
|
| 763 |
+
Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
|
| 764 |
+
name-value relations as results names. If an optional ``name`` argument is
|
| 765 |
+
given, a nested ``ParseResults`` will be returned.
|
| 766 |
+
"""
|
| 767 |
+
|
| 768 |
+
def is_iterable(obj):
|
| 769 |
+
try:
|
| 770 |
+
iter(obj)
|
| 771 |
+
except Exception:
|
| 772 |
+
return False
|
| 773 |
+
# str's are iterable, but in pyparsing, we don't want to iterate over them
|
| 774 |
+
else:
|
| 775 |
+
return not isinstance(obj, str_type)
|
| 776 |
+
|
| 777 |
+
ret = cls([])
|
| 778 |
+
for k, v in other.items():
|
| 779 |
+
if isinstance(v, Mapping):
|
| 780 |
+
ret += cls.from_dict(v, name=k)
|
| 781 |
+
else:
|
| 782 |
+
ret += cls([v], name=k, asList=is_iterable(v))
|
| 783 |
+
if name is not None:
|
| 784 |
+
ret = cls([ret], name=name)
|
| 785 |
+
return ret
|
| 786 |
+
|
| 787 |
+
asList = as_list
|
| 788 |
+
"""Deprecated - use :class:`as_list`"""
|
| 789 |
+
asDict = as_dict
|
| 790 |
+
"""Deprecated - use :class:`as_dict`"""
|
| 791 |
+
getName = get_name
|
| 792 |
+
"""Deprecated - use :class:`get_name`"""
|
| 793 |
+
|
| 794 |
+
|
| 795 |
+
MutableMapping.register(ParseResults)
|
| 796 |
+
MutableSequence.register(ParseResults)
|
.venv/Lib/site-packages/pip/_vendor/pyparsing/testing.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# testing.py
|
| 2 |
+
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
import typing
|
| 5 |
+
|
| 6 |
+
from .core import (
|
| 7 |
+
ParserElement,
|
| 8 |
+
ParseException,
|
| 9 |
+
Keyword,
|
| 10 |
+
__diag__,
|
| 11 |
+
__compat__,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class pyparsing_test:
|
| 16 |
+
"""
|
| 17 |
+
namespace class for classes useful in writing unit tests
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
class reset_pyparsing_context:
|
| 21 |
+
"""
|
| 22 |
+
Context manager to be used when writing unit tests that modify pyparsing config values:
|
| 23 |
+
- packrat parsing
|
| 24 |
+
- bounded recursion parsing
|
| 25 |
+
- default whitespace characters.
|
| 26 |
+
- default keyword characters
|
| 27 |
+
- literal string auto-conversion class
|
| 28 |
+
- __diag__ settings
|
| 29 |
+
|
| 30 |
+
Example::
|
| 31 |
+
|
| 32 |
+
with reset_pyparsing_context():
|
| 33 |
+
# test that literals used to construct a grammar are automatically suppressed
|
| 34 |
+
ParserElement.inlineLiteralsUsing(Suppress)
|
| 35 |
+
|
| 36 |
+
term = Word(alphas) | Word(nums)
|
| 37 |
+
group = Group('(' + term[...] + ')')
|
| 38 |
+
|
| 39 |
+
# assert that the '()' characters are not included in the parsed tokens
|
| 40 |
+
self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
|
| 41 |
+
|
| 42 |
+
# after exiting context manager, literals are converted to Literal expressions again
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
def __init__(self):
|
| 46 |
+
self._save_context = {}
|
| 47 |
+
|
| 48 |
+
def save(self):
|
| 49 |
+
self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
|
| 50 |
+
self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
|
| 51 |
+
|
| 52 |
+
self._save_context[
|
| 53 |
+
"literal_string_class"
|
| 54 |
+
] = ParserElement._literalStringClass
|
| 55 |
+
|
| 56 |
+
self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
|
| 57 |
+
|
| 58 |
+
self._save_context["packrat_enabled"] = ParserElement._packratEnabled
|
| 59 |
+
if ParserElement._packratEnabled:
|
| 60 |
+
self._save_context[
|
| 61 |
+
"packrat_cache_size"
|
| 62 |
+
] = ParserElement.packrat_cache.size
|
| 63 |
+
else:
|
| 64 |
+
self._save_context["packrat_cache_size"] = None
|
| 65 |
+
self._save_context["packrat_parse"] = ParserElement._parse
|
| 66 |
+
self._save_context[
|
| 67 |
+
"recursion_enabled"
|
| 68 |
+
] = ParserElement._left_recursion_enabled
|
| 69 |
+
|
| 70 |
+
self._save_context["__diag__"] = {
|
| 71 |
+
name: getattr(__diag__, name) for name in __diag__._all_names
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
self._save_context["__compat__"] = {
|
| 75 |
+
"collect_all_And_tokens": __compat__.collect_all_And_tokens
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
return self
|
| 79 |
+
|
| 80 |
+
def restore(self):
|
| 81 |
+
# reset pyparsing global state
|
| 82 |
+
if (
|
| 83 |
+
ParserElement.DEFAULT_WHITE_CHARS
|
| 84 |
+
!= self._save_context["default_whitespace"]
|
| 85 |
+
):
|
| 86 |
+
ParserElement.set_default_whitespace_chars(
|
| 87 |
+
self._save_context["default_whitespace"]
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
|
| 91 |
+
|
| 92 |
+
Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
|
| 93 |
+
ParserElement.inlineLiteralsUsing(
|
| 94 |
+
self._save_context["literal_string_class"]
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
for name, value in self._save_context["__diag__"].items():
|
| 98 |
+
(__diag__.enable if value else __diag__.disable)(name)
|
| 99 |
+
|
| 100 |
+
ParserElement._packratEnabled = False
|
| 101 |
+
if self._save_context["packrat_enabled"]:
|
| 102 |
+
ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
|
| 103 |
+
else:
|
| 104 |
+
ParserElement._parse = self._save_context["packrat_parse"]
|
| 105 |
+
ParserElement._left_recursion_enabled = self._save_context[
|
| 106 |
+
"recursion_enabled"
|
| 107 |
+
]
|
| 108 |
+
|
| 109 |
+
__compat__.collect_all_And_tokens = self._save_context["__compat__"]
|
| 110 |
+
|
| 111 |
+
return self
|
| 112 |
+
|
| 113 |
+
def copy(self):
    """Return a new context object carrying a copy of this saved state."""
    duplicate = type(self)()
    duplicate._save_context.update(self._save_context)
    return duplicate
|
| 117 |
+
|
| 118 |
+
def __enter__(self):
    """Context-manager entry: snapshot pyparsing state via save()."""
    return self.save()
|
| 120 |
+
|
| 121 |
+
def __exit__(self, *args):
    """Context-manager exit: put the snapshot taken on entry back in place."""
    self.restore()
|
| 123 |
+
|
| 124 |
+
class TestParseResultsAsserts:
|
| 125 |
+
"""
|
| 126 |
+
A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
def assertParseResultsEquals(
    self, result, expected_list=None, expected_dict=None, msg=None
):
    """
    Unit test assertion comparing a :class:`ParseResults` object against an
    optional ``expected_list`` (via ``as_list()``) and/or an optional
    ``expected_dict`` of results names (via ``as_dict()``).
    """
    # run each comparison only when an expectation was supplied
    checks = (
        (expected_list, result.as_list),
        (expected_dict, result.as_dict),
    )
    for expected, actual_fn in checks:
        if expected is not None:
            self.assertEqual(expected, actual_fn(), msg=msg)
|
| 140 |
+
|
| 141 |
+
def assertParseAndCheckList(
    self, expr, test_string, expected_list, msg=None, verbose=True
):
    """
    Convenience wrapper assert: parse ``test_string`` with ``expr``
    (consuming the whole input) and assert that the resulting
    ``ParseResults.asList()`` equals ``expected_list``.
    """
    result = expr.parse_string(test_string, parse_all=True)
    # verbose shows the full dump; otherwise just the token list
    print(result.dump() if verbose else result.as_list())
    self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
|
| 154 |
+
|
| 155 |
+
def assertParseAndCheckDict(
    self, expr, test_string, expected_dict, msg=None, verbose=True
):
    """
    Convenience wrapper assert: parse ``test_string`` with ``expr``
    (consuming the whole input) and assert that the resulting
    ``ParseResults.asDict()`` equals ``expected_dict``.
    """
    # CONSISTENCY FIX: use the snake_case parse_all keyword (as in
    # assertParseAndCheckList) instead of the legacy camelCase parseAll alias.
    result = expr.parse_string(test_string, parse_all=True)
    if verbose:
        print(result.dump())
    else:
        print(result.as_list())
    self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
|
| 168 |
+
|
| 169 |
+
def assertRunTestResults(
    self, run_tests_report, expected_parse_results=None, msg=None
):
    """
    Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
    list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
    with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
    Finally, asserts that the overall ``runTests()`` success value is ``True``.

    :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
    :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
    """
    run_test_success, run_test_results = run_tests_report

    if expected_parse_results is not None:
        # pair each (test_string, result) report tuple with its expected entry;
        # zip silently truncates to the shorter of the two lists
        merged = [
            (*rpt, expected)
            for rpt, expected in zip(run_test_results, expected_parse_results)
        ]
        for test_string, result, expected in merged:
            # expected should be a tuple containing a list and/or a dict or an exception,
            # and optional failure message string
            # an empty tuple will skip any result validation
            fail_msg = next(
                (exp for exp in expected if isinstance(exp, str)), None
            )
            # an Exception *class* in the expected tuple means this test case
            # is expected to fail with that exception type
            expected_exception = next(
                (
                    exp
                    for exp in expected
                    if isinstance(exp, type) and issubclass(exp, Exception)
                ),
                None,
            )
            if expected_exception is not None:
                # runTests stores the raised exception as the result;
                # re-raise it inside assertRaises to validate its type
                with self.assertRaises(
                    expected_exception=expected_exception, msg=fail_msg or msg
                ):
                    if isinstance(result, Exception):
                        raise result
            else:
                expected_list = next(
                    (exp for exp in expected if isinstance(exp, list)), None
                )
                expected_dict = next(
                    (exp for exp in expected if isinstance(exp, dict)), None
                )
                if (expected_list, expected_dict) != (None, None):
                    self.assertParseResultsEquals(
                        result,
                        expected_list=expected_list,
                        expected_dict=expected_dict,
                        msg=fail_msg or msg,
                    )
                else:
                    # warning here maybe?
                    print(f"no validation for {test_string!r}")

    # do this last, in case some specific test results can be reported instead
    self.assertTrue(
        run_test_success, msg=msg if msg is not None else "failed runTests"
    )
|
| 231 |
+
|
| 232 |
+
@contextmanager
def assertRaisesParseException(self, exc_type=ParseException, msg=None):
    """
    Context manager asserting that the enclosed block raises ``exc_type``
    (default: :class:`ParseException`).
    """
    with self.assertRaises(exc_type, msg=msg):
        yield
|
| 236 |
+
|
| 237 |
+
@staticmethod
|
| 238 |
+
def with_line_numbers(
|
| 239 |
+
s: str,
|
| 240 |
+
start_line: typing.Optional[int] = None,
|
| 241 |
+
end_line: typing.Optional[int] = None,
|
| 242 |
+
expand_tabs: bool = True,
|
| 243 |
+
eol_mark: str = "|",
|
| 244 |
+
mark_spaces: typing.Optional[str] = None,
|
| 245 |
+
mark_control: typing.Optional[str] = None,
|
| 246 |
+
) -> str:
|
| 247 |
+
"""
|
| 248 |
+
Helpful method for debugging a parser - prints a string with line and column numbers.
|
| 249 |
+
(Line and column numbers are 1-based.)
|
| 250 |
+
|
| 251 |
+
:param s: tuple(bool, str - string to be printed with line and column numbers
|
| 252 |
+
:param start_line: int - (optional) starting line number in s to print (default=1)
|
| 253 |
+
:param end_line: int - (optional) ending line number in s to print (default=len(s))
|
| 254 |
+
:param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
|
| 255 |
+
:param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
|
| 256 |
+
:param mark_spaces: str - (optional) special character to display in place of spaces
|
| 257 |
+
:param mark_control: str - (optional) convert non-printing control characters to a placeholding
|
| 258 |
+
character; valid values:
|
| 259 |
+
- "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
|
| 260 |
+
- any single character string - replace control characters with given string
|
| 261 |
+
- None (default) - string is displayed as-is
|
| 262 |
+
|
| 263 |
+
:return: str - input string with leading line numbers and column number headers
|
| 264 |
+
"""
|
| 265 |
+
if expand_tabs:
|
| 266 |
+
s = s.expandtabs()
|
| 267 |
+
if mark_control is not None:
|
| 268 |
+
mark_control = typing.cast(str, mark_control)
|
| 269 |
+
if mark_control == "unicode":
|
| 270 |
+
transtable_map = {
|
| 271 |
+
c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))
|
| 272 |
+
}
|
| 273 |
+
transtable_map[127] = 0x2421
|
| 274 |
+
tbl = str.maketrans(transtable_map)
|
| 275 |
+
eol_mark = ""
|
| 276 |
+
else:
|
| 277 |
+
ord_mark_control = ord(mark_control)
|
| 278 |
+
tbl = str.maketrans(
|
| 279 |
+
{c: ord_mark_control for c in list(range(0, 32)) + [127]}
|
| 280 |
+
)
|
| 281 |
+
s = s.translate(tbl)
|
| 282 |
+
if mark_spaces is not None and mark_spaces != " ":
|
| 283 |
+
if mark_spaces == "unicode":
|
| 284 |
+
tbl = str.maketrans({9: 0x2409, 32: 0x2423})
|
| 285 |
+
s = s.translate(tbl)
|
| 286 |
+
else:
|
| 287 |
+
s = s.replace(" ", mark_spaces)
|
| 288 |
+
if start_line is None:
|
| 289 |
+
start_line = 1
|
| 290 |
+
if end_line is None:
|
| 291 |
+
end_line = len(s)
|
| 292 |
+
end_line = min(end_line, len(s))
|
| 293 |
+
start_line = min(max(1, start_line), end_line)
|
| 294 |
+
|
| 295 |
+
if mark_control != "unicode":
|
| 296 |
+
s_lines = s.splitlines()[start_line - 1 : end_line]
|
| 297 |
+
else:
|
| 298 |
+
s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
|
| 299 |
+
if not s_lines:
|
| 300 |
+
return ""
|
| 301 |
+
|
| 302 |
+
lineno_width = len(str(end_line))
|
| 303 |
+
max_line_len = max(len(line) for line in s_lines)
|
| 304 |
+
lead = " " * (lineno_width + 1)
|
| 305 |
+
if max_line_len >= 99:
|
| 306 |
+
header0 = (
|
| 307 |
+
lead
|
| 308 |
+
+ "".join(
|
| 309 |
+
f"{' ' * 99}{(i + 1) % 100}"
|
| 310 |
+
for i in range(max(max_line_len // 100, 1))
|
| 311 |
+
)
|
| 312 |
+
+ "\n"
|
| 313 |
+
)
|
| 314 |
+
else:
|
| 315 |
+
header0 = ""
|
| 316 |
+
header1 = (
|
| 317 |
+
header0
|
| 318 |
+
+ lead
|
| 319 |
+
+ "".join(f" {(i + 1) % 10}" for i in range(-(-max_line_len // 10)))
|
| 320 |
+
+ "\n"
|
| 321 |
+
)
|
| 322 |
+
header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
|
| 323 |
+
return (
|
| 324 |
+
header1
|
| 325 |
+
+ header2
|
| 326 |
+
+ "\n".join(
|
| 327 |
+
f"{i:{lineno_width}d}:{line}{eol_mark}"
|
| 328 |
+
for i, line in enumerate(s_lines, start=start_line)
|
| 329 |
+
)
|
| 330 |
+
+ "\n"
|
| 331 |
+
)
|