Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- evalkit_llava/lib/python3.10/site-packages/_distutils_hack/__init__.py +239 -0
- evalkit_llava/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/_distutils_hack/override.py +1 -0
- evalkit_llava/lib/python3.10/site-packages/pip-25.0.1.dist-info/LICENSE.txt +20 -0
- evalkit_llava/lib/python3.10/site-packages/pip-25.0.1.dist-info/METADATA +90 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py +82 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/cmdline.py +668 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/console.py +70 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/filter.py +70 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/other.py +160 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/lexer.py +963 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/modeline.py +43 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/plugin.py +72 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/regexopt.py +91 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/sphinxext.py +247 -0
- evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/token.py +214 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_core_metadata.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_itertools.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_path.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_shutil.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_static.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/build_meta.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/errors.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/launch.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/logging.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/monkey.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/msvc.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/namespaces.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/_requirestxt.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/bdist_egg.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/bdist_wheel.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/build.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/build_clib.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/build_ext.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/build_py.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/develop.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/editable_wheel.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/install.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/install_egg_info.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/sdist.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/setopt.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/_requirestxt.py +131 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/alias.py +77 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/bdist_egg.py +479 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/bdist_wheel.py +610 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/build.py +135 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/build_clib.py +103 -0
- evalkit_llava/lib/python3.10/site-packages/setuptools/command/build_py.py +400 -0
evalkit_llava/lib/python3.10/site-packages/_distutils_hack/__init__.py
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Keep this module cheap to import: it runs during interpreter startup
# (via a .pth hook), so no costly imports here.
import os
import sys

# Issue template used by the deprecation warnings below.
report_url = "https://github.com/pypa/setuptools/issues/new?template=distutils-deprecation.yml"
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def warn_distutils_present():
    """Warn when stdlib ``distutils`` was imported before setuptools."""
    if 'distutils' not in sys.modules:
        return
    import warnings

    msg = (
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils."
    )
    warnings.warn(msg)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def clear_distutils():
    """Remove ``distutils`` and every ``distutils.*`` entry from ``sys.modules``."""
    if 'distutils' not in sys.modules:
        return
    import warnings

    warnings.warn(
        "Setuptools is replacing distutils. Support for replacing "
        "an already imported distutils is deprecated. In the future, "
        "this condition will fail. "
        f"Register concerns at {report_url}"
    )
    # Snapshot the doomed names first: never mutate sys.modules while
    # iterating over it.
    doomed = [
        mod_name
        for mod_name in sys.modules
        if mod_name == "distutils" or mod_name.startswith("distutils.")
    ]
    for mod_name in doomed:
        del sys.modules[mod_name]
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def enabled():
    """
    Allow selection of distutils by environment variable.
    """
    choice = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
    if choice == 'stdlib':
        import warnings

        warnings.warn(
            "Reliance on distutils from stdlib is deprecated. Users "
            "must rely on setuptools to provide the distutils module. "
            "Avoid importing distutils or import setuptools first, "
            "and avoid setting SETUPTOOLS_USE_DISTUTILS=stdlib. "
            f"Register concerns at {report_url}"
        )
    return choice == 'local'
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def ensure_local_distutils():
    """Force ``distutils`` to resolve to ``setuptools._distutils``."""
    import importlib

    clear_distutils()

    # With the DistutilsMetaFinder installed, importing distutils causes it
    # to be loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')

    # Sanity-check that submodules resolve into the vendored copy.
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    assert 'setuptools._distutils.log' not in sys.modules
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if not enabled():
        return
    warn_distutils_present()
    ensure_local_distutils()
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class _TrivialRe:
|
| 93 |
+
def __init__(self, *patterns) -> None:
|
| 94 |
+
self._patterns = patterns
|
| 95 |
+
|
| 96 |
+
def match(self, string):
|
| 97 |
+
return all(pat in string for pat in self._patterns)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class DistutilsMetaFinder:
    """Meta-path finder that redirects ``distutils`` imports as needed."""

    def find_spec(self, fullname, path, target=None):
        # Optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return None

        # Dispatch to a spec_for_<fullname> hook when one is defined;
        # unknown modules fall through to the next finder.
        hook = getattr(self, f'spec_for_{fullname}', None)
        if hook is None:
            return None
        return hook()

    def spec_for_distutils(self):
        if self.is_cpython():
            return None

        import importlib
        import importlib.abc
        import importlib.util

        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # setuptools._distutils may be absent when:
            # - an older Setuptools without a local distutils is taking
            #   precedence (ref #2957), or
            # - path manipulation during sitecustomize removed setuptools
            #   after this hook was loaded (ref #2980).
            # In either case, fall back to stdlib behavior.
            return None

        class DistutilsLoader(importlib.abc.Loader):
            def create_module(self, spec):
                mod.__name__ = 'distutils'
                return mod

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )

    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        return os.path.isfile('pybuilddir.txt')

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if sys.version_info >= (3, 12) or self.pip_imported_during_build():
            return
        clear_distutils()
        # Disable the local-distutils hook for the remainder of this session.
        self.spec_for_distutils = lambda: None

    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback

        return any(
            cls.frame_file_is_setup(frame)
            for frame, line in traceback.walk_stack(None)
        )

    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')

    def spec_for_sensitive_tests(self):
        """
        Ensure stdlib distutils when running select tests under CPython.

        python/cpython#91169
        """
        clear_distutils()
        self.spec_for_distutils = lambda: None

    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
# Register the stdlib-distutils fallback hook for each sensitive
# CPython test module listed on the class.
for name in DistutilsMetaFinder.sensitive_tests:
    setattr(DistutilsMetaFinder, f'spec_for_{name}',
            DistutilsMetaFinder.spec_for_sensitive_tests)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
# Singleton finder shared by all shim operations below.
DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    """Install ``DISTUTILS_FINDER`` on ``sys.meta_path`` exactly once.

    Idempotent: a finder already present is not inserted a second time.
    """
    # Explicit conditional replaces the original
    # `DISTUTILS_FINDER in sys.meta_path or insert_shim()` — an
    # expression evaluated purely for its side effect.
    if DISTUTILS_FINDER not in sys.meta_path:
        insert_shim()
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
class shim:
    # Context manager: installs the distutils finder on entry and removes
    # it on exit, so the redirection is active only inside the `with` body.
    def __enter__(self) -> None:
        # Put DISTUTILS_FINDER at the front of sys.meta_path.
        insert_shim()

    def __exit__(self, exc: object, value: object, tb: object) -> None:
        # Best-effort removal; tolerates the finder already being gone.
        _remove_shim()
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def insert_shim():
    # Prepend so the distutils finder wins over every other meta-path entry.
    sys.meta_path.insert(0, DISTUTILS_FINDER)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def _remove_shim():
    # Detach the finder; a ValueError means it was never installed (or was
    # already removed), which is fine — removal is best-effort.
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        pass


if sys.version_info < (3, 12):
    # DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632)
    remove_shim = _remove_shim
|
evalkit_llava/lib/python3.10/site-packages/_distutils_hack/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (8.21 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/_distutils_hack/__pycache__/override.cpython-310.pyc
ADDED
|
Binary file (200 Bytes). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/_distutils_hack/override.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Importing _distutils_hack and invoking do_override() makes the vendored
# distutils take precedence over the stdlib copy (when enabled).
__import__('_distutils_hack').do_override()
|
evalkit_llava/lib/python3.10/site-packages/pip-25.0.1.dist-info/LICENSE.txt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright (c) 2008-present The pip developers (see AUTHORS.txt file)
|
| 2 |
+
|
| 3 |
+
Permission is hereby granted, free of charge, to any person obtaining
|
| 4 |
+
a copy of this software and associated documentation files (the
|
| 5 |
+
"Software"), to deal in the Software without restriction, including
|
| 6 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
| 7 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
| 8 |
+
permit persons to whom the Software is furnished to do so, subject to
|
| 9 |
+
the following conditions:
|
| 10 |
+
|
| 11 |
+
The above copyright notice and this permission notice shall be
|
| 12 |
+
included in all copies or substantial portions of the Software.
|
| 13 |
+
|
| 14 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 15 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 16 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
| 17 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
| 18 |
+
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
| 19 |
+
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
| 20 |
+
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
evalkit_llava/lib/python3.10/site-packages/pip-25.0.1.dist-info/METADATA
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.2
|
| 2 |
+
Name: pip
|
| 3 |
+
Version: 25.0.1
|
| 4 |
+
Summary: The PyPA recommended tool for installing Python packages.
|
| 5 |
+
Author-email: The pip developers <distutils-sig@python.org>
|
| 6 |
+
License: MIT
|
| 7 |
+
Project-URL: Homepage, https://pip.pypa.io/
|
| 8 |
+
Project-URL: Documentation, https://pip.pypa.io
|
| 9 |
+
Project-URL: Source, https://github.com/pypa/pip
|
| 10 |
+
Project-URL: Changelog, https://pip.pypa.io/en/stable/news/
|
| 11 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 12 |
+
Classifier: Intended Audience :: Developers
|
| 13 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 14 |
+
Classifier: Topic :: Software Development :: Build Tools
|
| 15 |
+
Classifier: Programming Language :: Python
|
| 16 |
+
Classifier: Programming Language :: Python :: 3
|
| 17 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 18 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 24 |
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
| 25 |
+
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
| 26 |
+
Requires-Python: >=3.8
|
| 27 |
+
Description-Content-Type: text/x-rst
|
| 28 |
+
License-File: LICENSE.txt
|
| 29 |
+
License-File: AUTHORS.txt
|
| 30 |
+
|
| 31 |
+
pip - The Python Package Installer
|
| 32 |
+
==================================
|
| 33 |
+
|
| 34 |
+
.. |pypi-version| image:: https://img.shields.io/pypi/v/pip.svg
|
| 35 |
+
:target: https://pypi.org/project/pip/
|
| 36 |
+
:alt: PyPI
|
| 37 |
+
|
| 38 |
+
.. |python-versions| image:: https://img.shields.io/pypi/pyversions/pip
|
| 39 |
+
:target: https://pypi.org/project/pip
|
| 40 |
+
:alt: PyPI - Python Version
|
| 41 |
+
|
| 42 |
+
.. |docs-badge| image:: https://readthedocs.org/projects/pip/badge/?version=latest
|
| 43 |
+
:target: https://pip.pypa.io/en/latest
|
| 44 |
+
:alt: Documentation
|
| 45 |
+
|
| 46 |
+
|pypi-version| |python-versions| |docs-badge|
|
| 47 |
+
|
| 48 |
+
pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
|
| 49 |
+
|
| 50 |
+
Please take a look at our documentation for how to install and use pip:
|
| 51 |
+
|
| 52 |
+
* `Installation`_
|
| 53 |
+
* `Usage`_
|
| 54 |
+
|
| 55 |
+
We release updates regularly, with a new version every 3 months. Find more details in our documentation:
|
| 56 |
+
|
| 57 |
+
* `Release notes`_
|
| 58 |
+
* `Release process`_
|
| 59 |
+
|
| 60 |
+
If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms:
|
| 61 |
+
|
| 62 |
+
* `Issue tracking`_
|
| 63 |
+
* `Discourse channel`_
|
| 64 |
+
* `User IRC`_
|
| 65 |
+
|
| 66 |
+
If you want to get involved head over to GitHub to get the source code, look at our development documentation and feel free to jump on the developer mailing lists and chat rooms:
|
| 67 |
+
|
| 68 |
+
* `GitHub page`_
|
| 69 |
+
* `Development documentation`_
|
| 70 |
+
* `Development IRC`_
|
| 71 |
+
|
| 72 |
+
Code of Conduct
|
| 73 |
+
---------------
|
| 74 |
+
|
| 75 |
+
Everyone interacting in the pip project's codebases, issue trackers, chat
|
| 76 |
+
rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
|
| 77 |
+
|
| 78 |
+
.. _package installer: https://packaging.python.org/guides/tool-recommendations/
|
| 79 |
+
.. _Python Package Index: https://pypi.org
|
| 80 |
+
.. _Installation: https://pip.pypa.io/en/stable/installation/
|
| 81 |
+
.. _Usage: https://pip.pypa.io/en/stable/
|
| 82 |
+
.. _Release notes: https://pip.pypa.io/en/stable/news.html
|
| 83 |
+
.. _Release process: https://pip.pypa.io/en/latest/development/release-process/
|
| 84 |
+
.. _GitHub page: https://github.com/pypa/pip
|
| 85 |
+
.. _Development documentation: https://pip.pypa.io/en/latest/development
|
| 86 |
+
.. _Issue tracking: https://github.com/pypa/pip/issues
|
| 87 |
+
.. _Discourse channel: https://discuss.python.org/c/packaging
|
| 88 |
+
.. _User IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa
|
| 89 |
+
.. _Development IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa-dev
|
| 90 |
+
.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/__init__.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Pygments
|
| 3 |
+
~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Pygments is a syntax highlighting package written in Python.
|
| 6 |
+
|
| 7 |
+
It is a generic syntax highlighter for general use in all kinds of software
|
| 8 |
+
such as forum systems, wikis or other applications that need to prettify
|
| 9 |
+
source code. Highlights are:
|
| 10 |
+
|
| 11 |
+
* a wide range of common languages and markup formats is supported
|
| 12 |
+
* special attention is paid to details, increasing quality by a fair amount
|
| 13 |
+
* support for new languages and formats are added easily
|
| 14 |
+
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
|
| 15 |
+
formats that PIL supports, and ANSI sequences
|
| 16 |
+
* it is usable as a command-line tool and as a library
|
| 17 |
+
* ... and it highlights even Brainfuck!
|
| 18 |
+
|
| 19 |
+
The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
|
| 20 |
+
|
| 21 |
+
.. _Pygments master branch:
|
| 22 |
+
https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
|
| 23 |
+
|
| 24 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 25 |
+
:license: BSD, see LICENSE for details.
|
| 26 |
+
"""
|
| 27 |
+
from io import StringIO, BytesIO
|
| 28 |
+
|
| 29 |
+
# Public package version (exposed e.g. via `pygmentize -V`).
__version__ = '2.18.0'
# Docstrings throughout the package use reStructuredText markup.
__docformat__ = 'restructuredtext'

__all__ = ['lex', 'format', 'highlight']
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def lex(code, lexer):
    """
    Lex `code` with the `lexer` (must be a `Lexer` instance)
    and return an iterable of tokens. Currently, this only calls
    `lexer.get_tokens()`.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError:
        # Heuristic to catch a common mistake: passing the lexer *class*
        # rather than an instance.
        from pip._vendor.pygments.lexer import RegexLexer

        passed_a_class = isinstance(lexer, type) and issubclass(lexer, RegexLexer)
        if passed_a_class:
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """
    Format ``tokens`` (an iterable of tokens) with the formatter ``formatter``
    (a `Formatter` instance).

    If ``outfile`` is given and a valid file object (an object with a
    ``write`` method), the result will be written to it, otherwise it
    is returned as a string.
    """
    try:
        if not outfile:
            # A formatter that declares an `encoding` writes bytes, so pick
            # the buffer type accordingly. Uses a conditional expression in
            # place of the legacy `cond and a or b` idiom, which silently
            # selects the wrong branch whenever `a` is falsy.
            realoutfile = BytesIO() if getattr(formatter, 'encoding', None) else StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError:
        # Heuristic to catch a common mistake: passing the formatter class
        # rather than an instance.
        from pip._vendor.pygments.formatter import Formatter
        if isinstance(formatter, type) and issubclass(formatter, Formatter):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def highlight(code, lexer, formatter, outfile=None):
    """
    This is the most high-level highlighting function. It combines `lex` and
    `format` in one function.
    """
    tokens = lex(code, lexer)
    return format(tokens, formatter, outfile)
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/cmdline.py
ADDED
|
@@ -0,0 +1,668 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.cmdline
|
| 3 |
+
~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Command line interface.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import os
|
| 12 |
+
import sys
|
| 13 |
+
import shutil
|
| 14 |
+
import argparse
|
| 15 |
+
from textwrap import dedent
|
| 16 |
+
|
| 17 |
+
from pip._vendor.pygments import __version__, highlight
|
| 18 |
+
from pip._vendor.pygments.util import ClassNotFound, OptionError, docstring_headline, \
|
| 19 |
+
guess_decode, guess_decode_from_terminal, terminal_encoding, \
|
| 20 |
+
UnclosingTextIOWrapper
|
| 21 |
+
from pip._vendor.pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
|
| 22 |
+
load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
|
| 23 |
+
from pip._vendor.pygments.lexers.special import TextLexer
|
| 24 |
+
from pip._vendor.pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
|
| 25 |
+
from pip._vendor.pygments.formatters import get_all_formatters, get_formatter_by_name, \
|
| 26 |
+
load_formatter_from_file, get_formatter_for_filename, find_formatter_class
|
| 27 |
+
from pip._vendor.pygments.formatters.terminal import TerminalFormatter
|
| 28 |
+
from pip._vendor.pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter
|
| 29 |
+
from pip._vendor.pygments.filters import get_all_filters, find_filter_class
|
| 30 |
+
from pip._vendor.pygments.styles import get_all_styles, get_style_by_name
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _parse_options(o_strs):
|
| 34 |
+
opts = {}
|
| 35 |
+
if not o_strs:
|
| 36 |
+
return opts
|
| 37 |
+
for o_str in o_strs:
|
| 38 |
+
if not o_str.strip():
|
| 39 |
+
continue
|
| 40 |
+
o_args = o_str.split(',')
|
| 41 |
+
for o_arg in o_args:
|
| 42 |
+
o_arg = o_arg.strip()
|
| 43 |
+
try:
|
| 44 |
+
o_key, o_val = o_arg.split('=', 1)
|
| 45 |
+
o_key = o_key.strip()
|
| 46 |
+
o_val = o_val.strip()
|
| 47 |
+
except ValueError:
|
| 48 |
+
opts[o_arg] = True
|
| 49 |
+
else:
|
| 50 |
+
opts[o_key] = o_val
|
| 51 |
+
return opts
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _parse_filters(f_strs):
|
| 55 |
+
filters = []
|
| 56 |
+
if not f_strs:
|
| 57 |
+
return filters
|
| 58 |
+
for f_str in f_strs:
|
| 59 |
+
if ':' in f_str:
|
| 60 |
+
fname, fopts = f_str.split(':', 1)
|
| 61 |
+
filters.append((fname, _parse_options([fopts])))
|
| 62 |
+
else:
|
| 63 |
+
filters.append((f_str, {}))
|
| 64 |
+
return filters
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _print_help(what, name):
    """Print the docstring-derived help for one lexer/formatter/filter.

    *what* selects the category; *name* is the alias to look up.
    Returns 0 on success, 1 (with a message on stderr) when the object
    cannot be found.
    """
    try:
        if what == 'lexer':
            cls = get_lexer_by_name(name)
            header = f"Help on the {cls.name} lexer:"
        elif what == 'formatter':
            cls = find_formatter_class(name)
            header = f"Help on the {cls.name} formatter:"
        elif what == 'filter':
            # Filters are reported under the alias the user gave, not the
            # class's own name.
            cls = find_filter_class(name)
            header = f"Help on the {name} filter:"
        else:
            # Unknown category: mirror the original fall-through (no output).
            return 0
        print(header)
        print(dedent(cls.__doc__))
        return 0
    except (AttributeError, ValueError):
        # find_* helpers return None for unknown names, which surfaces here
        # as an AttributeError on ``cls.name``.
        print(f"{what} not found!", file=sys.stderr)
        return 1
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _print_list(what):
    """Print a human-readable listing of one category of objects.

    *what* is one of ``'lexer'``, ``'formatter'``, ``'filter'`` or
    ``'style'``; any other value prints nothing.
    """
    if what == 'lexer':
        print()
        print("Lexers:")
        print("~~~~~~~")

        rows = []
        for fullname, names, exts, _ in get_all_lexers():
            ext_note = '(filenames ' + ', '.join(exts) + ')' if exts else ''
            rows.append((', '.join(names) + ':', fullname, ext_note))
        for row in sorted(rows):
            print(('* {}\n {} {}').format(*row))

    elif what == 'formatter':
        print()
        print("Formatters:")
        print("~~~~~~~~~~~")

        rows = []
        for cls in get_all_formatters():
            file_note = ('(filenames ' + ', '.join(cls.filenames) + ')'
                         if cls.filenames else '')
            rows.append((', '.join(cls.aliases) + ':',
                         docstring_headline(cls), file_note))
        for row in sorted(rows):
            print(('* {}\n {} {}').format(*row))

    elif what == 'filter':
        print()
        print("Filters:")
        print("~~~~~~~~")

        for name in get_all_filters():
            cls = find_filter_class(name)
            print("* " + name + ':')
            print(f" {docstring_headline(cls)}")

    elif what == 'style':
        print()
        print("Styles:")
        print("~~~~~~~")

        for name in get_all_styles():
            cls = get_style_by_name(name)
            print("* " + name + ':')
            print(f" {docstring_headline(cls)}")
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def _print_list_as_json(requested_items):
    """Dump machine-readable metadata for the requested categories.

    *requested_items* is an iterable of category names (``'lexer'``,
    ``'formatter'``, ``'filter'``, ``'style'``); the combined result is
    written to stdout as one JSON object.
    """
    import json
    result = {}

    if 'lexer' in requested_items:
        result['lexers'] = {
            fullname: {
                'aliases': names,
                'filenames': filenames,
                'mimetypes': mimetypes,
            }
            for fullname, names, filenames, mimetypes in get_all_lexers()
        }

    if 'formatter' in requested_items:
        result['formatters'] = {
            cls.name: {
                'aliases': cls.aliases,
                'filenames': cls.filenames,
                'doc': docstring_headline(cls),
            }
            for cls in get_all_formatters()
        }

    if 'filter' in requested_items:
        result['filters'] = {
            name: {'doc': docstring_headline(find_filter_class(name))}
            for name in get_all_filters()
        }

    if 'style' in requested_items:
        result['styles'] = {
            name: {'doc': docstring_headline(get_style_by_name(name))}
            for name in get_all_styles()
        }

    json.dump(result, sys.stdout)
|
| 181 |
+
|
| 182 |
+
def main_inner(parser, argns):
    """Execute one pygmentize invocation described by parsed options.

    ``parser`` is the argparse parser, used only to print usage on bad
    option combinations; ``argns`` is its parsed namespace.  Returns a
    process exit code: 0 on success, 1 on lookup/IO errors, 2 on usage
    errors.
    """
    # -h/--help short-circuits everything else.
    if argns.help:
        parser.print_help()
        return 0

    if argns.V:
        print(f'Pygments version {__version__}, (c) 2006-2024 by Georg Brandl, Matthäus '
              'Chajdas and contributors.')
        return 0

    def is_only_option(opt):
        # True when *opt* is the only option set in the namespace.
        return not any(v for (k, v) in vars(argns).items() if k != opt)

    # handle ``pygmentize -L``
    if argns.L is not None:
        # -L tolerates only --json alongside it; any other set option is a
        # usage error.
        arg_set = set()
        for k, v in vars(argns).items():
            if v:
                arg_set.add(k)

        arg_set.discard('L')
        arg_set.discard('json')

        if arg_set:
            parser.print_help(sys.stderr)
            return 2

        # print version
        if not argns.json:
            main(['', '-V'])
        allowed_types = {'lexer', 'formatter', 'filter', 'style'}
        # Accept plural forms ("lexers") by stripping a trailing 's'.
        largs = [arg.rstrip('s') for arg in argns.L]
        if any(arg not in allowed_types for arg in largs):
            parser.print_help(sys.stderr)
            return 0
        if not largs:
            largs = allowed_types
        if not argns.json:
            for arg in largs:
                _print_list(arg)
        else:
            _print_list_as_json(largs)
        return 0

    # handle ``pygmentize -H``
    if argns.H:
        if not is_only_option('H'):
            parser.print_help(sys.stderr)
            return 2
        what, name = argns.H
        if what not in ('lexer', 'formatter', 'filter'):
            parser.print_help(sys.stderr)
            return 2
        return _print_help(what, name)

    # parse -O options
    parsed_opts = _parse_options(argns.O or [])

    # parse -P options (single key=value pairs; values may contain commas)
    for p_opt in argns.P or []:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value

    # encodings: specific in/out settings fall back to the generic one
    inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
    outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))

    # handle ``pygmentize -N``
    if argns.N:
        lexer = find_lexer_class_for_filename(argns.N)
        if lexer is None:
            lexer = TextLexer

        print(lexer.aliases[0])
        return 0

    # handle ``pygmentize -C``
    if argns.C:
        inp = sys.stdin.buffer.read()
        try:
            lexer = guess_lexer(inp, inencoding=inencoding)
        except ClassNotFound:
            lexer = TextLexer

        print(lexer.aliases[0])
        return 0

    # handle ``pygmentize -S``
    S_opt = argns.S
    a_opt = argns.a
    if S_opt is not None:
        # -S requires a formatter (-f) and forbids -l / an input file.
        f_opt = argns.f
        if not f_opt:
            parser.print_help(sys.stderr)
            return 2
        if argns.l or argns.INPUTFILE:
            parser.print_help(sys.stderr)
            return 2

        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound as err:
            print(err, file=sys.stderr)
            return 1

        print(fmter.get_style_defs(a_opt or ''))
        return 0

    # if no -S is given, -a is not allowed
    if argns.a is not None:
        parser.print_help(sys.stderr)
        return 2

    # parse -F options
    F_opts = _parse_filters(argns.F or [])

    # -x: allow custom (eXternal) lexers and formatters
    allow_custom_lexer_formatter = bool(argns.x)

    # select lexer
    lexer = None

    # given by name?
    lexername = argns.l
    if lexername:
        # custom lexer, located relative to user's cwd
        if allow_custom_lexer_formatter and '.py' in lexername:
            try:
                filename = None
                name = None
                if ':' in lexername:
                    filename, name = lexername.rsplit(':', 1)

                    if '.py' in name:
                        # This can happen on Windows: If the lexername is
                        # C:\lexer.py -- return to normal load path in that case
                        name = None

                if filename and name:
                    lexer = load_lexer_from_file(filename, name,
                                                 **parsed_opts)
                else:
                    lexer = load_lexer_from_file(lexername, **parsed_opts)
            except ClassNotFound as err:
                print('Error:', err, file=sys.stderr)
                return 1
        else:
            try:
                lexer = get_lexer_by_name(lexername, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1

    # read input code
    code = None

    if argns.INPUTFILE:
        if argns.s:
            print('Error: -s option not usable when input file specified',
                  file=sys.stderr)
            return 2

        infn = argns.INPUTFILE
        try:
            with open(infn, 'rb') as infp:
                code = infp.read()
        except Exception as err:
            print('Error: cannot read infile:', err, file=sys.stderr)
            return 1
        if not inencoding:
            code, inencoding = guess_decode(code)

        # do we have to guess the lexer?
        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound as err:
                if argns.g:
                    try:
                        lexer = guess_lexer(code, **parsed_opts)
                    except ClassNotFound:
                        lexer = TextLexer(**parsed_opts)
                else:
                    print('Error:', err, file=sys.stderr)
                    return 1
            except OptionError as err:
                print('Error:', err, file=sys.stderr)
                return 1

    elif not argns.s:  # treat stdin as full file (-s support is later)
        # read code from terminal, always in binary mode since we want to
        # decode ourselves and be tolerant with it
        code = sys.stdin.buffer.read()  # use .buffer to get a binary stream
        if not inencoding:
            code, inencoding = guess_decode_from_terminal(code, sys.stdin)
            # else the lexer will do the decoding
        if not lexer:
            try:
                lexer = guess_lexer(code, **parsed_opts)
            except ClassNotFound:
                lexer = TextLexer(**parsed_opts)

    else:  # -s option needs a lexer with -l
        if not lexer:
            print('Error: when using -s a lexer has to be selected with -l',
                  file=sys.stderr)
            return 2

    # process filters
    for fname, fopts in F_opts:
        try:
            lexer.add_filter(fname, **fopts)
        except ClassNotFound as err:
            print('Error:', err, file=sys.stderr)
            return 1

    # select formatter
    outfn = argns.o
    fmter = argns.f
    if fmter:
        # custom formatter, located relative to user's cwd
        if allow_custom_lexer_formatter and '.py' in fmter:
            try:
                filename = None
                name = None
                if ':' in fmter:
                    # Same logic as above for custom lexer
                    filename, name = fmter.rsplit(':', 1)

                    if '.py' in name:
                        name = None

                if filename and name:
                    fmter = load_formatter_from_file(filename, name,
                                                     **parsed_opts)
                else:
                    fmter = load_formatter_from_file(fmter, **parsed_opts)
            except ClassNotFound as err:
                print('Error:', err, file=sys.stderr)
                return 1
        else:
            try:
                fmter = get_formatter_by_name(fmter, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1

    if outfn:
        if not fmter:
            # No -f: infer the formatter from the output filename.
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception as err:
            print('Error: cannot open outfile:', err, file=sys.stderr)
            return 1
    else:
        if not fmter:
            # Pick the richest terminal formatter the environment supports.
            if os.environ.get('COLORTERM','') in ('truecolor', '24bit'):
                fmter = TerminalTrueColorFormatter(**parsed_opts)
            elif '256' in os.environ.get('TERM', ''):
                fmter = Terminal256Formatter(**parsed_opts)
            else:
                fmter = TerminalFormatter(**parsed_opts)
        outfile = sys.stdout.buffer

    # determine output encoding if not explicitly selected
    if not outencoding:
        if outfn:
            # output file? use lexer encoding for now (can still be None)
            fmter.encoding = inencoding
        else:
            # else use terminal encoding
            fmter.encoding = terminal_encoding(sys.stdout)

    # provide coloring under Windows, if possible
    if not outfn and sys.platform in ('win32', 'cygwin') and \
       fmter.name in ('Terminal', 'Terminal256'):  # pragma: no cover
        # unfortunately colorama doesn't support binary streams on Py3
        outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
        fmter.encoding = None
        try:
            import colorama.initialise
        except ImportError:
            pass
        else:
            outfile = colorama.initialise.wrap_stream(
                outfile, convert=None, strip=None, autoreset=False, wrap=True)

    # When using the LaTeX formatter and the option `escapeinside` is
    # specified, we need a special lexer which collects escaped text
    # before running the chosen language lexer.
    escapeinside = parsed_opts.get('escapeinside', '')
    if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
        left = escapeinside[0]
        right = escapeinside[1]
        lexer = LatexEmbeddedLexer(left, right, lexer)

    # ... and do it!
    if not argns.s:
        # process whole input as per normal...
        try:
            highlight(code, lexer, fmter, outfile)
        finally:
            if outfn:
                outfile.close()
        return 0
    else:
        # line by line processing of stdin (eg: for 'tail -f')...
        try:
            while 1:
                line = sys.stdin.buffer.readline()
                if not line:
                    break
                if not inencoding:
                    line = guess_decode_from_terminal(line, sys.stdin)[0]
                highlight(line, lexer, fmter, outfile)
                if hasattr(outfile, 'flush'):
                    outfile.flush()
            return 0
        except KeyboardInterrupt:  # pragma: no cover
            return 0
        finally:
            if outfn:
                outfile.close()
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
class HelpFormatter(argparse.HelpFormatter):
    """argparse help formatter that sizes itself to the terminal.

    When no explicit *width* is given, the current terminal width (minus
    a two-column margin) is used; failures to query it fall back to
    argparse's default.
    """

    def __init__(self, prog, indent_increment=2, max_help_position=16, width=None):
        if width is None:
            try:
                width = shutil.get_terminal_size().columns - 2
            except Exception:
                # Leave width as None so argparse applies its own default.
                pass
        super().__init__(prog, indent_increment, max_help_position, width)
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def main(args=sys.argv):
    """
    Main command line entry point.

    *args* is the full argv list including the program name (args[0] is
    ignored).  Returns a process exit code; unhandled exceptions are
    summarized on stderr unless -v was given, in which case they are
    re-raised with a banner.
    """
    desc = "Highlight an input file and write the result to an output file."
    parser = argparse.ArgumentParser(description=desc, add_help=False,
                                     formatter_class=HelpFormatter)

    # Primary options: lexer/formatter/filter selection and I/O targets.
    operation = parser.add_argument_group('Main operation')
    lexersel = operation.add_mutually_exclusive_group()
    lexersel.add_argument(
        '-l', metavar='LEXER',
        help='Specify the lexer to use. (Query names with -L.) If not '
        'given and -g is not present, the lexer is guessed from the filename.')
    lexersel.add_argument(
        '-g', action='store_true',
        help='Guess the lexer from the file contents, or pass through '
        'as plain text if nothing can be guessed.')
    operation.add_argument(
        '-F', metavar='FILTER[:options]', action='append',
        help='Add a filter to the token stream. (Query names with -L.) '
        'Filter options are given after a colon if necessary.')
    operation.add_argument(
        '-f', metavar='FORMATTER',
        help='Specify the formatter to use. (Query names with -L.) '
        'If not given, the formatter is guessed from the output filename, '
        'and defaults to the terminal formatter if the output is to the '
        'terminal or an unknown file extension.')
    operation.add_argument(
        '-O', metavar='OPTION=value[,OPTION=value,...]', action='append',
        help='Give options to the lexer and formatter as a comma-separated '
        'list of key-value pairs. '
        'Example: `-O bg=light,python=cool`.')
    operation.add_argument(
        '-P', metavar='OPTION=value', action='append',
        help='Give a single option to the lexer and formatter - with this '
        'you can pass options whose value contains commas and equal signs. '
        'Example: `-P "heading=Pygments, the Python highlighter"`.')
    operation.add_argument(
        '-o', metavar='OUTPUTFILE',
        help='Where to write the output. Defaults to standard output.')

    operation.add_argument(
        'INPUTFILE', nargs='?',
        help='Where to read the input. Defaults to standard input.')

    # Behavior-modifying flags.
    flags = parser.add_argument_group('Operation flags')
    flags.add_argument(
        '-v', action='store_true',
        help='Print a detailed traceback on unhandled exceptions, which '
        'is useful for debugging and bug reports.')
    flags.add_argument(
        '-s', action='store_true',
        help='Process lines one at a time until EOF, rather than waiting to '
        'process the entire file. This only works for stdin, only for lexers '
        'with no line-spanning constructs, and is intended for streaming '
        'input such as you get from `tail -f`. '
        'Example usage: `tail -f sql.log | pygmentize -s -l sql`.')
    flags.add_argument(
        '-x', action='store_true',
        help='Allow custom lexers and formatters to be loaded from a .py file '
        'relative to the current working directory. For example, '
        '`-l ./customlexer.py -x`. By default, this option expects a file '
        'with a class named CustomLexer or CustomFormatter; you can also '
        'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). '
        'Users should be very careful not to use this option with untrusted '
        'files, because it will import and run them.')
    flags.add_argument('--json', help='Output as JSON. This can '
        'be only used in conjunction with -L.',
        default=False,
        action='store_true')

    # Modes that query metadata instead of highlighting.
    special_modes_group = parser.add_argument_group(
        'Special modes - do not do any highlighting')
    special_modes = special_modes_group.add_mutually_exclusive_group()
    special_modes.add_argument(
        '-S', metavar='STYLE -f formatter',
        help='Print style definitions for STYLE for a formatter '
        'given with -f. The argument given by -a is formatter '
        'dependent.')
    special_modes.add_argument(
        '-L', nargs='*', metavar='WHAT',
        help='List lexers, formatters, styles or filters -- '
        'give additional arguments for the thing(s) you want to list '
        '(e.g. "styles"), or omit them to list everything.')
    special_modes.add_argument(
        '-N', metavar='FILENAME',
        help='Guess and print out a lexer name based solely on the given '
        'filename. Does not take input or highlight anything. If no specific '
        'lexer can be determined, "text" is printed.')
    special_modes.add_argument(
        '-C', action='store_true',
        help='Like -N, but print out a lexer name based solely on '
        'a given content from standard input.')
    special_modes.add_argument(
        '-H', action='store', nargs=2, metavar=('NAME', 'TYPE'),
        help='Print detailed help for the object <name> of type <type>, '
        'where <type> is one of "lexer", "formatter" or "filter".')
    special_modes.add_argument(
        '-V', action='store_true',
        help='Print the package version.')
    special_modes.add_argument(
        '-h', '--help', action='store_true',
        help='Print this help.')
    special_modes_group.add_argument(
        '-a', metavar='ARG',
        help='Formatter-specific additional argument for the -S (print '
        'style sheet) mode.')

    argns = parser.parse_args(args[1:])

    try:
        return main_inner(parser, argns)
    except BrokenPipeError:
        # someone closed our stdout, e.g. by quitting a pager.
        return 0
    except Exception:
        if argns.v:
            # Verbose mode: banner plus the full (re-raised) traceback.
            print(file=sys.stderr)
            print('*' * 65, file=sys.stderr)
            print('An unhandled exception occurred while highlighting.',
                  file=sys.stderr)
            print('Please report the whole traceback to the issue tracker at',
                  file=sys.stderr)
            print('<https://github.com/pygments/pygments/issues>.',
                  file=sys.stderr)
            print('*' * 65, file=sys.stderr)
            print(file=sys.stderr)
            raise
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            # (the leading "F" of "File ..." is dropped and re-added as the
            # lowercase "f" in the format string, yielding "(file ...)")
            msg += '\n (f{})'.format(info[-2].split('\n')[0].strip()[1:])
        print(file=sys.stderr)
        print('*** Error while highlighting:', file=sys.stderr)
        print(msg, file=sys.stderr)
        print('*** If this is a bug you want to report, please rerun with -v.',
              file=sys.stderr)
        return 1
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/console.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.console
|
| 3 |
+
~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Format colored console output.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
# ANSI escape-code tables used by the console helpers below.
esc = "\x1b["

# Map from a symbolic name to the full escape sequence.  The empty key maps
# to the empty string so callers can pass "" for "no formatting".
codes = {
    "": "",
    "reset": esc + "39;49;00m",
    "bold": esc + "01m",
    "faint": esc + "02m",
    "standout": esc + "03m",
    "underline": esc + "04m",
    "blink": esc + "05m",
    "overline": esc + "06m",
}

dark_colors = ["black", "red", "green", "yellow", "blue",
               "magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow",
                "brightblue", "brightmagenta", "brightcyan", "white"]

# Dark foregrounds occupy SGR 30-37; the bright variants sit 60 higher.
for offset, (dark, light) in enumerate(zip(dark_colors, light_colors), start=30):
    codes[dark] = esc + "%im" % offset
    codes[light] = esc + "%im" % (60 + offset)

del offset, dark, light

# Historical quirk: "white" is rendered as bold rather than bright-white.
codes["white"] = codes["bold"]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def reset_color():
    """Return the escape sequence that resets color and attributes."""
    return codes["reset"]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def colorize(color_key, text):
    """Wrap *text* in the escape code named *color_key* plus a reset."""
    return "".join((codes[color_key], text, codes["reset"]))
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def ansiformat(attr, text):
    """
    Format ``text`` with a color and/or some attributes::

        color       normal color
        *color*     bold color
        _color_     underlined color
        +color+     blinking color
    """
    pieces = []
    # Peel decorating markers off both ends, innermost last; the order
    # (+, *, _) matches the original nesting convention.
    for marker, code_name in (('+', 'blink'), ('*', 'bold'), ('_', 'underline')):
        if attr[:1] == attr[-1:] == marker:
            pieces.append(codes[code_name])
            attr = attr[1:-1]
    pieces.append(codes[attr])
    pieces.append(text)
    pieces.append(codes['reset'])
    return ''.join(pieces)
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/filter.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.filter
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Module that implements the default filter.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def apply_filters(stream, filters, lexer=None):
    """
    Use this method to apply an iterable of filters to
    a stream. If lexer is given it's forwarded to the
    filter, otherwise the filter receives `None`.
    """
    def _wrap(flt, tokens):
        # Lazily pipe *tokens* through one filter.
        yield from flt.filter(lexer, tokens)

    for flt in filters:
        stream = _wrap(flt, stream)
    return stream
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def simplefilter(f):
    """
    Decorator that converts a function into a filter::

        @simplefilter
        def lowercase(self, lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    namespace = {
        '__module__': getattr(f, '__module__'),
        '__doc__': f.__doc__,
        'function': f,
    }
    # Build a FunctionFilter subclass named after the decorated function.
    return type(f.__name__, (FunctionFilter,), namespace)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Filter:
    """
    Default filter. Subclass this class or use the `simplefilter`
    decorator to create own filters.
    """

    def __init__(self, **options):
        # Keep the raw option mapping around for subclasses to consume.
        self.options = options

    def filter(self, lexer, stream):
        """Yield filtered tokens from *stream*; must be overridden."""
        raise NotImplementedError()
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class FunctionFilter(Filter):
    """
    Abstract class used by `simplefilter` to create simple
    function filters on the fly. The `simplefilter` decorator
    automatically creates subclasses of this class for
    functions passed to it.
    """
    # Set on generated subclasses by `simplefilter`; None here marks the
    # abstract base as unusable directly.
    function = None

    def __init__(self, **options):
        if not hasattr(self, 'function'):
            raise TypeError(f'{self.__class__.__name__!r} used without bound function')
        super().__init__(**options)

    def filter(self, lexer, stream):
        # pylint: disable=not-callable
        yield from self.function(lexer, stream, self.options)
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/other.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.formatters.other
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Other formatters: NullFormatter, RawTokenFormatter.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from pip._vendor.pygments.formatter import Formatter
|
| 12 |
+
from pip._vendor.pygments.util import get_choice_opt
|
| 13 |
+
from pip._vendor.pygments.token import Token
|
| 14 |
+
from pip._vendor.pygments.console import colorize
|
| 15 |
+
|
| 16 |
+
__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class NullFormatter(Formatter):
    """
    Output the text unchanged without any formatting.
    """
    name = 'Text only'
    aliases = ['text', 'null']
    filenames = ['*.txt']

    def format(self, tokensource, outfile):
        # Pass token values straight through, encoding each one when an
        # output encoding is configured (checked per token, as the base
        # class allows it to be set on the instance).
        for _ttype, value in tokensource:
            data = value.encode(self.encoding) if self.encoding else value
            outfile.write(data)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class RawTokenFormatter(Formatter):
|
| 37 |
+
r"""
|
| 38 |
+
Format tokens as a raw representation for storing token streams.
|
| 39 |
+
|
| 40 |
+
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
|
| 41 |
+
be converted to a token stream with the `RawTokenLexer`, described in the
|
| 42 |
+
:doc:`lexer list <lexers>`.
|
| 43 |
+
|
| 44 |
+
Only two options are accepted:
|
| 45 |
+
|
| 46 |
+
`compress`
|
| 47 |
+
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
|
| 48 |
+
compression algorithm after encoding (default: ``''``).
|
| 49 |
+
`error_color`
|
| 50 |
+
If set to a color name, highlight error tokens using that color. If
|
| 51 |
+
set but with no value, defaults to ``'red'``.
|
| 52 |
+
|
| 53 |
+
.. versionadded:: 0.11
|
| 54 |
+
|
| 55 |
+
"""
|
| 56 |
+
name = 'Raw tokens'
|
| 57 |
+
aliases = ['raw', 'tokens']
|
| 58 |
+
filenames = ['*.raw']
|
| 59 |
+
|
| 60 |
+
unicodeoutput = False
|
| 61 |
+
|
| 62 |
+
def __init__(self, **options):
|
| 63 |
+
Formatter.__init__(self, **options)
|
| 64 |
+
# We ignore self.encoding if it is set, since it gets set for lexer
|
| 65 |
+
# and formatter if given with -Oencoding on the command line.
|
| 66 |
+
# The RawTokenFormatter outputs only ASCII. Override here.
|
| 67 |
+
self.encoding = 'ascii' # let pygments.format() do the right thing
|
| 68 |
+
self.compress = get_choice_opt(options, 'compress',
|
| 69 |
+
['', 'none', 'gz', 'bz2'], '')
|
| 70 |
+
self.error_color = options.get('error_color', None)
|
| 71 |
+
if self.error_color is True:
|
| 72 |
+
self.error_color = 'red'
|
| 73 |
+
if self.error_color is not None:
|
| 74 |
+
try:
|
| 75 |
+
colorize(self.error_color, '')
|
| 76 |
+
except KeyError:
|
| 77 |
+
raise ValueError(f"Invalid color {self.error_color!r} specified")
|
| 78 |
+
|
| 79 |
+
def format(self, tokensource, outfile):
|
| 80 |
+
try:
|
| 81 |
+
outfile.write(b'')
|
| 82 |
+
except TypeError:
|
| 83 |
+
raise TypeError('The raw tokens formatter needs a binary '
|
| 84 |
+
'output file')
|
| 85 |
+
if self.compress == 'gz':
|
| 86 |
+
import gzip
|
| 87 |
+
outfile = gzip.GzipFile('', 'wb', 9, outfile)
|
| 88 |
+
|
| 89 |
+
write = outfile.write
|
| 90 |
+
flush = outfile.close
|
| 91 |
+
elif self.compress == 'bz2':
|
| 92 |
+
import bz2
|
| 93 |
+
compressor = bz2.BZ2Compressor(9)
|
| 94 |
+
|
| 95 |
+
def write(text):
|
| 96 |
+
outfile.write(compressor.compress(text))
|
| 97 |
+
|
| 98 |
+
def flush():
|
| 99 |
+
outfile.write(compressor.flush())
|
| 100 |
+
outfile.flush()
|
| 101 |
+
else:
|
| 102 |
+
write = outfile.write
|
| 103 |
+
flush = outfile.flush
|
| 104 |
+
|
| 105 |
+
if self.error_color:
|
| 106 |
+
for ttype, value in tokensource:
|
| 107 |
+
line = b"%r\t%r\n" % (ttype, value)
|
| 108 |
+
if ttype is Token.Error:
|
| 109 |
+
write(colorize(self.error_color, line))
|
| 110 |
+
else:
|
| 111 |
+
write(line)
|
| 112 |
+
else:
|
| 113 |
+
for ttype, value in tokensource:
|
| 114 |
+
write(b"%r\t%r\n" % (ttype, value))
|
| 115 |
+
flush()
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
TESTCASE_BEFORE = '''\
|
| 119 |
+
def testNeedsName(lexer):
|
| 120 |
+
fragment = %r
|
| 121 |
+
tokens = [
|
| 122 |
+
'''
|
| 123 |
+
TESTCASE_AFTER = '''\
|
| 124 |
+
]
|
| 125 |
+
assert list(lexer.get_tokens(fragment)) == tokens
|
| 126 |
+
'''
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class TestcaseFormatter(Formatter):
|
| 130 |
+
"""
|
| 131 |
+
Format tokens as appropriate for a new testcase.
|
| 132 |
+
|
| 133 |
+
.. versionadded:: 2.0
|
| 134 |
+
"""
|
| 135 |
+
name = 'Testcase'
|
| 136 |
+
aliases = ['testcase']
|
| 137 |
+
|
| 138 |
+
def __init__(self, **options):
|
| 139 |
+
Formatter.__init__(self, **options)
|
| 140 |
+
if self.encoding is not None and self.encoding != 'utf-8':
|
| 141 |
+
raise ValueError("Only None and utf-8 are allowed encodings.")
|
| 142 |
+
|
| 143 |
+
def format(self, tokensource, outfile):
|
| 144 |
+
indentation = ' ' * 12
|
| 145 |
+
rawbuf = []
|
| 146 |
+
outbuf = []
|
| 147 |
+
for ttype, value in tokensource:
|
| 148 |
+
rawbuf.append(value)
|
| 149 |
+
outbuf.append(f'{indentation}({ttype}, {value!r}),\n')
|
| 150 |
+
|
| 151 |
+
before = TESTCASE_BEFORE % (''.join(rawbuf),)
|
| 152 |
+
during = ''.join(outbuf)
|
| 153 |
+
after = TESTCASE_AFTER
|
| 154 |
+
if self.encoding is None:
|
| 155 |
+
outfile.write(before + during + after)
|
| 156 |
+
else:
|
| 157 |
+
outfile.write(before.encode('utf-8'))
|
| 158 |
+
outfile.write(during.encode('utf-8'))
|
| 159 |
+
outfile.write(after.encode('utf-8'))
|
| 160 |
+
outfile.flush()
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/lexer.py
ADDED
|
@@ -0,0 +1,963 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.lexer
|
| 3 |
+
~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Base lexer classes.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import re
|
| 12 |
+
import sys
|
| 13 |
+
import time
|
| 14 |
+
|
| 15 |
+
from pip._vendor.pygments.filter import apply_filters, Filter
|
| 16 |
+
from pip._vendor.pygments.filters import get_filter_by_name
|
| 17 |
+
from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType
|
| 18 |
+
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
|
| 19 |
+
make_analysator, Future, guess_decode
|
| 20 |
+
from pip._vendor.pygments.regexopt import regex_opt
|
| 21 |
+
|
| 22 |
+
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
|
| 23 |
+
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
|
| 24 |
+
'default', 'words', 'line_re']
|
| 25 |
+
|
| 26 |
+
line_re = re.compile('.*?\n')
|
| 27 |
+
|
| 28 |
+
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
|
| 29 |
+
(b'\xff\xfe\0\0', 'utf-32'),
|
| 30 |
+
(b'\0\0\xfe\xff', 'utf-32be'),
|
| 31 |
+
(b'\xff\xfe', 'utf-16'),
|
| 32 |
+
(b'\xfe\xff', 'utf-16be')]
|
| 33 |
+
|
| 34 |
+
_default_analyse = staticmethod(lambda x: 0.0)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class LexerMeta(type):
|
| 38 |
+
"""
|
| 39 |
+
This metaclass automagically converts ``analyse_text`` methods into
|
| 40 |
+
static methods which always return float values.
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
def __new__(mcs, name, bases, d):
|
| 44 |
+
if 'analyse_text' in d:
|
| 45 |
+
d['analyse_text'] = make_analysator(d['analyse_text'])
|
| 46 |
+
return type.__new__(mcs, name, bases, d)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class Lexer(metaclass=LexerMeta):
|
| 50 |
+
"""
|
| 51 |
+
Lexer for a specific language.
|
| 52 |
+
|
| 53 |
+
See also :doc:`lexerdevelopment`, a high-level guide to writing
|
| 54 |
+
lexers.
|
| 55 |
+
|
| 56 |
+
Lexer classes have attributes used for choosing the most appropriate
|
| 57 |
+
lexer based on various criteria.
|
| 58 |
+
|
| 59 |
+
.. autoattribute:: name
|
| 60 |
+
:no-value:
|
| 61 |
+
.. autoattribute:: aliases
|
| 62 |
+
:no-value:
|
| 63 |
+
.. autoattribute:: filenames
|
| 64 |
+
:no-value:
|
| 65 |
+
.. autoattribute:: alias_filenames
|
| 66 |
+
.. autoattribute:: mimetypes
|
| 67 |
+
:no-value:
|
| 68 |
+
.. autoattribute:: priority
|
| 69 |
+
|
| 70 |
+
Lexers included in Pygments should have two additional attributes:
|
| 71 |
+
|
| 72 |
+
.. autoattribute:: url
|
| 73 |
+
:no-value:
|
| 74 |
+
.. autoattribute:: version_added
|
| 75 |
+
:no-value:
|
| 76 |
+
|
| 77 |
+
Lexers included in Pygments may have additional attributes:
|
| 78 |
+
|
| 79 |
+
.. autoattribute:: _example
|
| 80 |
+
:no-value:
|
| 81 |
+
|
| 82 |
+
You can pass options to the constructor. The basic options recognized
|
| 83 |
+
by all lexers and processed by the base `Lexer` class are:
|
| 84 |
+
|
| 85 |
+
``stripnl``
|
| 86 |
+
Strip leading and trailing newlines from the input (default: True).
|
| 87 |
+
``stripall``
|
| 88 |
+
Strip all leading and trailing whitespace from the input
|
| 89 |
+
(default: False).
|
| 90 |
+
``ensurenl``
|
| 91 |
+
Make sure that the input ends with a newline (default: True). This
|
| 92 |
+
is required for some lexers that consume input linewise.
|
| 93 |
+
|
| 94 |
+
.. versionadded:: 1.3
|
| 95 |
+
|
| 96 |
+
``tabsize``
|
| 97 |
+
If given and greater than 0, expand tabs in the input (default: 0).
|
| 98 |
+
``encoding``
|
| 99 |
+
If given, must be an encoding name. This encoding will be used to
|
| 100 |
+
convert the input string to Unicode, if it is not already a Unicode
|
| 101 |
+
string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
|
| 102 |
+
Latin1 detection. Can also be ``'chardet'`` to use the chardet
|
| 103 |
+
library, if it is installed.
|
| 104 |
+
``inencoding``
|
| 105 |
+
Overrides the ``encoding`` if given.
|
| 106 |
+
"""
|
| 107 |
+
|
| 108 |
+
#: Full name of the lexer, in human-readable form
|
| 109 |
+
name = None
|
| 110 |
+
|
| 111 |
+
#: A list of short, unique identifiers that can be used to look
|
| 112 |
+
#: up the lexer from a list, e.g., using `get_lexer_by_name()`.
|
| 113 |
+
aliases = []
|
| 114 |
+
|
| 115 |
+
#: A list of `fnmatch` patterns that match filenames which contain
|
| 116 |
+
#: content for this lexer. The patterns in this list should be unique among
|
| 117 |
+
#: all lexers.
|
| 118 |
+
filenames = []
|
| 119 |
+
|
| 120 |
+
#: A list of `fnmatch` patterns that match filenames which may or may not
|
| 121 |
+
#: contain content for this lexer. This list is used by the
|
| 122 |
+
#: :func:`.guess_lexer_for_filename()` function, to determine which lexers
|
| 123 |
+
#: are then included in guessing the correct one. That means that
|
| 124 |
+
#: e.g. every lexer for HTML and a template language should include
|
| 125 |
+
#: ``\*.html`` in this list.
|
| 126 |
+
alias_filenames = []
|
| 127 |
+
|
| 128 |
+
#: A list of MIME types for content that can be lexed with this lexer.
|
| 129 |
+
mimetypes = []
|
| 130 |
+
|
| 131 |
+
#: Priority, should multiple lexers match and no content is provided
|
| 132 |
+
priority = 0
|
| 133 |
+
|
| 134 |
+
#: URL of the language specification/definition. Used in the Pygments
|
| 135 |
+
#: documentation. Set to an empty string to disable.
|
| 136 |
+
url = None
|
| 137 |
+
|
| 138 |
+
#: Version of Pygments in which the lexer was added.
|
| 139 |
+
version_added = None
|
| 140 |
+
|
| 141 |
+
#: Example file name. Relative to the ``tests/examplefiles`` directory.
|
| 142 |
+
#: This is used by the documentation generator to show an example.
|
| 143 |
+
_example = None
|
| 144 |
+
|
| 145 |
+
def __init__(self, **options):
|
| 146 |
+
"""
|
| 147 |
+
This constructor takes arbitrary options as keyword arguments.
|
| 148 |
+
Every subclass must first process its own options and then call
|
| 149 |
+
the `Lexer` constructor, since it processes the basic
|
| 150 |
+
options like `stripnl`.
|
| 151 |
+
|
| 152 |
+
An example looks like this:
|
| 153 |
+
|
| 154 |
+
.. sourcecode:: python
|
| 155 |
+
|
| 156 |
+
def __init__(self, **options):
|
| 157 |
+
self.compress = options.get('compress', '')
|
| 158 |
+
Lexer.__init__(self, **options)
|
| 159 |
+
|
| 160 |
+
As these options must all be specifiable as strings (due to the
|
| 161 |
+
command line usage), there are various utility functions
|
| 162 |
+
available to help with that, see `Utilities`_.
|
| 163 |
+
"""
|
| 164 |
+
self.options = options
|
| 165 |
+
self.stripnl = get_bool_opt(options, 'stripnl', True)
|
| 166 |
+
self.stripall = get_bool_opt(options, 'stripall', False)
|
| 167 |
+
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
|
| 168 |
+
self.tabsize = get_int_opt(options, 'tabsize', 0)
|
| 169 |
+
self.encoding = options.get('encoding', 'guess')
|
| 170 |
+
self.encoding = options.get('inencoding') or self.encoding
|
| 171 |
+
self.filters = []
|
| 172 |
+
for filter_ in get_list_opt(options, 'filters', ()):
|
| 173 |
+
self.add_filter(filter_)
|
| 174 |
+
|
| 175 |
+
def __repr__(self):
|
| 176 |
+
if self.options:
|
| 177 |
+
return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>'
|
| 178 |
+
else:
|
| 179 |
+
return f'<pygments.lexers.{self.__class__.__name__}>'
|
| 180 |
+
|
| 181 |
+
def add_filter(self, filter_, **options):
|
| 182 |
+
"""
|
| 183 |
+
Add a new stream filter to this lexer.
|
| 184 |
+
"""
|
| 185 |
+
if not isinstance(filter_, Filter):
|
| 186 |
+
filter_ = get_filter_by_name(filter_, **options)
|
| 187 |
+
self.filters.append(filter_)
|
| 188 |
+
|
| 189 |
+
def analyse_text(text):
|
| 190 |
+
"""
|
| 191 |
+
A static method which is called for lexer guessing.
|
| 192 |
+
|
| 193 |
+
It should analyse the text and return a float in the range
|
| 194 |
+
from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
|
| 195 |
+
will not be selected as the most probable one, if it returns
|
| 196 |
+
``1.0``, it will be selected immediately. This is used by
|
| 197 |
+
`guess_lexer`.
|
| 198 |
+
|
| 199 |
+
The `LexerMeta` metaclass automatically wraps this function so
|
| 200 |
+
that it works like a static method (no ``self`` or ``cls``
|
| 201 |
+
parameter) and the return value is automatically converted to
|
| 202 |
+
`float`. If the return value is an object that is boolean `False`
|
| 203 |
+
it's the same as if the return values was ``0.0``.
|
| 204 |
+
"""
|
| 205 |
+
|
| 206 |
+
def _preprocess_lexer_input(self, text):
|
| 207 |
+
"""Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""
|
| 208 |
+
|
| 209 |
+
if not isinstance(text, str):
|
| 210 |
+
if self.encoding == 'guess':
|
| 211 |
+
text, _ = guess_decode(text)
|
| 212 |
+
elif self.encoding == 'chardet':
|
| 213 |
+
try:
|
| 214 |
+
# pip vendoring note: this code is not reachable by pip,
|
| 215 |
+
# removed import of chardet to make it clear.
|
| 216 |
+
raise ImportError('chardet is not vendored by pip')
|
| 217 |
+
except ImportError as e:
|
| 218 |
+
raise ImportError('To enable chardet encoding guessing, '
|
| 219 |
+
'please install the chardet library '
|
| 220 |
+
'from http://chardet.feedparser.org/') from e
|
| 221 |
+
# check for BOM first
|
| 222 |
+
decoded = None
|
| 223 |
+
for bom, encoding in _encoding_map:
|
| 224 |
+
if text.startswith(bom):
|
| 225 |
+
decoded = text[len(bom):].decode(encoding, 'replace')
|
| 226 |
+
break
|
| 227 |
+
# no BOM found, so use chardet
|
| 228 |
+
if decoded is None:
|
| 229 |
+
enc = chardet.detect(text[:1024]) # Guess using first 1KB
|
| 230 |
+
decoded = text.decode(enc.get('encoding') or 'utf-8',
|
| 231 |
+
'replace')
|
| 232 |
+
text = decoded
|
| 233 |
+
else:
|
| 234 |
+
text = text.decode(self.encoding)
|
| 235 |
+
if text.startswith('\ufeff'):
|
| 236 |
+
text = text[len('\ufeff'):]
|
| 237 |
+
else:
|
| 238 |
+
if text.startswith('\ufeff'):
|
| 239 |
+
text = text[len('\ufeff'):]
|
| 240 |
+
|
| 241 |
+
# text now *is* a unicode string
|
| 242 |
+
text = text.replace('\r\n', '\n')
|
| 243 |
+
text = text.replace('\r', '\n')
|
| 244 |
+
if self.stripall:
|
| 245 |
+
text = text.strip()
|
| 246 |
+
elif self.stripnl:
|
| 247 |
+
text = text.strip('\n')
|
| 248 |
+
if self.tabsize > 0:
|
| 249 |
+
text = text.expandtabs(self.tabsize)
|
| 250 |
+
if self.ensurenl and not text.endswith('\n'):
|
| 251 |
+
text += '\n'
|
| 252 |
+
|
| 253 |
+
return text
|
| 254 |
+
|
| 255 |
+
def get_tokens(self, text, unfiltered=False):
|
| 256 |
+
"""
|
| 257 |
+
This method is the basic interface of a lexer. It is called by
|
| 258 |
+
the `highlight()` function. It must process the text and return an
|
| 259 |
+
iterable of ``(tokentype, value)`` pairs from `text`.
|
| 260 |
+
|
| 261 |
+
Normally, you don't need to override this method. The default
|
| 262 |
+
implementation processes the options recognized by all lexers
|
| 263 |
+
(`stripnl`, `stripall` and so on), and then yields all tokens
|
| 264 |
+
from `get_tokens_unprocessed()`, with the ``index`` dropped.
|
| 265 |
+
|
| 266 |
+
If `unfiltered` is set to `True`, the filtering mechanism is
|
| 267 |
+
bypassed even if filters are defined.
|
| 268 |
+
"""
|
| 269 |
+
text = self._preprocess_lexer_input(text)
|
| 270 |
+
|
| 271 |
+
def streamer():
|
| 272 |
+
for _, t, v in self.get_tokens_unprocessed(text):
|
| 273 |
+
yield t, v
|
| 274 |
+
stream = streamer()
|
| 275 |
+
if not unfiltered:
|
| 276 |
+
stream = apply_filters(stream, self.filters, self)
|
| 277 |
+
return stream
|
| 278 |
+
|
| 279 |
+
def get_tokens_unprocessed(self, text):
|
| 280 |
+
"""
|
| 281 |
+
This method should process the text and return an iterable of
|
| 282 |
+
``(index, tokentype, value)`` tuples where ``index`` is the starting
|
| 283 |
+
position of the token within the input text.
|
| 284 |
+
|
| 285 |
+
It must be overridden by subclasses. It is recommended to
|
| 286 |
+
implement it as a generator to maximize effectiveness.
|
| 287 |
+
"""
|
| 288 |
+
raise NotImplementedError
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class DelegatingLexer(Lexer):
|
| 292 |
+
"""
|
| 293 |
+
This lexer takes two lexer as arguments. A root lexer and
|
| 294 |
+
a language lexer. First everything is scanned using the language
|
| 295 |
+
lexer, afterwards all ``Other`` tokens are lexed using the root
|
| 296 |
+
lexer.
|
| 297 |
+
|
| 298 |
+
The lexers from the ``template`` lexer package use this base lexer.
|
| 299 |
+
"""
|
| 300 |
+
|
| 301 |
+
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
|
| 302 |
+
self.root_lexer = _root_lexer(**options)
|
| 303 |
+
self.language_lexer = _language_lexer(**options)
|
| 304 |
+
self.needle = _needle
|
| 305 |
+
Lexer.__init__(self, **options)
|
| 306 |
+
|
| 307 |
+
def get_tokens_unprocessed(self, text):
|
| 308 |
+
buffered = ''
|
| 309 |
+
insertions = []
|
| 310 |
+
lng_buffer = []
|
| 311 |
+
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
|
| 312 |
+
if t is self.needle:
|
| 313 |
+
if lng_buffer:
|
| 314 |
+
insertions.append((len(buffered), lng_buffer))
|
| 315 |
+
lng_buffer = []
|
| 316 |
+
buffered += v
|
| 317 |
+
else:
|
| 318 |
+
lng_buffer.append((i, t, v))
|
| 319 |
+
if lng_buffer:
|
| 320 |
+
insertions.append((len(buffered), lng_buffer))
|
| 321 |
+
return do_insertions(insertions,
|
| 322 |
+
self.root_lexer.get_tokens_unprocessed(buffered))
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
# ------------------------------------------------------------------------------
|
| 326 |
+
# RegexLexer and ExtendedRegexLexer
|
| 327 |
+
#
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
class include(str): # pylint: disable=invalid-name
|
| 331 |
+
"""
|
| 332 |
+
Indicates that a state should include rules from another state.
|
| 333 |
+
"""
|
| 334 |
+
pass
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
class _inherit:
|
| 338 |
+
"""
|
| 339 |
+
Indicates the a state should inherit from its superclass.
|
| 340 |
+
"""
|
| 341 |
+
def __repr__(self):
|
| 342 |
+
return 'inherit'
|
| 343 |
+
|
| 344 |
+
inherit = _inherit() # pylint: disable=invalid-name
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
class combined(tuple): # pylint: disable=invalid-name
|
| 348 |
+
"""
|
| 349 |
+
Indicates a state combined from multiple states.
|
| 350 |
+
"""
|
| 351 |
+
|
| 352 |
+
def __new__(cls, *args):
|
| 353 |
+
return tuple.__new__(cls, args)
|
| 354 |
+
|
| 355 |
+
def __init__(self, *args):
|
| 356 |
+
# tuple.__init__ doesn't do anything
|
| 357 |
+
pass
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
class _PseudoMatch:
|
| 361 |
+
"""
|
| 362 |
+
A pseudo match object constructed from a string.
|
| 363 |
+
"""
|
| 364 |
+
|
| 365 |
+
def __init__(self, start, text):
|
| 366 |
+
self._text = text
|
| 367 |
+
self._start = start
|
| 368 |
+
|
| 369 |
+
def start(self, arg=None):
|
| 370 |
+
return self._start
|
| 371 |
+
|
| 372 |
+
def end(self, arg=None):
|
| 373 |
+
return self._start + len(self._text)
|
| 374 |
+
|
| 375 |
+
def group(self, arg=None):
|
| 376 |
+
if arg:
|
| 377 |
+
raise IndexError('No such group')
|
| 378 |
+
return self._text
|
| 379 |
+
|
| 380 |
+
def groups(self):
|
| 381 |
+
return (self._text,)
|
| 382 |
+
|
| 383 |
+
def groupdict(self):
|
| 384 |
+
return {}
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def bygroups(*args):
|
| 388 |
+
"""
|
| 389 |
+
Callback that yields multiple actions for each group in the match.
|
| 390 |
+
"""
|
| 391 |
+
def callback(lexer, match, ctx=None):
|
| 392 |
+
for i, action in enumerate(args):
|
| 393 |
+
if action is None:
|
| 394 |
+
continue
|
| 395 |
+
elif type(action) is _TokenType:
|
| 396 |
+
data = match.group(i + 1)
|
| 397 |
+
if data:
|
| 398 |
+
yield match.start(i + 1), action, data
|
| 399 |
+
else:
|
| 400 |
+
data = match.group(i + 1)
|
| 401 |
+
if data is not None:
|
| 402 |
+
if ctx:
|
| 403 |
+
ctx.pos = match.start(i + 1)
|
| 404 |
+
for item in action(lexer,
|
| 405 |
+
_PseudoMatch(match.start(i + 1), data), ctx):
|
| 406 |
+
if item:
|
| 407 |
+
yield item
|
| 408 |
+
if ctx:
|
| 409 |
+
ctx.pos = match.end()
|
| 410 |
+
return callback
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
class _This:
|
| 414 |
+
"""
|
| 415 |
+
Special singleton used for indicating the caller class.
|
| 416 |
+
Used by ``using``.
|
| 417 |
+
"""
|
| 418 |
+
|
| 419 |
+
this = _This()
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
def using(_other, **kwargs):
|
| 423 |
+
"""
|
| 424 |
+
Callback that processes the match with a different lexer.
|
| 425 |
+
|
| 426 |
+
The keyword arguments are forwarded to the lexer, except `state` which
|
| 427 |
+
is handled separately.
|
| 428 |
+
|
| 429 |
+
`state` specifies the state that the new lexer will start in, and can
|
| 430 |
+
be an enumerable such as ('root', 'inline', 'string') or a simple
|
| 431 |
+
string which is assumed to be on top of the root state.
|
| 432 |
+
|
| 433 |
+
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
|
| 434 |
+
"""
|
| 435 |
+
gt_kwargs = {}
|
| 436 |
+
if 'state' in kwargs:
|
| 437 |
+
s = kwargs.pop('state')
|
| 438 |
+
if isinstance(s, (list, tuple)):
|
| 439 |
+
gt_kwargs['stack'] = s
|
| 440 |
+
else:
|
| 441 |
+
gt_kwargs['stack'] = ('root', s)
|
| 442 |
+
|
| 443 |
+
if _other is this:
|
| 444 |
+
def callback(lexer, match, ctx=None):
|
| 445 |
+
# if keyword arguments are given the callback
|
| 446 |
+
# function has to create a new lexer instance
|
| 447 |
+
if kwargs:
|
| 448 |
+
# XXX: cache that somehow
|
| 449 |
+
kwargs.update(lexer.options)
|
| 450 |
+
lx = lexer.__class__(**kwargs)
|
| 451 |
+
else:
|
| 452 |
+
lx = lexer
|
| 453 |
+
s = match.start()
|
| 454 |
+
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
|
| 455 |
+
yield i + s, t, v
|
| 456 |
+
if ctx:
|
| 457 |
+
ctx.pos = match.end()
|
| 458 |
+
else:
|
| 459 |
+
def callback(lexer, match, ctx=None):
|
| 460 |
+
# XXX: cache that somehow
|
| 461 |
+
kwargs.update(lexer.options)
|
| 462 |
+
lx = _other(**kwargs)
|
| 463 |
+
|
| 464 |
+
s = match.start()
|
| 465 |
+
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
|
| 466 |
+
yield i + s, t, v
|
| 467 |
+
if ctx:
|
| 468 |
+
ctx.pos = match.end()
|
| 469 |
+
return callback
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
class default:
|
| 473 |
+
"""
|
| 474 |
+
Indicates a state or state action (e.g. #pop) to apply.
|
| 475 |
+
For example default('#pop') is equivalent to ('', Token, '#pop')
|
| 476 |
+
Note that state tuples may be used as well.
|
| 477 |
+
|
| 478 |
+
.. versionadded:: 2.0
|
| 479 |
+
"""
|
| 480 |
+
def __init__(self, state):
|
| 481 |
+
self.state = state
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
class words(Future):
|
| 485 |
+
"""
|
| 486 |
+
Indicates a list of literal words that is transformed into an optimized
|
| 487 |
+
regex that matches any of the words.
|
| 488 |
+
|
| 489 |
+
.. versionadded:: 2.0
|
| 490 |
+
"""
|
| 491 |
+
def __init__(self, words, prefix='', suffix=''):
|
| 492 |
+
self.words = words
|
| 493 |
+
self.prefix = prefix
|
| 494 |
+
self.suffix = suffix
|
| 495 |
+
|
| 496 |
+
def get(self):
|
| 497 |
+
return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
class RegexLexerMeta(LexerMeta):
|
| 501 |
+
"""
|
| 502 |
+
Metaclass for RegexLexer, creates the self._tokens attribute from
|
| 503 |
+
self.tokens on the first instantiation.
|
| 504 |
+
"""
|
| 505 |
+
|
| 506 |
+
def _process_regex(cls, regex, rflags, state):
|
| 507 |
+
"""Preprocess the regular expression component of a token definition."""
|
| 508 |
+
if isinstance(regex, Future):
|
| 509 |
+
regex = regex.get()
|
| 510 |
+
return re.compile(regex, rflags).match
|
| 511 |
+
|
| 512 |
+
def _process_token(cls, token):
|
| 513 |
+
"""Preprocess the token component of a token definition."""
|
| 514 |
+
assert type(token) is _TokenType or callable(token), \
|
| 515 |
+
f'token type must be simple type or callable, not {token!r}'
|
| 516 |
+
return token
|
| 517 |
+
|
| 518 |
+
def _process_new_state(cls, new_state, unprocessed, processed):
|
| 519 |
+
"""Preprocess the state transition action of a token definition."""
|
| 520 |
+
if isinstance(new_state, str):
|
| 521 |
+
# an existing state
|
| 522 |
+
if new_state == '#pop':
|
| 523 |
+
return -1
|
| 524 |
+
elif new_state in unprocessed:
|
| 525 |
+
return (new_state,)
|
| 526 |
+
elif new_state == '#push':
|
| 527 |
+
return new_state
|
| 528 |
+
elif new_state[:5] == '#pop:':
|
| 529 |
+
return -int(new_state[5:])
|
| 530 |
+
else:
|
| 531 |
+
assert False, f'unknown new state {new_state!r}'
|
| 532 |
+
elif isinstance(new_state, combined):
|
| 533 |
+
# combine a new state from existing ones
|
| 534 |
+
tmp_state = '_tmp_%d' % cls._tmpname
|
| 535 |
+
cls._tmpname += 1
|
| 536 |
+
itokens = []
|
| 537 |
+
for istate in new_state:
|
| 538 |
+
assert istate != new_state, f'circular state ref {istate!r}'
|
| 539 |
+
itokens.extend(cls._process_state(unprocessed,
|
| 540 |
+
processed, istate))
|
| 541 |
+
processed[tmp_state] = itokens
|
| 542 |
+
return (tmp_state,)
|
| 543 |
+
elif isinstance(new_state, tuple):
|
| 544 |
+
# push more than one state
|
| 545 |
+
for istate in new_state:
|
| 546 |
+
assert (istate in unprocessed or
|
| 547 |
+
istate in ('#pop', '#push')), \
|
| 548 |
+
'unknown new state ' + istate
|
| 549 |
+
return new_state
|
| 550 |
+
else:
|
| 551 |
+
assert False, f'unknown new state def {new_state!r}'
|
| 552 |
+
|
| 553 |
+
def _process_state(cls, unprocessed, processed, state):
    """Preprocess a single state definition.

    Fills ``processed[state]`` with (match_func, token, new_state) rule
    triples and returns that list; already-processed states are returned
    from the cache unchanged.
    """
    assert isinstance(state, str), f"wrong state name {state!r}"
    assert state[0] != '#', f"invalid state name {state!r}"
    if state in processed:
        # memoized: a state is only ever processed once
        return processed[state]
    tokens = processed[state] = []
    rflags = cls.flags
    for tdef in unprocessed[state]:
        if isinstance(tdef, include):
            # it's a state reference
            assert tdef != state, f"circular state reference {state!r}"
            tokens.extend(cls._process_state(unprocessed, processed,
                                             str(tdef)))
            continue
        if isinstance(tdef, _inherit):
            # should be processed already, but may not in the case of:
            # 1. the state has no counterpart in any parent
            # 2. the state includes more than one 'inherit'
            continue
        if isinstance(tdef, default):
            # a default rule matches the empty string and only performs
            # the state transition
            new_state = cls._process_new_state(tdef.state, unprocessed, processed)
            tokens.append((re.compile('').match, None, new_state))
            continue

        assert type(tdef) is tuple, f"wrong rule def {tdef!r}"

        try:
            rex = cls._process_regex(tdef[0], rflags, state)
        except Exception as err:
            raise ValueError(f"uncompilable regex {tdef[0]!r} in state {state!r} of {cls!r}: {err}") from err

        token = cls._process_token(tdef[1])

        # the third element (the state transition) is optional
        if len(tdef) == 2:
            new_state = None
        else:
            new_state = cls._process_new_state(tdef[2],
                                               unprocessed, processed)

        tokens.append((rex, token, new_state))
    return tokens
|
| 596 |
+
def process_tokendef(cls, name, tokendefs=None):
    """Preprocess a dictionary of token definitions.

    The processed mapping is cached on the class under *name* and
    returned.  When *tokendefs* is not given (or falsy), it falls back
    to ``cls.tokens[name]``.
    """
    processed = cls._all_tokens[name] = {}
    definitions = tokendefs or cls.tokens[name]
    for state_name in list(definitions):
        cls._process_state(definitions, processed, state_name)
    return processed
|
| 604 |
+
def get_tokendefs(cls):
    """
    Merge tokens from superclasses in MRO order, returning a single tokendef
    dictionary.

    Any state that is not defined by a subclass will be inherited
    automatically. States that *are* defined by subclasses will, by
    default, override that state in the superclass. If a subclass wishes to
    inherit definitions from a superclass, it can use the special value
    "inherit", which will cause the superclass' state definition to be
    included at that point in the state.
    """
    tokens = {}
    # maps state name -> index where an `inherit` marker still awaits
    # replacement by the next class up the MRO
    inheritable = {}
    for c in cls.__mro__:
        toks = c.__dict__.get('tokens', {})

        for state, items in toks.items():
            curitems = tokens.get(state)
            if curitems is None:
                # N.b. because this is assigned by reference, sufficiently
                # deep hierarchies are processed incrementally (e.g. for
                # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                # will not see any inherits in B).
                tokens[state] = items
                try:
                    inherit_ndx = items.index(inherit)
                except ValueError:
                    continue
                inheritable[state] = inherit_ndx
                continue

            inherit_ndx = inheritable.pop(state, None)
            if inherit_ndx is None:
                # the subclass state had no pending `inherit`; superclass
                # definition is fully overridden
                continue

            # Replace the "inherit" value with the items
            curitems[inherit_ndx:inherit_ndx+1] = items
            try:
                # N.b. this is the index in items (that is, the superclass
                # copy), so offset required when storing below.
                new_inh_ndx = items.index(inherit)
            except ValueError:
                pass
            else:
                inheritable[state] = inherit_ndx + new_inh_ndx

    return tokens
|
| 653 |
+
def __call__(cls, *args, **kwds):
    """Instantiate cls after preprocessing its token definitions.

    Token-definition processing happens lazily on first instantiation and
    is cached in ``cls._tokens``; classes declaring ``token_variants``
    defer processing (they process per-variant elsewhere).
    """
    if '_tokens' not in cls.__dict__:
        cls._all_tokens = {}
        cls._tmpname = 0
        if hasattr(cls, 'token_variants') and cls.token_variants:
            # don't process yet
            pass
        else:
            cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

    return type.__call__(cls, *args, **kwds)
|
| 666 |
+
|
| 667 |
+
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all time there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            # try each rule of the current state in order; first match wins
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            # callback action: yields its own tokens
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    # emit a single Error token and advance one character
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    # pos ran past the end of text: we are done
                    break
|
| 763 |
+
|
| 764 |
+
class LexerContext:
    """A helper object bundling a lexer's input text, current position,
    end boundary and state stack."""

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        # NOTE: a falsy *end* (which includes 0) falls back to the full
        # text length, so end=0 is not supported; same falsy-default
        # quirk applies to *stack*.
        self.end = end if end else len(text)
        self.stack = stack if stack else ['root']

    def __repr__(self):
        return 'LexerContext({!r}, {!r}, {!r})'.format(
            self.text, self.pos, self.stack)
|
| 778 |
+
|
| 779 |
+
class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            # try each rule of the current state in order; first match wins
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                            # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        # reached the context's end boundary: done
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    # emit a single Error token and advance one character
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
|
| 850 |
+
|
| 851 |
+
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions
        yield from tokens
        return

    # realpos: running absolute position in the combined output stream
    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        # oldi: how much of the current value v has been emitted so far
        oldi = 0
        while insleft and i + len(v) >= index:
            # emit the part of v before the insertion point, then the
            # inserted tokens, then move on to the next insertion
            tmpval = v[oldi:index - i]
            if tmpval:
                yield realpos, t, tmpval
                realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        if oldi < len(v):
            # remainder of v after the last insertion in it
            yield realpos, t, v[oldi:]
            realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
|
| 914 |
+
|
| 915 |
+
class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        # Expand `words()` specifications into one optimized alternation
        # before compiling, so the profiled pattern is the real one.
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            # accumulate [call count, total wall time] per (state, regex)
            # into the top-most profiling frame
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            t0 = time.time()
            res = compiled.match(text, pos, endpos)
            t1 = time.time()
            info[0] += 1
            info[1] += t1 - t0
            return res
        return match_func
|
| 936 |
+
|
| 937 |
+
class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    # stack of {(state, regex): [ncalls, total_time]} frames, one per
    # (possibly nested) lexing run
    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        # columns: state, truncated regex source, ncalls, total ms, ms/call
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/modeline.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.modeline
|
| 3 |
+
~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
A simple modeline parser (based on pymodeline).
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import re
|
| 12 |
+
|
| 13 |
+
__all__ = ['get_filetype_from_buffer']
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Matches a vi/vim/ex modeline and captures the declared filetype.
modeline_re = re.compile(r'''
    (?: vi | vim | ex ) (?: [<=>]? \d* )? :
    .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)


def get_filetype_from_line(l):  # noqa: E741
    """Return the filetype named in a modeline on *l*, or None."""
    found = modeline_re.search(l)
    return found.group(1) if found else None
|
| 27 |
+
|
| 28 |
+
def get_filetype_from_buffer(buf, max_lines=5):
    """
    Scan the buffer for modelines and return filetype if one is found.

    The last *max_lines* lines are checked first (newest last line
    first), then the first *max_lines* + 1 lines in reverse order.
    """
    lines = buf.splitlines()
    # tail of the buffer, bottom-up
    candidates = list(lines[-1:-max_lines-1:-1])
    # head of the buffer, top lines scanned in reverse index order
    candidates += [lines[i] for i in range(max_lines, -1, -1) if i < len(lines)]
    for candidate in candidates:
        filetype = get_filetype_from_line(candidate)
        if filetype:
            return filetype

    return None
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/plugin.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.plugin
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Pygments plugin interface.
|
| 6 |
+
|
| 7 |
+
lexer plugins::
|
| 8 |
+
|
| 9 |
+
[pygments.lexers]
|
| 10 |
+
yourlexer = yourmodule:YourLexer
|
| 11 |
+
|
| 12 |
+
formatter plugins::
|
| 13 |
+
|
| 14 |
+
[pygments.formatters]
|
| 15 |
+
yourformatter = yourformatter:YourFormatter
|
| 16 |
+
/.ext = yourformatter:YourFormatter
|
| 17 |
+
|
| 18 |
+
As you can see, you can define extensions for the formatter
|
| 19 |
+
with a leading slash.
|
| 20 |
+
|
| 21 |
+
syntax plugins::
|
| 22 |
+
|
| 23 |
+
[pygments.styles]
|
| 24 |
+
yourstyle = yourstyle:YourStyle
|
| 25 |
+
|
| 26 |
+
filter plugin::
|
| 27 |
+
|
| 28 |
+
[pygments.filter]
|
| 29 |
+
yourfilter = yourfilter:YourFilter
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 33 |
+
:license: BSD, see LICENSE for details.
|
| 34 |
+
"""
|
| 35 |
+
from importlib.metadata import entry_points
|
| 36 |
+
|
| 37 |
+
LEXER_ENTRY_POINT = 'pygments.lexers'
|
| 38 |
+
FORMATTER_ENTRY_POINT = 'pygments.formatters'
|
| 39 |
+
STYLE_ENTRY_POINT = 'pygments.styles'
|
| 40 |
+
FILTER_ENTRY_POINT = 'pygments.filters'
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def iter_entry_points(group_name):
    """Return the entry points registered under *group_name*.

    Handles both the ``select``-based API (Python 3.10+ / recent
    importlib_metadata) and the older dict-like interface.
    """
    all_groups = entry_points()
    selector = getattr(all_groups, 'select', None)
    if selector is not None:
        # New interface in Python 3.10 and newer versions of the
        # importlib_metadata backport.
        return selector(group=group_name)
    # Older interface, deprecated in Python 3.10 and recent
    # importlib_metadata, but we need it in Python 3.8 and 3.9.
    return all_groups.get(group_name, [])
|
| 54 |
+
|
| 55 |
+
def find_plugin_lexers():
    """Yield each lexer class registered by installed plugins."""
    for ep in iter_entry_points(LEXER_ENTRY_POINT):
        yield ep.load()
|
| 59 |
+
|
| 60 |
+
def find_plugin_formatters():
    """Yield (name, class) pairs for plugin-registered formatters."""
    for ep in iter_entry_points(FORMATTER_ENTRY_POINT):
        yield ep.name, ep.load()
|
| 64 |
+
|
| 65 |
+
def find_plugin_styles():
    """Yield (name, class) pairs for plugin-registered styles."""
    for ep in iter_entry_points(STYLE_ENTRY_POINT):
        yield ep.name, ep.load()
|
| 69 |
+
|
| 70 |
+
def find_plugin_filters():
    """Yield (name, class) pairs for plugin-registered filters."""
    for ep in iter_entry_points(FILTER_ENTRY_POINT):
        yield ep.name, ep.load()
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/regexopt.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.regexopt
|
| 3 |
+
~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
An algorithm that generates optimized regexes for matching long lists of
|
| 6 |
+
literal strings.
|
| 7 |
+
|
| 8 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 9 |
+
:license: BSD, see LICENSE for details.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import re
|
| 13 |
+
from re import escape
|
| 14 |
+
from os.path import commonprefix
|
| 15 |
+
from itertools import groupby
|
| 16 |
+
from operator import itemgetter
|
| 17 |
+
|
| 18 |
+
# Characters that must be backslash-escaped inside a regex character class.
CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
FIRST_ELEMENT = itemgetter(0)


def make_charset(letters):
    """Build a regex character class source matching any of *letters*."""
    escaped = CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters))
    return f'[{escaped}]'
|
| 25 |
+
|
| 26 |
+
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings.

    *open_paren* is either '' or an opening group ('(' or '(?:'); when
    non-empty the result is wrapped in that group.
    """
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # empty string present: the rest of the alternation becomes optional
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    # groupby splits the (sorted) strings into those sharing the first
    # string's initial character and those that don't
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def regex_opt(strings, prefix='', suffix=''):
    """Return an optimized regex pattern *string* (not a compiled pattern)
    that matches any string in the given list.

    The strings to match must be literal strings, not regexes.  They will be
    regex-escaped.

    *prefix* and *suffix* are pre- and appended to the final regex.
    """
    # regex_opt_inner requires its input sorted
    strings = sorted(strings)
    return prefix + regex_opt_inner(strings, '(') + suffix
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/sphinxext.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.sphinxext
|
| 3 |
+
~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Sphinx extension to generate automatic documentation of lexers,
|
| 6 |
+
formatters and filters.
|
| 7 |
+
|
| 8 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 9 |
+
:license: BSD, see LICENSE for details.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import sys
|
| 13 |
+
|
| 14 |
+
from docutils import nodes
|
| 15 |
+
from docutils.statemachine import ViewList
|
| 16 |
+
from docutils.parsers.rst import Directive
|
| 17 |
+
from sphinx.util.nodes import nested_parse_with_titles
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
MODULEDOC = '''
|
| 21 |
+
.. module:: %s
|
| 22 |
+
|
| 23 |
+
%s
|
| 24 |
+
%s
|
| 25 |
+
'''
|
| 26 |
+
|
| 27 |
+
LEXERDOC = '''
|
| 28 |
+
.. class:: %s
|
| 29 |
+
|
| 30 |
+
:Short names: %s
|
| 31 |
+
:Filenames: %s
|
| 32 |
+
:MIME types: %s
|
| 33 |
+
|
| 34 |
+
%s
|
| 35 |
+
|
| 36 |
+
%s
|
| 37 |
+
|
| 38 |
+
'''
|
| 39 |
+
|
| 40 |
+
FMTERDOC = '''
|
| 41 |
+
.. class:: %s
|
| 42 |
+
|
| 43 |
+
:Short names: %s
|
| 44 |
+
:Filenames: %s
|
| 45 |
+
|
| 46 |
+
%s
|
| 47 |
+
|
| 48 |
+
'''
|
| 49 |
+
|
| 50 |
+
FILTERDOC = '''
|
| 51 |
+
.. class:: %s
|
| 52 |
+
|
| 53 |
+
:Name: %s
|
| 54 |
+
|
| 55 |
+
%s
|
| 56 |
+
|
| 57 |
+
'''
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class PygmentsDoc(Directive):
|
| 61 |
+
"""
|
| 62 |
+
A directive to collect all lexers/formatters/filters and generate
|
| 63 |
+
autoclass directives for them.
|
| 64 |
+
"""
|
| 65 |
+
has_content = False
|
| 66 |
+
required_arguments = 1
|
| 67 |
+
optional_arguments = 0
|
| 68 |
+
final_argument_whitespace = False
|
| 69 |
+
option_spec = {}
|
| 70 |
+
|
| 71 |
+
def run(self):
    """Dispatch on the directive argument and parse the generated reST.

    The single argument selects which documentation table to build;
    files touched during generation are recorded as dependencies so the
    page is rebuilt when they change.
    """
    self.filenames = set()
    if self.arguments[0] == 'lexers':
        out = self.document_lexers()
    elif self.arguments[0] == 'formatters':
        out = self.document_formatters()
    elif self.arguments[0] == 'filters':
        out = self.document_filters()
    elif self.arguments[0] == 'lexers_overview':
        out = self.document_lexers_overview()
    else:
        raise Exception('invalid argument for "pygmentsdoc" directive')
    # parse the generated reST into docutils nodes
    node = nodes.compound()
    vl = ViewList(out.split('\n'), source='')
    nested_parse_with_titles(self.state, vl, node)
    for fn in self.filenames:
        self.state.document.settings.record_dependencies.add(fn)
    return node.children
|
| 90 |
+
def document_lexers_overview(self):
    """Generate a tabular overview of all lexers.

    The columns are the lexer name, the extensions handled by this lexer
    (or "None"), the aliases and a link to the lexer class."""
    from pip._vendor.pygments.lexers._mapping import LEXERS
    from pip._vendor.pygments.lexers import find_lexer_class
    out = []

    table = []

    def format_link(name, url):
        # reST external link when a URL is known, plain text otherwise
        if url:
            return f'`{name} <{url}>`_'
        return name

    # sort by human-readable lexer name, case-insensitively
    for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
        lexer_cls = find_lexer_class(data[1])
        extensions = lexer_cls.filenames + lexer_cls.alias_filenames

        table.append({
            'name': format_link(data[1], lexer_cls.url),
            'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\') or 'None',
            'aliases': ', '.join(data[2]),
            'class': f'{data[0]}.{classname}'
        })

    column_names = ['name', 'extensions', 'aliases', 'class']
    # widest cell per column determines the reST simple-table layout
    column_lengths = [max([len(row[column]) for row in table if row[column]])
                      for column in column_names]

    def write_row(*columns):
        """Format a table row"""
        out = []
        for length, col in zip(column_lengths, columns):
            if col:
                out.append(col.ljust(length))
            else:
                out.append(' '*length)

        return ' '.join(out)

    def write_seperator():
        """Write a table separator row"""
        sep = ['='*c for c in column_lengths]
        return write_row(*sep)

    out.append(write_seperator())
    out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
    out.append(write_seperator())
    for row in table:
        out.append(write_row(
            row['name'],
            row['extensions'],
            row['aliases'],
            f':class:`~{row["class"]}`'))
    out.append(write_seperator())

    return '\n'.join(out)
|
| 150 |
+
def document_lexers(self):
|
| 151 |
+
from pip._vendor.pygments.lexers._mapping import LEXERS
|
| 152 |
+
from pip._vendor import pygments
|
| 153 |
+
import inspect
|
| 154 |
+
import pathlib
|
| 155 |
+
|
| 156 |
+
out = []
|
| 157 |
+
modules = {}
|
| 158 |
+
moduledocstrings = {}
|
| 159 |
+
for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
|
| 160 |
+
module = data[0]
|
| 161 |
+
mod = __import__(module, None, None, [classname])
|
| 162 |
+
self.filenames.add(mod.__file__)
|
| 163 |
+
cls = getattr(mod, classname)
|
| 164 |
+
if not cls.__doc__:
|
| 165 |
+
print(f"Warning: {classname} does not have a docstring.")
|
| 166 |
+
docstring = cls.__doc__
|
| 167 |
+
if isinstance(docstring, bytes):
|
| 168 |
+
docstring = docstring.decode('utf8')
|
| 169 |
+
|
| 170 |
+
example_file = getattr(cls, '_example', None)
|
| 171 |
+
if example_file:
|
| 172 |
+
p = pathlib.Path(inspect.getabsfile(pygments)).parent.parent /\
|
| 173 |
+
'tests' / 'examplefiles' / example_file
|
| 174 |
+
content = p.read_text(encoding='utf-8')
|
| 175 |
+
if not content:
|
| 176 |
+
raise Exception(
|
| 177 |
+
f"Empty example file '{example_file}' for lexer "
|
| 178 |
+
f"{classname}")
|
| 179 |
+
|
| 180 |
+
if data[2]:
|
| 181 |
+
lexer_name = data[2][0]
|
| 182 |
+
docstring += '\n\n .. admonition:: Example\n'
|
| 183 |
+
docstring += f'\n .. code-block:: {lexer_name}\n\n'
|
| 184 |
+
for line in content.splitlines():
|
| 185 |
+
docstring += f' {line}\n'
|
| 186 |
+
|
| 187 |
+
if cls.version_added:
|
| 188 |
+
version_line = f'.. versionadded:: {cls.version_added}'
|
| 189 |
+
else:
|
| 190 |
+
version_line = ''
|
| 191 |
+
|
| 192 |
+
modules.setdefault(module, []).append((
|
| 193 |
+
classname,
|
| 194 |
+
', '.join(data[2]) or 'None',
|
| 195 |
+
', '.join(data[3]).replace('*', '\\*').replace('_', '\\') or 'None',
|
| 196 |
+
', '.join(data[4]) or 'None',
|
| 197 |
+
docstring,
|
| 198 |
+
version_line))
|
| 199 |
+
if module not in moduledocstrings:
|
| 200 |
+
moddoc = mod.__doc__
|
| 201 |
+
if isinstance(moddoc, bytes):
|
| 202 |
+
moddoc = moddoc.decode('utf8')
|
| 203 |
+
moduledocstrings[module] = moddoc
|
| 204 |
+
|
| 205 |
+
for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
|
| 206 |
+
if moduledocstrings[module] is None:
|
| 207 |
+
raise Exception(f"Missing docstring for {module}")
|
| 208 |
+
heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
|
| 209 |
+
out.append(MODULEDOC % (module, heading, '-'*len(heading)))
|
| 210 |
+
for data in lexers:
|
| 211 |
+
out.append(LEXERDOC % data)
|
| 212 |
+
|
| 213 |
+
return ''.join(out)
|
| 214 |
+
|
| 215 |
+
def document_formatters(self):
|
| 216 |
+
from pip._vendor.pygments.formatters import FORMATTERS
|
| 217 |
+
|
| 218 |
+
out = []
|
| 219 |
+
for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
|
| 220 |
+
module = data[0]
|
| 221 |
+
mod = __import__(module, None, None, [classname])
|
| 222 |
+
self.filenames.add(mod.__file__)
|
| 223 |
+
cls = getattr(mod, classname)
|
| 224 |
+
docstring = cls.__doc__
|
| 225 |
+
if isinstance(docstring, bytes):
|
| 226 |
+
docstring = docstring.decode('utf8')
|
| 227 |
+
heading = cls.__name__
|
| 228 |
+
out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
|
| 229 |
+
', '.join(data[3]).replace('*', '\\*') or 'None',
|
| 230 |
+
docstring))
|
| 231 |
+
return ''.join(out)
|
| 232 |
+
|
| 233 |
+
def document_filters(self):
|
| 234 |
+
from pip._vendor.pygments.filters import FILTERS
|
| 235 |
+
|
| 236 |
+
out = []
|
| 237 |
+
for name, cls in FILTERS.items():
|
| 238 |
+
self.filenames.add(sys.modules[cls.__module__].__file__)
|
| 239 |
+
docstring = cls.__doc__
|
| 240 |
+
if isinstance(docstring, bytes):
|
| 241 |
+
docstring = docstring.decode('utf8')
|
| 242 |
+
out.append(FILTERDOC % (cls.__name__, name, docstring))
|
| 243 |
+
return ''.join(out)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def setup(app):
    """Sphinx extension entry point: register the ``pygmentsdoc`` directive."""
    app.add_directive('pygmentsdoc', PygmentsDoc)
|
evalkit_llava/lib/python3.10/site-packages/pip/_vendor/pygments/token.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
pygments.token
|
| 3 |
+
~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Basic token types and the standard tokens.
|
| 6 |
+
|
| 7 |
+
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
|
| 8 |
+
:license: BSD, see LICENSE for details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class _TokenType(tuple):
    """A node in the token-type hierarchy (e.g. ``Token.Name.Class``).

    Instances are tuples of the dotted-path components.  Child types are
    created lazily on attribute access and cached on the parent, so each
    dotted path maps to exactly one singleton instance.
    """

    parent = None

    def split(self):
        """Return the chain of token types from the root down to ``self``."""
        chain = []
        current = self
        while current is not None:
            chain.append(current)
            current = current.parent
        return chain[::-1]

    def __init__(self, *args):
        # The tuple itself is fully constructed in __new__; here we only
        # attach the per-instance registry of lazily created children.
        self.subtypes = set()

    def __contains__(self, val):
        # ``child in parent`` means "child is parent or a subtype of it":
        # the parent's path must be a prefix of the child's path.
        if self is val:
            return True
        return type(val) is self.__class__ and val[:len(self)] == self

    def __getattr__(self, val):
        # Only capitalized names denote subtypes; everything else goes
        # through normal attribute lookup (and may raise AttributeError).
        if not val or not val[0].isupper():
            return tuple.__getattribute__(self, val)
        child = _TokenType(self + (val,))
        # Cache on the instance so the next access skips __getattr__.
        setattr(self, val, child)
        self.subtypes.add(child)
        child.parent = self
        return child

    def __repr__(self):
        return 'Token' + (self and '.' or '') + '.'.join(self)

    def __copy__(self):
        # These instances are supposed to be singletons
        return self

    def __deepcopy__(self, memo):
        # These instances are supposed to be singletons
        return self
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Root of the token hierarchy; every other token type descends from it.
Token = _TokenType()

# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other

# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment

# Generic types for non-source code
Generic = Token.Generic

# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def is_token_subtype(ttype, other):
    """
    Return True if ``ttype`` is a subtype of ``other``.

    exists for backwards compatibility. use ``ttype in other`` now.
    """
    # Membership on token types performs the subtype test
    # (see _TokenType.__contains__).
    return ttype in other
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def string_to_tokentype(s):
    """Convert a dotted string such as ``'String.Double'`` into a token type.

    An empty string maps to the root ``Token``.  Values that already are
    token types are returned unchanged, e.g.::

        >>> string_to_tokentype('Token.Literal.Number')
        Token.Literal.Number
        >>> string_to_tokentype('')
        Token
    """
    if isinstance(s, _TokenType):
        return s
    if not s:
        return Token
    current = Token
    for part in s.split('.'):
        # Attribute access lazily creates/caches the subtype.
        current = getattr(current, part)
    return current
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
    Token: '',

    # Special types
    Text: '',
    Whitespace: 'w',
    Escape: 'esc',
    Error: 'err',
    Other: 'x',

    # Keywords
    Keyword: 'k',
    Keyword.Constant: 'kc',
    Keyword.Declaration: 'kd',
    Keyword.Namespace: 'kn',
    Keyword.Pseudo: 'kp',
    Keyword.Reserved: 'kr',
    Keyword.Type: 'kt',

    # Names
    Name: 'n',
    Name.Attribute: 'na',
    Name.Builtin: 'nb',
    Name.Builtin.Pseudo: 'bp',
    Name.Class: 'nc',
    Name.Constant: 'no',
    Name.Decorator: 'nd',
    Name.Entity: 'ni',
    Name.Exception: 'ne',
    Name.Function: 'nf',
    Name.Function.Magic: 'fm',
    Name.Property: 'py',
    Name.Label: 'nl',
    Name.Namespace: 'nn',
    Name.Other: 'nx',
    Name.Tag: 'nt',
    Name.Variable: 'nv',
    Name.Variable.Class: 'vc',
    Name.Variable.Global: 'vg',
    Name.Variable.Instance: 'vi',
    Name.Variable.Magic: 'vm',

    # Literals
    Literal: 'l',
    Literal.Date: 'ld',

    # Strings
    String: 's',
    String.Affix: 'sa',
    String.Backtick: 'sb',
    String.Char: 'sc',
    String.Delimiter: 'dl',
    String.Doc: 'sd',
    String.Double: 's2',
    String.Escape: 'se',
    String.Heredoc: 'sh',
    String.Interpol: 'si',
    String.Other: 'sx',
    String.Regex: 'sr',
    String.Single: 's1',
    String.Symbol: 'ss',

    # Numbers
    Number: 'm',
    Number.Bin: 'mb',
    Number.Float: 'mf',
    Number.Hex: 'mh',
    Number.Integer: 'mi',
    Number.Integer.Long: 'il',
    Number.Oct: 'mo',

    # Operators
    Operator: 'o',
    Operator.Word: 'ow',

    # Punctuation
    Punctuation: 'p',
    Punctuation.Marker: 'pm',

    # Comments
    Comment: 'c',
    Comment.Hashbang: 'ch',
    Comment.Multiline: 'cm',
    Comment.Preproc: 'cp',
    Comment.PreprocFile: 'cpf',
    Comment.Single: 'c1',
    Comment.Special: 'cs',

    # Generic (non-source) types
    Generic: 'g',
    Generic.Deleted: 'gd',
    Generic.Emph: 'ge',
    Generic.Error: 'gr',
    Generic.Heading: 'gh',
    Generic.Inserted: 'gi',
    Generic.Output: 'go',
    Generic.Prompt: 'gp',
    Generic.Strong: 'gs',
    Generic.Subheading: 'gu',
    Generic.EmphStrong: 'ges',
    Generic.Traceback: 'gt',
}
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_core_metadata.cpython-310.pyc
ADDED
|
Binary file (9 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_itertools.cpython-310.pyc
ADDED
|
Binary file (862 Bytes). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_path.cpython-310.pyc
ADDED
|
Binary file (2.89 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_shutil.cpython-310.pyc
ADDED
|
Binary file (1.71 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/_static.cpython-310.pyc
ADDED
|
Binary file (5.15 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/build_meta.cpython-310.pyc
ADDED
|
Binary file (18.4 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/errors.cpython-310.pyc
ADDED
|
Binary file (2.84 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/launch.cpython-310.pyc
ADDED
|
Binary file (891 Bytes). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/logging.cpython-310.pyc
ADDED
|
Binary file (1.27 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/monkey.cpython-310.pyc
ADDED
|
Binary file (3.62 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/msvc.cpython-310.pyc
ADDED
|
Binary file (36.1 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/__pycache__/namespaces.cpython-310.pyc
ADDED
|
Binary file (3.72 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (410 Bytes). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/_requirestxt.cpython-310.pyc
ADDED
|
Binary file (4.66 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/bdist_egg.cpython-310.pyc
ADDED
|
Binary file (13.7 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/bdist_wheel.cpython-310.pyc
ADDED
|
Binary file (15.5 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/build.cpython-310.pyc
ADDED
|
Binary file (5.28 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/build_clib.cpython-310.pyc
ADDED
|
Binary file (2.5 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/build_ext.cpython-310.pyc
ADDED
|
Binary file (14 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/build_py.cpython-310.pyc
ADDED
|
Binary file (15 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/develop.cpython-310.pyc
ADDED
|
Binary file (6.1 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/editable_wheel.cpython-310.pyc
ADDED
|
Binary file (35.8 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/install.cpython-310.pyc
ADDED
|
Binary file (5.42 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/install_egg_info.cpython-310.pyc
ADDED
|
Binary file (2.37 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/sdist.cpython-310.pyc
ADDED
|
Binary file (7.98 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/__pycache__/setopt.cpython-310.pyc
ADDED
|
Binary file (4.75 kB). View file
|
|
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/_requirestxt.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Helper code used to generate ``requires.txt`` files in the egg-info directory.
|
| 2 |
+
|
| 3 |
+
The ``requires.txt`` file has an specific format:
|
| 4 |
+
- Environment markers need to be part of the section headers and
|
| 5 |
+
should not be part of the requirement spec itself.
|
| 6 |
+
|
| 7 |
+
See https://setuptools.pypa.io/en/latest/deprecated/python_eggs.html#requires-txt
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
import io
|
| 13 |
+
from collections import defaultdict
|
| 14 |
+
from collections.abc import Mapping
|
| 15 |
+
from itertools import filterfalse
|
| 16 |
+
from typing import TypeVar
|
| 17 |
+
|
| 18 |
+
from jaraco.text import yield_lines
|
| 19 |
+
from packaging.requirements import Requirement
|
| 20 |
+
|
| 21 |
+
from .. import _reqs
|
| 22 |
+
from .._reqs import _StrOrIter
|
| 23 |
+
|
| 24 |
+
# dict can work as an ordered set
|
| 25 |
+
_T = TypeVar("_T")
|
| 26 |
+
_Ordered = dict[_T, None]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _prepare(
    install_requires: _StrOrIter, extras_require: Mapping[str, _StrOrIter]
) -> tuple[list[str], dict[str, list[str]]]:
    """Given values for ``install_requires`` and ``extras_require``
    create modified versions in a way that can be written in ``requires.txt``
    """
    # First normalize the extras sections, then shuffle any marker-bearing
    # install requirements into them.
    return _move_install_requirements_markers(
        install_requires, _convert_extras_requirements(extras_require)
    )
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _convert_extras_requirements(
    extras_require: Mapping[str, _StrOrIter],
) -> defaultdict[str, _Ordered[Requirement]]:
    """
    Convert requirements in `extras_require` of the form
    `"extra": ["barbazquux; {marker}"]` to
    `"extra:{marker}": ["barbazquux"]`.
    """
    converted = defaultdict[str, _Ordered[Requirement]](dict)
    for extra, reqs in extras_require.items():
        converted[extra]  # materialize the section even when it is empty
        for requirement in _reqs.parse(reqs):
            # The marker (if any) moves into the section name; the dict
            # acts as an ordered set, dropping duplicates.
            converted[extra + _suffix_for(requirement)].setdefault(requirement)

    return converted
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _move_install_requirements_markers(
    install_requires: _StrOrIter, extras_require: Mapping[str, _Ordered[Requirement]]
) -> tuple[list[str], dict[str, list[str]]]:
    """
    The ``requires.txt`` file has an specific format:
        - Environment markers need to be part of the section headers and
          should not be part of the requirement spec itself.

    Move requirements in ``install_requires`` that are using environment
    markers ``extras_require``.

    Note: marker-bearing requirements are added to *extras_require* in
    place (under a ``:{marker}`` section).
    """
    parsed = list(_reqs.parse(install_requires))

    # Simple requirements stay in install_requires ...
    plain_install_requires = [str(req) for req in parsed if _no_marker(req)]

    # ... while marker-bearing ones become ":{marker}" extras sections.
    for req in parsed:
        if not _no_marker(req):
            extras_require[':' + str(req.marker)].setdefault(req)

    expanded_extras = {
        # dict.fromkeys keeps order while dropping duplicate strings.
        section: list(dict.fromkeys(str(_clean_req(req)) for req in reqs))
        for section, reqs in extras_require.items()
    }

    return plain_install_requires, expanded_extras
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def _suffix_for(req):
    """Return the 'extras_require' suffix for a given requirement."""
    if req.marker:
        return f':{req.marker}'
    return ''
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _clean_req(req):
    """Given a Requirement, remove environment markers and return it"""
    # Round-trip through the string form to copy, so the caller's
    # Requirement object is left untouched.
    clone = Requirement(str(req))
    clone.marker = None
    return clone
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _no_marker(req):
    """Return True when *req* carries no environment marker."""
    return not bool(req.marker)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _write_requirements(stream, reqs):
    """Write each requirement line from *reqs* to *stream*, one per line."""
    for line in yield_lines(reqs or ()):
        stream.write(line + '\n')
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def write_requirements(cmd, basename, filename):
    """Write the egg-info ``requires.txt`` for *cmd*'s distribution.

    Plain requirements come first, followed by one ``[extra]`` (or
    ``[extra:marker]``) section per key of ``extras_require``.
    """
    dist = cmd.distribution
    buffer = io.StringIO()
    install_requires, extras_require = _prepare(
        dist.install_requires or (), dist.extras_require or {}
    )
    _write_requirements(buffer, install_requires)
    for extra in sorted(extras_require):
        buffer.write(f'\n[{extra}]\n')
        _write_requirements(buffer, extras_require[extra])
    cmd.write_or_delete_file("requirements", filename, buffer.getvalue())
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def write_setup_requirements(cmd, basename, filename):
    """Write the egg-info ``setup-requirements`` file for *cmd*'s distribution."""
    buffer = io.StringIO()
    _write_requirements(buffer, cmd.distribution.setup_requires)
    cmd.write_or_delete_file("setup-requirements", filename, buffer.getvalue())
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/alias.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from setuptools.command.setopt import config_file, edit_config, option_base
|
| 2 |
+
|
| 3 |
+
from distutils.errors import DistutilsOptionError
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def shquote(arg):
    """Quote an argument for later parsing by shlex.split()"""
    # Quote when the argument contains shell-significant characters or
    # any whitespace (split() != [arg] also catches the empty string).
    needs_quoting = any(ch in arg for ch in ('"', "'", "\\", "#"))
    if needs_quoting or arg.split() != [arg]:
        return repr(arg)
    return arg
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class alias(option_base):
    """Define a shortcut that invokes one or more commands"""

    description = "define a shortcut to invoke one or more commands"
    # The alias name and its expansion are given as free-form positional
    # arguments after the command, not as options.
    command_consumes_arguments = True

    user_options = [
        ('remove', 'r', 'remove (unset) the alias'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        self.args = None
        self.remove = None

    def finalize_options(self) -> None:
        option_base.finalize_options(self)
        # --remove takes exactly the alias name, nothing else.
        if self.remove and len(self.args) != 1:
            raise DistutilsOptionError(
                "Must specify exactly one argument (the alias name) when using --remove"
            )

    def run(self) -> None:
        aliases = self.distribution.get_option_dict('aliases')

        if not self.args:
            # No arguments: list every configured alias and return.
            print("Command Aliases")
            print("---------------")
            for alias in aliases:
                print("setup.py alias", format_alias(alias, aliases))
            return

        elif len(self.args) == 1:
            # One argument: remove the alias (with --remove) or show its
            # current definition; only removal falls through to edit_config.
            (alias,) = self.args
            if self.remove:
                command = None  # None tells edit_config to delete the entry
            elif alias in aliases:
                print("setup.py alias", format_alias(alias, aliases))
                return
            else:
                print(f"No alias definition found for {alias!r}")
                return
        else:
            # Two or more arguments: define/overwrite alias -> command,
            # re-quoting each word so shlex.split() can parse it later.
            alias = self.args[0]
            command = ' '.join(map(shquote, self.args[1:]))

        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def format_alias(name, aliases):
    """Render one alias definition as a ``setup.py alias`` argument string."""
    source, command = aliases[name]
    # Translate the config file the alias came from back into the
    # command-line flag that would select that file.
    if source == config_file('global'):
        prefix = '--global-config '
    elif source == config_file('user'):
        prefix = '--user-config '
    elif source == config_file('local'):
        prefix = ''
    else:
        prefix = f'--filename={source!r}'
    return f'{prefix}{name} {command}'
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/bdist_egg.py
ADDED
|
@@ -0,0 +1,479 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""setuptools.command.bdist_egg
|
| 2 |
+
|
| 3 |
+
Build .egg distributions"""
|
| 4 |
+
|
| 5 |
+
from __future__ import annotations
|
| 6 |
+
|
| 7 |
+
import marshal
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
import sys
|
| 11 |
+
import textwrap
|
| 12 |
+
from sysconfig import get_path, get_python_version
|
| 13 |
+
from types import CodeType
|
| 14 |
+
from typing import TYPE_CHECKING, Literal
|
| 15 |
+
|
| 16 |
+
from setuptools import Command
|
| 17 |
+
from setuptools.extension import Library
|
| 18 |
+
|
| 19 |
+
from .._path import StrPathT, ensure_directory
|
| 20 |
+
|
| 21 |
+
from distutils import log
|
| 22 |
+
from distutils.dir_util import mkpath, remove_tree
|
| 23 |
+
|
| 24 |
+
if TYPE_CHECKING:
|
| 25 |
+
from typing_extensions import TypeAlias
|
| 26 |
+
|
| 27 |
+
# Same as zipfile._ZipFileMode from typeshed
|
| 28 |
+
_ZipFileMode: TypeAlias = Literal["r", "w", "x", "a"]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _get_purelib():
    """Return the configured ``purelib`` (site-packages) directory."""
    return get_path("purelib")
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def strip_module(filename):
    """Strip the extension and any trailing ``module`` suffix from *filename*."""
    if '.' in filename:
        filename = os.path.splitext(filename)[0]
    # 'foomodule' -> 'foo' (old-style extension-module naming)
    return filename[:-6] if filename.endswith('module') else filename
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def sorted_walk(dir):
    """Do os.walk in a reproducible way,
    independent of indeterministic filesystem readdir order
    """
    for base, dirs, files in os.walk(dir):
        # Sorting dirs in place also fixes the order in which os.walk
        # descends into subdirectories.
        dirs.sort()
        files.sort()
        yield base, dirs, files
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def write_stub(resource, pyfile) -> None:
    """Write a stub loader module *pyfile*.

    The stub, when imported, resolves *resource* (a shared-library file
    inside the egg) via pkg_resources and executes it in place of itself.
    """
    stub_source = textwrap.dedent(
        """
        def __bootstrap__():
            global __bootstrap__, __loader__, __file__
            import sys, pkg_resources, importlib.util
            __file__ = pkg_resources.resource_filename(__name__, %r)
            __loader__ = None; del __bootstrap__, __loader__
            spec = importlib.util.spec_from_file_location(__name__,__file__)
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
        __bootstrap__()
        """
    ).lstrip() % resource
    with open(pyfile, 'w', encoding="utf-8") as stub_file:
        stub_file.write(stub_source)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class bdist_egg(Command):
|
| 72 |
+
description = 'create an "egg" distribution'
|
| 73 |
+
|
| 74 |
+
user_options = [
|
| 75 |
+
('bdist-dir=', 'b', "temporary directory for creating the distribution"),
|
| 76 |
+
(
|
| 77 |
+
'plat-name=',
|
| 78 |
+
'p',
|
| 79 |
+
"platform name to embed in generated filenames "
|
| 80 |
+
"(by default uses `pkg_resources.get_build_platform()`)",
|
| 81 |
+
),
|
| 82 |
+
('exclude-source-files', None, "remove all .py files from the generated egg"),
|
| 83 |
+
(
|
| 84 |
+
'keep-temp',
|
| 85 |
+
'k',
|
| 86 |
+
"keep the pseudo-installation tree around after "
|
| 87 |
+
"creating the distribution archive",
|
| 88 |
+
),
|
| 89 |
+
('dist-dir=', 'd', "directory to put final built distributions in"),
|
| 90 |
+
('skip-build', None, "skip rebuilding everything (for testing/debugging)"),
|
| 91 |
+
]
|
| 92 |
+
|
| 93 |
+
boolean_options = ['keep-temp', 'skip-build', 'exclude-source-files']
|
| 94 |
+
|
| 95 |
+
    def initialize_options(self):
        """Set every option to its pre-finalization default."""
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = False
        self.dist_dir = None
        self.skip_build = False
        self.egg_output = None
        self.exclude_source_files = None
|
| 103 |
+
|
| 104 |
+
    def finalize_options(self) -> None:
        """Resolve all options, deriving defaults from egg_info/bdist."""
        # egg_info must be finalized first: both the metadata directory and
        # the egg filename are derived from it.
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info

        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')

        if self.plat_name is None:
            from pkg_resources import get_build_platform

            self.plat_name = get_build_platform()

        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))

        if self.egg_output is None:
            # Compute filename of the output egg.  A platform tag is only
            # embedded when the distribution ships compiled extensions.
            basename = ei_cmd._get_egg_basename(
                py_version=get_python_version(),
                platform=self.distribution.has_ext_modules() and self.plat_name,
            )

            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
|
| 127 |
+
|
| 128 |
+
def do_install_data(self) -> None:
|
| 129 |
+
# Hack for packages that install data to install's --install-lib
|
| 130 |
+
self.get_finalized_command('install').install_lib = self.bdist_dir
|
| 131 |
+
|
| 132 |
+
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
|
| 133 |
+
old, self.distribution.data_files = self.distribution.data_files, []
|
| 134 |
+
|
| 135 |
+
for item in old:
|
| 136 |
+
if isinstance(item, tuple) and len(item) == 2:
|
| 137 |
+
if os.path.isabs(item[0]):
|
| 138 |
+
realpath = os.path.realpath(item[0])
|
| 139 |
+
normalized = os.path.normcase(realpath)
|
| 140 |
+
if normalized == site_packages or normalized.startswith(
|
| 141 |
+
site_packages + os.sep
|
| 142 |
+
):
|
| 143 |
+
item = realpath[len(site_packages) + 1 :], item[1]
|
| 144 |
+
# XXX else: raise ???
|
| 145 |
+
self.distribution.data_files.append(item)
|
| 146 |
+
|
| 147 |
+
try:
|
| 148 |
+
log.info("installing package data to %s", self.bdist_dir)
|
| 149 |
+
self.call_command('install_data', force=False, root=None)
|
| 150 |
+
finally:
|
| 151 |
+
self.distribution.data_files = old
|
| 152 |
+
|
| 153 |
+
def get_outputs(self):
|
| 154 |
+
return [self.egg_output]
|
| 155 |
+
|
| 156 |
+
def call_command(self, cmdname, **kw):
|
| 157 |
+
"""Invoke reinitialized command `cmdname` with keyword args"""
|
| 158 |
+
for dirname in INSTALL_DIRECTORY_ATTRS:
|
| 159 |
+
kw.setdefault(dirname, self.bdist_dir)
|
| 160 |
+
kw.setdefault('skip_build', self.skip_build)
|
| 161 |
+
kw.setdefault('dry_run', self.dry_run)
|
| 162 |
+
cmd = self.reinitialize_command(cmdname, **kw)
|
| 163 |
+
self.run_command(cmdname)
|
| 164 |
+
return cmd
|
| 165 |
+
|
| 166 |
+
def run(self): # noqa: C901 # is too complex (14) # FIXME
|
| 167 |
+
# Generate metadata first
|
| 168 |
+
self.run_command("egg_info")
|
| 169 |
+
# We run install_lib before install_data, because some data hacks
|
| 170 |
+
# pull their data path from the install_lib command.
|
| 171 |
+
log.info("installing library code to %s", self.bdist_dir)
|
| 172 |
+
instcmd = self.get_finalized_command('install')
|
| 173 |
+
old_root = instcmd.root
|
| 174 |
+
instcmd.root = None
|
| 175 |
+
if self.distribution.has_c_libraries() and not self.skip_build:
|
| 176 |
+
self.run_command('build_clib')
|
| 177 |
+
cmd = self.call_command('install_lib', warn_dir=False)
|
| 178 |
+
instcmd.root = old_root
|
| 179 |
+
|
| 180 |
+
all_outputs, ext_outputs = self.get_ext_outputs()
|
| 181 |
+
self.stubs = []
|
| 182 |
+
to_compile = []
|
| 183 |
+
for p, ext_name in enumerate(ext_outputs):
|
| 184 |
+
filename, _ext = os.path.splitext(ext_name)
|
| 185 |
+
pyfile = os.path.join(self.bdist_dir, strip_module(filename) + '.py')
|
| 186 |
+
self.stubs.append(pyfile)
|
| 187 |
+
log.info("creating stub loader for %s", ext_name)
|
| 188 |
+
if not self.dry_run:
|
| 189 |
+
write_stub(os.path.basename(ext_name), pyfile)
|
| 190 |
+
to_compile.append(pyfile)
|
| 191 |
+
ext_outputs[p] = ext_name.replace(os.sep, '/')
|
| 192 |
+
|
| 193 |
+
if to_compile:
|
| 194 |
+
cmd.byte_compile(to_compile)
|
| 195 |
+
if self.distribution.data_files:
|
| 196 |
+
self.do_install_data()
|
| 197 |
+
|
| 198 |
+
# Make the EGG-INFO directory
|
| 199 |
+
archive_root = self.bdist_dir
|
| 200 |
+
egg_info = os.path.join(archive_root, 'EGG-INFO')
|
| 201 |
+
self.mkpath(egg_info)
|
| 202 |
+
if self.distribution.scripts:
|
| 203 |
+
script_dir = os.path.join(egg_info, 'scripts')
|
| 204 |
+
log.info("installing scripts to %s", script_dir)
|
| 205 |
+
self.call_command('install_scripts', install_dir=script_dir, no_ep=True)
|
| 206 |
+
|
| 207 |
+
self.copy_metadata_to(egg_info)
|
| 208 |
+
native_libs = os.path.join(egg_info, "native_libs.txt")
|
| 209 |
+
if all_outputs:
|
| 210 |
+
log.info("writing %s", native_libs)
|
| 211 |
+
if not self.dry_run:
|
| 212 |
+
ensure_directory(native_libs)
|
| 213 |
+
with open(native_libs, 'wt', encoding="utf-8") as libs_file:
|
| 214 |
+
libs_file.write('\n'.join(all_outputs))
|
| 215 |
+
libs_file.write('\n')
|
| 216 |
+
elif os.path.isfile(native_libs):
|
| 217 |
+
log.info("removing %s", native_libs)
|
| 218 |
+
if not self.dry_run:
|
| 219 |
+
os.unlink(native_libs)
|
| 220 |
+
|
| 221 |
+
write_safety_flag(os.path.join(archive_root, 'EGG-INFO'), self.zip_safe())
|
| 222 |
+
|
| 223 |
+
if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
|
| 224 |
+
log.warn(
|
| 225 |
+
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
|
| 226 |
+
"Use the install_requires/extras_require setup() args instead."
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
if self.exclude_source_files:
|
| 230 |
+
self.zap_pyfiles()
|
| 231 |
+
|
| 232 |
+
# Make the archive
|
| 233 |
+
make_zipfile(
|
| 234 |
+
self.egg_output,
|
| 235 |
+
archive_root,
|
| 236 |
+
verbose=self.verbose,
|
| 237 |
+
dry_run=self.dry_run,
|
| 238 |
+
mode=self.gen_header(),
|
| 239 |
+
)
|
| 240 |
+
if not self.keep_temp:
|
| 241 |
+
remove_tree(self.bdist_dir, dry_run=self.dry_run)
|
| 242 |
+
|
| 243 |
+
# Add to 'Distribution.dist_files' so that the "upload" command works
|
| 244 |
+
getattr(self.distribution, 'dist_files', []).append((
|
| 245 |
+
'bdist_egg',
|
| 246 |
+
get_python_version(),
|
| 247 |
+
self.egg_output,
|
| 248 |
+
))
|
| 249 |
+
|
| 250 |
+
def zap_pyfiles(self):
|
| 251 |
+
log.info("Removing .py files from temporary directory")
|
| 252 |
+
for base, dirs, files in walk_egg(self.bdist_dir):
|
| 253 |
+
for name in files:
|
| 254 |
+
path = os.path.join(base, name)
|
| 255 |
+
|
| 256 |
+
if name.endswith('.py'):
|
| 257 |
+
log.debug("Deleting %s", path)
|
| 258 |
+
os.unlink(path)
|
| 259 |
+
|
| 260 |
+
if base.endswith('__pycache__'):
|
| 261 |
+
path_old = path
|
| 262 |
+
|
| 263 |
+
pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
|
| 264 |
+
m = re.match(pattern, name)
|
| 265 |
+
path_new = os.path.join(base, os.pardir, m.group('name') + '.pyc')
|
| 266 |
+
log.info(f"Renaming file from [{path_old}] to [{path_new}]")
|
| 267 |
+
try:
|
| 268 |
+
os.remove(path_new)
|
| 269 |
+
except OSError:
|
| 270 |
+
pass
|
| 271 |
+
os.rename(path_old, path_new)
|
| 272 |
+
|
| 273 |
+
def zip_safe(self):
|
| 274 |
+
safe = getattr(self.distribution, 'zip_safe', None)
|
| 275 |
+
if safe is not None:
|
| 276 |
+
return safe
|
| 277 |
+
log.warn("zip_safe flag not set; analyzing archive contents...")
|
| 278 |
+
return analyze_egg(self.bdist_dir, self.stubs)
|
| 279 |
+
|
| 280 |
+
def gen_header(self) -> Literal["w"]:
|
| 281 |
+
return 'w'
|
| 282 |
+
|
| 283 |
+
def copy_metadata_to(self, target_dir) -> None:
|
| 284 |
+
"Copy metadata (egg info) to the target_dir"
|
| 285 |
+
# normalize the path (so that a forward-slash in egg_info will
|
| 286 |
+
# match using startswith below)
|
| 287 |
+
norm_egg_info = os.path.normpath(self.egg_info)
|
| 288 |
+
prefix = os.path.join(norm_egg_info, '')
|
| 289 |
+
for path in self.ei_cmd.filelist.files:
|
| 290 |
+
if path.startswith(prefix):
|
| 291 |
+
target = os.path.join(target_dir, path[len(prefix) :])
|
| 292 |
+
ensure_directory(target)
|
| 293 |
+
self.copy_file(path, target)
|
| 294 |
+
|
| 295 |
+
def get_ext_outputs(self):
|
| 296 |
+
"""Get a list of relative paths to C extensions in the output distro"""
|
| 297 |
+
|
| 298 |
+
all_outputs = []
|
| 299 |
+
ext_outputs = []
|
| 300 |
+
|
| 301 |
+
paths = {self.bdist_dir: ''}
|
| 302 |
+
for base, dirs, files in sorted_walk(self.bdist_dir):
|
| 303 |
+
all_outputs.extend(
|
| 304 |
+
paths[base] + filename
|
| 305 |
+
for filename in files
|
| 306 |
+
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS
|
| 307 |
+
)
|
| 308 |
+
for filename in dirs:
|
| 309 |
+
paths[os.path.join(base, filename)] = paths[base] + filename + '/'
|
| 310 |
+
|
| 311 |
+
if self.distribution.has_ext_modules():
|
| 312 |
+
build_cmd = self.get_finalized_command('build_ext')
|
| 313 |
+
for ext in build_cmd.extensions:
|
| 314 |
+
if isinstance(ext, Library):
|
| 315 |
+
continue
|
| 316 |
+
fullname = build_cmd.get_ext_fullname(ext.name)
|
| 317 |
+
filename = build_cmd.get_ext_filename(fullname)
|
| 318 |
+
if not os.path.basename(filename).startswith('dl-'):
|
| 319 |
+
if os.path.exists(os.path.join(self.bdist_dir, filename)):
|
| 320 |
+
ext_outputs.append(filename)
|
| 321 |
+
|
| 322 |
+
return all_outputs, ext_outputs
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
# File extensions (lowercased for comparison) of native shared libraries.
NATIVE_EXTENSIONS: dict[str, None] = dict.fromkeys(['.dll', '.so', '.dylib', '.pyd'])
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def walk_egg(egg_dir):
    """Yield os.walk-style triples for *egg_dir*, omitting the EGG-INFO dir."""
    walker = sorted_walk(egg_dir)
    top, subdirs, filenames = next(walker)
    # Pruning the dirs list in place stops the walker from descending into it.
    if 'EGG-INFO' in subdirs:
        subdirs.remove('EGG-INFO')
    yield top, subdirs, filenames
    yield from walker
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def analyze_egg(egg_dir, stubs):
    """Scan an unpacked egg tree and report whether it appears zip-safe."""
    # An explicit flag file in EGG-INFO overrides any analysis.
    for flag, marker in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', marker)):
            return flag
    if not can_scan():
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith(('.py', '.pyw')):
                continue
            if name.endswith(('.pyc', '.pyo')):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def write_safety_flag(egg_dir, safe) -> None:
    """Create or delete the zip-safety marker files under *egg_dir*."""
    for flag, basename in safety_flags.items():
        marker = os.path.join(egg_dir, basename)
        # Does this marker correspond to the requested safety value?
        matches = safe is not None and bool(safe) == flag
        if os.path.exists(marker):
            if not matches:
                os.unlink(marker)
        elif matches:
            with open(marker, 'wt', encoding="utf-8") as f:
                f.write('\n')
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
# Marker filenames written into EGG-INFO, keyed by the zip-safety verdict.
safety_flags = {True: 'zip-safe', False: 'not-zip-safe'}
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Loads the compiled bytecode at ``base/name`` and flags references to
    ``__file__``/``__path__`` or filesystem-dependent ``inspect`` helpers.
    Returns True when no such reference is found.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir) + 1 :].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    skip = 16  # skip magic & reserved? & date & file size
    # Context manager closes the handle even if marshal.load raises
    # (the previous open/read/close sequence leaked it on error).
    with open(filename, 'rb') as f:
        f.read(skip)
        code = marshal.load(f)
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        for bad in [
            'getsource',
            'getabsfile',
            'getfile',
            'getsourcefile',
            'getsourcelines',
            'findsource',
            'getcomments',
            'getframeinfo',
            'getinnerframes',
            'getouterframes',
            'stack',
            'trace',
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    return safe
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects"""
    for name in code.co_names:
        yield name
    for const in code.co_consts:
        if isinstance(const, str):
            yield const
        elif isinstance(const, CodeType):
            # Recurse into nested functions/classes.
            yield from iter_symbols(const)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def can_scan() -> bool:
    """Report whether compiled bytecode can be analyzed on this platform."""
    unsupported = sys.platform.startswith('java') or sys.platform == 'cli'
    if not unsupported:
        # CPython, PyPy, etc.
        return True
    log.warn("Unable to analyze compiled code on this platform.")
    log.warn(
        "Please ask the author to include a 'zip_safe'"
        " setting (either True or False) in the package's setup.py"
    )
    return False
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
    'install_lib',
    'install_dir',
    'install_data',
    'install_base',
]
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
def make_zipfile(
    zip_filename: StrPathT,
    base_dir,
    verbose: bool = False,
    dry_run: bool = False,
    compress=True,
    mode: _ZipFileMode = 'w',
) -> StrPathT:
    """Create a zip file at *zip_filename* from all files under *base_dir*.

    Archive member names are relative to *base_dir*.  When *dry_run* is true
    the tree is still walked (and additions logged) but nothing is written.
    Returns *zip_filename*.
    """
    import zipfile

    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)  # type: ignore[arg-type] # python/mypy#18075
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    def visit(z, dirname, names):
        # Add every regular file in `dirname` to archive `z` (None on dry run).
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if os.path.isfile(path):
                p = path[len(base_dir) + 1 :]
                if not dry_run:
                    z.write(path, p)
                log.debug("adding '%s'", p)

    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    if not dry_run:
        # Context manager guarantees the archive is closed (and its central
        # directory flushed) even if a write fails; the previous explicit
        # close() leaked the handle on error.
        with zipfile.ZipFile(zip_filename, mode, compression=compression) as z:
            for dirname, dirs, files in sorted_walk(base_dir):
                visit(z, dirname, files)
    else:
        for dirname, dirs, files in sorted_walk(base_dir):
            visit(None, dirname, files)
    return zip_filename
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/bdist_wheel.py
ADDED
|
@@ -0,0 +1,610 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Create a wheel (.whl) distribution.
|
| 3 |
+
|
| 4 |
+
A wheel is a built archive format.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import os
|
| 10 |
+
import re
|
| 11 |
+
import shutil
|
| 12 |
+
import struct
|
| 13 |
+
import sys
|
| 14 |
+
import sysconfig
|
| 15 |
+
import warnings
|
| 16 |
+
from collections.abc import Iterable, Sequence
|
| 17 |
+
from email.generator import BytesGenerator
|
| 18 |
+
from glob import iglob
|
| 19 |
+
from typing import Literal, cast
|
| 20 |
+
from zipfile import ZIP_DEFLATED, ZIP_STORED
|
| 21 |
+
|
| 22 |
+
from packaging import tags, version as _packaging_version
|
| 23 |
+
from wheel.wheelfile import WheelFile
|
| 24 |
+
|
| 25 |
+
from .. import Command, __version__, _shutil
|
| 26 |
+
from ..warnings import SetuptoolsDeprecationWarning
|
| 27 |
+
from .egg_info import egg_info as egg_info_cls
|
| 28 |
+
|
| 29 |
+
from distutils import log
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def safe_name(name: str) -> str:
    """Normalize *name* to a standard distribution name.

    Every run of characters outside ``[A-Za-z0-9.]`` collapses to one ``-``.
    """
    return re.sub(r"[^A-Za-z0-9.]+", "-", name)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def safe_version(version: str) -> str:
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # normalize the version
        return str(_packaging_version.Version(version))
    except _packaging_version.InvalidVersion:
        # Fall back to a lossy sanitization for non-PEP-440 strings.
        return re.sub(r"[^A-Za-z0-9.]+", "-", version.replace(" ", "."))
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# Major component of the running setuptools version.
setuptools_major_version = int(__version__.partition(".")[0])

# Acceptable values for --py-limited-api (CPython 3.x stable-ABI tags).
PY_LIMITED_API_PATTERN = r"cp3\d"
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _is_32bit_interpreter() -> bool:
|
| 57 |
+
return struct.calcsize("P") == 4
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def python_tag() -> str:
    """Return the generic interpreter tag, e.g. ``py3``."""
    return "py%d" % sys.version_info.major
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def get_platform(archive_root: str | None) -> str:
    """Return our platform name 'win32', 'linux_x86_64'"""
    platform = sysconfig.get_platform()
    if platform.startswith("macosx") and archive_root is not None:  # pragma: no cover
        from wheel.macosx_libfile import calculate_macosx_platform_tag

        platform = calculate_macosx_platform_tag(archive_root, platform)
    elif _is_32bit_interpreter():
        # Downgrade 64-bit tags when a 32-bit interpreter is running
        # (pip PR #3497; packaging PR #234 — armv8l from packaging PR #690
        # never landed in pip/packaging).
        downgrades = {
            "linux-x86_64": "linux-i686",
            "linux-aarch64": "linux-armv7l",
        }
        platform = downgrades.get(platform, platform)

    return platform.replace("-", "_")
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def get_flag(
    var: str, fallback: bool, expected: bool = True, warn: bool = True
) -> bool:
    """Use a fallback value for determining SOABI flags if the needed config
    var is unset or unavailable."""
    val = sysconfig.get_config_var(var)
    if val is not None:
        return val == expected
    if warn:
        warnings.warn(
            f"Config variable '{var}' is unset, Python ABI tag may be incorrect",
            RuntimeWarning,
            stacklevel=2,
        )
    return fallback
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def get_abi_tag() -> str | None:
    """Return the ABI tag based on SOABI (if available) or emulate SOABI (PyPy2)."""
    soabi: str = sysconfig.get_config_var("SOABI")
    impl = tags.interpreter_name()

    if not soabi:
        if impl in ("cp", "pp") and hasattr(sys, "maxunicode"):
            # Emulate the SOABI scheme for interpreters that don't expose it.
            d = ""
            u = ""
            if get_flag(
                "Py_DEBUG", hasattr(sys, "gettotalrefcount"), warn=(impl == "cp")
            ):
                d = "d"
            return f"{impl}{tags.interpreter_version()}{d}{u}"
        return None

    if impl == "cp" and soabi.startswith("cpython"):
        # non-Windows
        return "cp" + soabi.split("-")[1]
    if impl == "cp" and soabi.startswith("cp"):
        # Windows
        abi = soabi.split("-")[0]
        if hasattr(sys, "gettotalrefcount"):
            # using debug build; append "d" flag
            abi += "d"
        return abi

    if impl == "pp":
        # we want something like pypy36-pp73
        abi = "-".join(soabi.split("-")[:2])
    elif impl == "graalpy":
        abi = "-".join(soabi.split("-")[:3])
    else:
        abi = soabi
    return abi.replace(".", "_").replace("-", "_")
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def safer_name(name: str) -> str:
    """Like :func:`safe_name`, with ``-`` further mapped to ``_``."""
    sanitized = safe_name(name)
    return sanitized.replace("-", "_")
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def safer_version(version: str) -> str:
    """Like :func:`safe_version`, with ``-`` further mapped to ``_``."""
    sanitized = safe_version(version)
    return sanitized.replace("-", "_")
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class bdist_wheel(Command):
    """setuptools command that builds a wheel (.whl) binary distribution."""

    description = "create a wheel distribution"

    # Mapping of --compression option values to zipfile compression constants.
    supported_compressions = {
        "stored": ZIP_STORED,
        "deflated": ZIP_DEFLATED,
    }

    # (long name, short name, help) triples for the distutils option parser.
    user_options = [
        ("bdist-dir=", "b", "temporary directory for creating the distribution"),
        (
            "plat-name=",
            "p",
            "platform name to embed in generated filenames "
            f"[default: {get_platform(None)}]",
        ),
        (
            "keep-temp",
            "k",
            "keep the pseudo-installation tree around after "
            "creating the distribution archive",
        ),
        ("dist-dir=", "d", "directory to put final built distributions in"),
        ("skip-build", None, "skip rebuilding everything (for testing/debugging)"),
        (
            "relative",
            None,
            "build the archive using relative paths [default: false]",
        ),
        (
            "owner=",
            "u",
            "Owner name used when creating a tar file [default: current user]",
        ),
        (
            "group=",
            "g",
            "Group name used when creating a tar file [default: current group]",
        ),
        ("universal", None, "*DEPRECATED* make a universal wheel [default: false]"),
        (
            "compression=",
            None,
            f"zipfile compression (one of: {', '.join(supported_compressions)}) [default: 'deflated']",
        ),
        (
            "python-tag=",
            None,
            f"Python implementation compatibility tag [default: '{python_tag()}']",
        ),
        (
            "build-number=",
            None,
            "Build number for this particular version. "
            "As specified in PEP-0427, this must start with a digit. "
            "[default: None]",
        ),
        (
            "py-limited-api=",
            None,
            "Python tag (cp32|cp33|cpNN) for abi3 wheel tag [default: false]",
        ),
        (
            "dist-info-dir=",
            None,
            "directory where a pre-generated dist-info can be found (e.g. as a "
            "result of calling the PEP517 'prepare_metadata_for_build_wheel' "
            "method)",
        ),
    ]

    # Options that act as flags (take no argument).
    boolean_options = ["keep-temp", "skip-build", "relative", "universal"]
|
| 216 |
+
|
| 217 |
+
    def initialize_options(self) -> None:
        """Set every supported option to its pre-parse default."""
        self.bdist_dir: str | None = None
        self.data_dir = ""
        self.plat_name: str | None = None
        self.plat_tag: str | None = None
        self.format = "zip"
        self.keep_temp = False
        self.dist_dir: str | None = None
        self.dist_info_dir = None
        self.egginfo_dir: str | None = None
        self.root_is_pure: bool | None = None
        self.skip_build = False
        self.relative = False
        self.owner = None
        self.group = None
        self.universal = False
        self.compression: str | int = "deflated"
        self.python_tag = python_tag()
        self.build_number: str | None = None
        self.py_limited_api: str | Literal[False] = False
        # Tracks whether --plat-name was given explicitly (set in
        # finalize_options); pure wheels only honor an explicit value.
        self.plat_name_supplied = False
|
| 239 |
+
    def finalize_options(self) -> None:
        """Resolve defaults, inherit bdist options, and validate settings."""
        if not self.bdist_dir:
            bdist_base = self.get_finalized_command("bdist").bdist_base
            self.bdist_dir = os.path.join(bdist_base, "wheel")

        if self.dist_info_dir is None:
            egg_info = cast(egg_info_cls, self.distribution.get_command_obj("egg_info"))
            egg_info.ensure_finalized()  # needed for correct `wheel_dist_name`

        self.data_dir = self.wheel_dist_name + ".data"
        self.plat_name_supplied = bool(self.plat_name)

        # Inherit these from the bdist command when not set explicitly.
        need_options = ("dist_dir", "plat_name", "skip_build")

        self.set_undefined_options("bdist", *zip(need_options, need_options))

        # A wheel is "pure" when it ships no compiled code at all.
        self.root_is_pure = not (
            self.distribution.has_ext_modules() or self.distribution.has_c_libraries()
        )

        self._validate_py_limited_api()

        # Support legacy [wheel] section for setting universal
        wheel = self.distribution.get_option_dict("wheel")
        if "universal" in wheel:  # pragma: no cover
            # please don't define this in your global configs
            log.warn("The [wheel] section is deprecated. Use [bdist_wheel] instead.")
            val = wheel["universal"][1].strip()
            if val.lower() in ("1", "true", "yes"):
                self.universal = True

        if self.universal:
            SetuptoolsDeprecationWarning.emit(
                "bdist_wheel.universal is deprecated",
                """
                With Python 2.7 end-of-life, support for building universal wheels
                (i.e., wheels that support both Python 2 and Python 3)
                is being obviated.
                Please discontinue using this option, or if you still need it,
                file an issue with pypa/setuptools describing your use case.
                """,
                due_date=(2025, 8, 30),  # Introduced in 2024-08-30
            )

        if self.build_number is not None and not self.build_number[:1].isdigit():
            raise ValueError("Build tag (build-number) must start with a digit.")
|
| 285 |
+
|
| 286 |
+
def _validate_py_limited_api(self) -> None:
|
| 287 |
+
if not self.py_limited_api:
|
| 288 |
+
return
|
| 289 |
+
|
| 290 |
+
if not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api):
|
| 291 |
+
raise ValueError(f"py-limited-api must match '{PY_LIMITED_API_PATTERN}'")
|
| 292 |
+
|
| 293 |
+
if sysconfig.get_config_var("Py_GIL_DISABLED"):
|
| 294 |
+
raise ValueError(
|
| 295 |
+
f"`py_limited_api={self.py_limited_api!r}` not supported. "
|
| 296 |
+
"`Py_LIMITED_API` is currently incompatible with "
|
| 297 |
+
f"`Py_GIL_DISABLED` ({sys.abiflags=!r}). "
|
| 298 |
+
"See https://github.com/python/cpython/issues/111506."
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
@property
|
| 302 |
+
def wheel_dist_name(self) -> str:
|
| 303 |
+
"""Return distribution full name with - replaced with _"""
|
| 304 |
+
components = [
|
| 305 |
+
safer_name(self.distribution.get_name()),
|
| 306 |
+
safer_version(self.distribution.get_version()),
|
| 307 |
+
]
|
| 308 |
+
if self.build_number:
|
| 309 |
+
components.append(self.build_number)
|
| 310 |
+
return "-".join(components)
|
| 311 |
+
|
| 312 |
+
    def get_tag(self) -> tuple[str, str, str]:
        """Return the (python, abi, platform) compatibility tag for this wheel."""
        # bdist sets self.plat_name if unset, we should only use it for purepy
        # wheels if the user supplied it.
        if self.plat_name_supplied and self.plat_name:
            plat_name = self.plat_name
        elif self.root_is_pure:
            plat_name = "any"
        else:
            # macosx contains system version in platform name so need special handle
            if self.plat_name and not self.plat_name.startswith("macosx"):
                plat_name = self.plat_name
            else:
                # on macosx always limit the platform name to comply with any
                # c-extension modules in bdist_dir, since the user can specify
                # a higher MACOSX_DEPLOYMENT_TARGET via tools like CMake

                # on other platforms, and on macosx if there are no c-extension
                # modules, use the default platform name.
                plat_name = get_platform(self.bdist_dir)

            if _is_32bit_interpreter():
                if plat_name in ("linux-x86_64", "linux_x86_64"):
                    plat_name = "linux_i686"
                if plat_name in ("linux-aarch64", "linux_aarch64"):
                    # TODO armv8l, packaging pull request #690 => this did not land
                    # in pip/packaging yet
                    plat_name = "linux_armv7l"

        plat_name = (
            plat_name.lower().replace("-", "_").replace(".", "_").replace(" ", "_")
        )

        if self.root_is_pure:
            if self.universal:
                impl = "py2.py3"
            else:
                impl = self.python_tag
            tag = (impl, "none", plat_name)
        else:
            impl_name = tags.interpreter_name()
            impl_ver = tags.interpreter_version()
            impl = impl_name + impl_ver
            # We don't work on CPython 3.1, 3.0.
            if self.py_limited_api and (impl_name + impl_ver).startswith("cp3"):
                impl = self.py_limited_api
                abi_tag = "abi3"
            else:
                abi_tag = str(get_abi_tag()).lower()
            tag = (impl, abi_tag, plat_name)
            # issue gh-374: allow overriding plat_name
            supported_tags = [
                (t.interpreter, t.abi, plat_name) for t in tags.sys_tags()
            ]
            # NOTE(review): assert is stripped under -O; relies on normal builds.
            assert tag in supported_tags, (
                f"would build wheel with unsupported tag {tag}"
            )
        return tag
|
| 369 |
+
|
| 370 |
+
def run(self):
    """Build the project, install it into ``bdist_dir`` with a wheel-style
    layout, generate ``*.dist-info`` metadata, and zip the result into a
    ``.whl`` archive under ``dist_dir``.
    """
    # Force generic script shebangs and a full rebuild of scripts so the
    # wheel is not tied to this machine's interpreter path.
    build_scripts = self.reinitialize_command("build_scripts")
    build_scripts.executable = "python"
    build_scripts.force = True

    build_ext = self.reinitialize_command("build_ext")
    build_ext.inplace = False

    if not self.skip_build:
        self.run_command("build")

    install = self.reinitialize_command("install", reinit_subcommands=True)
    install.root = self.bdist_dir
    install.compile = False
    install.skip_build = self.skip_build
    install.warn_dir = False

    # A wheel without setuptools scripts is more cross-platform.
    # Use the (undocumented) `no_ep` option to setuptools'
    # install_scripts command to avoid creating entry point scripts.
    install_scripts = self.reinitialize_command("install_scripts")
    install_scripts.no_ep = True

    # Use a custom scheme for the archive, because we have to decide
    # at installation time which scheme to use.
    for key in ("headers", "scripts", "data", "purelib", "platlib"):
        setattr(install, "install_" + key, os.path.join(self.data_dir, key))

    basedir_observed = ""

    if os.name == "nt":
        # win32 barfs if any of these are ''; could be '.'?
        # (distutils.command.install:change_roots bug)
        basedir_observed = os.path.normpath(os.path.join(self.data_dir, ".."))
        self.install_libbase = self.install_lib = basedir_observed

    # Root-relative packages go at the archive root; only the non-root
    # scheme keeps the data_dir prefix.
    setattr(
        install,
        "install_purelib" if self.root_is_pure else "install_platlib",
        basedir_observed,
    )

    log.info(f"installing to {self.bdist_dir}")

    self.run_command("install")

    impl_tag, abi_tag, plat_tag = self.get_tag()
    archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"
    if not self.relative:
        archive_root = self.bdist_dir
    else:
        archive_root = os.path.join(
            self.bdist_dir, self._ensure_relative(install.install_base)
        )

    self.set_undefined_options("install_egg_info", ("target", "egginfo_dir"))
    distinfo_dirname = (
        f"{safer_name(self.distribution.get_name())}-"
        f"{safer_version(self.distribution.get_version())}.dist-info"
    )
    distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
    if self.dist_info_dir:
        # Use the given dist-info directly.
        log.debug(f"reusing {self.dist_info_dir}")
        shutil.copytree(self.dist_info_dir, distinfo_dir)
        # Egg info is still generated, so remove it now to avoid it getting
        # copied into the wheel.
        _shutil.rmtree(self.egginfo_dir)
    else:
        # Convert the generated egg-info into dist-info.
        self.egg2dist(self.egginfo_dir, distinfo_dir)

    self.write_wheelfile(distinfo_dir)

    # Make the archive
    if not os.path.exists(self.dist_dir):
        os.makedirs(self.dist_dir)

    wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
    with WheelFile(wheel_path, "w", self._zip_compression()) as wf:
        wf.write_files(archive_root)

    # Add to 'Distribution.dist_files' so that the "upload" command works
    getattr(self.distribution, "dist_files", []).append((
        "bdist_wheel",
        f"{sys.version_info.major}.{sys.version_info.minor}",
        wheel_path,
    ))

    if not self.keep_temp:
        log.info(f"removing {self.bdist_dir}")
        if not self.dry_run:
            _shutil.rmtree(self.bdist_dir)
|
| 463 |
+
|
| 464 |
+
def write_wheelfile(
    self, wheelfile_base: str, generator: str = f"setuptools ({__version__})"
) -> None:
    """Write the ``WHEEL`` metadata file into *wheelfile_base*.

    Emits one ``Tag`` header per combination of the (possibly dotted,
    compressed) implementation, ABI and platform tags.
    """
    from email.message import Message

    wheel_md = Message()
    wheel_md["Wheel-Version"] = "1.0"  # of the spec
    wheel_md["Generator"] = generator
    wheel_md["Root-Is-Purelib"] = str(self.root_is_pure).lower()
    if self.build_number is not None:
        wheel_md["Build"] = self.build_number

    # Doesn't work for bdist_wininst
    impl_tag, abi_tag, plat_tag = self.get_tag()
    for impl in impl_tag.split("."):
        for abi in abi_tag.split("."):
            for plat in plat_tag.split("."):
                wheel_md["Tag"] = f"{impl}-{abi}-{plat}"

    destination = os.path.join(wheelfile_base, "WHEEL")
    log.info(f"creating {destination}")
    with open(destination, "wb") as stream:
        BytesGenerator(stream, maxheaderlen=0).flatten(wheel_md)
|
| 487 |
+
|
| 488 |
+
def _ensure_relative(self, path: str) -> str:
|
| 489 |
+
# copied from dir_util, deleted
|
| 490 |
+
drive, path = os.path.splitdrive(path)
|
| 491 |
+
if path[0:1] == os.sep:
|
| 492 |
+
path = drive + path[1:]
|
| 493 |
+
return path
|
| 494 |
+
|
| 495 |
+
@property
def license_paths(self) -> Iterable[str]:
    """Paths of license files to copy into the ``.dist-info`` directory.

    The behaviour depends on the installed setuptools version:
    >= 57 has already resolved patterns to file names; 42-56 exposes the
    raw ``license_files`` patterns; older versions leave all handling
    (including the deprecated ``license_file`` option and the default
    glob patterns) to this code.
    """
    if setuptools_major_version >= 57:
        # Setuptools has resolved any patterns to actual file names
        return self.distribution.metadata.license_files or ()

    files = set[str]()
    metadata = self.distribution.get_option_dict("metadata")
    if setuptools_major_version >= 42:
        # Setuptools recognizes the license_files option but does not do globbing
        patterns = cast(Sequence[str], self.distribution.metadata.license_files)
    else:
        # Prior to those, wheel is entirely responsible for handling license files
        if "license_files" in metadata:
            patterns = metadata["license_files"][1].split()
        else:
            patterns = ()

    if "license_file" in metadata:
        warnings.warn(
            'The "license_file" option is deprecated. Use "license_files" instead.',
            DeprecationWarning,
            stacklevel=2,
        )
        files.add(metadata["license_file"][1])

    # Fall back to the default patterns only when the user supplied nothing;
    # an explicitly-configured empty *list* disables the defaults.
    if not files and not patterns and not isinstance(patterns, list):
        patterns = ("LICEN[CS]E*", "COPYING*", "NOTICE*", "AUTHORS*")

    for pattern in patterns:
        for path in iglob(pattern):
            if path.endswith("~"):
                log.debug(
                    f'ignoring license file "{path}" as it looks like a backup'
                )
                continue

            if path not in files and os.path.isfile(path):
                log.info(
                    f'adding license file "{path}" (matched pattern "{pattern}")'
                )
                files.add(path)

    return files
|
| 539 |
+
|
| 540 |
+
def egg2dist(self, egginfo_path: str, distinfo_path: str) -> None:
    """Convert an .egg-info directory into a .dist-info directory"""

    def adios(p: str) -> None:
        """Appropriately delete directory, file or link."""
        if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
            _shutil.rmtree(p)
        elif os.path.exists(p):
            os.unlink(p)

    # Start from a clean slate in case a previous build left dist-info behind.
    adios(distinfo_path)

    if not os.path.exists(egginfo_path):
        # There is no egg-info. This is probably because the egg-info
        # file/directory is not named matching the distribution name used
        # to name the archive file. Check for this case and report
        # accordingly.
        import glob

        pat = os.path.join(os.path.dirname(egginfo_path), "*.egg-info")
        possible = glob.glob(pat)
        err = f"Egg metadata expected at {egginfo_path} but not found"
        if possible:
            alt = os.path.basename(possible[0])
            err += f" ({alt} found - possible misnamed archive file?)"

        raise ValueError(err)

    # .egg-info is a directory
    pkginfo_path = os.path.join(egginfo_path, "PKG-INFO")

    # ignore common egg metadata that is useless to wheel
    shutil.copytree(
        egginfo_path,
        distinfo_path,
        ignore=lambda x, y: {
            "PKG-INFO",
            "requires.txt",
            "SOURCES.txt",
            "not-zip-safe",
        },
    )

    # delete dependency_links if it is only whitespace
    dependency_links_path = os.path.join(distinfo_path, "dependency_links.txt")
    with open(dependency_links_path, encoding="utf-8") as dependency_links_file:
        dependency_links = dependency_links_file.read().strip()
    if not dependency_links:
        adios(dependency_links_path)

    # PKG-INFO was excluded from the copytree above; it becomes METADATA.
    metadata_path = os.path.join(distinfo_path, "METADATA")
    shutil.copy(pkginfo_path, metadata_path)

    for license_path in self.license_paths:
        filename = os.path.basename(license_path)
        shutil.copy(license_path, os.path.join(distinfo_path, filename))

    # The egg-info directory is no longer needed once dist-info exists.
    adios(egginfo_path)
|
| 598 |
+
|
| 599 |
+
def _zip_compression(self) -> int:
|
| 600 |
+
if (
|
| 601 |
+
isinstance(self.compression, int)
|
| 602 |
+
and self.compression in self.supported_compressions.values()
|
| 603 |
+
):
|
| 604 |
+
return self.compression
|
| 605 |
+
|
| 606 |
+
compression = self.supported_compressions.get(str(self.compression))
|
| 607 |
+
if compression is not None:
|
| 608 |
+
return compression
|
| 609 |
+
|
| 610 |
+
raise ValueError(f"Unsupported compression: {self.compression!r}")
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/build.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Protocol
|
| 4 |
+
|
| 5 |
+
from ..dist import Distribution
|
| 6 |
+
|
| 7 |
+
from distutils.command.build import build as _build
|
| 8 |
+
|
| 9 |
+
_ORIGINAL_SUBCOMMANDS = {"build_py", "build_clib", "build_ext", "build_scripts"}
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class build(_build):
    """Setuptools' drop-in extension of the distutils ``build`` command."""

    distribution: Distribution  # override distutils.dist.Distribution with setuptools.dist.Distribution

    # Shallow-copy so that later additions to this list do not mutate the
    # object shared with the distutils parent class.
    sub_commands = list(_build.sub_commands)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class SubCommand(Protocol):
    """In order to support editable installations (see :pep:`660`) all
    build subcommands **SHOULD** implement this protocol. They also **MUST** inherit
    from ``setuptools.Command``.

    When creating an :pep:`editable wheel <660>`, ``setuptools`` will try to evaluate
    custom ``build`` subcommands using the following procedure:

    1. ``setuptools`` will set the ``editable_mode`` attribute to ``True``
    2. ``setuptools`` will execute the ``run()`` command.

       .. important::
          Subcommands **SHOULD** take advantage of ``editable_mode=True`` to adequate
          its behaviour or perform optimisations.

          For example, if a subcommand doesn't need to generate an extra file and
          all it does is to copy a source file into the build directory,
          ``run()`` **SHOULD** simply "early return".

          Similarly, if the subcommand creates files that would be placed alongside
          Python files in the final distribution, during an editable install
          the command **SHOULD** generate these files "in place" (i.e. write them to
          the original source directory, instead of using the build directory).
          Note that ``get_output_mapping()`` should reflect that and include mappings
          for "in place" builds accordingly.

    3. ``setuptools`` use any knowledge it can derive from the return values of
       ``get_outputs()`` and ``get_output_mapping()`` to create an editable wheel.
       When relevant ``setuptools`` **MAY** attempt to use file links based on the value
       of ``get_output_mapping()``. Alternatively, ``setuptools`` **MAY** attempt to use
       :doc:`import hooks <python:reference/import>` to redirect any attempt to import
       to the directory with the original source code and other files built in place.

    Please note that custom sub-commands **SHOULD NOT** rely on ``run()`` being
    executed (or not) to provide correct return values for ``get_outputs()``,
    ``get_output_mapping()`` or ``get_source_files()``. The ``get_*`` methods should
    work independently of ``run()``.
    """

    editable_mode: bool = False
    """Boolean flag that will be set to ``True`` when setuptools is used for an
    editable installation (see :pep:`660`).
    Implementations **SHOULD** explicitly set the default value of this attribute to
    ``False``.
    When subcommands run, they can use this flag to perform optimizations or change
    their behaviour accordingly.
    """

    build_lib: str
    """String representing the directory where the build artifacts should be stored,
    e.g. ``build/lib``.
    For example, if a distribution wants to provide a Python module named ``pkg.mod``,
    then a corresponding file should be written to ``{build_lib}/package/module.py``.
    A way of thinking about this is that the files saved under ``build_lib``
    would be eventually copied to one of the directories in :obj:`site.PREFIXES`
    upon installation.

    A command that produces platform-independent files (e.g. compiling text templates
    into Python functions), **CAN** initialize ``build_lib`` by copying its value from
    the ``build_py`` command. On the other hand, a command that produces
    platform-specific files **CAN** initialize ``build_lib`` by copying its value from
    the ``build_ext`` command. In general this is done inside the ``finalize_options``
    method with the help of the ``set_undefined_options`` command::

        def finalize_options(self):
            self.set_undefined_options("build_py", ("build_lib", "build_lib"))
            ...
    """

    def initialize_options(self) -> None:
        """(Required by the original :class:`setuptools.Command` interface)"""
        ...

    def finalize_options(self) -> None:
        """(Required by the original :class:`setuptools.Command` interface)"""
        ...

    def run(self) -> None:
        """(Required by the original :class:`setuptools.Command` interface)"""
        ...

    def get_source_files(self) -> list[str]:
        """
        Return a list of all files that are used by the command to create the expected
        outputs.
        For example, if your build command transpiles Java files into Python, you should
        list here all the Java files.
        The primary purpose of this function is to help populating the ``sdist``
        with all the files necessary to build the distribution.
        All files should be strings relative to the project root directory.
        """
        ...

    def get_outputs(self) -> list[str]:
        """
        Return a list of files intended for distribution as they would have been
        produced by the build.
        These files should be strings in the form of
        ``"{build_lib}/destination/file/path"``.

        .. note::
           The return value of ``get_output()`` should include all files used as keys
           in ``get_output_mapping()`` plus files that are generated during the build
           and don't correspond to any source file already present in the project.
        """
        ...

    def get_output_mapping(self) -> dict[str, str]:
        """
        Return a mapping between destination files as they would be produced by the
        build (dict keys) into the respective existing (source) files (dict values).
        Existing (source) files should be represented as strings relative to the project
        root directory.
        Destination files should be strings in the form of
        ``"{build_lib}/destination/file/path"``.
        """
        ...
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/build_clib.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..dist import Distribution
|
| 2 |
+
from ..modified import newer_pairwise_group
|
| 3 |
+
|
| 4 |
+
import distutils.command.build_clib as orig
|
| 5 |
+
from distutils import log
|
| 6 |
+
from distutils.errors import DistutilsSetupError
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class build_clib(orig.build_clib):
    """
    Override the default build_clib behaviour to do the following:

    1. Implement a rudimentary timestamp-based dependency system
       so 'compile()' doesn't run every time.
    2. Add more keys to the 'build_info' dictionary:
        * obj_deps - specify dependencies for each object compiled.
                     this should be a dictionary mapping a key
                     with the source filename to a list of
                     dependencies. Use an empty string for global
                     dependencies.
        * cflags - specify a list of additional flags to pass to
                   the compiler.
    """

    distribution: Distribution  # override distutils.dist.Distribution with setuptools.dist.Distribution

    def build_libraries(self, libraries) -> None:
        """Compile and archive every ``(lib_name, build_info)`` entry.

        Each library is only recompiled when one of its sources or declared
        dependencies (``obj_deps``) is newer than the corresponding object
        file.

        :param libraries: list of ``(lib_name, build_info)`` pairs as passed
            via the ``libraries`` setup option.
        :raises DistutilsSetupError: when ``sources`` or ``obj_deps`` entries
            are missing or have the wrong type.
        """
        for lib_name, build_info in libraries:
            sources = build_info.get('sources')
            if sources is None or not isinstance(sources, (list, tuple)):
                raise DistutilsSetupError(
                    f"in 'libraries' option (library '{lib_name}'), "
                    "'sources' must be present and must be "
                    "a list of source filenames"
                )
            # `sorted` already returns a new list, so the previous
            # `sorted(list(sources))` double copy is unnecessary.
            sources = sorted(sources)

            log.info("building '%s' library", lib_name)

            # Make sure everything is the correct type.
            # obj_deps should be a dictionary of keys as sources
            # and a list/tuple of files that are its dependencies.
            obj_deps = build_info.get('obj_deps', dict())
            if not isinstance(obj_deps, dict):
                raise DistutilsSetupError(
                    f"in 'libraries' option (library '{lib_name}'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'"
                )
            dependencies = []

            # Get the global dependencies that are specified by the '' key.
            # These will go into every source's dependency list.
            global_deps = obj_deps.get('', list())
            if not isinstance(global_deps, (list, tuple)):
                raise DistutilsSetupError(
                    f"in 'libraries' option (library '{lib_name}'), "
                    "'obj_deps' must be a dictionary of "
                    "type 'source: list'"
                )

            # Build the list to be used by newer_pairwise_group
            # each source will be auto-added to its dependencies.
            for source in sources:
                src_deps = [source]
                src_deps.extend(global_deps)
                extra_deps = obj_deps.get(source, list())
                if not isinstance(extra_deps, (list, tuple)):
                    raise DistutilsSetupError(
                        f"in 'libraries' option (library '{lib_name}'), "
                        "'obj_deps' must be a dictionary of "
                        "type 'source: list'"
                    )
                src_deps.extend(extra_deps)
                dependencies.append(src_deps)

            expected_objects = self.compiler.object_filenames(
                sources,
                output_dir=self.build_temp,
            )

            # Only rebuild when some dependency is newer than its object.
            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
                # First, compile the source code to object files in the library
                # directory. (This should probably change to putting object
                # files in a temporary build directory.)
                macros = build_info.get('macros')
                include_dirs = build_info.get('include_dirs')
                cflags = build_info.get('cflags')
                self.compiler.compile(
                    sources,
                    output_dir=self.build_temp,
                    macros=macros,
                    include_dirs=include_dirs,
                    extra_postargs=cflags,
                    debug=self.debug,
                )

            # Now "link" the object files together into a static library.
            # (On Unix at least, this isn't really linking -- it just
            # builds an archive. Whatever.)
            self.compiler.create_static_lib(
                expected_objects, lib_name, output_dir=self.build_clib, debug=self.debug
            )
|
evalkit_llava/lib/python3.10/site-packages/setuptools/command/build_py.py
ADDED
|
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import fnmatch
|
| 4 |
+
import itertools
|
| 5 |
+
import os
|
| 6 |
+
import stat
|
| 7 |
+
import textwrap
|
| 8 |
+
from collections.abc import Iterable, Iterator
|
| 9 |
+
from functools import partial
|
| 10 |
+
from glob import glob
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
from more_itertools import unique_everseen
|
| 14 |
+
|
| 15 |
+
from .._path import StrPath, StrPathT
|
| 16 |
+
from ..dist import Distribution
|
| 17 |
+
from ..warnings import SetuptoolsDeprecationWarning
|
| 18 |
+
|
| 19 |
+
import distutils.command.build_py as orig
|
| 20 |
+
import distutils.errors
|
| 21 |
+
from distutils.util import convert_path
|
| 22 |
+
|
| 23 |
+
_IMPLICIT_DATA_FILES = ('*.pyi', 'py.typed')
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def make_writable(target) -> None:
    """Add the owner-write permission bit to *target*, keeping other bits."""
    current_mode = os.stat(target).st_mode
    os.chmod(target, current_mode | stat.S_IWRITE)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class build_py(orig.build_py):
|
| 31 |
+
"""Enhanced 'build_py' command that includes data files with packages
|
| 32 |
+
|
| 33 |
+
The data files are specified via a 'package_data' argument to 'setup()'.
|
| 34 |
+
See 'setuptools.dist.Distribution' for more details.
|
| 35 |
+
|
| 36 |
+
Also, this version of the 'build_py' command allows you to specify both
|
| 37 |
+
'py_modules' and 'packages' in the same setup operation.
|
| 38 |
+
"""
|
| 39 |
+
|
| 40 |
+
distribution: Distribution # override distutils.dist.Distribution with setuptools.dist.Distribution
|
| 41 |
+
editable_mode: bool = False
|
| 42 |
+
existing_egg_info_dir: StrPath | None = None #: Private API, internal use only.
|
| 43 |
+
|
| 44 |
+
def finalize_options(self):
    """Finalize distutils options, then pull package-data settings from the
    distribution and invalidate any cached ``data_files``."""
    orig.build_py.finalize_options(self)
    self.package_data = self.distribution.package_data
    self.exclude_package_data = self.distribution.exclude_package_data or {}
    # Drop any cached value so ``__getattr__`` recomputes ``data_files``
    # lazily with the options finalized above.
    self.__dict__.pop('data_files', None)
|
| 50 |
+
|
| 51 |
+
def copy_file(  # type: ignore[override] # No overload, no bytes support
    self,
    infile: StrPath,
    outfile: StrPathT,
    preserve_mode: bool = True,
    preserve_times: bool = True,
    link: str | None = None,
    level: object = 1,
) -> tuple[StrPathT | str, bool]:
    """Copy (or link) *infile* to *outfile*, returning ``(dest, copied)``.

    Overrides the distutils base class so that when *link* is requested the
    paths are fully resolved first -- hard/symlinking needs real paths, not
    ones containing symlinked components.
    """
    # Overwrite base class to allow using links
    if link:
        infile = str(Path(infile).resolve())
        outfile = str(Path(outfile).resolve())  # type: ignore[assignment] # Re-assigning a str when outfile is StrPath is ok
    return super().copy_file(  # pyright: ignore[reportReturnType] # pypa/distutils#309
        infile, outfile, preserve_mode, preserve_times, link, level
    )
|
| 67 |
+
|
| 68 |
+
def run(self) -> None:
    """Build modules, packages, and copy data files to build directory"""
    # Nothing to do for editable installs or when there is nothing to build.
    if self.editable_mode or not (self.py_modules or self.packages):
        return

    if self.py_modules:
        self.build_modules()

    if self.packages:
        self.build_packages()
        self.build_package_data()

    # Only compile actual .py files, using our base class' idea of what our
    # output files are.
    self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=False))
|
| 83 |
+
|
| 84 |
+
def __getattr__(self, attr: str):
    """Lazily compute data files.

    ``__getattr__`` only fires when normal lookup fails, so ``data_files``
    is computed on first access and then cached in the instance ``__dict__``
    (subsequent accesses never reach this method again).
    """
    if attr == 'data_files':
        self.data_files = self._get_data_files()
        return self.data_files
    # Delegate everything else to the base class (distutils option magic).
    return orig.build_py.__getattr__(self, attr)
|
| 90 |
+
|
| 91 |
+
def _get_data_files(self):
|
| 92 |
+
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
|
| 93 |
+
self.analyze_manifest()
|
| 94 |
+
return list(map(self._get_pkg_data_files, self.packages or ()))
|
| 95 |
+
|
| 96 |
+
def get_data_files_without_manifest(self):
    """
    Generate list of ``(package,src_dir,build_dir,filenames)`` tuples,
    but without triggering any attempt to analyze or build the manifest.
    """
    # Prevent eventual errors from unset `manifest_files`
    # (that would otherwise be set by `analyze_manifest`)
    self.__dict__.setdefault('manifest_files', {})
    return [self._get_pkg_data_files(pkg) for pkg in (self.packages or ())]
|
| 105 |
+
|
| 106 |
+
def _get_pkg_data_files(self, package):
|
| 107 |
+
# Locate package source directory
|
| 108 |
+
src_dir = self.get_package_dir(package)
|
| 109 |
+
|
| 110 |
+
# Compute package build directory
|
| 111 |
+
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
|
| 112 |
+
|
| 113 |
+
# Strip directory from globbed filenames
|
| 114 |
+
filenames = [
|
| 115 |
+
os.path.relpath(file, src_dir)
|
| 116 |
+
for file in self.find_data_files(package, src_dir)
|
| 117 |
+
]
|
| 118 |
+
return package, src_dir, build_dir, filenames
|
| 119 |
+
|
| 120 |
+
def find_data_files(self, package, src_dir):
    """Return filenames for package's data files in 'src_dir'"""
    patterns = self._get_platform_patterns(
        self.package_data,
        package,
        src_dir,
        extra_patterns=_IMPLICIT_DATA_FILES,
    )
    # Expand every glob pattern (recursively) and keep only real files.
    globbed = (
        match
        for pattern in patterns
        for match in glob(pattern, recursive=True)
        if os.path.isfile(match)
    )
    candidates = itertools.chain(
        self.manifest_files.get(package, []),
        globbed,
    )
    return self.exclude_data_files(package, src_dir, candidates)
|
| 137 |
+
|
| 138 |
+
def get_outputs(self, include_bytecode: bool = True) -> list[str]:  # type: ignore[override] # Using a real boolean instead of 0|1
    """See :class:`setuptools.commands.build.SubCommand`"""
    if not self.editable_mode:
        return super().get_outputs(include_bytecode)
    # Editable installs: the outputs are exactly the mapping's destinations.
    return list(self.get_output_mapping())
|
| 143 |
+
|
| 144 |
+
def get_output_mapping(self) -> dict[str, str]:
    """See :class:`setuptools.commands.build.SubCommand`"""
    pairs = [
        *self._get_package_data_output_mapping(),
        *self._get_module_mapping(),
    ]
    # Deterministic ordering, keyed on the destination path.
    pairs.sort(key=lambda pair: pair[0])
    return dict(pairs)
|
| 151 |
+
|
| 152 |
+
def _get_module_mapping(self) -> Iterator[tuple[str, str]]:
|
| 153 |
+
"""Iterate over all modules producing (dest, src) pairs."""
|
| 154 |
+
for package, module, module_file in self.find_all_modules():
|
| 155 |
+
package = package.split('.')
|
| 156 |
+
filename = self.get_module_outfile(self.build_lib, package, module)
|
| 157 |
+
yield (filename, module_file)
|
| 158 |
+
|
| 159 |
+
def _get_package_data_output_mapping(self) -> Iterator[tuple[str, str]]:
|
| 160 |
+
"""Iterate over package data producing (dest, src) pairs."""
|
| 161 |
+
for package, src_dir, build_dir, filenames in self.data_files:
|
| 162 |
+
for filename in filenames:
|
| 163 |
+
target = os.path.join(build_dir, filename)
|
| 164 |
+
srcfile = os.path.join(src_dir, filename)
|
| 165 |
+
yield (target, srcfile)
|
| 166 |
+
|
| 167 |
+
def build_package_data(self) -> None:
    """Copy data files into build directory"""
    for dest, src in self._get_package_data_output_mapping():
        # Ensure the destination directory exists before copying
        self.mkpath(os.path.dirname(dest))
        self.copy_file(src, dest)
        # Copies may inherit read-only permissions from the source tree
        make_writable(dest)
|
| 173 |
+
|
| 174 |
+
def analyze_manifest(self) -> None:
    """
    Map each configured package to the manifest (``SOURCES.txt`` /
    ``egg_info``) entries that live under its source directory, storing
    the result in ``self.manifest_files`` (``{package: [paths]}``).

    Beyond initializing ``manifest_files``, this is a no-op unless the
    distribution enables ``include_package_data``.
    """
    self.manifest_files: dict[str, list[str]] = {}
    if not self.distribution.include_package_data:
        return
    src_dirs: dict[str, str] = {}
    for package in self.packages or ():
        # Locate package source directory (must be relative; assert_relative
        # raises on absolute paths)
        src_dirs[assert_relative(self.get_package_dir(package))] = package

    if (
        self.existing_egg_info_dir
        and Path(self.existing_egg_info_dir, "SOURCES.txt").exists()
    ):
        # Reuse a pre-existing egg-info manifest instead of rebuilding it
        egg_info_dir = self.existing_egg_info_dir
        manifest = Path(egg_info_dir, "SOURCES.txt")
        files = manifest.read_text(encoding="utf-8").splitlines()
    else:
        # No usable egg-info: run `egg_info` and take its computed file list
        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        egg_info_dir = ei_cmd.egg_info
        files = ei_cmd.filelist.files

    check = _IncludePackageDataAbuse()
    for path in self._filter_build_files(files, egg_info_dir):
        # Walk the path upwards, component by component, until we reach a
        # known package source directory (or run out of components).
        d, f = os.path.split(assert_relative(path))
        prev = None
        oldf = f  # original basename, to detect files directly in the pkg dir
        while d and d != prev and d not in src_dirs:
            prev = d
            d, df = os.path.split(d)
            f = os.path.join(df, f)
    if d in src_dirs:
            if f == oldf:
                # File sits directly inside the package directory
                if check.is_module(f):
                    continue  # it's a module, not data
            else:
                # File sits in a subdirectory: warn if that subdirectory
                # chain looks like an importable but unconfigured package
                importable = check.importable_subpackage(src_dirs[d], f)
                if importable:
                    check.warn(importable)
            self.manifest_files.setdefault(src_dirs[d], []).append(path)
|
| 214 |
+
|
| 215 |
+
def _filter_build_files(
|
| 216 |
+
self, files: Iterable[str], egg_info: StrPath
|
| 217 |
+
) -> Iterator[str]:
|
| 218 |
+
"""
|
| 219 |
+
``build_meta`` may try to create egg_info outside of the project directory,
|
| 220 |
+
and this can be problematic for certain plugins (reported in issue #3500).
|
| 221 |
+
|
| 222 |
+
Extensions might also include between their sources files created on the
|
| 223 |
+
``build_lib`` and ``build_temp`` directories.
|
| 224 |
+
|
| 225 |
+
This function should filter this case of invalid files out.
|
| 226 |
+
"""
|
| 227 |
+
build = self.get_finalized_command("build")
|
| 228 |
+
build_dirs = (egg_info, self.build_lib, build.build_temp, build.build_base)
|
| 229 |
+
norm_dirs = [os.path.normpath(p) for p in build_dirs if p]
|
| 230 |
+
|
| 231 |
+
for file in files:
|
| 232 |
+
norm_path = os.path.normpath(file)
|
| 233 |
+
if not os.path.isabs(file) or all(d not in norm_path for d in norm_dirs):
|
| 234 |
+
yield file
|
| 235 |
+
|
| 236 |
+
def get_data_files(self) -> None:
    """Deliberate no-op: data files are computed lazily in ``_get_data_files()``."""
|
| 238 |
+
|
| 239 |
+
def check_package(self, package, package_dir):
    """Check namespace packages' __init__ for declare_namespace"""
    # Cached result from a previous check, if any.
    if package in self.packages_checked:
        return self.packages_checked[package]

    init_py = orig.build_py.check_package(self, package, package_dir)
    self.packages_checked[package] = init_py

    # Only packages that have an __init__ and belong to a declared
    # namespace need the declare_namespace() validation below.
    if not init_py or not self.distribution.namespace_packages:
        return init_py

    in_namespace = any(
        pkg == package or pkg.startswith(package + '.')
        for pkg in self.distribution.namespace_packages
    )
    if not in_namespace:
        return init_py

    with open(init_py, 'rb') as f:
        contents = f.read()
    if b'declare_namespace' not in contents:
        raise distutils.errors.DistutilsError(
            f"Namespace package problem: {package} is a namespace package, but "
            "its\n__init__.py does not call declare_namespace()! Please "
            'fix it.\n(See the setuptools manual under '
            '"Namespace Packages" for details.)\n"'
        )
    return init_py
|
| 268 |
+
|
| 269 |
+
def initialize_options(self):
    """Set default option values, including setuptools-specific state."""
    # Cache for `check_package` results; must exist before any check runs.
    self.packages_checked = {}
    orig.build_py.initialize_options(self)
    # setuptools extensions: editable-install mode and a reusable egg-info dir.
    self.editable_mode, self.existing_egg_info_dir = False, None
|
| 274 |
+
|
| 275 |
+
def get_package_dir(self, package):
    """Return the directory for *package*, honoring ``src_root`` when set."""
    pkg_dir = orig.build_py.get_package_dir(self, package)
    src_root = self.distribution.src_root
    if src_root is None:
        return pkg_dir
    return os.path.join(src_root, pkg_dir)
|
| 280 |
+
|
| 281 |
+
def exclude_data_files(self, package, src_dir, files):
    """Filter filenames for package's data files in 'src_dir'"""
    files = list(files)
    patterns = self._get_platform_patterns(
        self.exclude_package_data,
        package,
        src_dir,
    )
    # Every file matched by any exclusion pattern is dropped.
    excluded = {
        name
        for pattern in patterns
        for name in fnmatch.filter(files, pattern)
    }
    survivors = (name for name in files if name not in excluded)
    # ditch dupes
    return list(unique_everseen(survivors))
|
| 296 |
+
|
| 297 |
+
@staticmethod
|
| 298 |
+
def _get_platform_patterns(spec, package, src_dir, extra_patterns=()):
|
| 299 |
+
"""
|
| 300 |
+
yield platform-specific path patterns (suitable for glob
|
| 301 |
+
or fn_match) from a glob-based spec (such as
|
| 302 |
+
self.package_data or self.exclude_package_data)
|
| 303 |
+
matching package in src_dir.
|
| 304 |
+
"""
|
| 305 |
+
raw_patterns = itertools.chain(
|
| 306 |
+
extra_patterns,
|
| 307 |
+
spec.get('', []),
|
| 308 |
+
spec.get(package, []),
|
| 309 |
+
)
|
| 310 |
+
return (
|
| 311 |
+
# Each pattern has to be converted to a platform-specific path
|
| 312 |
+
os.path.join(src_dir, convert_path(pattern))
|
| 313 |
+
for pattern in raw_patterns
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def assert_relative(path):
    """Return *path* unchanged if relative; raise DistutilsSetupError if absolute."""
    if not os.path.isabs(path):
        return path
    from distutils.errors import DistutilsSetupError

    template = textwrap.dedent(
        """
        Error: setup script specifies an absolute path:

            %s

        setup() arguments must *always* be /-separated paths relative to the
        setup.py directory, *never* absolute paths.
        """
    ).lstrip()
    raise DistutilsSetupError(template % path)
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
class _IncludePackageDataAbuse:
    """Inform users that package or module is included as 'data file'"""

    # Deprecation warning emitted (at most once per importable) by `warn`.
    class _Warning(SetuptoolsDeprecationWarning):
        _SUMMARY = """
        Package {importable!r} is absent from the `packages` configuration.
        """

        _DETAILS = """
        ############################
        # Package would be ignored #
        ############################
        Python recognizes {importable!r} as an importable package[^1],
        but it is absent from setuptools' `packages` configuration.

        This leads to an ambiguous overall configuration. If you want to distribute this
        package, please make sure that {importable!r} is explicitly added
        to the `packages` configuration field.

        Alternatively, you can also rely on setuptools' discovery methods
        (for example by using `find_namespace_packages(...)`/`find_namespace:`
        instead of `find_packages(...)`/`find:`).

        You can read more about "package discovery" on setuptools documentation page:

        - https://setuptools.pypa.io/en/latest/userguide/package_discovery.html

        If you don't want {importable!r} to be distributed and are
        already explicitly excluding {importable!r} via
        `find_namespace_packages(...)/find_namespace` or `find_packages(...)/find`,
        you can try to use `exclude_package_data`, or `include-package-data=False` in
        combination with a more fine grained `package-data` configuration.

        You can read more about "package data files" on setuptools documentation page:

        - https://setuptools.pypa.io/en/latest/userguide/datafiles.html


        [^1]: For Python, any directory (with suitable naming) can be imported,
              even if it does not contain any `.py` files.
              On the other hand, currently there is no concept of package data
              directory, all directories are treated like packages.
        """
        # _DUE_DATE: still not defined as this is particularly controversial.
        # Warning initially introduced in May 2022. See issue #3340 for discussion.

    def __init__(self):
        # Importables already warned about, so each is reported only once.
        self._already_warned = set()

    def is_module(self, file):
        # True for a top-level `<identifier>.py` file: a module, not data.
        return file.endswith(".py") and file[: -len(".py")].isidentifier()

    def importable_subpackage(self, parent, file):
        # If *file*'s directory chain consists of identifier-named parts,
        # that chain is importable as `parent.<parts...>`; return the dotted
        # name, or None when the leading directory is not an identifier.
        pkg = Path(file).parent
        parts = list(itertools.takewhile(str.isidentifier, pkg.parts))
        if parts:
            return ".".join([parent, *parts])
        return None

    def warn(self, importable):
        # Emit the deprecation warning once per importable package name.
        if importable not in self._already_warned:
            self._Warning.emit(importable=importable)
            self._already_warned.add(importable)
|