koichi12 commited on
Commit
90f4bc9
·
verified ·
1 Parent(s): ffe7255

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/TestLibCython.py +276 -0
  2. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__init__.py +1 -0
  3. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/TestLibCython.cpython-311.pyc +0 -0
  4. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/test_libcython_in_gdb.cpython-311.pyc +0 -0
  5. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/codefile +49 -0
  6. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/test_libcython_in_gdb.py +553 -0
  7. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/test_libpython_in_gdb.py +112 -0
  8. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/Cygdb.cpython-311.pyc +0 -0
  9. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/__init__.cpython-311.pyc +0 -0
  10. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/extension.py +123 -0
  11. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/DFA.pxd +30 -0
  12. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Machines.py +242 -0
  13. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Scanners.pxd +48 -0
  14. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Transitions.pxd +22 -0
  15. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/CodeWriter.cpython-311.pyc +0 -0
  16. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Shadow.cpython-311.pyc +0 -0
  17. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/StringIOTree.cpython-311.pyc +0 -0
  18. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/TestUtils.cpython-311.pyc +0 -0
  19. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Utils.cpython-311.pyc +0 -0
  20. tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/__init__.cpython-311.pyc +0 -0
  21. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-311.pyc +0 -0
  22. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py +48 -0
  23. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__init__.py +4 -0
  24. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-311.pyc +0 -0
  25. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-311.pyc +0 -0
  26. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-311.pyc +0 -0
  27. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/cacert.pem +0 -0
  28. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/__init__.py +82 -0
  29. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py +70 -0
  30. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/filter.py +70 -0
  31. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py +129 -0
  32. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py +170 -0
  33. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/irc.py +154 -0
  34. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/latex.py +518 -0
  35. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/svg.py +185 -0
  36. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py +127 -0
  37. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py +963 -0
  38. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__init__.py +362 -0
  39. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-311.pyc +0 -0
  40. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/_mapping.py +589 -0
  41. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py +1198 -0
  42. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/modeline.py +43 -0
  43. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/plugin.py +72 -0
  44. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/regexopt.py +91 -0
  45. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/sphinxext.py +247 -0
  46. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/_mapping.py +54 -0
  47. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py +324 -0
  48. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-311.pyc +0 -0
  49. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-311.pyc +0 -0
  50. tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/requests/__pycache__/api.cpython-311.pyc +0 -0
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/TestLibCython.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+ import re
4
+ import sys
5
+ import shutil
6
+ import warnings
7
+ import textwrap
8
+ import unittest
9
+ import tempfile
10
+ import subprocess
11
+ #import distutils.core
12
+ #from distutils import sysconfig
13
+ from distutils import ccompiler
14
+
15
+ import runtests
16
+ import Cython.Distutils.extension
17
+ import Cython.Distutils.old_build_ext as build_ext
18
+ from Cython.Debugger import Cygdb as cygdb
19
+
20
+ root = os.path.dirname(os.path.abspath(__file__))
21
+ codefile = os.path.join(root, 'codefile')
22
+ cfuncs_file = os.path.join(root, 'cfuncs.c')
23
+
24
+ with open(codefile) as f:
25
+ source_to_lineno = dict((line.strip(), i + 1) for i, line in enumerate(f))
26
+
27
+
28
+ have_gdb = None
29
+ def test_gdb():
30
+ global have_gdb
31
+ if have_gdb is not None:
32
+ return have_gdb
33
+
34
+ have_gdb = False
35
+ try:
36
+ p = subprocess.Popen(['gdb', '-nx', '--version'], stdout=subprocess.PIPE)
37
+ except OSError:
38
+ # gdb not found
39
+ gdb_version = None
40
+ else:
41
+ stdout, _ = p.communicate()
42
+ # Based on Lib/test/test_gdb.py
43
+ regex = r"GNU gdb [^\d]*(\d+)\.(\d+)"
44
+ gdb_version = re.match(regex, stdout.decode('ascii', 'ignore'))
45
+
46
+ if gdb_version:
47
+ gdb_version_number = list(map(int, gdb_version.groups()))
48
+ if gdb_version_number >= [7, 2]:
49
+ have_gdb = True
50
+ with tempfile.NamedTemporaryFile(mode='w+') as python_version_script:
51
+ python_version_script.write(
52
+ 'python import sys; print("%s %s" % sys.version_info[:2])')
53
+ python_version_script.flush()
54
+ p = subprocess.Popen(['gdb', '-batch', '-x', python_version_script.name],
55
+ stdout=subprocess.PIPE)
56
+ stdout, _ = p.communicate()
57
+ try:
58
+ internal_python_version = list(map(int, stdout.decode('ascii', 'ignore').split()))
59
+ if internal_python_version < [2, 7]:
60
+ have_gdb = False
61
+ except ValueError:
62
+ have_gdb = False
63
+
64
+ if not have_gdb:
65
+ warnings.warn('Skipping gdb tests, need gdb >= 7.2 with Python >= 2.7')
66
+
67
+ return have_gdb
68
+
69
+
70
+ class DebuggerTestCase(unittest.TestCase):
71
+
72
+ def setUp(self):
73
+ """
74
+ Run gdb and have cygdb import the debug information from the code
75
+ defined in TestParseTreeTransforms's setUp method
76
+ """
77
+ if not test_gdb():
78
+ return
79
+
80
+ self.tempdir = tempfile.mkdtemp()
81
+ self.destfile = os.path.join(self.tempdir, 'codefile.pyx')
82
+ self.debug_dest = os.path.join(self.tempdir,
83
+ 'cython_debug',
84
+ 'cython_debug_info_codefile')
85
+ self.cfuncs_destfile = os.path.join(self.tempdir, 'cfuncs')
86
+
87
+ self.cwd = os.getcwd()
88
+ try:
89
+ os.chdir(self.tempdir)
90
+
91
+ shutil.copy(codefile, self.destfile)
92
+ shutil.copy(cfuncs_file, self.cfuncs_destfile + '.c')
93
+ shutil.copy(cfuncs_file.replace('.c', '.h'),
94
+ self.cfuncs_destfile + '.h')
95
+
96
+ compiler = ccompiler.new_compiler()
97
+ compiler.compile(['cfuncs.c'], debug=True, extra_postargs=['-fPIC'])
98
+
99
+ opts = dict(
100
+ test_directory=self.tempdir,
101
+ module='codefile',
102
+ module_path=self.destfile,
103
+ )
104
+
105
+ optimization_disabler = build_ext.Optimization()
106
+
107
+ cython_compile_testcase = runtests.CythonCompileTestCase(
108
+ workdir=self.tempdir,
109
+ # we clean up everything (not only compiled files)
110
+ cleanup_workdir=False,
111
+ tags=runtests.parse_tags(codefile),
112
+ **opts
113
+ )
114
+
115
+
116
+ new_stderr = open(os.devnull, 'w')
117
+
118
+ stderr = sys.stderr
119
+ sys.stderr = new_stderr
120
+
121
+ optimization_disabler.disable_optimization()
122
+ try:
123
+ cython_compile_testcase.run_cython(
124
+ targetdir=self.tempdir,
125
+ incdir=None,
126
+ annotate=False,
127
+ extra_compile_options={
128
+ 'gdb_debug':True,
129
+ 'output_dir':self.tempdir,
130
+ },
131
+ **opts
132
+ )
133
+
134
+ cython_compile_testcase.run_distutils(
135
+ test_directory=opts['test_directory'],
136
+ module=opts['module'],
137
+ workdir=opts['test_directory'],
138
+ incdir=None,
139
+ extra_extension_args={'extra_objects':['cfuncs.o']},
140
+ )
141
+ finally:
142
+ optimization_disabler.restore_state()
143
+ sys.stderr = stderr
144
+ new_stderr.close()
145
+
146
+ # ext = Cython.Distutils.extension.Extension(
147
+ # 'codefile',
148
+ # ['codefile.pyx'],
149
+ # cython_gdb=True,
150
+ # extra_objects=['cfuncs.o'])
151
+ #
152
+ # distutils.core.setup(
153
+ # script_args=['build_ext', '--inplace'],
154
+ # ext_modules=[ext],
155
+ # cmdclass=dict(build_ext=Cython.Distutils.build_ext)
156
+ # )
157
+
158
+ except:
159
+ os.chdir(self.cwd)
160
+ raise
161
+
162
+ def tearDown(self):
163
+ if not test_gdb():
164
+ return
165
+ os.chdir(self.cwd)
166
+ shutil.rmtree(self.tempdir)
167
+
168
+
169
+ class GdbDebuggerTestCase(DebuggerTestCase):
170
+
171
+ def setUp(self):
172
+ if not test_gdb():
173
+ return
174
+
175
+ super(GdbDebuggerTestCase, self).setUp()
176
+
177
+ prefix_code = textwrap.dedent('''\
178
+ python
179
+
180
+ import os
181
+ import sys
182
+ import traceback
183
+
184
+ def excepthook(type, value, tb):
185
+ traceback.print_exception(type, value, tb)
186
+ sys.stderr.flush()
187
+ sys.stdout.flush()
188
+ os._exit(1)
189
+
190
+ sys.excepthook = excepthook
191
+
192
+ # Have tracebacks end up on sys.stderr (gdb replaces sys.stderr
193
+ # with an object that calls gdb.write())
194
+ sys.stderr = sys.__stderr__
195
+
196
+ end
197
+ ''')
198
+
199
+ code = textwrap.dedent('''\
200
+ python
201
+
202
+ from Cython.Debugger.Tests import test_libcython_in_gdb
203
+ test_libcython_in_gdb.main(version=%r)
204
+
205
+ end
206
+ ''' % (sys.version_info[:2],))
207
+
208
+ self.gdb_command_file = cygdb.make_command_file(self.tempdir,
209
+ prefix_code)
210
+
211
+ with open(self.gdb_command_file, 'a') as f:
212
+ f.write(code)
213
+
214
+ args = ['gdb', '-batch', '-x', self.gdb_command_file, '-n', '--args',
215
+ sys.executable, '-c', 'import codefile']
216
+
217
+ paths = []
218
+ path = os.environ.get('PYTHONPATH')
219
+ if path:
220
+ paths.append(path)
221
+ paths.append(os.path.dirname(os.path.dirname(
222
+ os.path.abspath(Cython.__file__))))
223
+ env = dict(os.environ, PYTHONPATH=os.pathsep.join(paths))
224
+
225
+ self.p = subprocess.Popen(
226
+ args,
227
+ stdout=subprocess.PIPE,
228
+ stderr=subprocess.PIPE,
229
+ env=env)
230
+
231
+ def tearDown(self):
232
+ if not test_gdb():
233
+ return
234
+
235
+ try:
236
+ super(GdbDebuggerTestCase, self).tearDown()
237
+ if self.p:
238
+ try: self.p.stdout.close()
239
+ except: pass
240
+ try: self.p.stderr.close()
241
+ except: pass
242
+ self.p.wait()
243
+ finally:
244
+ os.remove(self.gdb_command_file)
245
+
246
+
247
+ class TestAll(GdbDebuggerTestCase):
248
+
249
+ def test_all(self):
250
+ if not test_gdb():
251
+ return
252
+
253
+ out, err = self.p.communicate()
254
+ out = out.decode('UTF-8')
255
+ err = err.decode('UTF-8')
256
+
257
+ exit_status = self.p.returncode
258
+
259
+ if exit_status == 1:
260
+ sys.stderr.write(out)
261
+ sys.stderr.write(err)
262
+ elif exit_status >= 2:
263
+ border = u'*' * 30
264
+ start = u'%s v INSIDE GDB v %s' % (border, border)
265
+ stderr = u'%s v STDERR v %s' % (border, border)
266
+ end = u'%s ^ INSIDE GDB ^ %s' % (border, border)
267
+ errmsg = u'\n%s\n%s%s\n%s%s' % (start, out, stderr, err, end)
268
+
269
+ sys.stderr.write(errmsg)
270
+
271
+ # FIXME: re-enable this to make the test fail on internal failures
272
+ #self.assertEqual(exit_status, 0)
273
+
274
+
275
+ if __name__ == '__main__':
276
+ unittest.main()
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # empty file
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/TestLibCython.cpython-311.pyc ADDED
Binary file (13.4 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/__pycache__/test_libcython_in_gdb.cpython-311.pyc ADDED
Binary file (34.8 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/codefile ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cdef extern from "stdio.h":
2
+ int puts(char *s)
3
+
4
+ cdef extern from "cfuncs.h":
5
+ void some_c_function()
6
+
7
+ import os
8
+
9
+ cdef int c_var = 12
10
+ python_var = 13
11
+
12
+ def spam(a=0):
13
+ cdef:
14
+ int b, c
15
+
16
+ b = c = d = 0
17
+
18
+ b = 1
19
+ c = 2
20
+ int(10)
21
+ puts("spam")
22
+ os.path.join("foo", "bar")
23
+ some_c_function()
24
+
25
+ cpdef eggs():
26
+ pass
27
+
28
+ cdef ham():
29
+ pass
30
+
31
+ cdef class SomeClass(object):
32
+ def spam(self):
33
+ pass
34
+
35
+ def outer():
36
+ cdef object a = "an object"
37
+ def inner():
38
+ b = 2
39
+ # access closed over variables
40
+ print(a, b)
41
+ return inner
42
+
43
+ outer()()
44
+
45
+ spam()
46
+ print("bye!")
47
+
48
+ def use_ham():
49
+ ham()
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/test_libcython_in_gdb.py ADDED
@@ -0,0 +1,553 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests that run inside GDB.
3
+
4
+ Note: debug information is already imported by the file generated by
5
+ Cython.Debugger.Cygdb.make_command_file()
6
+ """
7
+
8
+ from __future__ import absolute_import
9
+
10
+ import os
11
+ import re
12
+ import sys
13
+ import trace
14
+ import inspect
15
+ import warnings
16
+ import unittest
17
+ import textwrap
18
+ import tempfile
19
+ import functools
20
+ import traceback
21
+ import itertools
22
+ #from test import test_support
23
+
24
+ import gdb
25
+
26
+ from .. import libcython
27
+ from .. import libpython
28
+ from . import TestLibCython as test_libcython
29
+ from ...Utils import add_metaclass
30
+
31
+ # for some reason sys.argv is missing in gdb
32
+ sys.argv = ['gdb']
33
+
34
+
35
+ def print_on_call_decorator(func):
36
+ @functools.wraps(func)
37
+ def wrapper(self, *args, **kwargs):
38
+ _debug(type(self).__name__, func.__name__)
39
+
40
+ try:
41
+ return func(self, *args, **kwargs)
42
+ except Exception:
43
+ _debug("An exception occurred:", traceback.format_exc())
44
+ raise
45
+
46
+ return wrapper
47
+
48
+ class TraceMethodCallMeta(type):
49
+
50
+ def __init__(self, name, bases, dict):
51
+ for func_name, func in dict.items():
52
+ if inspect.isfunction(func):
53
+ setattr(self, func_name, print_on_call_decorator(func))
54
+
55
+
56
+ @add_metaclass(TraceMethodCallMeta)
57
+ class DebugTestCase(unittest.TestCase):
58
+ """
59
+ Base class for test cases. On teardown it kills the inferior and unsets
60
+ all breakpoints.
61
+ """
62
+
63
+ def __init__(self, name):
64
+ super(DebugTestCase, self).__init__(name)
65
+ self.cy = libcython.cy
66
+ self.module = libcython.cy.cython_namespace['codefile']
67
+ self.spam_func, self.spam_meth = libcython.cy.functions_by_name['spam']
68
+ self.ham_func = libcython.cy.functions_by_qualified_name[
69
+ 'codefile.ham']
70
+ self.eggs_func = libcython.cy.functions_by_qualified_name[
71
+ 'codefile.eggs']
72
+
73
+ def read_var(self, varname, cast_to=None):
74
+ result = gdb.parse_and_eval('$cy_cvalue("%s")' % varname)
75
+ if cast_to:
76
+ result = cast_to(result)
77
+
78
+ return result
79
+
80
+ def local_info(self):
81
+ return gdb.execute('info locals', to_string=True)
82
+
83
+ def lineno_equals(self, source_line=None, lineno=None):
84
+ if source_line is not None:
85
+ lineno = test_libcython.source_to_lineno[source_line]
86
+ frame = gdb.selected_frame()
87
+ self.assertEqual(libcython.cython_info.lineno(frame), lineno)
88
+
89
+ def break_and_run(self, source_line):
90
+ break_lineno = test_libcython.source_to_lineno[source_line]
91
+ gdb.execute('cy break codefile:%d' % break_lineno, to_string=True)
92
+ gdb.execute('run', to_string=True)
93
+
94
+ def tearDown(self):
95
+ gdb.execute('delete breakpoints', to_string=True)
96
+ try:
97
+ gdb.execute('kill inferior 1', to_string=True)
98
+ except RuntimeError:
99
+ pass
100
+
101
+ gdb.execute('set args -c "import codefile"')
102
+
103
+
104
+ class TestDebugInformationClasses(DebugTestCase):
105
+
106
+ def test_CythonModule(self):
107
+ "test that debug information was parsed properly into data structures"
108
+ self.assertEqual(self.module.name, 'codefile')
109
+ global_vars = ('c_var', 'python_var', '__name__',
110
+ '__builtins__', '__doc__', '__file__')
111
+ assert set(global_vars).issubset(self.module.globals)
112
+
113
+ def test_CythonVariable(self):
114
+ module_globals = self.module.globals
115
+ c_var = module_globals['c_var']
116
+ python_var = module_globals['python_var']
117
+ self.assertEqual(c_var.type, libcython.CObject)
118
+ self.assertEqual(python_var.type, libcython.PythonObject)
119
+ self.assertEqual(c_var.qualified_name, 'codefile.c_var')
120
+
121
+ def test_CythonFunction(self):
122
+ self.assertEqual(self.spam_func.qualified_name, 'codefile.spam')
123
+ self.assertEqual(self.spam_meth.qualified_name,
124
+ 'codefile.SomeClass.spam')
125
+ self.assertEqual(self.spam_func.module, self.module)
126
+
127
+ assert self.eggs_func.pf_cname, (self.eggs_func, self.eggs_func.pf_cname)
128
+ assert not self.ham_func.pf_cname
129
+ assert not self.spam_func.pf_cname
130
+ assert not self.spam_meth.pf_cname
131
+
132
+ self.assertEqual(self.spam_func.type, libcython.CObject)
133
+ self.assertEqual(self.ham_func.type, libcython.CObject)
134
+
135
+ self.assertEqual(self.spam_func.arguments, ['a'])
136
+ self.assertEqual(self.spam_func.step_into_functions,
137
+ {'puts', 'some_c_function'})
138
+
139
+ expected_lineno = test_libcython.source_to_lineno['def spam(a=0):']
140
+ self.assertEqual(self.spam_func.lineno, expected_lineno)
141
+ self.assertEqual(sorted(self.spam_func.locals), list('abcd'))
142
+
143
+
144
+ class TestParameters(unittest.TestCase):
145
+
146
+ def test_parameters(self):
147
+ gdb.execute('set cy_colorize_code on')
148
+ assert libcython.parameters.colorize_code
149
+ gdb.execute('set cy_colorize_code off')
150
+ assert not libcython.parameters.colorize_code
151
+
152
+
153
+ class TestBreak(DebugTestCase):
154
+
155
+ def test_break(self):
156
+ breakpoint_amount = len(gdb.breakpoints() or ())
157
+ gdb.execute('cy break codefile.spam')
158
+
159
+ self.assertEqual(len(gdb.breakpoints()), breakpoint_amount + 1)
160
+ bp = gdb.breakpoints()[-1]
161
+ self.assertEqual(bp.type, gdb.BP_BREAKPOINT)
162
+ assert self.spam_func.cname in bp.location
163
+ assert bp.enabled
164
+
165
+ def test_python_break(self):
166
+ gdb.execute('cy break -p join')
167
+ assert 'def join(' in gdb.execute('cy run', to_string=True)
168
+
169
+ def test_break_lineno(self):
170
+ beginline = 'import os'
171
+ nextline = 'cdef int c_var = 12'
172
+
173
+ self.break_and_run(beginline)
174
+ self.lineno_equals(beginline)
175
+ step_result = gdb.execute('cy step', to_string=True)
176
+ self.lineno_equals(nextline)
177
+ assert step_result.rstrip().endswith(nextline)
178
+
179
+
180
+ # I removed this testcase, because it will never work, because
181
+ # gdb.execute(..., to_string=True) does not capture stdout and stderr of python.
182
+ # class TestKilled(DebugTestCase):
183
+ # def test_abort(self):
184
+ # gdb.execute("set args -c 'import os;print(123456789);os.abort()'")
185
+ # output = gdb.execute('cy run', to_string=True)
186
+ # assert 'abort' in output.lower()
187
+
188
+
189
+ class DebugStepperTestCase(DebugTestCase):
190
+
191
+ def step(self, varnames_and_values, source_line=None, lineno=None):
192
+ gdb.execute(self.command)
193
+ for varname, value in varnames_and_values:
194
+ self.assertEqual(self.read_var(varname), value, self.local_info())
195
+
196
+ self.lineno_equals(source_line, lineno)
197
+
198
+
199
+ class TestStep(DebugStepperTestCase):
200
+ """
201
+ Test stepping. Stepping happens in the code found in
202
+ Cython/Debugger/Tests/codefile.
203
+ """
204
+
205
+ def test_cython_step(self):
206
+ gdb.execute('cy break codefile.spam')
207
+
208
+ gdb.execute('run', to_string=True)
209
+ self.lineno_equals('def spam(a=0):')
210
+
211
+ gdb.execute('cy step', to_string=True)
212
+ self.lineno_equals('b = c = d = 0')
213
+
214
+ self.command = 'cy step'
215
+ self.step([('b', 0)], source_line='b = 1')
216
+ self.step([('b', 1), ('c', 0)], source_line='c = 2')
217
+ self.step([('c', 2)], source_line='int(10)')
218
+ self.step([], source_line='puts("spam")')
219
+
220
+ gdb.execute('cont', to_string=True)
221
+ self.assertEqual(len(gdb.inferiors()), 1)
222
+ self.assertEqual(gdb.inferiors()[0].pid, 0)
223
+
224
+ def test_c_step(self):
225
+ self.break_and_run('some_c_function()')
226
+ gdb.execute('cy step', to_string=True)
227
+ self.assertEqual(gdb.selected_frame().name(), 'some_c_function')
228
+
229
+ def test_python_step(self):
230
+ self.break_and_run('os.path.join("foo", "bar")')
231
+
232
+ result = gdb.execute('cy step', to_string=True)
233
+
234
+ curframe = gdb.selected_frame()
235
+ self.assertEqual(curframe.name(), 'PyEval_EvalFrameEx')
236
+
237
+ pyframe = libpython.Frame(curframe).get_pyop()
238
+ # With Python 3 inferiors, pyframe.co_name will return a PyUnicodePtr,
239
+ # be compatible
240
+ frame_name = pyframe.co_name.proxyval(set())
241
+ self.assertEqual(frame_name, 'join')
242
+ assert re.match(r'\d+ def join\(', result), result
243
+
244
+
245
+ class TestNext(DebugStepperTestCase):
246
+
247
+ def test_cython_next(self):
248
+ self.break_and_run('c = 2')
249
+
250
+ lines = (
251
+ 'int(10)',
252
+ 'puts("spam")',
253
+ 'os.path.join("foo", "bar")',
254
+ 'some_c_function()',
255
+ )
256
+
257
+ for line in lines:
258
+ gdb.execute('cy next')
259
+ self.lineno_equals(line)
260
+
261
+
262
+ class TestLocalsGlobals(DebugTestCase):
263
+
264
+ def test_locals(self):
265
+ self.break_and_run('int(10)')
266
+
267
+ result = gdb.execute('cy locals', to_string=True)
268
+ assert 'a = 0', repr(result)
269
+ assert 'b = (int) 1', result
270
+ assert 'c = (int) 2' in result, repr(result)
271
+
272
+ def test_globals(self):
273
+ self.break_and_run('int(10)')
274
+
275
+ result = gdb.execute('cy globals', to_string=True)
276
+ assert '__name__ ' in result, repr(result)
277
+ assert '__doc__ ' in result, repr(result)
278
+ assert 'os ' in result, repr(result)
279
+ assert 'c_var ' in result, repr(result)
280
+ assert 'python_var ' in result, repr(result)
281
+
282
+
283
+ class TestBacktrace(DebugTestCase):
284
+
285
+ def test_backtrace(self):
286
+ libcython.parameters.colorize_code.value = False
287
+
288
+ self.break_and_run('os.path.join("foo", "bar")')
289
+
290
+ def match_backtrace_output(result):
291
+ assert re.search(r'\#\d+ *0x.* in spam\(\) at .*codefile\.pyx:22',
292
+ result), result
293
+ assert 'os.path.join("foo", "bar")' in result, result
294
+
295
+ result = gdb.execute('cy bt', to_string=True)
296
+ match_backtrace_output(result)
297
+
298
+ result = gdb.execute('cy bt -a', to_string=True)
299
+ match_backtrace_output(result)
300
+
301
+ # Apparently not everyone has main()
302
+ # assert re.search(r'\#0 *0x.* in main\(\)', result), result
303
+
304
+
305
+ class TestFunctions(DebugTestCase):
306
+
307
+ def test_functions(self):
308
+ self.break_and_run('c = 2')
309
+ result = gdb.execute('print $cy_cname("b")', to_string=True)
310
+ assert re.search('__pyx_.*b', result), result
311
+
312
+ result = gdb.execute('print $cy_lineno()', to_string=True)
313
+ supposed_lineno = test_libcython.source_to_lineno['c = 2']
314
+ assert str(supposed_lineno) in result, (supposed_lineno, result)
315
+
316
+ result = gdb.execute('print $cy_cvalue("b")', to_string=True)
317
+ assert '= 1' in result
318
+
319
+
320
+ class TestPrint(DebugTestCase):
321
+
322
+ def test_print(self):
323
+ self.break_and_run('c = 2')
324
+ result = gdb.execute('cy print b', to_string=True)
325
+ self.assertEqual('b = (int) 1\n', result)
326
+ result = gdb.execute('cy print python_var', to_string=True)
327
+ self.assertEqual('python_var = 13\n', result)
328
+ result = gdb.execute('cy print c_var', to_string=True)
329
+ self.assertEqual('c_var = (int) 12\n', result)
330
+
331
+ correct_result_test_list_inside_func = '''\
332
+ 14 int b, c
333
+ 15
334
+ 16 b = c = d = 0
335
+ 17
336
+ 18 b = 1
337
+ > 19 c = 2
338
+ 20 int(10)
339
+ 21 puts("spam")
340
+ 22 os.path.join("foo", "bar")
341
+ 23 some_c_function()
342
+ '''
343
+ correct_result_test_list_outside_func = '''\
344
+ 5 void some_c_function()
345
+ 6
346
+ 7 import os
347
+ 8
348
+ 9 cdef int c_var = 12
349
+ > 10 python_var = 13
350
+ 11
351
+ 12 def spam(a=0):
352
+ 13 cdef:
353
+ 14 int b, c
354
+ '''
355
+
356
+
357
+ class TestList(DebugTestCase):
358
+ def workaround_for_coding_style_checker(self, correct_result_wrong_whitespace):
359
+ correct_result = ""
360
+ for line in correct_result_test_list_inside_func.split("\n"):
361
+ if len(line) < 10 and len(line) > 0:
362
+ line += " "*4
363
+ correct_result += line + "\n"
364
+ correct_result = correct_result[:-1]
365
+
366
+ def test_list_inside_func(self):
367
+ self.break_and_run('c = 2')
368
+ result = gdb.execute('cy list', to_string=True)
369
+ # We don't want to fail because of some trailing whitespace,
370
+ # so we remove trailing whitespaces with the following line
371
+ result = "\n".join([line.rstrip() for line in result.split("\n")])
372
+ self.assertEqual(correct_result_test_list_inside_func, result)
373
+
374
+ def test_list_outside_func(self):
375
+ self.break_and_run('python_var = 13')
376
+ result = gdb.execute('cy list', to_string=True)
377
+ # We don't want to fail because of some trailing whitespace,
378
+ # so we remove trailing whitespaces with the following line
379
+ result = "\n".join([line.rstrip() for line in result.split("\n")])
380
+ self.assertEqual(correct_result_test_list_outside_func, result)
381
+
382
+
383
+ class TestUpDown(DebugTestCase):
384
+
385
+ def test_updown(self):
386
+ self.break_and_run('os.path.join("foo", "bar")')
387
+ gdb.execute('cy step')
388
+ self.assertRaises(RuntimeError, gdb.execute, 'cy down')
389
+
390
+ result = gdb.execute('cy up', to_string=True)
391
+ assert 'spam()' in result
392
+ assert 'os.path.join("foo", "bar")' in result
393
+
394
+
395
+ class TestExec(DebugTestCase):
396
+
397
+ def setUp(self):
398
+ super(TestExec, self).setUp()
399
+ self.fd, self.tmpfilename = tempfile.mkstemp()
400
+ self.tmpfile = os.fdopen(self.fd, 'r+')
401
+
402
+ def tearDown(self):
403
+ super(TestExec, self).tearDown()
404
+
405
+ try:
406
+ self.tmpfile.close()
407
+ finally:
408
+ os.remove(self.tmpfilename)
409
+
410
+ def eval_command(self, command):
411
+ gdb.execute('cy exec open(%r, "w").write(str(%s))' %
412
+ (self.tmpfilename, command))
413
+ return self.tmpfile.read().strip()
414
+
415
+ def test_cython_exec(self):
416
+ self.break_and_run('os.path.join("foo", "bar")')
417
+
418
+ # test normal behaviour
419
+ self.assertEqual("[0]", self.eval_command('[a]'))
420
+
421
+ return #The test after this return freezes gdb, so I temporarily removed it.
422
+ # test multiline code
423
+ result = gdb.execute(textwrap.dedent('''\
424
+ cy exec
425
+ pass
426
+
427
+ "nothing"
428
+ end
429
+ '''))
430
+ result = self.tmpfile.read().rstrip()
431
+ self.assertEqual('', result)
432
+
433
+ def test_python_exec(self):
434
+ self.break_and_run('os.path.join("foo", "bar")')
435
+ gdb.execute('cy step')
436
+
437
+ gdb.execute('cy exec some_random_var = 14')
438
+ self.assertEqual('14', self.eval_command('some_random_var'))
439
+
440
+
441
+ class CySet(DebugTestCase):
442
+
443
+ def test_cyset(self):
444
+ self.break_and_run('os.path.join("foo", "bar")')
445
+
446
+ gdb.execute('cy set a = $cy_eval("{None: []}")')
447
+ stringvalue = self.read_var("a", cast_to=str)
448
+ self.assertEqual(stringvalue, "{None: []}")
449
+
450
+
451
+ class TestCyEval(DebugTestCase):
452
+ "Test the $cy_eval() gdb function."
453
+
454
+ def test_cy_eval(self):
455
+ # This function leaks a few objects in the GDB python process. This
456
+ # is no biggie
457
+ self.break_and_run('os.path.join("foo", "bar")')
458
+
459
+ result = gdb.execute('print $cy_eval("None")', to_string=True)
460
+ assert re.match(r'\$\d+ = None\n', result), result
461
+
462
+ result = gdb.execute('print $cy_eval("[a]")', to_string=True)
463
+ assert re.match(r'\$\d+ = \[0\]', result), result
464
+
465
+
466
+ class TestClosure(DebugTestCase):
467
+
468
+ def break_and_run_func(self, funcname):
469
+ gdb.execute('cy break ' + funcname)
470
+ gdb.execute('cy run')
471
+
472
+ def test_inner(self):
473
+ self.break_and_run_func('inner')
474
+ self.assertEqual('', gdb.execute('cy locals', to_string=True))
475
+
476
+ # Allow the Cython-generated code to initialize the scope variable
477
+ gdb.execute('cy step')
478
+
479
+ self.assertEqual(str(self.read_var('a')), "'an object'")
480
+ print_result = gdb.execute('cy print a', to_string=True).strip()
481
+ self.assertEqual(print_result, "a = 'an object'")
482
+
483
+ def test_outer(self):
484
+ self.break_and_run_func('outer')
485
+ self.assertEqual('', gdb.execute('cy locals', to_string=True))
486
+
487
+ # Initialize scope with 'a' uninitialized
488
+ gdb.execute('cy step')
489
+ self.assertEqual('', gdb.execute('cy locals', to_string=True))
490
+
491
+ # Initialize 'a' to 1
492
+ gdb.execute('cy step')
493
+ print_result = gdb.execute('cy print a', to_string=True).strip()
494
+ self.assertEqual(print_result, "a = 'an object'")
495
+
496
+
497
+ _do_debug = os.environ.get('GDB_DEBUG')
498
+ if _do_debug:
499
+ _debug_file = open('/dev/tty', 'w')
500
+
501
+ def _debug(*messages):
502
+ if _do_debug:
503
+ messages = itertools.chain([sys._getframe(1).f_code.co_name, ':'],
504
+ messages)
505
+ _debug_file.write(' '.join(str(msg) for msg in messages) + '\n')
506
+
507
+
508
+ def run_unittest_in_module(modulename):
509
+ try:
510
+ gdb.lookup_type('PyModuleObject')
511
+ except RuntimeError:
512
+ msg = ("Unable to run tests, Python was not compiled with "
513
+ "debugging information. Either compile python with "
514
+ "-g or get a debug build (configure with --with-pydebug).")
515
+ warnings.warn(msg)
516
+ os._exit(1)
517
+ else:
518
+ m = __import__(modulename, fromlist=[''])
519
+ tests = inspect.getmembers(m, inspect.isclass)
520
+
521
+ # test_support.run_unittest(tests)
522
+
523
+ test_loader = unittest.TestLoader()
524
+ suite = unittest.TestSuite(
525
+ [test_loader.loadTestsFromTestCase(cls) for name, cls in tests])
526
+
527
+ result = unittest.TextTestRunner(verbosity=1).run(suite)
528
+ return result.wasSuccessful()
529
+
530
+ def runtests():
531
+ """
532
+ Run the libcython and libpython tests. Ensure that an appropriate status is
533
+ returned to the parent test process.
534
+ """
535
+ from Cython.Debugger.Tests import test_libpython_in_gdb
536
+
537
+ success_libcython = run_unittest_in_module(__name__)
538
+ success_libpython = run_unittest_in_module(test_libpython_in_gdb.__name__)
539
+
540
+ if not success_libcython or not success_libpython:
541
+ sys.exit(2)
542
+
543
+ def main(version, trace_code=False):
544
+ global inferior_python_version
545
+
546
+ inferior_python_version = version
547
+
548
+ if trace_code:
549
+ tracer = trace.Trace(count=False, trace=True, outfile=sys.stderr,
550
+ ignoredirs=[sys.prefix, sys.exec_prefix])
551
+ tracer.runfunc(runtests)
552
+ else:
553
+ runtests()
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/Tests/test_libpython_in_gdb.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: UTF-8 -*-
2
+
3
+ """
4
+ Test libpython.py. This is already partly tested by test_libcython_in_gdb and
5
+ Lib/test/test_gdb.py in the Python source. These tests are run in gdb and
6
+ called from test_libcython_in_gdb.main()
7
+ """
8
+
9
+ import gdb
10
+
11
+ from Cython.Debugger import libcython
12
+ from Cython.Debugger import libpython
13
+
14
+ from . import test_libcython_in_gdb
15
+ from .test_libcython_in_gdb import inferior_python_version
16
+
17
+
18
class TestPrettyPrinters(test_libcython_in_gdb.DebugTestCase):
    """
    Test whether types of Python objects are correctly inferred and that
    the right libpython.PySomeTypeObjectPtr classes are instantiated.

    Also test whether values are appropriately formatted (don't be too
    laborious as Lib/test/test_gdb.py already covers this extensively).

    Don't take care of decreffing newly allocated objects as a new
    interpreter is started for every test anyway.
    """

    def setUp(self):
        super(TestPrettyPrinters, self).setUp()
        self.break_and_run('b = c = d = 0')

    def get_pyobject(self, code):
        """Evaluate *code* in the inferior and return the non-NULL gdb.Value."""
        value = gdb.parse_and_eval(code)
        assert libpython.pointervalue(value) != 0
        return value

    def pyobject_fromcode(self, code, gdbvar=None):
        """Wrap the PyObject produced by *code* in a libpython.PyObjectPtr.

        When *gdbvar* is given, the result is first stored in a gdb
        convenience variable of that name so later expressions can refer
        to it.
        """
        if gdbvar is not None:
            d = {'varname': gdbvar, 'code': code}
            gdb.execute('set $%(varname)s = %(code)s' % d)
            code = '$' + gdbvar

        return libpython.PyObjectPtr.from_pyobject_ptr(self.get_pyobject(code))

    def get_repr(self, pyobject):
        """Return the (truncated) repr the pretty printer produces."""
        return pyobject.get_truncated_repr(libpython.MAX_OUTPUT_LEN)

    def alloc_bytestring(self, string, gdbvar=None):
        """Allocate a bytes/str object in the inferior from *string*."""
        if inferior_python_version < (3, 0):
            funcname = 'PyString_FromStringAndSize'
        else:
            funcname = 'PyBytes_FromStringAndSize'

        # Embedded double quotes would break the C string literal below.
        assert b'"' not in string

        # ensure double quotes
        code = '(PyObject *) %s("%s", %d)' % (
            funcname, string.decode('iso8859-1'), len(string))
        return self.pyobject_fromcode(code, gdbvar=gdbvar)

    def alloc_unicodestring(self, string, gdbvar=None):
        """Allocate a unicode object in the inferior from *string*."""
        postfix = libpython.get_inferior_unicode_postfix()
        funcname = 'PyUnicode%s_DecodeUnicodeEscape' % (postfix,)

        data = string.encode("unicode_escape").decode('iso8859-1')
        # BUGFIX: escape backslashes BEFORE quotes.  The previous order
        # (quotes first, then backslashes) doubled the backslash that had
        # just been inserted in front of each quote, corrupting the C
        # string literal for any input containing '"'.
        escaped = data.replace('\\', r'\\').replace('"', r'\"')
        return self.pyobject_fromcode(
            '(PyObject *) %s("%s", %d, "strict")' % (
                funcname, escaped, len(data)),
            gdbvar=gdbvar)

    def test_bytestring(self):
        bytestring = self.alloc_bytestring(b"spam")

        if inferior_python_version < (3, 0):
            bytestring_class = libpython.PyStringObjectPtr
            expected = repr(b"spam")
        else:
            bytestring_class = libpython.PyBytesObjectPtr
            expected = "b'spam'"

        self.assertEqual(type(bytestring), bytestring_class)
        self.assertEqual(self.get_repr(bytestring), expected)

    def test_unicode(self):
        unicode_string = self.alloc_unicodestring(u"spam ἄλφα")

        expected = u"'spam ἄλφα'"
        if inferior_python_version < (3, 0):
            expected = 'u' + expected

        self.assertEqual(type(unicode_string), libpython.PyUnicodeObjectPtr)
        self.assertEqual(self.get_repr(unicode_string), expected)

    def test_int(self):
        # PyInt only exists on Python 2 inferiors.
        if inferior_python_version < (3, 0):
            intval = self.pyobject_fromcode('PyInt_FromLong(100)')
            self.assertEqual(type(intval), libpython.PyIntObjectPtr)
            self.assertEqual(self.get_repr(intval), '100')

    def test_long(self):
        longval = self.pyobject_fromcode('PyLong_FromLong(200)',
                                         gdbvar='longval')
        assert gdb.parse_and_eval('$longval->ob_type == &PyLong_Type')

        self.assertEqual(type(longval), libpython.PyLongObjectPtr)
        self.assertEqual(self.get_repr(longval), '200')

    def test_frame_type(self):
        frame = self.pyobject_fromcode('PyEval_GetFrame()')

        self.assertEqual(type(frame), libpython.PyFrameObjectPtr)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/Cygdb.cpython-311.pyc ADDED
Binary file (9.26 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Debugger/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (216 Bytes). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Distutils/extension.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Pyrex.Distutils.extension
2
+
3
+ Provides a modified Extension class, that understands how to describe
4
+ Pyrex extension modules in setup scripts."""
5
+
6
+ __revision__ = "$Id:$"
7
+
8
+ import sys
9
+ import distutils.extension as _Extension
10
+
11
+
12
class Extension(_Extension.Extension):
    # distutils Extension subclass that additionally carries Cython-specific
    # build options (cython_*).  Legacy pyrex_* keyword arguments are
    # accepted and translated to their cython_* equivalents.
    #
    # When adding arguments to this constructor, be sure to update
    # user_options.extend in build_ext.py.
    def __init__(self, name, sources,
                 include_dirs=None,
                 define_macros=None,
                 undef_macros=None,
                 library_dirs=None,
                 libraries=None,
                 runtime_library_dirs=None,
                 extra_objects=None,
                 extra_compile_args=None,
                 extra_link_args=None,
                 export_symbols=None,
                 #swig_opts=None,
                 depends=None,
                 language=None,
                 cython_include_dirs=None,
                 cython_directives=None,
                 cython_create_listing=False,
                 cython_line_directives=False,
                 cython_cplus=False,
                 cython_c_in_temp=False,
                 cython_gen_pxi=False,
                 cython_gdb=False,
                 no_c_in_traceback=False,
                 cython_compile_time_env=None,
                 **kw):

        # Translate pyrex_X to cython_X for backwards compatibility.
        had_pyrex_options = False
        for key in list(kw):
            if key.startswith('pyrex_'):
                had_pyrex_options = True
                kw['cython' + key[5:]] = kw.pop(key)
        if had_pyrex_options:
            # Re-enter this constructor with the translated keywords; on
            # re-entry no pyrex_* keys remain, so the normal path below runs.
            # NOTE(review): explicit cython_* arguments passed alongside
            # pyrex_* options are not forwarded here and appear to be
            # dropped — confirm whether that is intended.
            Extension.__init__(
                self, name, sources,
                include_dirs=include_dirs,
                define_macros=define_macros,
                undef_macros=undef_macros,
                library_dirs=library_dirs,
                libraries=libraries,
                runtime_library_dirs=runtime_library_dirs,
                extra_objects=extra_objects,
                extra_compile_args=extra_compile_args,
                extra_link_args=extra_link_args,
                export_symbols=export_symbols,
                #swig_opts=swig_opts,
                depends=depends,
                language=language,
                no_c_in_traceback=no_c_in_traceback,
                **kw)
            return

        # Normal path: initialise the distutils base class with the
        # standard options only.
        _Extension.Extension.__init__(
            self, name, sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            undef_macros=undef_macros,
            library_dirs=library_dirs,
            libraries=libraries,
            runtime_library_dirs=runtime_library_dirs,
            extra_objects=extra_objects,
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
            export_symbols=export_symbols,
            #swig_opts=swig_opts,
            depends=depends,
            language=language,
            **kw)

        # Cython-specific options are stored as plain attributes; mutable
        # defaults are replaced with fresh objects per instance.
        self.cython_include_dirs = cython_include_dirs or []
        self.cython_directives = cython_directives or {}
        self.cython_create_listing = cython_create_listing
        self.cython_line_directives = cython_line_directives
        self.cython_cplus = cython_cplus
        self.cython_c_in_temp = cython_c_in_temp
        self.cython_gen_pxi = cython_gen_pxi
        self.cython_gdb = cython_gdb
        self.no_c_in_traceback = no_c_in_traceback
        self.cython_compile_time_env = cython_compile_time_env
94
+
95
+ # class Extension
96
+
97
# Re-export distutils' setup-file parser under this module's namespace.
read_setup_file = _Extension.read_setup_file


# reuse and extend original docstring from base class (if we can)
if sys.version_info[0] < 3 and _Extension.Extension.__doc__:
    # -OO discards docstrings
    Extension.__doc__ = _Extension.Extension.__doc__ + """\
    cython_include_dirs : [string]
        list of directories to search for Pyrex header files (.pxd) (in
        Unix form for portability)
    cython_directives : {string:value}
        dict of compiler directives
    cython_create_listing_file : boolean
        write pyrex error messages to a listing (.lis) file.
    cython_line_directives : boolean
        emit pyx line numbers for debugging/profiling
    cython_cplus : boolean
        use the C++ compiler for compiling and linking.
    cython_c_in_temp : boolean
        put generated C files in temp directory.
    cython_gen_pxi : boolean
        generate .pxi file for public declarations
    cython_gdb : boolean
        generate Cython debug information for this extension for cygdb
    no_c_in_traceback : boolean
        emit the c file and line number from the traceback for exceptions
    """
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/DFA.pxd ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# cython: auto_pickle=False
# Declarations for the NFA-to-DFA conversion; implementation lives in DFA.py.

cimport cython

from . cimport Machines
from .Transitions cimport TransitionMap


@cython.final
cdef class StateMap:
    # Bookkeeping for the subset construction: correspondence between sets
    # of old (NFA) states and single states of the new machine.
    cdef Machines.FastMachine new_machine  # the deterministic machine being built
    cdef dict old_to_new_dict
    cdef dict new_to_old_dict

    cdef old_to_new(self, dict old_state_set)

    @cython.locals(state=Machines.Node)
    cdef highest_priority_action(self, dict state_set)

    # Build a hashable key identifying a set of old states.
    cdef make_key(self, dict state_set)


@cython.locals(new_machine=Machines.FastMachine, transitions=TransitionMap)
cpdef nfa_to_dfa(Machines.Machine old_machine, debug=*)

cdef set_epsilon_closure(dict state_set)
cdef dict epsilon_closure(Machines.Node state)

@cython.locals(state_set_2=dict, state2=Machines.Node)
cdef add_to_epsilon_closure(dict state_set, Machines.Node state)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Machines.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # cython: auto_pickle=False
2
+ """
3
+ Python Lexical Analyser
4
+
5
+ Classes for building NFAs and DFAs
6
+ """
7
+ from __future__ import absolute_import
8
+
9
+ import cython
10
+ from .Transitions import TransitionMap
11
+
12
+ maxint = 2**31-1 # sentinel value
13
+
14
+ if not cython.compiled:
15
+ try:
16
+ unichr
17
+ except NameError:
18
+ unichr = chr
19
+
20
+ LOWEST_PRIORITY = -maxint
21
+
22
+
23
class Machine(object):
    """A collection of Nodes representing an NFA or DFA."""

    def __init__(self):
        self.states = []           # every Node owned by this machine
        self.initial_states = {}   # {(name, bol): Node}
        self.next_state_number = 1

    def __del__(self):
        # Explicitly clear each node's references on teardown.
        for node in self.states:
            node.destroy()

    def new_state(self):
        """Add a new state to the machine and return it."""
        node = Node()
        node.number = self.next_state_number
        self.next_state_number += 1
        self.states.append(node)
        return node

    def new_initial_state(self, name):
        """Create a fresh state and register it as the entry point *name*."""
        node = self.new_state()
        self.make_initial_state(name, node)
        return node

    def make_initial_state(self, name, state):
        self.initial_states[name] = state

    def get_initial_state(self, name):
        return self.initial_states[name]

    def dump(self, file):
        """Write a human-readable description of the machine to *file*."""
        file.write("Plex.Machine:\n")
        if self.initial_states is not None:
            file.write(" Initial states:\n")
            for name, state in sorted(self.initial_states.items()):
                file.write(" '%s': %d\n" % (name, state.number))
        for node in self.states:
            node.dump(file)
+ s.dump(file)
62
+
63
+
64
class Node(object):
    """A state of an NFA or DFA."""

    def __init__(self):
        # Preinitialise the list of empty transitions, because
        # the nfa-to-dfa algorithm needs it
        self.transitions = TransitionMap()
        self.action_priority = LOWEST_PRIORITY
        self.action = None
        self.number = 0              # for debug output
        self.epsilon_closure = None  # used by nfa_to_dfa()

    def destroy(self):
        """Drop references so the node no longer keeps anything alive."""
        self.transitions = None
        self.action = None
        self.epsilon_closure = None

    def add_transition(self, event, new_state):
        self.transitions.add(event, new_state)

    def link_to(self, state):
        """Add an epsilon-move from this state to another state."""
        self.add_transition('', state)

    def set_action(self, action, priority):
        """Make this an accepting state with the given action. If
        there is already an action, choose the action with highest
        priority."""
        if priority <= self.action_priority:
            return
        self.action = action
        self.action_priority = priority

    def get_action(self):
        return self.action

    def get_action_priority(self):
        return self.action_priority

    def is_accepting(self):
        return self.action is not None

    def __str__(self):
        return "State %d" % self.number

    def dump(self, file):
        """Write this state's transitions and action to *file*."""
        file.write(" State %d:\n" % self.number)
        self.transitions.dump(file)
        if self.action is not None:
            file.write(" %s [priority %d]\n" % (self.action, self.action_priority))

    def __lt__(self, other):
        return self.number < other.number

    def __hash__(self):
        # Prevent overflowing hash values due to arbitrarily large unsigned addresses.
        return id(self) & maxint
126
+
127
+
128
class FastMachine(object):
    """
    FastMachine is a deterministic machine represented in a way that
    allows fast scanning.
    """
    def __init__(self):
        self.initial_states = {}  # {state_name:state}
        self.states = []  # [state] where state = {event:state, 'else':state, 'action':Action}
        self.next_number = 1  # for debugging
        # Template copied for every new state: the special event keys are
        # pre-populated so lookups never need a default.
        self.new_state_template = {
            '': None, 'bol': None, 'eol': None, 'eof': None, 'else': None
        }

    def __del__(self):
        # Empty each state dict on teardown.
        for state in self.states:
            state.clear()

    def new_state(self, action=None):
        # Allocate a fresh state dict, pre-seeded from the template and
        # tagged with a sequential number for debug output.
        number = self.next_number
        self.next_number = number + 1
        result = self.new_state_template.copy()
        result['number'] = number
        result['action'] = action
        self.states.append(result)
        return result

    def make_initial_state(self, name, state):
        self.initial_states[name] = state

    @cython.locals(code0=cython.int, code1=cython.int, maxint=cython.int, state=dict)
    def add_transitions(self, state, event, new_state, maxint=maxint):
        # *event* is either a (code0, code1) character-code range or a
        # special event name such as 'bol'/'eol'/'eof'.
        if type(event) is tuple:
            code0, code1 = event
            if code0 == -maxint:
                # Range open at the bottom acts as the catch-all transition.
                state['else'] = new_state
            elif code1 != maxint:
                # Half-open range [code0, code1): one dict entry per char.
                # NOTE(review): ranges open at the top (code1 == maxint)
                # fall through with no entry added — confirm this is
                # intentional against the DFA construction code.
                while code0 < code1:
                    state[unichr(code0)] = new_state
                    code0 += 1
        else:
            state[event] = new_state

    def get_initial_state(self, name):
        return self.initial_states[name]

    def dump(self, file):
        # Write a human-readable description of the machine to *file*.
        file.write("Plex.FastMachine:\n")
        file.write(" Initial states:\n")
        for name, state in sorted(self.initial_states.items()):
            file.write(" %s: %s\n" % (repr(name), state['number']))
        for state in self.states:
            self.dump_state(state, file)

    def dump_state(self, state, file):
        # Header
        file.write(" State %d:\n" % state['number'])
        # Transitions
        self.dump_transitions(state, file)
        # Action
        action = state['action']
        if action is not None:
            file.write(" %s\n" % action)

    def dump_transitions(self, state, file):
        # Group single-character transitions by target state so they can be
        # printed as compact character ranges.
        chars_leading_to_state = {}
        special_to_state = {}
        for (c, s) in state.items():
            if len(c) == 1:
                chars = chars_leading_to_state.get(id(s), None)
                if chars is None:
                    chars = []
                    chars_leading_to_state[id(s)] = chars
                chars.append(c)
            elif len(c) <= 4:
                # Captures '', 'bol', 'eol', 'eof', 'else' while skipping the
                # bookkeeping keys 'number' and 'action' (both length 6).
                special_to_state[c] = s
        ranges_to_state = {}
        # NOTE(review): this loop rebinds the *state* parameter; from here
        # on 'state' refers to entries of self.states, not the argument.
        for state in self.states:
            char_list = chars_leading_to_state.get(id(state), None)
            if char_list:
                ranges = self.chars_to_ranges(char_list)
                ranges_to_state[ranges] = state
        for ranges in sorted(ranges_to_state):
            key = self.ranges_to_string(ranges)
            state = ranges_to_state[ranges]
            file.write(" %s --> State %d\n" % (key, state['number']))
        for key in ('bol', 'eol', 'eof', 'else'):
            state = special_to_state.get(key, None)
            if state:
                file.write(" %s --> State %d\n" % (key, state['number']))

    @cython.locals(char_list=list, i=cython.Py_ssize_t, n=cython.Py_ssize_t, c1=cython.long, c2=cython.long)
    def chars_to_ranges(self, char_list):
        # Collapse a list of characters into (first, last) pairs of
        # consecutive runs, e.g. ['a','b','c','x'] -> (('a','c'), ('x','x')).
        char_list.sort()
        i = 0
        n = len(char_list)
        result = []
        while i < n:
            c1 = ord(char_list[i])
            c2 = c1
            i += 1
            while i < n and ord(char_list[i]) == c2 + 1:
                i += 1
                c2 += 1
            result.append((chr(c1), chr(c2)))
        return tuple(result)

    def ranges_to_string(self, range_list):
        return ','.join(map(self.range_to_string, range_list))

    def range_to_string(self, range_tuple):
        # Single characters print as 'c', runs as 'a'..'z'.
        (c1, c2) = range_tuple
        if c1 == c2:
            return repr(c1)
        else:
            return "%s..%s" % (repr(c1), repr(c2))
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Scanners.pxd ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from __future__ import absolute_import

import cython

from Cython.Plex.Actions cimport Action

cdef class Scanner:
    # Declarations for Plex's Scanner; the implementation lives in
    # Scanners.py.

    cdef public lexicon
    cdef public stream
    cdef public name
    cdef public unicode buffer
    cdef public Py_ssize_t buf_start_pos
    cdef public Py_ssize_t next_pos
    cdef public Py_ssize_t cur_pos
    cdef public Py_ssize_t cur_line
    cdef public Py_ssize_t cur_line_start
    cdef public Py_ssize_t start_pos
    cdef tuple current_scanner_position_tuple
    cdef public tuple last_token_position_tuple
    cdef public text
    cdef public initial_state  # int?
    cdef public state_name
    cdef public list queue
    cdef public bint trace
    cdef public cur_char
    cdef public long input_state

    cdef public level

    @cython.locals(input_state=long)
    cdef inline next_char(self)
    @cython.locals(action=Action)
    cpdef tuple read(self)
    cdef inline unread(self, token, value, position)
    cdef inline get_current_scan_pos(self)
    cdef inline tuple scan_a_token(self)
    ##cdef tuple position(self)  # used frequently by Parsing.py

    # Hot path of the scanner: keeps machine state in C locals.
    @cython.final
    @cython.locals(cur_pos=Py_ssize_t, cur_line=Py_ssize_t, cur_line_start=Py_ssize_t,
                   input_state=long, next_pos=Py_ssize_t, state=dict,
                   buf_start_pos=Py_ssize_t, buf_len=Py_ssize_t, buf_index=Py_ssize_t,
                   trace=bint, discard=Py_ssize_t, data=unicode, buffer=unicode)
    cdef run_machine_inlined(self)

    cdef inline begin(self, state)
    cdef inline produce(self, value, text = *)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Transitions.pxd ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Declarations for Plex's transition map; implementation in Transitions.py.
cimport cython

cdef long maxint

@cython.final
cdef class TransitionMap:
    # Sorted list of code boundaries interleaved with target-state sets,
    # plus a dict for the special (non-character) events.
    cdef list map
    cdef dict special

    @cython.locals(i=cython.Py_ssize_t, j=cython.Py_ssize_t)
    cpdef add(self, event, new_state)

    @cython.locals(i=cython.Py_ssize_t, j=cython.Py_ssize_t)
    cpdef add_set(self, event, new_set)

    @cython.locals(i=cython.Py_ssize_t, n=cython.Py_ssize_t, else_set=cython.bint)
    cpdef iteritems(self)

    # Binary search helper used when inserting a range boundary.
    @cython.locals(map=list, lo=cython.Py_ssize_t, mid=cython.Py_ssize_t, hi=cython.Py_ssize_t)
    cdef split(self, long code)

    cdef get_special(self, event)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/CodeWriter.cpython-311.pyc ADDED
Binary file (53.6 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Shadow.cpython-311.pyc ADDED
Binary file (29.9 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/StringIOTree.cpython-311.pyc ADDED
Binary file (6.29 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/TestUtils.cpython-311.pyc ADDED
Binary file (22.9 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/Utils.cpython-311.pyc ADDED
Binary file (33.1 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (689 Bytes). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-311.pyc ADDED
Binary file (5.96 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-FileCopyrightText: 2015 Eric Larson
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+ from __future__ import annotations
5
+
6
+
7
+ from datetime import datetime, timezone
8
+ from typing import TYPE_CHECKING
9
+
10
+ from pip._vendor.cachecontrol.cache import BaseCache
11
+
12
+ if TYPE_CHECKING:
13
+ from redis import Redis
14
+
15
+
16
class RedisCache(BaseCache):
    """cachecontrol cache backend backed by a Redis connection."""

    def __init__(self, conn: Redis[bytes]) -> None:
        self.conn = conn

    def get(self, key: str) -> bytes | None:
        return self.conn.get(key)

    def set(
        self, key: str, value: bytes, expires: int | datetime | None = None
    ) -> None:
        if not expires:
            # No expiry requested: store the value without a TTL.
            self.conn.set(key, value)
            return

        if isinstance(expires, datetime):
            now_utc = datetime.now(timezone.utc)
            if expires.tzinfo is None:
                # Compare naive against naive to avoid a TypeError.
                now_utc = now_utc.replace(tzinfo=None)
            seconds = int((expires - now_utc).total_seconds())
            self.conn.setex(key, seconds, value)
        else:
            self.conn.setex(key, expires, value)

    def delete(self, key: str) -> None:
        self.conn.delete(key)

    def clear(self) -> None:
        """Helper for clearing all the keys in a database. Use with
        caution!"""
        for key in self.conn.keys():
            self.conn.delete(key)

    def close(self) -> None:
        """Redis uses connection pooling, no need to close the connection."""
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .core import contents, where
2
+
3
+ __all__ = ["contents", "where"]
4
+ __version__ = "2024.08.30"
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (362 Bytes). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-311.pyc ADDED
Binary file (763 Bytes). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-311.pyc ADDED
Binary file (3.81 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/certifi/cacert.pem ADDED
The diff for this file is too large to render. See raw diff
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/__init__.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pygments
3
+ ~~~~~~~~
4
+
5
+ Pygments is a syntax highlighting package written in Python.
6
+
7
+ It is a generic syntax highlighter for general use in all kinds of software
8
+ such as forum systems, wikis or other applications that need to prettify
9
+ source code. Highlights are:
10
+
11
+ * a wide range of common languages and markup formats is supported
12
+ * special attention is paid to details, increasing quality by a fair amount
13
+ * support for new languages and formats are added easily
14
+ * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
15
+ formats that PIL supports, and ANSI sequences
16
+ * it is usable as a command-line tool and as a library
17
+ * ... and it highlights even Brainfuck!
18
+
19
+ The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
20
+
21
+ .. _Pygments master branch:
22
+ https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
23
+
24
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
25
+ :license: BSD, see LICENSE for details.
26
+ """
27
+ from io import StringIO, BytesIO
28
+
29
+ __version__ = '2.18.0'
30
+ __docformat__ = 'restructuredtext'
31
+
32
+ __all__ = ['lex', 'format', 'highlight']
33
+
34
+
35
def lex(code, lexer):
    """
    Lex `code` with the `lexer` (must be a `Lexer` instance)
    and return an iterable of tokens. Currently, this only calls
    `lexer.get_tokens()`.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError:
        # Heuristic to catch a common mistake.
        from pip._vendor.pygments.lexer import RegexLexer
        passed_a_class = isinstance(lexer, type) and issubclass(lexer, RegexLexer)
        if passed_a_class:
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise
50
+
51
+
52
def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """
    Format ``tokens`` (an iterable of tokens) with the formatter ``formatter``
    (a `Formatter` instance).

    If ``outfile`` is given and a valid file object (an object with a
    ``write`` method), the result will be written to it, otherwise it
    is returned as a string.
    """
    try:
        if outfile:
            formatter.format(tokens, outfile)
            return None
        # Byte-producing formatters (those with an encoding) get a bytes
        # buffer, the rest a text buffer.
        sink = BytesIO() if getattr(formatter, 'encoding', None) else StringIO()
        formatter.format(tokens, sink)
        return sink.getvalue()
    except TypeError:
        # Heuristic to catch a common mistake.
        from pip._vendor.pygments.formatter import Formatter
        if isinstance(formatter, type) and issubclass(formatter, Formatter):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise
75
+
76
+
77
def highlight(code, lexer, formatter, outfile=None):
    """
    This is the most high-level highlighting function. It combines `lex` and
    `format` in one function.
    """
    tokens = lex(code, lexer)
    return format(tokens, formatter, outfile)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.console
3
+ ~~~~~~~~~~~~~~~~
4
+
5
+ Format colored console output.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
esc = "\x1b["

# ANSI SGR escape sequences, keyed by attribute/colour name.
codes = {"": "", "reset": esc + "39;49;00m"}

# Text attributes occupy SGR codes 01..06.
for _i, _attr in enumerate(
        ("bold", "faint", "standout", "underline", "blink", "overline"), 1):
    codes[_attr] = esc + "%02dm" % _i

dark_colors = ["black", "red", "green", "yellow", "blue",
               "magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow",
                "brightblue", "brightmagenta", "brightcyan", "white"]

# Foreground colours start at 30; the bright variants sit 60 higher.
for _i, (_dark, _light) in enumerate(zip(dark_colors, light_colors), 30):
    codes[_dark] = esc + "%im" % _i
    codes[_light] = esc + "%im" % (60 + _i)

del _i, _attr, _dark, _light

codes["white"] = codes["bold"]
38
+
39
+
40
def reset_color():
    """Return the ANSI sequence that resets colour and attributes."""
    return codes["reset"]
42
+
43
+
44
def colorize(color_key, text):
    """Wrap *text* in the ANSI code for *color_key* followed by a reset."""
    return ''.join((codes[color_key], text, codes["reset"]))
46
+
47
+
48
def ansiformat(attr, text):
    """
    Format ``text`` with a color and/or some attributes::

        color       normal color
        *color*     bold color
        _color_     underlined color
        +color+     blinking color
    """
    parts = []
    # Peel surrounding marker characters off *attr*, emitting the
    # corresponding attribute code for each pair found.
    for marker, code_name in (('+', 'blink'), ('*', 'bold'), ('_', 'underline')):
        if attr[:1] == attr[-1:] == marker:
            parts.append(codes[code_name])
            attr = attr[1:-1]
    parts.append(codes[attr])
    parts.append(text)
    parts.append(codes['reset'])
    return ''.join(parts)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/filter.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.filter
3
+ ~~~~~~~~~~~~~~~
4
+
5
+ Module that implements the default filter.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+
12
def apply_filters(stream, filters, lexer=None):
    """
    Use this method to apply an iterable of filters to
    a stream. If lexer is given it's forwarded to the
    filter, otherwise the filter receives `None`.
    """
    def _wrap(filter_, inner):
        # Lazily pipe *inner* through one filter.
        yield from filter_.filter(lexer, inner)

    for filter_ in filters:
        stream = _wrap(filter_, stream)
    return stream
23
+
24
+
25
def simplefilter(f):
    """
    Decorator that converts a function into a filter::

        @simplefilter
        def lowercase(self, lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    # Build a FunctionFilter subclass whose `function` attribute is the
    # decorated function, preserving its module and docstring.
    namespace = {
        '__module__': getattr(f, '__module__'),
        '__doc__': f.__doc__,
        'function': f,
    }
    return type(f.__name__, (FunctionFilter,), namespace)
39
+
40
+
41
class Filter:
    """
    Default filter. Subclass this class or use the `simplefilter`
    decorator to create own filters.
    """

    def __init__(self, **options):
        # Keyword options are stored verbatim for use by subclasses.
        self.options = options

    def filter(self, lexer, stream):
        # Abstract: subclasses must yield filtered tokens.
        raise NotImplementedError()
52
+
53
+
54
class FunctionFilter(Filter):
    """
    Abstract class used by `simplefilter` to create simple
    function filters on the fly. The `simplefilter` decorator
    automatically creates subclasses of this class for
    functions passed to it.
    """
    # Replaced with the wrapped function in subclasses made by `simplefilter`.
    function = None

    def __init__(self, **options):
        # BUGFIX: `function` is a class attribute defaulting to None, so
        # `hasattr(self, 'function')` was always true and the guard below
        # could never fire.  Test the value instead so instantiating this
        # base class (or a subclass that forgot to bind a function) fails
        # loudly as the error message intends.
        if self.function is None:
            raise TypeError(f'{self.__class__.__name__!r} used without bound function')
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # pylint: disable=not-callable
        yield from self.function(lexer, stream, self.options)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatter
3
+ ~~~~~~~~~~~~~~~~~~
4
+
5
+ Base formatter class.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import codecs
12
+
13
+ from pip._vendor.pygments.util import get_bool_opt
14
+ from pip._vendor.pygments.styles import get_style_by_name
15
+
16
+ __all__ = ['Formatter']
17
+
18
+
19
+ def _lookup_style(style):
20
+ if isinstance(style, str):
21
+ return get_style_by_name(style)
22
+ return style
23
+
24
+
25
class Formatter:
    """
    Converts a token stream to text.

    Formatters should have attributes to help selecting them. These
    are similar to the corresponding :class:`~pygments.lexer.Lexer`
    attributes.

    .. autoattribute:: name
       :no-value:

    .. autoattribute:: aliases
       :no-value:

    .. autoattribute:: filenames
       :no-value:

    You can pass options as keyword arguments to the constructor.
    All formatters accept these basic options:

    ``style``
        The style to use, can be a string or a Style subclass
        (default: "default"). Not used by e.g. the
        TerminalFormatter.
    ``full``
        Tells the formatter to output a "full" document, i.e.
        a complete self-contained document. This doesn't have
        any effect for some formatters (default: false).
    ``title``
        If ``full`` is true, the title that should be used to
        caption the document (default: '').
    ``encoding``
        If given, must be an encoding name. This will be used to
        convert the Unicode token strings to byte strings in the
        output. If it is "" or None, Unicode strings will be written
        to the output file, which most file-like objects do not
        support (default: None).
    ``outencoding``
        Overrides ``encoding`` if given.

    """

    #: Full name for the formatter, in human-readable form.
    name = None

    #: A list of short, unique identifiers that can be used to lookup
    #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
    aliases = []

    #: A list of fnmatch patterns that match filenames for which this
    #: formatter can produce output. The patterns in this list should be unique
    #: among all formatters.
    filenames = []

    #: If True, this formatter outputs Unicode strings when no encoding
    #: option is given.
    unicodeoutput = True

    def __init__(self, **options):
        """
        As with lexers, this constructor takes arbitrary optional arguments,
        and if you override it, you should first process your own options, then
        call the base class implementation.
        """
        self.style = _lookup_style(options.get('style', 'default'))
        self.full = get_bool_opt(options, 'full', False)
        self.title = options.get('title', '')
        # Normalize the output encoding: '' collapses to None, the
        # pseudo-encodings 'guess'/'chardet' (possible via e.g.
        # ``pygmentize -O encoding=guess``) become 'utf-8', and an
        # explicit 'outencoding' always wins over 'encoding'.
        encoding = options.get('encoding', None) or None
        if encoding in ('guess', 'chardet'):
            encoding = 'utf-8'
        self.encoding = options.get('outencoding') or encoding
        self.options = options

    def get_style_defs(self, arg=''):
        """
        This method must return statements or declarations suitable to define
        the current style for subsequent highlighted text (e.g. CSS classes
        in the `HTMLFormatter`).

        The optional argument `arg` can be used to modify the generation and
        is formatter dependent (it is standardized because it can be given on
        the command line).

        This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
        the `arg` is then given by the ``-a`` option.
        """
        return ''

    def format(self, tokensource, outfile):
        """
        This method must format the tokens from the `tokensource` iterable and
        write the formatted version to the file object `outfile`.

        Formatter options can control how exactly the tokens are converted.
        """
        target = outfile
        if self.encoding:
            # Wrap the outfile in a codecs StreamWriter so format_unencoded
            # can keep writing str objects.
            target = codecs.lookup(self.encoding)[3](outfile)
        return self.format_unencoded(tokensource, target)

    # Allow writing Formatter[str] or Formatter[bytes]. That's equivalent to
    # Formatter. This helps when using third-party type stubs from typeshed.
    def __class_getitem__(cls, name):
        return cls
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.groff
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for groff output.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import math
12
+ from pip._vendor.pygments.formatter import Formatter
13
+ from pip._vendor.pygments.util import get_bool_opt, get_int_opt
14
+
15
+ __all__ = ['GroffFormatter']
16
+
17
+
18
class GroffFormatter(Formatter):
    """
    Format tokens with groff escapes to change their color and font style.

    .. versionadded:: 2.11

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `monospaced`
        If set to true, monospace font will be used (default: ``true``).

    `linenos`
        If set to true, print the line numbers (default: ``false``).

    `wrap`
        Wrap lines to the specified number of characters. Disabled if set to 0
        (default: ``0``).
    """

    name = 'groff'
    aliases = ['groff','troff','roff']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        self.monospaced = get_bool_opt(options, 'monospaced', True)
        self.linenos = get_bool_opt(options, 'linenos', False)
        # Last line number written; 0 means nothing emitted yet.
        self._lineno = 0
        self.wrap = get_int_opt(options, 'wrap', 0)
        # Number of characters already emitted on the current output line.
        self._linelen = 0

        # Maps token type -> (start, end) groff escape strings.
        self.styles = {}
        self._make_styles()


    def _make_styles(self):
        # Pick the font escapes once; the ``C*`` variants select the
        # Courier (monospaced) family.
        regular = '\\f[CR]' if self.monospaced else '\\f[R]'
        bold = '\\f[CB]' if self.monospaced else '\\f[B]'
        italic = '\\f[CI]' if self.monospaced else '\\f[I]'

        # Pre-compute the (start, end) escape pair for every token type of
        # the selected style.  ``end`` is built in reverse order so the
        # escapes unwind in proper nesting order.
        for ttype, ndef in self.style:
            start = end = ''
            if ndef['color']:
                start += '\\m[{}]'.format(ndef['color'])
                end = '\\m[]' + end
            if ndef['bold']:
                start += bold
                end = regular + end
            if ndef['italic']:
                start += italic
                end = regular + end
            if ndef['bgcolor']:
                start += '\\M[{}]'.format(ndef['bgcolor'])
                end = '\\M[]' + end

            self.styles[ttype] = start, end


    def _define_colors(self, outfile):
        # Collect every foreground color used by the style and emit one
        # ``.defcolor`` request per color (sorted for deterministic output).
        colors = set()
        for _, ndef in self.style:
            if ndef['color'] is not None:
                colors.add(ndef['color'])

        for color in sorted(colors):
            outfile.write('.defcolor ' + color + ' rgb #' + color + '\n')


    def _write_lineno(self, outfile):
        self._lineno += 1
        # Every line number but the first is preceded by a newline.
        outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno))


    def _wrap_line(self, line):
        # Length of the visible text, excluding a trailing newline.
        length = len(line.rstrip('\n'))
        space = ' ' if self.linenos else ''
        newline = ''

        if length > self.wrap:
            # Split the line into wrap-sized chunks separated by newlines.
            for i in range(0, math.floor(length / self.wrap)):
                chunk = line[i*self.wrap:i*self.wrap+self.wrap]
                newline += (chunk + '\n' + space)
            remainder = length % self.wrap
            if remainder > 0:
                # NOTE(review): this slice takes remainder+1 characters from
                # the end of *line*; it appears to rely on the line ending
                # with '\n' — confirm against upstream before changing.
                newline += line[-remainder-1:]
                self._linelen = remainder
        elif self._linelen + length > self.wrap:
            # Appending would overflow the current output line: break first.
            newline = ('\n' + space) + line
            self._linelen = length
        else:
            newline = line
            self._linelen += length

        return newline


    def _escape_chars(self, text):
        # Escape groff control characters first ...
        text = text.replace('\\', '\\[u005C]'). \
                    replace('.', '\\[char46]'). \
                    replace('\'', '\\[u0027]'). \
                    replace('`', '\\[u0060]'). \
                    replace('~', '\\[u007E]')
        copy = text

        # ... then replace every non-ASCII character (detected by a
        # multi-byte UTF-8 encoding) with a \[uXXXX] groff escape.
        for char in copy:
            if len(char) != len(char.encode()):
                uni = char.encode('unicode_escape') \
                    .decode()[1:] \
                    .replace('x', 'u00') \
                    .upper()
                text = text.replace(char, '\\[u' + uni[1:] + ']')

        return text


    def format_unencoded(self, tokensource, outfile):
        self._define_colors(outfile)

        # ``.nf`` turns off filling; start in the regular Courier font.
        outfile.write('.nf\n\\f[CR]\n')

        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            # Fall back to the nearest ancestor token type with a style.
            while ttype not in self.styles:
                ttype = ttype.parent
            start, end = self.styles[ttype]

            for line in value.splitlines(True):
                if self.wrap > 0:
                    line = self._wrap_line(line)

                if start and end:
                    text = self._escape_chars(line.rstrip('\n'))
                    if text != '':
                        outfile.write(''.join((start, text, end)))
                else:
                    outfile.write(self._escape_chars(line.rstrip('\n')))

                if line.endswith('\n'):
                    if self.linenos:
                        self._write_lineno(outfile)
                        self._linelen = 0
                    else:
                        outfile.write('\n')
                        self._linelen = 0

        # Re-enable filling at the end of the document.
        outfile.write('\n.fi')
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/irc.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.irc
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for IRC output
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pip._vendor.pygments.formatter import Formatter
12
+ from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
13
+ Number, Operator, Generic, Token, Whitespace
14
+ from pip._vendor.pygments.util import get_choice_opt
15
+
16
+
17
+ __all__ = ['IRCFormatter']
18
+
19
+
20
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
IRC_COLORS = {
    Token: ('', ''),

    Whitespace: ('gray', 'brightblack'),
    Comment: ('gray', 'brightblack'),
    Comment.Preproc: ('cyan', 'brightcyan'),
    Keyword: ('blue', 'brightblue'),
    Keyword.Type: ('cyan', 'brightcyan'),
    Operator.Word: ('magenta', 'brightcyan'),
    Name.Builtin: ('cyan', 'brightcyan'),
    Name.Function: ('green', 'brightgreen'),
    Name.Namespace: ('_cyan_', '_brightcyan_'),
    Name.Class: ('_green_', '_brightgreen_'),
    Name.Exception: ('cyan', 'brightcyan'),
    Name.Decorator: ('brightblack', 'gray'),
    Name.Variable: ('red', 'brightred'),
    Name.Constant: ('red', 'brightred'),
    Name.Attribute: ('cyan', 'brightcyan'),
    Name.Tag: ('brightblue', 'brightblue'),
    String: ('yellow', 'yellow'),
    Number: ('blue', 'brightblue'),

    Generic.Deleted: ('brightred', 'brightred'),
    Generic.Inserted: ('green', 'brightgreen'),
    Generic.Heading: ('**', '**'),
    Generic.Subheading: ('*magenta*', '*brightmagenta*'),
    Generic.Error: ('brightred', 'brightred'),

    Error: ('_brightred_', '_brightred_'),
    }


#: mIRC color numbers for each color name used above.  Some numbers are
#: shared so ANSI-style color names resolve to a close mIRC equivalent.
IRC_COLOR_MAP = {
    'white': 0,
    'black': 1,
    'blue': 2,
    'brightgreen': 3,
    'brightred': 4,
    'yellow': 5,
    'magenta': 6,
    'orange': 7,
    'green': 7, #compat w/ ansi
    'brightyellow': 8,
    'lightgreen': 9,
    'brightcyan': 9, # compat w/ ansi
    'cyan': 10,
    'lightblue': 11,
    'red': 11, # compat w/ ansi
    'brightblue': 12,
    'brightmagenta': 13,
    'brightblack': 14,
    'gray': 15,
}
+
76
def ircformat(color, text):
    """Wrap *text* in mIRC control codes.

    *color* is a color name from ``IRC_COLOR_MAP``, optionally decorated
    with surrounding ``_`` (italic) and/or ``*`` (bold).  An empty color
    returns *text* unchanged.
    """
    if not color:
        return text
    add = sub = ''
    if '_' in color:  # italic
        add += '\x1D'
        sub = '\x1D' + sub
        color = color.strip('_')
    if '*' in color:  # bold
        add += '\x02'
        sub = '\x02' + sub
        color = color.strip('*')
    # underline (\x1F) not supported
    # backgrounds (\x03FF,BB) not supported
    if color:
        # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
        add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
        sub = '\x03' + sub
        return add + text + sub
    # Decoration-only specs (e.g. "**") fall through to this legacy form.
    return '<' + add + '>' + text + '</' + sub + '>'
95
+
96
+
97
class IRCFormatter(Formatter):
    r"""
    Format tokens with IRC color sequences

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers in the output as well
        (default: ``False`` = no line numbers).
    """
    name = 'IRC'
    aliases = ['irc', 'IRC']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # ``darkbg`` is a bool that doubles as the index into the
        # (lightbg, darkbg) color tuples of the colorscheme.
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
        self.linenos = options.get('linenos', False)
        # Counter for the next line number (1-based once incremented).
        self._lineno = 0

    def _write_lineno(self, outfile):
        # No-op unless line numbers were requested.
        if self.linenos:
            self._lineno += 1
            outfile.write("%04d: " % self._lineno)

    def format_unencoded(self, tokensource, outfile):
        self._write_lineno(outfile)

        for ttype, value in tokensource:
            # Walk up the token-type hierarchy until a color is defined.
            color = self.colorscheme.get(ttype)
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                color = color[self.darkbg]
                spl = value.split('\n')
                # Emit each complete line, re-starting the color codes
                # after every newline (and the line number, if enabled).
                for line in spl[:-1]:
                    if line:
                        outfile.write(ircformat(color, line))
                    outfile.write('\n')
                    self._write_lineno(outfile)
                if spl[-1]:
                    outfile.write(ircformat(color, spl[-1]))
            else:
                # No color resolved (empty tuple entry): write verbatim.
                outfile.write(value)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/latex.py ADDED
@@ -0,0 +1,518 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.latex
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for LaTeX fancyvrb output.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from io import StringIO
12
+
13
+ from pip._vendor.pygments.formatter import Formatter
14
+ from pip._vendor.pygments.lexer import Lexer, do_insertions
15
+ from pip._vendor.pygments.token import Token, STANDARD_TYPES
16
+ from pip._vendor.pygments.util import get_bool_opt, get_int_opt
17
+
18
+
19
+ __all__ = ['LatexFormatter']
20
+
21
+
22
def escape_tex(text, commandprefix):
    """Escape LaTeX special characters in *text*.

    Each special character is replaced by the corresponding
    ``\\<commandprefix>Z..{}`` macro defined in ``STYLE_TEMPLATE``.
    """
    cp = commandprefix
    # str.translate applies all substitutions in a single pass, so the
    # backslashes introduced by earlier replacements are never re-escaped.
    table = str.maketrans({
        '\\': rf'\{cp}Zbs{{}}',
        '{': rf'\{cp}Zob{{}}',
        '}': rf'\{cp}Zcb{{}}',
        '^': rf'\{cp}Zca{{}}',
        '_': rf'\{cp}Zus{{}}',
        '&': rf'\{cp}Zam{{}}',
        '<': rf'\{cp}Zlt{{}}',
        '>': rf'\{cp}Zgt{{}}',
        '#': rf'\{cp}Zsh{{}}',
        '%': rf'\{cp}Zpc{{}}',
        '$': rf'\{cp}Zdl{{}}',
        '-': rf'\{cp}Zhy{{}}',
        "'": rf'\{cp}Zsq{{}}',
        '"': rf'\{cp}Zdq{{}}',
        '~': rf'\{cp}Zti{{}}',
    })
    return text.translate(table)
41
+
42
+
43
# %-interpolation template for the standalone LaTeX document produced when
# the ``full`` option is set (see LatexFormatter.format_unencoded).
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s

%(styledefs)s

\begin{document}

\section*{%(title)s}

%(code)s
\end{document}
'''

## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
#   to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
#   keyval package (but modified to use plusses instead of commas
#   because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
#   if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
#   for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}

# Macro definitions returned by LatexFormatter.get_style_defs();
# ``%(cp)s`` is the command prefix, ``%(styles)s`` the generated
# \PY@tok@<class> definitions.  Note ``%%`` escapes a literal ``%``.
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
    \let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
    \let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
    \%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
    \%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}

%(styles)s

\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
133
+
134
+
135
def _get_ttype_name(ttype):
    """Return the short name for *ttype*: the nearest standard ancestor's
    abbreviation extended with the remaining subtype components."""
    suffix = ''
    name = STANDARD_TYPES.get(ttype)
    # Walk up the token hierarchy, accumulating the non-standard tail,
    # until an ancestor with a standard short name is found.
    while name is None:
        suffix = ttype[-1] + suffix
        ttype = ttype.parent
        name = STANDARD_TYPES.get(ttype)
    return name + suffix
145
+
146
+
147
class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\\{\}]
        \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
            \PY{k}{pass}
        \end{Verbatim}

    Wrapping can be disabled using the `nowrap` option.

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `nowrap`
        If set to ``True``, don't wrap the tokens at all, not even inside a
        ``\begin{Verbatim}`` environment. This disables most other options
        (default: ``False``).

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).

        .. versionadded:: 0.7
        .. versionchanged:: 0.10
           The default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``).

        .. versionadded:: 1.2

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``).

        .. versionadded:: 1.2

    `escapeinside`
        If set to a string of length 2, enables escaping to LaTeX. Text
        delimited by these 2 characters is read as LaTeX code and
        typeset accordingly. It has no effect in string literals. It has
        no effect in comments if `texcomments` or `mathescape` is
        set. (default: ``''``).

        .. versionadded:: 2.0

    `envname`
        Allows you to pick an alternative environment name replacing Verbatim.
        The alternate environment still has to support Verbatim's option syntax.
        (default: ``'Verbatim'``).

        .. versionadded:: 2.0
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)
        # ``escapeinside`` must be exactly two characters: a left and a
        # right delimiter.  Anything else disables the feature.
        self.escapeinside = options.get('escapeinside', '')
        if len(self.escapeinside) == 2:
            self.left = self.escapeinside[0]
            self.right = self.escapeinside[1]
        else:
            self.escapeinside = ''
        self.envname = options.get('envname', 'Verbatim')

        self._create_stylesheet()

    def _create_stylesheet(self):
        # ttype2name maps token types to their short class names;
        # cmd2def maps those names to the LaTeX macro bodies for them.
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            # Convert 'rrggbb' hex to the '0.xx,0.yy,0.zz' form LaTeX's
            # color package expects; no color means white.
            if col:
                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            # ``$$`` is a placeholder for the command prefix, substituted
            # once at the end (avoids brace-escaping headaches here).
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{{\textcolor[rgb]{{{}}}{{##1}}}}'.format(rgbcolor(ndef['color'])))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{{{{\setlength{{\fboxsep}}{{\string -\fboxrule}}'
                           r'\fcolorbox[rgb]{{{}}}{{{}}}{{\strut ##1}}}}}}'.format(rgbcolor(ndef['border']),
                                                                                  rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{{{{\setlength{{\fboxsep}}{{0pt}}'
                           r'\colorbox[rgb]{{{}}}{{\strut ##1}}}}}}'.format(rgbcolor(ndef['bgcolor'])))
            if cmndef == '':
                # No styling at all for this token type: skip it.
                continue
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in self.cmd2def.items():
            styles.append(rf'\@namedef{{{cp}@tok@{name}}}{{{definition}}}')
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            # Buffer the highlighted body so it can be interpolated into
            # DOC_TEMPLATE at the end.
            realoutfile = outfile
            outfile = StringIO()

        if not self.nowrap:
            outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
            if self.linenos:
                start, step = self.linenostart, self.linenostep
                outfile.write(',numbers=left' +
                              (start and ',firstnumber=%d' % start or '') +
                              (step and ',stepnumber=%d' % step or ''))
            if self.mathescape or self.texcomments or self.escapeinside:
                # Give $, ^ and _ their usual TeX catcodes inside Verbatim.
                outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
                              '\\catcode`\\_=8\\relax}')
            if self.verboptions:
                outfile.write(',' + self.verboptions)
            outfile.write(']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in range(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, cp)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, cp)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    # Pass text between the escape delimiters through raw,
                    # escaping everything outside them.
                    text = value
                    value = ''
                    while text:
                        a, sep1, text = text.partition(self.left)
                        if sep1:
                            b, sep2, text = text.partition(self.right)
                            if sep2:
                                value += escape_tex(a, cp) + b
                            else:
                                value += escape_tex(a + sep1 + b, cp)
                        else:
                            value += escape_tex(a, cp)
                else:
                    value = escape_tex(value, cp)
            elif ttype not in Token.Escape:
                value = escape_tex(value, cp)
            # Collect the class names from most to least specific.
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                # Wrap each physical line separately so \PY never spans a
                # newline inside the Verbatim environment.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write(f"\\{cp}{{{styleval}}}{{{line}}}")
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write(f"\\{cp}{{{styleval}}}{{{spl[-1]}}}")
            else:
                outfile.write(value)

        if not self.nowrap:
            outfile.write('\\end{' + self.envname + '}\n')

        if self.full:
            encoding = self.encoding or 'utf8'
            # map known existings encodings from LaTeX distribution
            encoding = {
                'utf_8': 'utf8',
                'latin_1': 'latin1',
                'iso_8859_1': 'latin1',
            }.get(encoding.replace('-', '_'), encoding)
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass = self.docclass,
                     preamble = self.preamble,
                     title = self.title,
                     encoding = encoding,
                     styledefs = self.get_style_defs(),
                     code = outfile.getvalue()))
435
+
436
+
437
class LatexEmbeddedLexer(Lexer):
    """
    This lexer takes one lexer as argument, the lexer for the language
    being formatted, and the left and right delimiters for escaped text.

    First everything is scanned using the language lexer to obtain
    strings and comments. All other consecutive tokens are merged and
    the resulting text is scanned for escaped segments, which are given
    the Token.Escape type. Finally text that is not escaped is scanned
    again with the language lexer.
    """
    def __init__(self, left, right, lang, **options):
        # left/right: the two escape delimiter characters; lang: the
        # wrapped language lexer.
        self.left = left
        self.right = right
        self.lang = lang
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # find and remove all the escape tokens (replace with an empty string)
        # this is very similar to DelegatingLexer.get_tokens_unprocessed.
        buffered = ''
        insertions = []
        insertion_buf = []
        for i, t, v in self._find_safe_escape_tokens(text):
            if t is None:
                # Plain text: flush any pending escape tokens as an
                # insertion anchored at the current buffer position.
                if insertion_buf:
                    insertions.append((len(buffered), insertion_buf))
                    insertion_buf = []
                buffered += v
            else:
                insertion_buf.append((i, t, v))
        if insertion_buf:
            insertions.append((len(buffered), insertion_buf))
        return do_insertions(insertions,
                             self.lang.get_tokens_unprocessed(buffered))

    def _find_safe_escape_tokens(self, text):
        """ find escape tokens that are not in strings or comments """
        for i, t, v in self._filter_to(
            self.lang.get_tokens_unprocessed(text),
            lambda t: t in Token.Comment or t in Token.String
        ):
            if t is None:
                # Merged non-string/non-comment text: scan it for escapes,
                # offsetting the inner indices by the outer position.
                for i2, t2, v2 in self._find_escape_tokens(v):
                    yield i + i2, t2, v2
            else:
                yield i, None, v

    def _filter_to(self, it, pred):
        """ Keep only the tokens that match `pred`, merge the others together """
        buf = ''
        idx = 0
        for i, t, v in it:
            if pred(t):
                if buf:
                    # Flush the accumulated run before the matching token.
                    yield idx, None, buf
                    buf = ''
                yield i, t, v
            else:
                if not buf:
                    # Remember where the merged run starts.
                    idx = i
                buf += v
        if buf:
            yield idx, None, buf

    def _find_escape_tokens(self, text):
        """ Find escape tokens within text, give token=None otherwise """
        index = 0
        while text:
            a, sep1, text = text.partition(self.left)
            if a:
                yield index, None, a
                index += len(a)
            if sep1:
                b, sep2, text = text.partition(self.right)
                if sep2:
                    # Complete delimited segment: emit as Token.Escape.
                    yield index + len(sep1), Token.Escape, b
                    index += len(sep1) + len(b) + len(sep2)
                else:
                    # Unclosed delimiter: flag it as an error and keep
                    # scanning the remainder.
                    yield index, Token.Error, sep1
                    index += len(sep1)
                    text = b
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/svg.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.svg
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for SVG output.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pip._vendor.pygments.formatter import Formatter
12
+ from pip._vendor.pygments.token import Comment
13
+ from pip._vendor.pygments.util import get_bool_opt, get_int_opt
14
+
15
+ __all__ = ['SvgFormatter']
16
+
17
+
18
def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML."""
    # single-pass character mapping; equivalent to the chained replaces
    # because none of the inserted entities are re-escaped
    replacements = {
        '&': '&amp;',
        '<': '&lt;',
        '>': '&gt;',
        '"': '&quot;',
        "'": '&#39;',
    }
    return ''.join(replacements.get(ch, ch) for ch in text)
25
+
26
+
27
# NOTE(review): this module-level dict is never read or written anywhere in
# the visible code — presumably a leftover; confirm against the full file
# before removing.
class2style = {}
28
+
29
class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file.  This formatter is still experimental.
    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
    coordinates containing ``<tspan>`` elements with the individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    .. versionadded:: 0.9

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype.  If true, the `fontfamily`
        and `fontsize` options are ignored.  Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `linenos`
        If ``True``, add line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `linenowidth`
        Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
        for up to 4-digit line numbers. Increase width for longer code blocks).

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` else.  (This is necessary since text coordinates
        refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line. This should
        roughly be the text size plus 5. It defaults to that value if the text
        size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces.  SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is.  However, many current SVG
        viewers don't obey that rule, so this option is provided as a workaround
        and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        # derive the default vertical metrics from the font size when it is
        # given in pixels; otherwise fall back to a fixed 20px baseline
        fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except ValueError:
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        self.linenos = get_bool_opt(options,'linenos',False)
        self.linenostart = get_int_opt(options,'linenostart',1)
        self.linenostep = get_int_opt(options,'linenostep',1)
        self.linenowidth = get_int_opt(options,'linenowidth', 3*self.ystep)
        # token type -> rendered attribute string, filled lazily by _get_style()
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            if self.encoding:
                outfile.write(f'<?xml version="1.0" encoding="{self.encoding}"?>\n')
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write(f'<g font-family="{self.fontfamily}" font-size="{self.fontsize}">\n')

        counter = self.linenostart
        counter_step = self.linenostep
        # line numbers are rendered in the style of comments
        counter_style = self._get_style(Comment)
        line_x = x

        if self.linenos:
            if counter % counter_step == 0:
                outfile.write(f'<text x="{x+self.linenowidth}" y="{y}" {counter_style} text-anchor="end">{counter}</text>')
            # shift the code column right to make room for the number gutter
            line_x += self.linenowidth + self.ystep
            counter += 1

        outfile.write(f'<text x="{line_x}" y="{y}" xml:space="preserve">')
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = style and '<tspan' + style + '>' or ''
            tspanend = tspan and '</tspan>' or ''
            value = escape_html(value)
            if self.spacehack:
                value = value.expandtabs().replace(' ', '&#160;')
            # each embedded newline closes the current <text> line and opens
            # the next one, ystep further down
            parts = value.split('\n')
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n')
                if self.linenos and counter % counter_step == 0:
                    outfile.write(f'<text x="{x+self.linenowidth}" y="{y}" text-anchor="end" {counter_style}>{counter}</text>')

                counter += 1
                outfile.write(f'<text x="{line_x}" y="{y}" ' 'xml:space="preserve">')
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')

        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        # Render the SVG presentation attributes for a token type, walking up
        # the token hierarchy until a styled ancestor is found; the result is
        # cached under the original (unwalked) type.
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.terminal
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for terminal output with ANSI sequences.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
from pip._vendor.pygments.console import ansiformat
from pip._vendor.pygments.formatter import Formatter
from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
    Number, Operator, Generic, Token, Whitespace
from pip._vendor.pygments.util import get_bool_opt, get_choice_opt
16
+
17
+
18
+ __all__ = ['TerminalFormatter']
19
+
20
+
21
+ #: Map token types to a tuple of color values for light and dark
22
+ #: backgrounds.
23
TERMINAL_COLORS = {
    # empty strings mean "use the terminal's default color"
    Token:              ('',            ''),

    Whitespace:         ('gray',   'brightblack'),
    Comment:            ('gray',   'brightblack'),
    Comment.Preproc:    ('cyan',        'brightcyan'),
    Keyword:            ('blue',    'brightblue'),
    Keyword.Type:       ('cyan',        'brightcyan'),
    Operator.Word:      ('magenta',      'brightmagenta'),
    Name.Builtin:       ('cyan',        'brightcyan'),
    Name.Function:      ('green',   'brightgreen'),
    Name.Namespace:     ('_cyan_',      '_brightcyan_'),
    Name.Class:         ('_green_', '_brightgreen_'),
    Name.Exception:     ('cyan',        'brightcyan'),
    Name.Decorator:     ('brightblack',    'gray'),
    Name.Variable:      ('red',     'brightred'),
    Name.Constant:      ('red',     'brightred'),
    Name.Attribute:     ('cyan',        'brightcyan'),
    Name.Tag:           ('brightblue',        'brightblue'),
    String:             ('yellow',       'yellow'),
    Number:             ('blue',    'brightblue'),

    Generic.Deleted:    ('brightred',        'brightred'),
    Generic.Inserted:   ('green',  'brightgreen'),
    Generic.Heading:    ('**',         '**'),
    Generic.Subheading: ('*magenta*',   '*brightmagenta*'),
    Generic.Prompt:     ('**',         '**'),
    Generic.Error:      ('brightred',        'brightred'),

    Error:              ('_brightred_',      '_brightred_'),
}
54
+
55
+
56
class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
        # FIX: use get_bool_opt so string option values ("true"/"false", as
        # passed on a command line) are interpreted consistently with `bg`
        # and with the other formatters; a raw options.get() treated the
        # string "false" as truthy.
        self.linenos = get_bool_opt(options, 'linenos', False)
        self._lineno = 0

    def format(self, tokensource, outfile):
        # nothing extra to do beyond the base implementation; kept explicit
        # so subclasses overriding format() have a clear delegation point
        return Formatter.format(self, tokensource, outfile)

    def _write_lineno(self, outfile):
        # emit "NNNN: " at the start of each output line; every line but the
        # first is preceded by the newline that terminates the previous line
        self._lineno += 1
        outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))

    def _get_color(self, ttype):
        # self.colorscheme is a dict containing usually generic types, so we
        # have to walk the tree of dots. The base Token type must be a key,
        # even if it's empty string, as in the default above.
        colors = self.colorscheme.get(ttype)
        while colors is None:
            ttype = ttype.parent
            colors = self.colorscheme.get(ttype)
        return colors[self.darkbg]

    def format_unencoded(self, tokensource, outfile):
        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            color = self._get_color(ttype)

            # color each physical line separately so sequences never span a
            # newline (keeps pagers happy)
            for line in value.splitlines(True):
                if color:
                    outfile.write(ansiformat(color, line.rstrip('\n')))
                else:
                    outfile.write(line.rstrip('\n'))
                if line.endswith('\n'):
                    if self.linenos:
                        self._write_lineno(outfile)
                    else:
                        outfile.write('\n')

        if self.linenos:
            outfile.write("\n")
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py ADDED
@@ -0,0 +1,963 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexer
3
+ ~~~~~~~~~~~~~~
4
+
5
+ Base lexer classes.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+ import sys
13
+ import time
14
+
15
+ from pip._vendor.pygments.filter import apply_filters, Filter
16
+ from pip._vendor.pygments.filters import get_filter_by_name
17
+ from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType
18
+ from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
19
+ make_analysator, Future, guess_decode
20
+ from pip._vendor.pygments.regexopt import regex_opt
21
+
22
+ __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
23
+ 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
24
+ 'default', 'words', 'line_re']
25
+
26
# matches a single source line including its trailing newline
line_re = re.compile('.*?\n')

# BOM byte signature -> codec name; checked in order, with the 4-byte
# UTF-32 BOMs listed before the 2-byte UTF-16 ones they start with
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
                 (b'\xff\xfe\0\0', 'utf-32'),
                 (b'\0\0\xfe\xff', 'utf-32be'),
                 (b'\xff\xfe', 'utf-16'),
                 (b'\xfe\xff', 'utf-16be')]

# fallback `analyse_text` implementation: never claims a match
_default_analyse = staticmethod(lambda x: 0.0)
35
+
36
+
37
class LexerMeta(type):
    """
    Metaclass for lexers: wraps any ``analyse_text`` method defined on the
    class so it behaves like a static method that always returns a float.
    """

    def __new__(mcs, name, bases, d):
        analyser = d.get('analyse_text')
        if analyser is not None or 'analyse_text' in d:
            d['analyse_text'] = make_analysator(d['analyse_text'])
        return super().__new__(mcs, name, bases, d)
47
+
48
+
49
class Lexer(metaclass=LexerMeta):
    """
    Lexer for a specific language.

    See also :doc:`lexerdevelopment`, a high-level guide to writing
    lexers.

    Lexer classes have attributes used for choosing the most appropriate
    lexer based on various criteria.

    .. autoattribute:: name
       :no-value:
    .. autoattribute:: aliases
       :no-value:
    .. autoattribute:: filenames
       :no-value:
    .. autoattribute:: alias_filenames
    .. autoattribute:: mimetypes
       :no-value:
    .. autoattribute:: priority

    Lexers included in Pygments should have two additional attributes:

    .. autoattribute:: url
       :no-value:
    .. autoattribute:: version_added
       :no-value:

    Lexers included in Pygments may have additional attributes:

    .. autoattribute:: _example
       :no-value:

    You can pass options to the constructor.  The basic options recognized
    by all lexers and processed by the base `Lexer` class are:

    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True).  This
        is required for some lexers that consume input linewise.

        .. versionadded:: 1.3

    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection.  Can also be ``'chardet'`` to use the chardet
        library, if it is installed.
    ``inencoding``
        Overrides the ``encoding`` if given.
    """

    #: Full name of the lexer, in human-readable form
    name = None

    #: A list of short, unique identifiers that can be used to look
    #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
    aliases = []

    #: A list of `fnmatch` patterns that match filenames which contain
    #: content for this lexer. The patterns in this list should be unique among
    #: all lexers.
    filenames = []

    #: A list of `fnmatch` patterns that match filenames which may or may not
    #: contain content for this lexer. This list is used by the
    #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
    #: are then included in guessing the correct one. That means that
    #: e.g. every lexer for HTML and a template language should include
    #: ``\*.html`` in this list.
    alias_filenames = []

    #: A list of MIME types for content that can be lexed with this lexer.
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    #: URL of the language specification/definition. Used in the Pygments
    #: documentation. Set to an empty string to disable.
    url = None

    #: Version of Pygments in which the lexer was added.
    version_added = None

    #: Example file name. Relative to the ``tests/examplefiles`` directory.
    #: This is used by the documentation generator to show an example.
    _example = None

    def __init__(self, **options):
        """
        This constructor takes arbitrary options as keyword arguments.
        Every subclass must first process its own options and then call
        the `Lexer` constructor, since it processes the basic
        options like `stripnl`.

        An example looks like this:

        .. sourcecode:: python

           def __init__(self, **options):
               self.compress = options.get('compress', '')
               Lexer.__init__(self, **options)

        As these options must all be specifiable as strings (due to the
        command line usage), there are various utility functions
        available to help with that, see `Utilities`_.
        """
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'guess')
        # 'inencoding' takes precedence over 'encoding' when both are given
        self.encoding = options.get('inencoding') or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>'
        else:
            return f'<pygments.lexers.{self.__class__.__name__}>'

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)

    def analyse_text(text):
        """
        A static method which is called for lexer guessing.

        It should analyse the text and return a float in the range
        from ``0.0`` to ``1.0``.  If it returns ``0.0``, the lexer
        will not be selected as the most probable one, if it returns
        ``1.0``, it will be selected immediately.  This is used by
        `guess_lexer`.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`
        it's the same as if the return values was ``0.0``.
        """

    def _preprocess_lexer_input(self, text):
        """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""

        if not isinstance(text, str):
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                # pip vendoring note: chardet is not vendored by pip, so this
                # code path always fails.  The unreachable detection code that
                # followed the re-raise (and referenced the undefined name
                # `chardet`) has been removed.
                try:
                    raise ImportError('chardet is not vendored by pip')
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
            else:
                text = text.decode(self.encoding)
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        return text

    def get_tokens(self, text, unfiltered=False):
        """
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
        """
        text = self._preprocess_lexer_input(text)

        def streamer():
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
        """
        raise NotImplementedError
289
+
290
+
291
class DelegatingLexer(Lexer):
    """
    This lexer takes two lexer as arguments. A root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Gather every needle region into one string for the root lexer,
        # and record where the language-lexer tokens must be spliced back in.
        root_text = ''
        insertions = []
        queued = []
        for idx, toktype, value in self.language_lexer.get_tokens_unprocessed(text):
            if toktype is not self.needle:
                queued.append((idx, toktype, value))
                continue
            if queued:
                insertions.append((len(root_text), queued))
                queued = []
            root_text += value
        if queued:
            insertions.append((len(root_text), queued))
        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(root_text))
323
+
324
+
325
+ # ------------------------------------------------------------------------------
326
+ # RegexLexer and ExtendedRegexLexer
327
+ #
328
+
329
+
330
class include(str):  # pylint: disable=invalid-name
    """
    Indicates that a state should include rules from another state.
    """
    # a plain str subclass: the string value is the name of the state to
    # include; the subclass only serves as a marker for _process_state()
    pass
335
+
336
+
337
class _inherit:
    """
    Indicates that a state should inherit from its superclass.
    """
    def __repr__(self):
        return 'inherit'

# singleton marker used inside token definitions
inherit = _inherit()  # pylint: disable=invalid-name
345
+
346
+
347
class combined(tuple):  # pylint: disable=invalid-name
    """
    Marker tuple holding the names of several states that are to be merged
    into one combined state.
    """

    def __new__(cls, *args):
        # pack the positional state names into the tuple itself
        return super().__new__(cls, args)

    def __init__(self, *args):
        # nothing to do; tuple contents were fixed in __new__
        pass
358
+
359
+
360
+ class _PseudoMatch:
361
+ """
362
+ A pseudo match object constructed from a string.
363
+ """
364
+
365
+ def __init__(self, start, text):
366
+ self._text = text
367
+ self._start = start
368
+
369
+ def start(self, arg=None):
370
+ return self._start
371
+
372
+ def end(self, arg=None):
373
+ return self._start + len(self._text)
374
+
375
+ def group(self, arg=None):
376
+ if arg:
377
+ raise IndexError('No such group')
378
+ return self._text
379
+
380
+ def groups(self):
381
+ return (self._text,)
382
+
383
+ def groupdict(self):
384
+ return {}
385
+
386
+
387
def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                # group is matched but deliberately ignored
                continue
            elif type(action) is _TokenType:
                # plain token type: emit the group text if non-empty
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                # action is itself a callback: delegate, feeding it a pseudo
                # match positioned at this group
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        # advance the context past the whole match once all groups are done
        if ctx:
            ctx.pos = match.end()
    return callback
411
+
412
+
413
class _This:
    """
    Special singleton used for indicating the caller class.
    Used by ``using``.
    """

# the singleton instance; pass as the first argument to using()
this = _This()
420
+
421
+
422
def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    # translate the 'state' option into a get_tokens_unprocessed 'stack' kwarg
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                # NOTE(review): this mutates the closed-over `kwargs` dict in
                # place, so lexer options accumulate across invocations of
                # the same callback -- confirm this is intentional.
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            # re-emit the sub-lexer's tokens shifted to the match position
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            # NOTE(review): same in-place kwargs mutation as above.
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            # re-emit the sub-lexer's tokens shifted to the match position
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
470
+
471
+
472
class default:
    """
    Indicates a state or state action (e.g. #pop) to apply.
    For example default('#pop') is equivalent to ('', Token, '#pop')
    Note that state tuples may be used as well.

    .. versionadded:: 2.0
    """
    def __init__(self, state):
        # the transition to apply when no other rule in the state matched
        self.state = state
482
+
483
+
484
class words(Future):
    """
    Indicates a list of literal words that is transformed into an optimized
    regex that matches any of the words.

    .. versionadded:: 2.0
    """

    def __init__(self, words, prefix='', suffix=''):
        # defer regex construction until get() is called (Future protocol)
        self.words, self.prefix, self.suffix = words, prefix, suffix

    def get(self):
        # build the optimized alternation lazily
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
498
+
499
+
500
class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        # Deferred regexes (e.g. ``words(...)``) are Future objects; expand
        # them to a plain pattern string here.
        if isinstance(regex, Future):
            regex = regex.get()
        # Return the bound ``match`` method rather than the pattern object so
        # the tokenizer's inner loop skips one attribute lookup per attempt.
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            f'token type must be simple type or callable, not {token!r}'
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                # '#pop:n' pops n states at once; encoded as a negative int
                return -int(new_state[5:])
            else:
                assert False, f'unknown new state {new_state!r}'
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, f'circular state ref {istate!r}'
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, f'unknown new state def {new_state!r}'

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition."""
        assert isinstance(state, str), f"wrong state name {state!r}"
        assert state[0] != '#', f"invalid state name {state!r}"
        # Memoized: a state already present in ``processed`` is returned as-is
        # (this also terminates the recursion through included states).
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, f"circular state reference {state!r}"
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # should be processed already, but may not in the case of:
                # 1. the state has no counterpart in any parent
                # 2. the state includes more than one 'inherit'
                continue
            if isinstance(tdef, default):
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                # the empty pattern always matches: transition without
                # consuming any input
                tokens.append((re.compile('').match, None, new_state))
                continue

            assert type(tdef) is tuple, f"wrong rule def {tdef!r}"

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError(f"uncompilable regex {tdef[0]!r} in state {state!r} of {cls!r}: {err}") from err

            token = cls._process_token(tdef[1])

            # two-element rules have no state transition
            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        # iterate over a snapshot: _process_state may add '_tmp_*' states
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    # N.b. because this is assigned by reference, sufficiently
                    # deep hierarchies are processed incrementally (e.g. for
                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                    # will not see any inherits in B).
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # N.b. this is the index in items (that is, the superclass
                    # copy), so offset required when storing below.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        # The check is against cls.__dict__, not hasattr(), so each subclass
        # builds its own _tokens even when a parent already has one.
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)
665
+
666
+
667
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all time there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensure
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            # try every rule of the current state in order; first match wins
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            # action is a callback that produces tokens itself
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    # no rule matched: emit one Error character and move on
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    # pos ran past the end of text: we are done
                    break
762
+
763
+
764
class LexerContext:
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        # end=0 not supported ;-)
        self.end = end if end else len(text)
        # default to a fresh stack containing only the root state
        self.stack = stack if stack else ['root']

    def __repr__(self):
        return f'LexerContext({self.text!r}, {self.pos!r}, {self.stack!r})'
777
+
778
+
779
class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            # resume from the supplied context's position and state stack
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            # callback actions receive the context and may
                            # mutate it (position and stack)
                            yield from action(self, m, ctx)
                        if not new_state:
                            # altered the state stack?
                            statetokens = tokendefs[ctx.stack[-1]]
                        # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                # no rule matched at the current position
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
849
+
850
+
851
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # nothing to insert: pass the stream through untouched
        yield from tokens
        return

    out_pos = None      # running position in the combined output stream
    pending = True      # are there insertions left to emit?

    # walk the real token stream, splicing in insertions as their
    # positions are reached
    for i, ttype, value in tokens:
        if out_pos is None:
            # first token: anchor output positions at its position
            out_pos = i
        consumed = 0    # how much of `value` has been emitted so far
        while pending and i + len(value) >= index:
            # emit the part of this token that precedes the insertion point
            head = value[consumed:index - i]
            if head:
                yield out_pos, ttype, head
                out_pos += len(head)
            # emit the inserted tokens at the current output position
            for _, it_type, it_value in itokens:
                yield out_pos, it_type, it_value
                out_pos += len(it_value)
            consumed = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                pending = False
                break
        # emit whatever remains of the current token
        if consumed < len(value):
            yield out_pos, ttype, value[consumed:]
            out_pos += len(value) - consumed

    # insertions that fall after the last real token
    while pending:
        # no normal tokens were seen: positions start at zero
        out_pos = out_pos or 0
        for _, it_type, it_value in itokens:
            yield out_pos, it_type, it_value
            out_pos += len(it_value)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            pending = False
913
+
914
+
915
class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        # Expand ``words`` helpers into a plain pattern string so the
        # pattern text can serve as a profiling key.
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            # Accumulate [call count, total seconds] per (state, pattern).
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            begin = time.time()
            res = compiled.match(text, pos, endpos)
            elapsed = time.time() - begin
            info[0] += 1
            info[1] += elapsed
            return res
        return match_func
935
+
936
+
937
class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        # one row per (state, regex): truncated pattern, call count,
        # total ms, ms per call
        rows = [(state, repr(rex).strip('u\'').replace('\\\\', '\\')[:65],
                 calls, 1000 * total, 1000 * total / calls)
                for (state, rex), (calls, total) in rawdata.items()]
        rows.sort(key=lambda row: row[self._prof_sort_index], reverse=True)
        sum_total = sum(row[3] for row in rows)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
        print('-' * 110)
        for row in rows:
            print('%-20s %-65s %5d %8.4f %8.4f' % row)
        print('=' * 110)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__init__.py ADDED
@@ -0,0 +1,362 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers
3
+ ~~~~~~~~~~~~~~~
4
+
5
+ Pygments lexers.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+ import sys
13
+ import types
14
+ import fnmatch
15
+ from os.path import basename
16
+
17
+ from pip._vendor.pygments.lexers._mapping import LEXERS
18
+ from pip._vendor.pygments.modeline import get_filetype_from_buffer
19
+ from pip._vendor.pygments.plugin import find_plugin_lexers
20
+ from pip._vendor.pygments.util import ClassNotFound, guess_decode
21
+
22
# Old lexer class names kept as aliases for backwards compatibility;
# resolved lazily in _automodule.__getattr__ below.
COMPAT = {
    'Python3Lexer': 'PythonLexer',
    'Python3TracebackLexer': 'PythonTracebackLexer',
    'LeanLexer': 'Lean3Lexer',
}

__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
           'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)

# Caches: lexer name -> lexer class, and glob pattern -> compiled regex.
_lexer_cache = {}
_pattern_cache = {}
33
+
34
+
35
def _fn_matches(fn, glob):
    """Return whether the supplied file name fn matches pattern filename."""
    try:
        pattern = _pattern_cache[glob]
    except KeyError:
        # compile the glob once and cache the resulting regex
        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
    return pattern.match(fn)
41
+
42
+
43
def _load_lexers(module_name):
    """Load a lexer (and all others in the module too)."""
    module = __import__(module_name, None, None, ['__all__'])
    for export in module.__all__:
        lexer_cls = getattr(module, export)
        # register under the lexer's human-readable name
        _lexer_cache[lexer_cls.name] = lexer_cls
49
+
50
+
51
def get_all_lexers(plugins=True):
    """Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all know lexers.

    If *plugins* is true (the default), plugin lexers supplied by entrypoints
    are also returned. Otherwise, only builtin ones are considered.
    """
    # builtin lexers: drop the module-name field from each mapping entry
    yield from (entry[1:] for entry in LEXERS.values())
    if plugins:
        yield from ((lexer.name, lexer.aliases, lexer.filenames,
                     lexer.mimetypes)
                    for lexer in find_plugin_lexers())
63
+
64
+
65
def find_lexer_class(name):
    """
    Return the `Lexer` subclass that with the *name* attribute as given by
    the *name* argument.
    """
    try:
        return _lexer_cache[name]
    except KeyError:
        pass
    # lookup builtin lexers
    for module_name, lname, aliases, _, _ in LEXERS.values():
        if lname == name:
            _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if cls.name == name:
            return cls
    # falls through to an implicit None when nothing matched
81
+
82
+
83
def find_lexer_class_by_name(_alias):
    """
    Return the `Lexer` subclass that has `alias` in its aliases list, without
    instantiating it.

    Like `get_lexer_by_name`, but does not instantiate the class.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
    found.

    .. versionadded:: 2.2
    """
    if not _alias:
        raise ClassNotFound(f'no lexer for alias {_alias!r} found')
    wanted = _alias.lower()
    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.values():
        if wanted in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if wanted in cls.aliases:
            return cls
    raise ClassNotFound(f'no lexer for alias {_alias!r} found')
108
+
109
+
110
def get_lexer_by_name(_alias, **options):
    """
    Return an instance of a `Lexer` subclass that has `alias` in its
    aliases list. The lexer is given the `options` at its
    instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
    found.
    """
    # The lookup logic (builtin mapping first, then plugin entrypoints,
    # with identical error messages) is exactly find_lexer_class_by_name;
    # delegate to it instead of duplicating it here.
    return find_lexer_class_by_name(_alias)(**options)
133
+
134
+
135
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
    """Load a lexer from a file.

    This method expects a file located relative to the current working
    directory, which contains a Lexer class. By default, it expects the
    Lexer to be named CustomLexer; you can specify your own class name
    as the second argument to this function.

    Users should be very careful with the input, because this method
    is equivalent to running eval on the input file.

    Raises ClassNotFound if there are any problems importing the Lexer.

    .. versionadded:: 2.2
    """
    try:
        # This empty dict will contain the namespace for the exec'd file
        custom_namespace = {}
        with open(filename, 'rb') as f:
            # SECURITY: this executes arbitrary code from *filename*;
            # only call it on trusted input.
            exec(f.read(), custom_namespace)
        # Retrieve the class `lexername` from that namespace
        if lexername not in custom_namespace:
            # include the offending filename in the error (it was lost
            # and replaced by a literal placeholder before)
            raise ClassNotFound(f'no valid {lexername} class found in {filename}')
        lexer_class = custom_namespace[lexername]
        # And finally instantiate it with the options
        return lexer_class(**options)
    except OSError as err:
        raise ClassNotFound(f'cannot read {filename}: {err}') from err
    except ClassNotFound:
        raise
    except Exception as err:
        raise ClassNotFound(f'error when loading custom lexer: {err}') from err
167
+
168
+
169
def find_lexer_class_for_filename(_fn, code=None):
    """Get a lexer for a filename.

    If multiple lexers match the filename pattern, use ``analyse_text()`` to
    figure out which one is more appropriate.

    Returns None if not found.
    """
    fn = basename(_fn)
    candidates = []
    # builtin lexers first
    for modname, name, _, filenames, _ in LEXERS.values():
        for pattern in filenames:
            if _fn_matches(fn, pattern):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                candidates.append((_lexer_cache[name], pattern))
    # then plugin lexers
    for cls in find_plugin_lexers():
        for pattern in cls.filenames:
            if _fn_matches(fn, pattern):
                candidates.append((cls, pattern))

    if isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = guess_decode(code)

    def get_rating(info):
        cls, pattern = info
        # explicit (non-wildcard) patterns get a bonus
        bonus = 0.5 if '*' not in pattern else 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class. The default implementation returns None which
        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus, cls.__name__
        return cls.priority + bonus, cls.__name__

    if candidates:
        # highest-rated candidate wins; returns None when nothing matched
        return max(candidates, key=get_rating)[0]
210
+
211
+
212
def get_lexer_for_filename(_fn, code=None, **options):
    """Get a lexer for a filename.

    Return a `Lexer` subclass instance that has a filename pattern
    matching `fn`. The lexer is given the `options` at its
    instantiation.

    Raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
    is found.

    If multiple lexers match the filename pattern, use their ``analyse_text()``
    methods to figure out which one is more appropriate.
    """
    lexer_cls = find_lexer_class_for_filename(_fn, code)
    if lexer_cls is None:
        raise ClassNotFound(f'no lexer for filename {_fn!r} found')
    return lexer_cls(**options)
229
+
230
+
231
def get_lexer_for_mimetype(_mime, **options):
    """
    Return a `Lexer` subclass instance that has `mime` in its mimetype
    list. The lexer is given the `options` at its instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if not lexer for that mimetype
    is found.
    """
    # builtin lexers first
    for modname, name, _, _, mimetypes in LEXERS.values():
        if _mime not in mimetypes:
            continue
        if name not in _lexer_cache:
            _load_lexers(modname)
        return _lexer_cache[name](**options)
    # then plugin lexers
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound(f'no lexer for mimetype {_mime!r} found')
248
+
249
+
250
def _iter_lexerclasses(plugins=True):
    """Return an iterator over all lexer classes."""
    # deterministic order: sorted by mapping key
    for _, entry in sorted(LEXERS.items()):
        module_name, name = entry[:2]
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    if plugins:
        yield from find_plugin_lexers()
259
+
260
+
261
def guess_lexer_for_filename(_fn, _text, **options):
    """
    As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
    or `alias_filenames` that matches `filename` are taken into consideration.

    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
    handle the content.
    """
    fn = basename(_fn)
    # primary[lexer] records whether the match came from `filenames`
    # (True) or only from `alias_filenames` (False); primary matches
    # win ties during the final sort below.
    primary = {}
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = True
        for filename in lexer.alias_filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = False
    if not matching_lexers:
        raise ClassNotFound(f'no lexer for filename {fn!r} found')
    if len(matching_lexers) == 1:
        # only one candidate: no need to analyse the text
        return matching_lexers.pop()(**options)
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            # a perfect score short-circuits the ranking
            return lexer(**options)
        result.append((rv, lexer))

    def type_sort(t):
        # sort by:
        # - analyse score
        # - is primary filename pattern?
        # - priority
        # - last resort: class name
        return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
    result.sort(key=type_sort)

    # highest-ranked candidate
    return result[-1][1](**options)
302
+
303
+
304
def guess_lexer(_text, **options):
    """
    Return a `Lexer` subclass instance that's guessed from the text in
    `text`. For that, the :meth:`.analyse_text()` method of every known lexer
    class is called with the text as argument, and the lexer which returned the
    highest value will be instantiated and returned.

    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
    handle the content.
    """

    if not isinstance(_text, str):
        # bytes input: decode using the caller-supplied encoding if any,
        # otherwise guess (BOM sniffing / utf-8 with fallback)
        inencoding = options.get('inencoding', options.get('encoding'))
        if inencoding:
            _text = _text.decode(inencoding or 'utf8')
        else:
            _text, _ = guess_decode(_text)

    # try to get a vim modeline first
    ft = get_filetype_from_buffer(_text)

    if ft is not None:
        try:
            return get_lexer_by_name(ft, **options)
        except ClassNotFound:
            # modeline named an unknown filetype: fall back to analysis
            pass

    # score every lexer; a perfect 1.0 short-circuits the search
    best_lexer = [0.0, None]
    for lexer in _iter_lexerclasses():
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        if rv > best_lexer[0]:
            best_lexer[:] = (rv, lexer)
    if not best_lexer[0] or best_lexer[1] is None:
        raise ClassNotFound('no lexer matching the text found')
    return best_lexer[1](**options)
341
+
342
+
343
class _automodule(types.ModuleType):
    """Automatically import lexers."""

    def __getattr__(self, name):
        entry = LEXERS.get(name)
        if entry:
            # import the lexer's module on first access and cache the
            # class on this module object for subsequent lookups
            _load_lexers(entry[0])
            cls = _lexer_cache[entry[1]]
            setattr(self, name, cls)
            return cls
        if name in COMPAT:
            # resolve a backwards-compatibility alias
            return getattr(self, COMPAT[name])
        raise AttributeError(name)
356
+
357
+
358
# Replace this module object in sys.modules with an _automodule instance so
# that lexer classes are imported lazily on first attribute access.
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
# Remove names copied by the dict update (and these two locals) that should
# not be exposed as module attributes.
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-311.pyc ADDED
Binary file (68.7 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/_mapping.py ADDED
@@ -0,0 +1,589 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Automatically generated by scripts/gen_mapfiles.py.
2
+ # DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
3
+
4
+ LEXERS = {
5
+ 'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
6
+ 'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
7
+ 'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()),
8
+ 'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
9
+ 'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
10
+ 'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
11
+ 'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
12
+ 'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
13
+ 'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
14
+ 'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
15
+ 'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
16
+ 'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)),
17
+ 'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
18
+ 'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
19
+ 'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
20
+ 'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()),
21
+ 'AntlrCSharpLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
22
+ 'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
23
+ 'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
24
+ 'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
25
+ 'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
26
+ 'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
27
+ 'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
28
+ 'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
29
+ 'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
30
+ 'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
31
+ 'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
32
+ 'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
33
+ 'ArturoLexer': ('pip._vendor.pygments.lexers.arturo', 'Arturo', ('arturo', 'art'), ('*.art',), ()),
34
+ 'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature', 'application/pem-certificate-chain')),
35
+ 'Asn1Lexer': ('pip._vendor.pygments.lexers.asn1', 'ASN.1', ('asn1',), ('*.asn1',), ()),
36
+ 'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
37
+ 'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),
38
+ 'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
39
+ 'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
40
+ 'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
41
+ 'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
42
+ 'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
43
+ 'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
44
+ 'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
45
+ 'BQNLexer': ('pip._vendor.pygments.lexers.bqn', 'BQN', ('bqn',), ('*.bqn',), ()),
46
+ 'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
47
+ 'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
48
+ 'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
49
+ 'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell', 'openrc'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
50
+ 'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
51
+ 'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
52
+ 'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
53
+ 'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
54
+ 'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
55
+ 'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
56
+ 'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
57
+ 'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
58
+ 'BlueprintLexer': ('pip._vendor.pygments.lexers.blueprint', 'Blueprint', ('blueprint',), ('*.blp',), ('text/x-blueprint',)),
59
+ 'BnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
60
+ 'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
61
+ 'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
62
+ 'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
63
+ 'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
64
+ 'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
65
+ 'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
66
+ 'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')),
67
+ 'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
68
+ 'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
69
+ 'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
70
+ 'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
71
+ 'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
72
+ 'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
73
+ 'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
74
+ 'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
75
+ 'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
76
+ 'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
77
+ 'CarbonLexer': ('pip._vendor.pygments.lexers.carbon', 'Carbon', ('carbon',), ('*.carbon',), ('text/x-carbon',)),
78
+ 'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
79
+ 'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
80
+ 'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
81
+ 'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
82
+ 'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
83
+ 'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
84
+ 'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
85
+ 'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
86
+ 'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
87
+ 'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
88
+ 'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
89
+ 'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
90
+ 'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
91
+ 'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
92
+ 'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
93
+ 'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
94
+ 'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
95
+ 'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
96
+ 'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
97
+ 'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
98
+ 'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
99
+ 'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
100
+ 'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()),
101
+ 'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
102
+ 'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
103
+ 'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
104
+ 'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
105
+ 'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
106
+ 'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
107
+ 'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
108
+ 'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
109
+ 'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
110
+ 'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
111
+ 'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
112
+ 'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
113
+ 'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
114
+ 'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
115
+ 'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
116
+ 'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
117
+ 'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
118
+ 'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
119
+ 'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
120
+ 'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
121
+ 'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
122
+ 'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
123
+ 'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
124
+ 'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
125
+ 'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
126
+ 'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
127
+ 'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
128
+ 'DaxLexer': ('pip._vendor.pygments.lexers.dax', 'Dax', ('dax',), ('*.dax',), ()),
129
+ 'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()),
130
+ 'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
131
+ 'DesktopLexer': ('pip._vendor.pygments.lexers.configs', 'Desktop file', ('desktop',), ('*.desktop',), ('application/x-desktop',)),
132
+ 'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
133
+ 'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
134
+ 'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
135
+ 'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
136
+ 'DnsZoneLexer': ('pip._vendor.pygments.lexers.dns', 'Zone', ('zone',), ('*.zone',), ('text/dns',)),
137
+ 'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
138
+ 'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
139
+ 'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
140
+ 'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
141
+ 'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
142
+ 'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
143
+ 'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
144
+ 'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
145
+ 'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
146
+ 'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
147
+ 'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
148
+ 'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
149
+ 'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
150
+ 'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)),
151
+ 'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
152
+ 'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)),
153
+ 'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
154
+ 'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
155
+ 'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
156
+ 'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
157
+ 'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
158
+ 'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
159
+ 'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
160
+ 'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
161
+ 'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
162
+ 'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
163
+ 'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi', '*.fsx'), ('text/x-fsharp',)),
164
+ 'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
165
+ 'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
166
+ 'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
167
+ 'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
168
+ 'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
169
+ 'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
170
+ 'FiftLexer': ('pip._vendor.pygments.lexers.fift', 'Fift', ('fift', 'fif'), ('*.fif',), ()),
171
+ 'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
172
+ 'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
173
+ 'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
174
+ 'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
175
+ 'FortranFixedLexer': ('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
176
+ 'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
177
+ 'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
178
+ 'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
179
+ 'FuncLexer': ('pip._vendor.pygments.lexers.func', 'FunC', ('func', 'fc'), ('*.fc', '*.func'), ()),
180
+ 'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
181
+ 'GAPConsoleLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP session', ('gap-console', 'gap-repl'), ('*.tst',), ()),
182
+ 'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
183
+ 'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
184
+ 'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
185
+ 'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()),
186
+ 'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
187
+ 'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()),
188
+ 'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
189
+ 'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
190
+ 'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
191
+ 'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)),
192
+ 'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
193
+ 'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)),
194
+ 'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
195
+ 'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
196
+ 'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
197
+ 'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
198
+ 'GraphQLLexer': ('pip._vendor.pygments.lexers.graphql', 'GraphQL', ('graphql',), ('*.graphql',), ()),
199
+ 'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
200
+ 'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
201
+ 'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
202
+ 'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
203
+ 'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
204
+ 'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
205
+ 'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
206
+ 'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
207
+ 'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
208
+ 'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
209
+ 'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
210
+ 'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
211
+ 'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), ('*Spec.hs',), ()),
212
+ 'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
213
+ 'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
214
+ 'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
215
+ 'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
216
+ 'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
217
+ 'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
218
+ 'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
219
+ 'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang', 'hy'), ('*.hy',), ('text/x-hy', 'application/x-hy')),
220
+ 'HybrisLexer': ('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris',), ('*.hyb',), ('text/x-hybris', 'application/x-hybris')),
221
+ 'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
222
+ 'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
223
+ 'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
224
+ 'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
225
+ 'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
226
+ 'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
227
+ 'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
228
+ 'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig'), ('text/x-ini', 'text/inf')),
229
+ 'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
230
+ 'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
231
+ 'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
232
+ 'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
233
+ 'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
234
+ 'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()),
235
+ 'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)),
236
+ 'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
237
+ 'JanetLexer': ('pip._vendor.pygments.lexers.lisp', 'Janet', ('janet',), ('*.janet', '*.jdn'), ('text/x-janet', 'application/x-janet')),
238
+ 'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
239
+ 'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
240
+ 'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
241
+ 'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
242
+ 'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
243
+ 'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
244
+ 'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
245
+ 'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
246
+ 'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
247
+ 'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
248
+ 'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
249
+ 'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()),
250
+ 'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
251
+ 'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', '*.jsonl', '*.ndjson', 'Pipfile.lock'), ('application/json', 'application/json-object', 'application/x-ndjson', 'application/jsonl', 'application/json-seq')),
252
+ 'JsonnetLexer': ('pip._vendor.pygments.lexers.jsonnet', 'Jsonnet', ('jsonnet',), ('*.jsonnet', '*.libsonnet'), ()),
253
+ 'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
254
+ 'JsxLexer': ('pip._vendor.pygments.lexers.jsx', 'JSX', ('jsx', 'react'), ('*.jsx', '*.react'), ('text/jsx', 'text/typescript-jsx')),
255
+ 'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
256
+ 'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
257
+ 'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
258
+ 'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
259
+ 'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
260
+ 'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
261
+ 'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
262
+ 'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
263
+ 'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
264
+ 'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()),
265
+ 'KustoLexer': ('pip._vendor.pygments.lexers.kusto', 'Kusto', ('kql', 'kusto'), ('*.kql', '*.kusto', '.csl'), ()),
266
+ 'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
267
+ 'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
268
+ 'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
269
+ 'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
270
+ 'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
271
+ 'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
272
+ 'LdaprcLexer': ('pip._vendor.pygments.lexers.ldap', 'LDAP configuration file', ('ldapconf', 'ldaprc'), ('.ldaprc', 'ldaprc', 'ldap.conf'), ('text/x-ldapconf',)),
273
+ 'LdifLexer': ('pip._vendor.pygments.lexers.ldap', 'LDIF', ('ldif',), ('*.ldif',), ('text/x-ldif',)),
274
+ 'Lean3Lexer': ('pip._vendor.pygments.lexers.lean', 'Lean', ('lean', 'lean3'), ('*.lean',), ('text/x-lean', 'text/x-lean3')),
275
+ 'Lean4Lexer': ('pip._vendor.pygments.lexers.lean', 'Lean4', ('lean4',), ('*.lean',), ('text/x-lean4',)),
276
+ 'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
277
+ 'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)),
278
+ 'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()),
279
+ 'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
280
+ 'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
281
+ 'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)),
282
+ 'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)),
283
+ 'LiterateHaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)),
284
+ 'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)),
285
+ 'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)),
286
+ 'LlvmLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
287
+ 'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
288
+ 'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
289
+ 'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
290
+ 'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
291
+ 'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
292
+ 'LuauLexer': ('pip._vendor.pygments.lexers.scripting', 'Luau', ('luau',), ('*.luau',), ()),
293
+ 'MCFunctionLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
294
+ 'MCSchemaLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCSchema', ('mcschema',), ('*.mcschema',), ('text/mcschema',)),
295
+ 'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
296
+ 'MIPSLexer': ('pip._vendor.pygments.lexers.mips', 'MIPS', ('mips',), ('*.mips', '*.MIPS'), ()),
297
+ 'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
298
+ 'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
299
+ 'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
300
+ 'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
301
+ 'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
302
+ 'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
303
+ 'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
304
+ 'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
305
+ 'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
306
+ 'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
307
+ 'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)),
308
+ 'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
309
+ 'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
310
+ 'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
311
+ 'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
312
+ 'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
313
+ 'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()),
314
+ 'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)),
315
+ 'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
316
+ 'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
317
+ 'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
318
+ 'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
319
+ 'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
320
+ 'MojoLexer': ('pip._vendor.pygments.lexers.mojo', 'Mojo', ('mojo', '🔥'), ('*.mojo', '*.🔥'), ('text/x-mojo', 'application/x-mojo')),
321
+ 'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
322
+ 'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
323
+ 'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
324
+ 'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
325
+ 'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
326
+ 'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
327
+ 'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
328
+ 'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
329
+ 'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
330
+ 'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
331
+ 'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
332
+ 'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
333
+ 'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
334
+ 'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
335
+ 'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
336
+ 'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
337
+ 'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
338
+ 'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
339
+ 'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
340
+ 'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
341
+ 'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
342
+ 'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM', '*.nasm'), ('text/x-nasm',)),
343
+ 'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
344
+ 'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
345
+ 'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
346
+ 'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()),
347
+ 'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
348
+ 'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
349
+ 'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
350
+ 'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
351
+ 'NitLexer': ('pip._vendor.pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
352
+ 'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
353
+ 'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)),
354
+ 'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
355
+ 'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
356
+ 'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
357
+ 'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
358
+ 'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
359
+ 'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
360
+ 'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
361
+ 'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
362
+ 'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
363
+ 'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
364
+ 'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()),
365
+ 'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
366
+ 'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
367
+ 'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
368
+ 'OpenScadLexer': ('pip._vendor.pygments.lexers.openscad', 'OpenSCAD', ('openscad',), ('*.scad',), ('application/x-openscad',)),
369
+ 'OrgLexer': ('pip._vendor.pygments.lexers.markup', 'Org Mode', ('org', 'orgmode', 'org-mode'), ('*.org',), ('text/org',)),
370
+ 'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()),
371
+ 'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
372
+ 'PanLexer': ('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
373
+ 'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
374
+ 'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
375
+ 'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
376
+ 'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
377
+ 'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
378
+ 'PhixLexer': ('pip._vendor.pygments.lexers.phix', 'Phix', ('phix',), ('*.exw',), ('text/x-phix',)),
379
+ 'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
380
+ 'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
381
+ 'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
382
+ 'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
383
+ 'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
384
+ 'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
385
+ 'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
386
+ 'PortugolLexer': ('pip._vendor.pygments.lexers.pascal', 'Portugol', ('portugol',), ('*.alg', '*.portugol'), ()),
387
+ 'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
388
+ 'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
389
+ 'PostgresExplainLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL EXPLAIN dialect', ('postgres-explain',), ('*.explain',), ('text/x-postgresql-explain',)),
390
+ 'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
391
+ 'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
392
+ 'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
393
+ 'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()),
394
+ 'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
395
+ 'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()),
396
+ 'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
397
+ 'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
398
+ 'PromelaLexer': ('pip._vendor.pygments.lexers.c_like', 'Promela', ('promela',), ('*.pml', '*.prom', '*.prm', '*.promela', '*.pr', '*.pm'), ('text/x-promela',)),
399
+ 'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
400
+ 'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
401
+ 'PrqlLexer': ('pip._vendor.pygments.lexers.prql', 'PRQL', ('prql',), ('*.prql',), ('application/prql', 'application/x-prql')),
402
+ 'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
403
+ 'PtxLexer': ('pip._vendor.pygments.lexers.ptx', 'PTX', ('ptx',), ('*.ptx',), ('text/x-ptx',)),
404
+ 'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
405
+ 'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
406
+ 'PyPyLogLexer': ('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
407
+ 'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
408
+ 'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
409
+ 'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon', 'python-console'), (), ('text/x-python-doctest',)),
410
+ 'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3', 'bazel', 'starlark'), ('*.py', '*.pyw', '*.pyi', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
411
+ 'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
412
+ 'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
413
+ 'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
414
+ 'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
415
+ 'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
416
+ 'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
417
+ 'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
418
+ 'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
419
+ 'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
420
+ 'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
421
+ 'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
422
+ 'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
423
+ 'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
424
+ 'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
425
+ 'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
426
+ 'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
427
+ 'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
428
+ 'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
429
+ 'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
430
+ 'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
431
+ 'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
432
+ 'ReasonLexer': ('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)),
433
+ 'RebolLexer': ('pip._vendor.pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
434
+ 'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
435
+ 'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
436
+ 'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
437
+ 'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()),
438
+ 'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
439
+ 'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
440
+ 'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
441
+ 'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
442
+ 'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
443
+ 'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
444
+ 'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
445
+ 'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
446
+ 'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
447
+ 'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
448
+ 'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()),
449
+ 'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
450
+ 'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')),
451
+ 'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
452
+ 'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
453
+ 'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
454
+ 'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
455
+ 'SNBTLexer': ('pip._vendor.pygments.lexers.minecraft', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
456
+ 'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
457
+ 'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
458
+ 'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),
459
+ 'ScalaLexer': ('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
460
+ 'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
461
+ 'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
462
+ 'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
463
+ 'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
464
+ 'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
465
+ 'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)),
466
+ 'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
467
+ 'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
468
+ 'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
469
+ 'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
470
+ 'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
471
+ 'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
472
+ 'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
473
+ 'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
474
+ 'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
475
+ 'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
476
+ 'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
477
+ 'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
478
+ 'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()),
479
+ 'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
480
+ 'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
481
+ 'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
482
+ 'SoongLexer': ('pip._vendor.pygments.lexers.soong', 'Soong', ('androidbp', 'bp', 'soong'), ('Android.bp',), ()),
483
+ 'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()),
484
+ 'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
485
+ 'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()),
486
+ 'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
487
+ 'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)),
488
+ 'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()),
489
+ 'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
490
+ 'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
491
+ 'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
492
+ 'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()),
493
+ 'SspLexer': ('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
494
+ 'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
495
+ 'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
496
+ 'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
497
+ 'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
498
+ 'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
499
+ 'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
500
+ 'SystemdLexer': ('pip._vendor.pygments.lexers.configs', 'Systemd', ('systemd',), ('*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ()),
501
+ 'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
502
+ 'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
503
+ 'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ('application/toml',)),
504
+ 'TactLexer': ('pip._vendor.pygments.lexers.tact', 'Tact', ('tact',), ('*.tact',), ()),
505
+ 'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
506
+ 'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)),
507
+ 'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
508
+ 'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
509
+ 'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
510
+ 'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
511
+ 'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
512
+ 'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()),
513
+ 'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)),
514
+ 'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
515
+ 'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
516
+ 'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf', 'hcl'), ('*.tf', '*.hcl'), ('application/x-tf', 'application/x-terraform')),
517
+ 'TexLexer': ('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
518
+ 'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
519
+ 'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
520
+ 'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
521
+ 'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
522
+ 'TlbLexer': ('pip._vendor.pygments.lexers.tlb', 'Tl-b', ('tlb',), ('*.tlb',), ()),
523
+ 'TlsLexer': ('pip._vendor.pygments.lexers.tls', 'TLS Presentation Language', ('tls',), (), ()),
524
+ 'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
525
+ 'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
526
+ 'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
527
+ 'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
528
+ 'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
529
+ 'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
530
+ 'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')),
531
+ 'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
532
+ 'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
533
+ 'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
534
+ 'TypstLexer': ('pip._vendor.pygments.lexers.typst', 'Typst', ('typst',), ('*.typ',), ('text/x-typst',)),
535
+ 'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()),
536
+ 'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
537
+ 'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
538
+ 'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()),
539
+ 'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
540
+ 'UrlEncodedLexer': ('pip._vendor.pygments.lexers.html', 'urlencoded', ('urlencoded',), (), ('application/x-www-form-urlencoded',)),
541
+ 'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
542
+ 'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
543
+ 'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
544
+ 'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
545
+ 'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
546
+ 'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
547
+ 'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
548
+ 'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
549
+ 'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas', 'visual-basic', 'visualbasic'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
550
+ 'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
551
+ 'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
552
+ 'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
553
+ 'VerifpalLexer': ('pip._vendor.pygments.lexers.verifpal', 'Verifpal', ('verifpal',), ('*.vp',), ('text/x-verifpal',)),
554
+ 'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
555
+ 'VhdlLexer': ('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
556
+ 'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
557
+ 'VisualPrologGrammarLexer': ('pip._vendor.pygments.lexers.vip', 'Visual Prolog Grammar', ('visualprologgrammar',), ('*.vipgrm',), ()),
558
+ 'VisualPrologLexer': ('pip._vendor.pygments.lexers.vip', 'Visual Prolog', ('visualprolog',), ('*.pro', '*.cl', '*.i', '*.pack', '*.ph'), ()),
559
+ 'VyperLexer': ('pip._vendor.pygments.lexers.vyper', 'Vyper', ('vyper',), ('*.vy',), ()),
560
+ 'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
561
+ 'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
562
+ 'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
563
+ 'WgslLexer': ('pip._vendor.pygments.lexers.wgsl', 'WebGPU Shading Language', ('wgsl',), ('*.wgsl',), ('text/wgsl',)),
564
+ 'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
565
+ 'WikitextLexer': ('pip._vendor.pygments.lexers.markup', 'Wikitext', ('wikitext', 'mediawiki'), (), ('text/x-wiki',)),
566
+ 'WoWTocLexer': ('pip._vendor.pygments.lexers.wowtoc', 'World of Warcraft TOC', ('wowtoc',), ('*.toc',), ()),
567
+ 'WrenLexer': ('pip._vendor.pygments.lexers.wren', 'Wren', ('wren',), ('*.wren',), ()),
568
+ 'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
569
+ 'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
570
+ 'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
571
+ 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')),
572
+ 'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
573
+ 'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
574
+ 'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
575
+ 'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
576
+ 'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
577
+ 'XppLexer': ('pip._vendor.pygments.lexers.dotnet', 'X++', ('xpp', 'x++'), ('*.xpp',), ()),
578
+ 'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
579
+ 'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
580
+ 'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
581
+ 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')),
582
+ 'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
583
+ 'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
584
+ 'YaraLexer': ('pip._vendor.pygments.lexers.yara', 'YARA', ('yara', 'yar'), ('*.yar',), ('text/x-yara',)),
585
+ 'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
586
+ 'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
587
+ 'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
588
+ 'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()),
589
+ }
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py ADDED
@@ -0,0 +1,1198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers.python
3
+ ~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Lexers for Python and related languages.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import keyword
12
+
13
+ from pip._vendor.pygments.lexer import DelegatingLexer, RegexLexer, include, \
14
+ bygroups, using, default, words, combined, this
15
+ from pip._vendor.pygments.util import get_bool_opt, shebang_matches
16
+ from pip._vendor.pygments.token import Text, Comment, Operator, Keyword, Name, String, \
17
+ Number, Punctuation, Generic, Other, Error, Whitespace
18
+ from pip._vendor.pygments import unistring as uni
19
+
20
+ __all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
21
+ 'Python2Lexer', 'Python2TracebackLexer',
22
+ 'CythonLexer', 'DgLexer', 'NumPyLexer']
23
+
24
+
25
class PythonLexer(RegexLexer):
    """
    For Python source code (version 3.x).

    .. versionchanged:: 2.5
       This is now the default ``PythonLexer``. It is still available as the
       alias ``Python3Lexer``.
    """

    name = 'Python'
    url = 'https://www.python.org'
    aliases = ['python', 'py', 'sage', 'python3', 'py3', 'bazel', 'starlark']
    filenames = [
        '*.py',
        '*.pyw',
        # Type stubs
        '*.pyi',
        # Jython
        '*.jy',
        # Sage
        '*.sage',
        # SCons
        '*.sc',
        'SConstruct',
        'SConscript',
        # Skylark/Starlark (used by Bazel, Buck, and Pants)
        '*.bzl',
        'BUCK',
        'BUILD',
        'BUILD.bazel',
        'WORKSPACE',
        # Twisted Application infrastructure
        '*.tac',
    ]
    mimetypes = ['text/x-python', 'application/x-python',
                 'text/x-python3', 'application/x-python3']
    version_added = '0.10'

    # Regex fragment matching one Unicode identifier, built from the
    # XID_Start / XID_Continue character classes in pygments' unistring tables.
    uni_name = f"[{uni.xid_start}][{uni.xid_continue}]*"

    # NOTE: innerstring_rules/fstring_rules are plain helper functions evaluated
    # at class-definition time (no `self`); they build the shared rule lists
    # reused below for the single- and double-quoted string states.
    def innerstring_rules(ttype):
        return [
            # the old style '%s' % (...) string formatting (still valid in Py3)
            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsaux%]', String.Interpol),
            # the new style '{}'.format(...) string formatting
            (r'\{'
             r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?'  # field name
             r'(\![sra])?'                       # conversion
             r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
             r'\}', String.Interpol),

            # backslashes, quotes and formatting signs must be parsed one at a time
            (r'[^\\\'"%{\n]+', ttype),
            (r'[\'"\\]', ttype),
            # unhandled string formatting sign
            (r'%|(\{{1,2})', ttype)
            # newlines are an error (use "nl" state)
        ]

    def fstring_rules(ttype):
        return [
            # Assuming that a '}' is the closing brace after format specifier.
            # Sadly, this means that we won't detect syntax error. But it's
            # more important to parse correct syntax correctly, than to
            # highlight invalid syntax.
            (r'\}', String.Interpol),
            (r'\{', String.Interpol, 'expr-inside-fstring'),
            # backslashes, quotes and formatting signs must be parsed one at a time
            (r'[^\\\'"{}\n]+', ttype),
            (r'[\'"\\]', ttype),
            # newlines are an error (use "nl" state)
        ]

    # State machine consumed by RegexLexer: each key is a state, each entry is
    # (regex, token-type[, next-state]).  String states are composed via
    # combined() so escape handling is shared between quote styles.
    tokens = {
        'root': [
            (r'\n', Whitespace),
            (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
             bygroups(Whitespace, String.Affix, String.Doc)),
            (r'\A#!.+$', Comment.Hashbang),
            (r'#.*$', Comment.Single),
            (r'\\\n', Text),
            (r'\\', Text),
            include('keywords'),
            include('soft-keywords'),
            (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'fromimport'),
            (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
             'import'),
            include('expr'),
        ],
        'expr': [
            # raw f-strings
            ('(?i)(rf|fr)(""")',
             bygroups(String.Affix, String.Double),
             combined('rfstringescape', 'tdqf')),
            ("(?i)(rf|fr)(''')",
             bygroups(String.Affix, String.Single),
             combined('rfstringescape', 'tsqf')),
            ('(?i)(rf|fr)(")',
             bygroups(String.Affix, String.Double),
             combined('rfstringescape', 'dqf')),
            ("(?i)(rf|fr)(')",
             bygroups(String.Affix, String.Single),
             combined('rfstringescape', 'sqf')),
            # non-raw f-strings
            ('([fF])(""")', bygroups(String.Affix, String.Double),
             combined('fstringescape', 'tdqf')),
            ("([fF])(''')", bygroups(String.Affix, String.Single),
             combined('fstringescape', 'tsqf')),
            ('([fF])(")', bygroups(String.Affix, String.Double),
             combined('fstringescape', 'dqf')),
            ("([fF])(')", bygroups(String.Affix, String.Single),
             combined('fstringescape', 'sqf')),
            # raw bytes and strings
            ('(?i)(rb|br|r)(""")',
             bygroups(String.Affix, String.Double), 'tdqs'),
            ("(?i)(rb|br|r)(''')",
             bygroups(String.Affix, String.Single), 'tsqs'),
            ('(?i)(rb|br|r)(")',
             bygroups(String.Affix, String.Double), 'dqs'),
            ("(?i)(rb|br|r)(')",
             bygroups(String.Affix, String.Single), 'sqs'),
            # non-raw strings
            ('([uU]?)(""")', bygroups(String.Affix, String.Double),
             combined('stringescape', 'tdqs')),
            ("([uU]?)(''')", bygroups(String.Affix, String.Single),
             combined('stringescape', 'tsqs')),
            ('([uU]?)(")', bygroups(String.Affix, String.Double),
             combined('stringescape', 'dqs')),
            ("([uU]?)(')", bygroups(String.Affix, String.Single),
             combined('stringescape', 'sqs')),
            # non-raw bytes
            ('([bB])(""")', bygroups(String.Affix, String.Double),
             combined('bytesescape', 'tdqs')),
            ("([bB])(''')", bygroups(String.Affix, String.Single),
             combined('bytesescape', 'tsqs')),
            ('([bB])(")', bygroups(String.Affix, String.Double),
             combined('bytesescape', 'dqs')),
            ("([bB])(')", bygroups(String.Affix, String.Single),
             combined('bytesescape', 'sqs')),

            (r'[^\S\n]+', Text),
            include('numbers'),
            (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),
            (r'[]{}:(),;[]', Punctuation),
            (r'(in|is|and|or|not)\b', Operator.Word),
            include('expr-keywords'),
            include('builtins'),
            include('magicfuncs'),
            include('magicvars'),
            include('name'),
        ],
        'expr-inside-fstring': [
            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
            # without format specifier
            (r'(=\s*)?'         # debug (https://bugs.python.org/issue36817)
             r'(\![sraf])?'     # conversion
             r'\}', String.Interpol, '#pop'),
            # with format specifier
            # we'll catch the remaining '}' in the outer scope
            (r'(=\s*)?'         # debug (https://bugs.python.org/issue36817)
             r'(\![sraf])?'     # conversion
             r':', String.Interpol, '#pop'),
            (r'\s+', Whitespace),  # allow new lines
            include('expr'),
        ],
        'expr-inside-fstring-inner': [
            (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
            (r'[])}]', Punctuation, '#pop'),
            (r'\s+', Whitespace),  # allow new lines
            include('expr'),
        ],
        'expr-keywords': [
            # Based on https://docs.python.org/3/reference/expressions.html
            (words((
                'async for', 'await', 'else', 'for', 'if', 'lambda',
                'yield', 'yield from'), suffix=r'\b'),
             Keyword),
            (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
        ],
        'keywords': [
            (words((
                'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
                'else', 'except', 'finally', 'for', 'global', 'if', 'lambda',
                'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield',
                'yield from', 'as', 'with'), suffix=r'\b'),
             Keyword),
            (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
        ],
        'soft-keywords': [
            # `match`, `case` and `_` soft keywords
            (r'(^[ \t]*)'              # at beginning of line + possible indentation
             r'(match|case)\b'         # a possible keyword
             r'(?![ \t]*(?:'           # not followed by...
             r'[:,;=^&|@~)\]}]|(?:' +  # characters and keywords that mean this isn't
                                       # pattern matching (but None/True/False is ok)
             r'|'.join(k for k in keyword.kwlist if k[0].islower()) + r')\b))',
             bygroups(Text, Keyword), 'soft-keywords-inner'),
        ],
        'soft-keywords-inner': [
            # optional `_` keyword
            (r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)),
            default('#pop')
        ],
        'builtins': [
            (words((
                '__import__', 'abs', 'aiter', 'all', 'any', 'bin', 'bool', 'bytearray',
                'breakpoint', 'bytes', 'callable', 'chr', 'classmethod', 'compile',
                'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval',
                'filter', 'float', 'format', 'frozenset', 'getattr', 'globals',
                'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'isinstance',
                'issubclass', 'iter', 'len', 'list', 'locals', 'map', 'max',
                'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow',
                'print', 'property', 'range', 'repr', 'reversed', 'round', 'set',
                'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super',
                'tuple', 'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Builtin),
            (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo),
            (words((
                'ArithmeticError', 'AssertionError', 'AttributeError',
                'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
                'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError',
                'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError',
                'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
                'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
                'NotImplementedError', 'OSError', 'OverflowError',
                'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning',
                'RuntimeError', 'RuntimeWarning', 'StopIteration',
                'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
                'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
                'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
                'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError',
                'Warning', 'WindowsError', 'ZeroDivisionError',
                # new builtin exceptions from PEP 3151
                'BlockingIOError', 'ChildProcessError', 'ConnectionError',
                'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError',
                'ConnectionResetError', 'FileExistsError', 'FileNotFoundError',
                'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
                'PermissionError', 'ProcessLookupError', 'TimeoutError',
                # others new in Python 3
                'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError',
                'EncodingWarning'),
                prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Exception),
        ],
        'magicfuncs': [
            (words((
                '__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__',
                '__and__', '__anext__', '__await__', '__bool__', '__bytes__',
                '__call__', '__complex__', '__contains__', '__del__', '__delattr__',
                '__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__',
                '__eq__', '__exit__', '__float__', '__floordiv__', '__format__',
                '__ge__', '__get__', '__getattr__', '__getattribute__',
                '__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__',
                '__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__',
                '__imul__', '__index__', '__init__', '__instancecheck__',
                '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
                '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__',
                '__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__',
                '__missing__', '__mod__', '__mul__', '__ne__', '__neg__',
                '__new__', '__next__', '__or__', '__pos__', '__pow__',
                '__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__',
                '__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__',
                '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__',
                '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
                '__rxor__', '__set__', '__setattr__', '__setitem__', '__str__',
                '__sub__', '__subclasscheck__', '__truediv__',
                '__xor__'), suffix=r'\b'),
             Name.Function.Magic),
        ],
        'magicvars': [
            (words((
                '__annotations__', '__bases__', '__class__', '__closure__',
                '__code__', '__defaults__', '__dict__', '__doc__', '__file__',
                '__func__', '__globals__', '__kwdefaults__', '__module__',
                '__mro__', '__name__', '__objclass__', '__qualname__',
                '__self__', '__slots__', '__weakref__'), suffix=r'\b'),
             Name.Variable.Magic),
        ],
        'numbers': [
            (r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)'
             r'([eE][+-]?\d(?:_?\d)*)?', Number.Float),
            (r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float),
            (r'0[oO](?:_?[0-7])+', Number.Oct),
            (r'0[bB](?:_?[01])+', Number.Bin),
            (r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex),
            (r'\d(?:_?\d)*', Number.Integer),
        ],
        'name': [
            (r'@' + uni_name, Name.Decorator),
            (r'@', Operator),  # new matrix multiplication operator
            (uni_name, Name),
        ],
        'funcname': [
            include('magicfuncs'),
            (uni_name, Name.Function, '#pop'),
            default('#pop'),
        ],
        'classname': [
            (uni_name, Name.Class, '#pop'),
        ],
        'import': [
            (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
            (r'\.', Name.Namespace),
            (uni_name, Name.Namespace),
            (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
            default('#pop')  # all else: go back
        ],
        'fromimport': [
            (r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
            (r'\.', Name.Namespace),
            # if None occurs here, it's "raise x from None", since None can
            # never be a module name
            (r'None\b', Keyword.Constant, '#pop'),
            (uni_name, Name.Namespace),
            default('#pop'),
        ],
        'rfstringescape': [
            (r'\{\{', String.Escape),
            (r'\}\}', String.Escape),
        ],
        'fstringescape': [
            include('rfstringescape'),
            include('stringescape'),
        ],
        'bytesescape': [
            (r'\\([\\abfnrtv"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'stringescape': [
            (r'\\(N\{.*?\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8})', String.Escape),
            include('bytesescape')
        ],
        'fstrings-single': fstring_rules(String.Single),
        'fstrings-double': fstring_rules(String.Double),
        'strings-single': innerstring_rules(String.Single),
        'strings-double': innerstring_rules(String.Double),
        'dqf': [
            (r'"', String.Double, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
            include('fstrings-double')
        ],
        'sqf': [
            (r"'", String.Single, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
            include('fstrings-single')
        ],
        'dqs': [
            (r'"', String.Double, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here for raw strings
            include('strings-double')
        ],
        'sqs': [
            (r"'", String.Single, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here for raw strings
            include('strings-single')
        ],
        'tdqf': [
            (r'"""', String.Double, '#pop'),
            include('fstrings-double'),
            (r'\n', String.Double)
        ],
        'tsqf': [
            (r"'''", String.Single, '#pop'),
            include('fstrings-single'),
            (r'\n', String.Single)
        ],
        'tdqs': [
            (r'"""', String.Double, '#pop'),
            include('strings-double'),
            (r'\n', String.Double)
        ],
        'tsqs': [
            (r"'''", String.Single, '#pop'),
            include('strings-single'),
            (r'\n', String.Single)
        ],
    }

    def analyse_text(text):
        # Heuristic used by guess_lexer(): a python shebang, or an early
        # `import ` statement, strongly suggests Python source.
        return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \
            'import ' in text[:1000]


# Backwards-compatible alias kept from when PythonLexer meant Python 2.
Python3Lexer = PythonLexer
414
+
415
+
416
+ class Python2Lexer(RegexLexer):
417
+ """
418
+ For Python 2.x source code.
419
+
420
+ .. versionchanged:: 2.5
421
+ This class has been renamed from ``PythonLexer``. ``PythonLexer`` now
422
+ refers to the Python 3 variant. File name patterns like ``*.py`` have
423
+ been moved to Python 3 as well.
424
+ """
425
+
426
+ name = 'Python 2.x'
427
+ url = 'https://www.python.org'
428
+ aliases = ['python2', 'py2']
429
+ filenames = [] # now taken over by PythonLexer (3.x)
430
+ mimetypes = ['text/x-python2', 'application/x-python2']
431
+ version_added = ''
432
+
433
+ def innerstring_rules(ttype):
434
+ return [
435
+ # the old style '%s' % (...) string formatting
436
+ (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
437
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
438
+ # backslashes, quotes and formatting signs must be parsed one at a time
439
+ (r'[^\\\'"%\n]+', ttype),
440
+ (r'[\'"\\]', ttype),
441
+ # unhandled string formatting sign
442
+ (r'%', ttype),
443
+ # newlines are an error (use "nl" state)
444
+ ]
445
+
446
+ tokens = {
447
+ 'root': [
448
+ (r'\n', Whitespace),
449
+ (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
450
+ bygroups(Whitespace, String.Affix, String.Doc)),
451
+ (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
452
+ bygroups(Whitespace, String.Affix, String.Doc)),
453
+ (r'[^\S\n]+', Text),
454
+ (r'\A#!.+$', Comment.Hashbang),
455
+ (r'#.*$', Comment.Single),
456
+ (r'[]{}:(),;[]', Punctuation),
457
+ (r'\\\n', Text),
458
+ (r'\\', Text),
459
+ (r'(in|is|and|or|not)\b', Operator.Word),
460
+ (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
461
+ include('keywords'),
462
+ (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
463
+ (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
464
+ (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
465
+ 'fromimport'),
466
+ (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
467
+ 'import'),
468
+ include('builtins'),
469
+ include('magicfuncs'),
470
+ include('magicvars'),
471
+ include('backtick'),
472
+ ('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
473
+ bygroups(String.Affix, String.Double), 'tdqs'),
474
+ ("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
475
+ bygroups(String.Affix, String.Single), 'tsqs'),
476
+ ('([rR]|[uUbB][rR]|[rR][uUbB])(")',
477
+ bygroups(String.Affix, String.Double), 'dqs'),
478
+ ("([rR]|[uUbB][rR]|[rR][uUbB])(')",
479
+ bygroups(String.Affix, String.Single), 'sqs'),
480
+ ('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
481
+ combined('stringescape', 'tdqs')),
482
+ ("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
483
+ combined('stringescape', 'tsqs')),
484
+ ('([uUbB]?)(")', bygroups(String.Affix, String.Double),
485
+ combined('stringescape', 'dqs')),
486
+ ("([uUbB]?)(')", bygroups(String.Affix, String.Single),
487
+ combined('stringescape', 'sqs')),
488
+ include('name'),
489
+ include('numbers'),
490
+ ],
491
+ 'keywords': [
492
+ (words((
493
+ 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
494
+ 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
495
+ 'print', 'raise', 'return', 'try', 'while', 'yield',
496
+ 'yield from', 'as', 'with'), suffix=r'\b'),
497
+ Keyword),
498
+ ],
499
+ 'builtins': [
500
+ (words((
501
+ '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
502
+ 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
503
+ 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
504
+ 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
505
+ 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
506
+ 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
507
+ 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
508
+ 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
509
+ 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
510
+ 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
511
+ 'unichr', 'unicode', 'vars', 'xrange', 'zip'),
512
+ prefix=r'(?<!\.)', suffix=r'\b'),
513
+ Name.Builtin),
514
+ (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls'
515
+ r')\b', Name.Builtin.Pseudo),
516
+ (words((
517
+ 'ArithmeticError', 'AssertionError', 'AttributeError',
518
+ 'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
519
+ 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
520
+ 'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
521
+ 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
522
+ 'MemoryError', 'NameError',
523
+ 'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning',
524
+ 'PendingDeprecationWarning', 'ReferenceError',
525
+ 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration',
526
+ 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
527
+ 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
528
+ 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
529
+ 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
530
+ 'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
531
+ Name.Exception),
532
+ ],
533
+ 'magicfuncs': [
534
+ (words((
535
+ '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
536
+ '__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
537
+ '__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__',
538
+ '__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__',
539
+ '__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__',
540
+ '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__',
541
+ '__ilshift__', '__imod__', '__imul__', '__index__', '__init__',
542
+ '__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__',
543
+ '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__',
544
+ '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__',
545
+ '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__',
546
+ '__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__',
547
+ '__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__',
548
+ '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
549
+ '__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
550
+ '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__',
551
+ '__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
552
+ '__unicode__', '__xor__'), suffix=r'\b'),
553
+ Name.Function.Magic),
554
+ ],
555
+ 'magicvars': [
556
+ (words((
557
+ '__bases__', '__class__', '__closure__', '__code__', '__defaults__',
558
+ '__dict__', '__doc__', '__file__', '__func__', '__globals__',
559
+ '__metaclass__', '__module__', '__mro__', '__name__', '__self__',
560
+ '__slots__', '__weakref__'),
561
+ suffix=r'\b'),
562
+ Name.Variable.Magic),
563
+ ],
564
+ 'numbers': [
565
+ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
566
+ (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
567
+ (r'0[0-7]+j?', Number.Oct),
568
+ (r'0[bB][01]+', Number.Bin),
569
+ (r'0[xX][a-fA-F0-9]+', Number.Hex),
570
+ (r'\d+L', Number.Integer.Long),
571
+ (r'\d+j?', Number.Integer)
572
+ ],
573
+ 'backtick': [
574
+ ('`.*?`', String.Backtick),
575
+ ],
576
+ 'name': [
577
+ (r'@[\w.]+', Name.Decorator),
578
+ (r'[a-zA-Z_]\w*', Name),
579
+ ],
580
+ 'funcname': [
581
+ include('magicfuncs'),
582
+ (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
583
+ default('#pop'),
584
+ ],
585
+ 'classname': [
586
+ (r'[a-zA-Z_]\w*', Name.Class, '#pop')
587
+ ],
588
+ 'import': [
589
+ (r'(?:[ \t]|\\\n)+', Text),
590
+ (r'as\b', Keyword.Namespace),
591
+ (r',', Operator),
592
+ (r'[a-zA-Z_][\w.]*', Name.Namespace),
593
+ default('#pop') # all else: go back
594
+ ],
595
+ 'fromimport': [
596
+ (r'(?:[ \t]|\\\n)+', Text),
597
+ (r'import\b', Keyword.Namespace, '#pop'),
598
+ # if None occurs here, it's "raise x from None", since None can
599
+ # never be a module name
600
+ (r'None\b', Name.Builtin.Pseudo, '#pop'),
601
+ # sadly, in "raise x from y" y will be highlighted as namespace too
602
+ (r'[a-zA-Z_.][\w.]*', Name.Namespace),
603
+ # anything else here also means "raise x from y" and is therefore
604
+ # not an error
605
+ default('#pop'),
606
+ ],
607
+ 'stringescape': [
608
+ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
609
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
610
+ ],
611
+ 'strings-single': innerstring_rules(String.Single),
612
+ 'strings-double': innerstring_rules(String.Double),
613
+ 'dqs': [
614
+ (r'"', String.Double, '#pop'),
615
+ (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
616
+ include('strings-double')
617
+ ],
618
+ 'sqs': [
619
+ (r"'", String.Single, '#pop'),
620
+ (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
621
+ include('strings-single')
622
+ ],
623
+ 'tdqs': [
624
+ (r'"""', String.Double, '#pop'),
625
+ include('strings-double'),
626
+ (r'\n', String.Double)
627
+ ],
628
+ 'tsqs': [
629
+ (r"'''", String.Single, '#pop'),
630
+ include('strings-single'),
631
+ (r'\n', String.Single)
632
+ ],
633
+ }
634
+
635
+ def analyse_text(text):
636
+ return shebang_matches(text, r'pythonw?2(\.\d)?')
637
+
638
class _PythonConsoleLexerBase(RegexLexer):
    """Auxiliary lexer for `PythonConsoleLexer`.

    Code tokens are output as ``Token.Other.Code``, traceback tokens as
    ``Token.Other.Traceback``.
    """
    # NOTE: this text was previously a stray string expression placed *after*
    # the class attributes, so it never became the class ``__doc__``; it is
    # now a proper docstring.

    name = 'Python console session'
    aliases = ['pycon', 'python-console']
    mimetypes = ['text/x-python-doctest']

    tokens = {
        'root': [
            # A ">>> " prompt starts a code line; continuation lines are
            # consumed by the 'continuations' state.
            (r'(>>> )(.*\n)', bygroups(Generic.Prompt, Other.Code), 'continuations'),
            # This happens, e.g., when tracebacks are embedded in documentation;
            # trailing whitespaces are often stripped in such contexts.
            (r'(>>>)(\n)', bygroups(Generic.Prompt, Whitespace)),
            (r'(\^C)?Traceback \(most recent call last\):\n', Other.Traceback, 'traceback'),
            # SyntaxError starts with this
            (r' File "[^"]+", line \d+', Other.Traceback, 'traceback'),
            (r'.*\n', Generic.Output),
        ],
        'continuations': [
            (r'(\.\.\. )(.*\n)', bygroups(Generic.Prompt, Other.Code)),
            # See above.
            (r'(\.\.\.)(\n)', bygroups(Generic.Prompt, Whitespace)),
            default('#pop'),
        ],
        'traceback': [
            # As soon as we see a traceback, consume everything until the next
            # >>> prompt.
            (r'(?=>>>( |$))', Text, '#pop'),
            (r'(KeyboardInterrupt)(\n)', bygroups(Name.Class, Whitespace)),
            (r'.*\n', Other.Traceback),
        ],
    }
673
+
674
class PythonConsoleLexer(DelegatingLexer):
    """
    For Python console output or doctests, such as:

    .. sourcecode:: pycon

        >>> a = 'foo'
        >>> print(a)
        foo
        >>> 1 / 0
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        ZeroDivisionError: integer division or modulo by zero

    Additional options:

    `python3`
        Use Python 3 lexer for code. Default is ``True``.

    .. versionadded:: 1.0
    .. versionchanged:: 2.5
       Now defaults to ``True``.
    """

    name = 'Python console session'
    aliases = ['pycon', 'python-console']
    mimetypes = ['text/x-python-doctest']
    url = 'https://python.org'
    version_added = ''

    def __init__(self, **options):
        # Select the code and traceback lexers according to the
        # ``python3`` option (Python 3 by default since 2.5).
        if get_bool_opt(options, 'python3', True):
            pylexer, tblexer = PythonLexer, PythonTracebackLexer
        else:
            pylexer, tblexer = Python2Lexer, Python2TracebackLexer

        # Two delegation layers are required: one swaps ``Other.Code``
        # for real code highlighting, the outer one swaps
        # ``Other.Traceback`` for traceback highlighting.
        # TODO: DelegatingLexer could support this directly by taking
        # tuples of auxiliary lexers and distinguishing tokens, which
        # would make this intermediary class unnecessary.
        class _ReplaceInnerCode(DelegatingLexer):
            def __init__(self, **options):
                super().__init__(pylexer, _PythonConsoleLexerBase, Other.Code, **options)

        super().__init__(tblexer, _ReplaceInnerCode, Other.Traceback, **options)
721
+
722
class PythonTracebackLexer(RegexLexer):
    """
    For Python 3.x tracebacks, with support for chained exceptions.

    .. versionchanged:: 2.5
       This is now the default ``PythonTracebackLexer``. It is still available
       as the alias ``Python3TracebackLexer``.
    """

    name = 'Python Traceback'
    aliases = ['pytb', 'py3tb']
    filenames = ['*.pytb', '*.py3tb']
    mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']
    url = 'https://python.org'
    version_added = '1.0'

    tokens = {
        'root': [
            (r'\n', Whitespace),
            # Traceback header (optionally preceded by a ^C from a
            # keyboard interrupt) switches into the 'intb' state.
            (r'^(\^C)?Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
            # Chained-exception separators (PEP 3134).
            (r'^During handling of the above exception, another '
             r'exception occurred:\n\n', Generic.Traceback),
            (r'^The above exception was the direct cause of the '
             r'following exception:\n\n', Generic.Traceback),
            # A bare "File ..." line is how a SyntaxError traceback starts.
            (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
            (r'^.*\n', Other),
        ],
        'intb': [
            # "  File "...", line N, in func" frame lines.
            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
            (r'^(  File )("[^"]+")(, line )(\d+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
            # The offending source line; delegated to the Python lexer.
            (r'^(    )(.+)(\n)',
             bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'),
            (r'^([ \t]*)(\.\.\.)(\n)',
             bygroups(Whitespace, Comment, Whitespace)),  # for doctests...
            # Final "ExceptionType: message" line pops back to 'root'.
            (r'^([^:]+)(: )(.+)(\n)',
             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
            (r'^([a-zA-Z_][\w.]*)(:?\n)',
             bygroups(Generic.Error, Whitespace), '#pop'),
            default('#pop'),
        ],
        'markers': [
            # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>`
            # error locations in Python 3.11+, or single-caret markers
            # for syntax errors before that.
            (r'^( {4,})([~^]+)(\n)',
             bygroups(Whitespace, Punctuation.Marker, Whitespace),
             '#pop'),
            default('#pop'),
        ],
    }
774
+
775
+
776
# Backwards-compatible alias: since 2.5 the Python 3 traceback lexer is
# the default ``PythonTracebackLexer``.
Python3TracebackLexer = PythonTracebackLexer
777
+
778
+
779
class Python2TracebackLexer(RegexLexer):
    """
    For Python tracebacks.

    .. versionchanged:: 2.5
       This class has been renamed from ``PythonTracebackLexer``.
       ``PythonTracebackLexer`` now refers to the Python 3 variant.
    """

    name = 'Python 2.x Traceback'
    aliases = ['py2tb']
    filenames = ['*.py2tb']
    mimetypes = ['text/x-python2-traceback']
    url = 'https://python.org'
    version_added = '0.7'

    tokens = {
        'root': [
            # Cover both (most recent call last) and (innermost last)
            # The optional ^C allows us to catch keyboard interrupt signals.
            (r'^(\^C)?(Traceback.*\n)',
             bygroups(Text, Generic.Traceback), 'intb'),
            # SyntaxError starts with this.
            (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
            (r'^.*\n', Other),
        ],
        'intb': [
            # "  File "...", line N[, in func]" frame lines.
            (r'^(  File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)),
            (r'^(  File )("[^"]+")(, line )(\d+)(\n)',
             bygroups(Text, Name.Builtin, Text, Number, Whitespace)),
            # The offending source line; delegated to the Python 2 lexer.
            (r'^(    )(.+)(\n)',
             bygroups(Text, using(Python2Lexer), Whitespace), 'marker'),
            (r'^([ \t]*)(\.\.\.)(\n)',
             bygroups(Text, Comment, Whitespace)),  # for doctests...
            # Final "ExceptionType: message" line pops back to 'root'.
            (r'^([^:]+)(: )(.+)(\n)',
             bygroups(Generic.Error, Text, Name, Whitespace), '#pop'),
            (r'^([a-zA-Z_]\w*)(:?\n)',
             bygroups(Generic.Error, Whitespace), '#pop')
        ],
        'marker': [
            # For syntax errors.
            (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'),
            default('#pop'),
        ],
    }
825
+
826
+
827
class CythonLexer(RegexLexer):
    """
    For Pyrex and Cython source code.
    """

    name = 'Cython'
    url = 'https://cython.org'
    aliases = ['cython', 'pyx', 'pyrex']
    filenames = ['*.pyx', '*.pxd', '*.pxi']
    mimetypes = ['text/x-cython', 'application/x-cython']
    version_added = '1.1'

    tokens = {
        'root': [
            (r'\n', Whitespace),
            # Module/function docstrings at the start of a line.
            (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)),
            (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)),
            (r'[^\S\n]+', Text),
            (r'#.*$', Comment),
            (r'[]{}:(),;[]', Punctuation),
            (r'\\\n', Whitespace),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # Cython C-style casts like <int>, <object?>.
            (r'(<)([a-zA-Z0-9.?]+)(>)',
             bygroups(Punctuation, Keyword.Type, Punctuation)),
            (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
            # "for ... from 0 <= i < n:" bound syntax.
            (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
             bygroups(Keyword, Number.Integer, Operator, Name, Operator,
                      Name, Punctuation)),
            include('keywords'),
            (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
            # (should actually start a block with only cdefs)
            (r'(cdef)(:)', bygroups(Keyword, Punctuation)),
            (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
            (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
            include('builtins'),
            include('backtick'),
            # Raw strings first, then ordinary strings with escapes.
            ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
            ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
            ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
            ('[uU]?"""', String, combined('stringescape', 'tdqs')),
            ("[uU]?'''", String, combined('stringescape', 'tsqs')),
            ('[uU]?"', String, combined('stringescape', 'dqs')),
            ("[uU]?'", String, combined('stringescape', 'sqs')),
            include('name'),
            include('numbers'),
        ],
        'keywords': [
            (words((
                'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
                'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
                'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
                'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
             Keyword),
            # Cython compile-time conditionals.
            (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
        ],
        'builtins': [
            (words((
                '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint',
                'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
                'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
                'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit',
                'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
                'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
                'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
                'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t',
                'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
                'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod',
                'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned',
                'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Builtin),
            (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
             r')\b', Name.Builtin.Pseudo),
            (words((
                'ArithmeticError', 'AssertionError', 'AttributeError',
                'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
                'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
                'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
                'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
                'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError',
                'OSError', 'OverflowError', 'OverflowWarning',
                'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError',
                'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError',
                'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
                'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
                'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
                'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
                'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
             Name.Exception),
        ],
        'numbers': [
            (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'0\d+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer)
        ],
        'backtick': [
            ('`.*?`', String.Backtick),
        ],
        'name': [
            (r'@\w+', Name.Decorator),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'funcname': [
            (r'[a-zA-Z_]\w*', Name.Function, '#pop')
        ],
        'cdef': [
            # After "cdef"/"cpdef": modifiers, then types and names.
            (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
            (r'(struct|enum|union|class)\b', Keyword),
            # Name directly followed by '(', ':', '#', '=' or EOL is the
            # declared function/variable name.
            (r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)',
             bygroups(Name.Function, Text), '#pop'),
            (r'([a-zA-Z_]\w*)(\s*)(,)',
             bygroups(Name.Function, Text, Punctuation)),
            (r'from\b', Keyword, '#pop'),
            (r'as\b', Keyword),
            (r':', Punctuation, '#pop'),
            (r'(?=["\'])', Text, '#pop'),
            (r'[a-zA-Z_]\w*', Keyword.Type),
            (r'.', Text),
        ],
        'classname': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
            (r'[a-zA-Z_][\w.]*', Name.Namespace),
            (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
            default('#pop')  # all else: go back
        ],
        'fromimport': [
            (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
            (r'[a-zA-Z_.][\w.]*', Name.Namespace),
            # ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
            default('#pop'),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings': [
            # %-style format specifiers inside string literals.
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\\\|\\"|\\\n', String.Escape),  # included here again for raw strings
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r"\\\\|\\'|\\\n", String.Escape),  # included here again for raw strings
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
1004
+
1005
+
1006
class DgLexer(RegexLexer):
    """
    Lexer for dg,
    a functional and object-oriented programming language
    running on the CPython 3 VM.
    """
    name = 'dg'
    aliases = ['dg']
    filenames = ['*.dg']
    mimetypes = ['text/x-dg']
    url = 'http://pyos.github.io/dg'
    version_added = '1.6'

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?$', Comment.Single),

            # Numeric literals (dg shares Python's spelling).
            (r'(?i)0b[01]+', Number.Bin),
            (r'(?i)0o[0-7]+', Number.Oct),
            (r'(?i)0x[0-9a-f]+', Number.Hex),
            (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float),
            (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float),
            (r'(?i)[+-]?[0-9]+j?', Number.Integer),

            # String literals with optional b/r prefixes.
            (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')),
            (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')),
            (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')),
            (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')),

            # Backtick-quoted infix operators.
            (r"`\w+'*`", Operator),
            (r'\b(and|in|is|or|where)\b', Operator.Word),
            (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator),

            # Note: identifiers in dg may end in primes (e.g. ``dict'``).
            (words((
                'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'',
                'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object',
                'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str',
                'super', 'tuple', 'tuple\'', 'type'),
                prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
             Name.Builtin),
            (words((
                '__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile',
                'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate',
                'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst',
                'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init',
                'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len',
                'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow',
                'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd',
                'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'),
                prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
             Name.Builtin),
            (r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
             Name.Builtin.Pseudo),

            (r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])",
             Name.Exception),
            (r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|"
             r"SystemExit)(?!['\w])", Name.Exception),

            (r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|"
             r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved),

            (r"[A-Z_]+'*(?!['\w])", Name),
            (r"[A-Z]\w+'*(?!['\w])", Keyword.Type),
            (r"\w+'*", Name),

            (r'[()]', Punctuation),
            (r'.', Error),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'string': [
            (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[E-GXc-giorsux%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String),
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop')
        ],
        'sqs': [
            (r"'", String, '#pop')
        ],
        'tdqs': [
            (r'"""', String, '#pop')
        ],
        'tsqs': [
            (r"'''", String, '#pop')
        ],
    }
1103
+
1104
+
1105
class NumPyLexer(PythonLexer):
    """
    A Python lexer recognizing Numerical Python builtins.
    """

    name = 'NumPy'
    url = 'https://numpy.org/'
    aliases = ['numpy']
    version_added = '0.10'

    # override the mimetypes to not inherit them from python
    mimetypes = []
    filenames = []

    # Names from the (historical) NumPy namespace that should be
    # highlighted as pseudo-keywords rather than plain names.
    EXTRA_KEYWORDS = {
        'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
        'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
        'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
        'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
        'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
        'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
        'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
        'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
        'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
        'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
        'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
        'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
        'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
        'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
        'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
        'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
        'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
        'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
        'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
        'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
        'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
        'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
        'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
        'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
        'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
        'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
        'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
        'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
        'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
        'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
        'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
        'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
        'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
        'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
        'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
        'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
        'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
        'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
        'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
        'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
        'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
        'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
        'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
        'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
        'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
        'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
        'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
        'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
        'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
        'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
        'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
        'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
        'set_numeric_ops', 'set_printoptions', 'set_string_function',
        'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
        'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
        'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
        'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
        'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
        'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
        'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
        'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
        'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
        'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
        'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
    }

    def get_tokens_unprocessed(self, text):
        """Post-process Python tokens, re-tagging NumPy names as keywords."""
        for index, token, value in \
                PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value

    def analyse_text(text):
        """Score text as NumPy-flavoured Python when it imports numpy."""
        ltext = text[:1000]
        return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or
                'import ' in ltext) \
            and ('import numpy' in ltext or 'from numpy import' in ltext)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/modeline.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.modeline
3
+ ~~~~~~~~~~~~~~~~~
4
+
5
+ A simple modeline parser (based on pymodeline).
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+
13
+ __all__ = ['get_filetype_from_buffer']
14
+
15
+
16
# Matches a Vim-style modeline such as "# vim: set ft=python:"; group 1
# captures the declared filetype value.
modeline_re = re.compile(r'''
    (?: vi | vim | ex ) (?: [<=>]? \d* )? :
    .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)
20
+
21
+
22
def get_filetype_from_line(l):  # noqa: E741
    """Return the filetype declared in a modeline on *l*, or ``None``."""
    match = modeline_re.search(l)
    return match.group(1) if match else None
26
+
27
+
28
def get_filetype_from_buffer(buf, max_lines=5):
    """
    Scan the buffer for modelines and return filetype if one is found.
    """
    all_lines = buf.splitlines()
    # Modelines usually sit near the end of a file: walk the final
    # ``max_lines`` lines backwards first.
    for candidate in all_lines[-1:-max_lines - 1:-1]:
        filetype = get_filetype_from_line(candidate)
        if filetype:
            return filetype
    # Otherwise try the first ``max_lines + 1`` lines, highest index first.
    for idx in range(max_lines, -1, -1):
        if idx < len(all_lines):
            filetype = get_filetype_from_line(all_lines[idx])
            if filetype:
                return filetype

    return None
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/plugin.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.plugin
3
+ ~~~~~~~~~~~~~~~
4
+
5
+ Pygments plugin interface.
6
+
7
+ lexer plugins::
8
+
9
+ [pygments.lexers]
10
+ yourlexer = yourmodule:YourLexer
11
+
12
+ formatter plugins::
13
+
14
+ [pygments.formatters]
15
+ yourformatter = yourformatter:YourFormatter
16
+ /.ext = yourformatter:YourFormatter
17
+
18
+ As you can see, you can define extensions for the formatter
19
+ with a leading slash.
20
+
21
+ syntax plugins::
22
+
23
+ [pygments.styles]
24
+ yourstyle = yourstyle:YourStyle
25
+
26
+ filter plugin::
27
+
28
+ [pygments.filter]
29
+ yourfilter = yourfilter:YourFilter
30
+
31
+
32
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
33
+ :license: BSD, see LICENSE for details.
34
+ """
35
+ from importlib.metadata import entry_points
36
+
37
# Entry-point group names under which third-party distributions may
# register Pygments plugins (see the module docstring for examples).
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
41
+
42
+
43
def iter_entry_points(group_name):
    """Return the installed entry points registered under *group_name*."""
    groups = entry_points()
    selector = getattr(groups, 'select', None)
    if selector is not None:
        # New interface in Python 3.10 and newer versions of the
        # importlib_metadata backport.
        return selector(group=group_name)
    # Older dict-like interface, deprecated in Python 3.10 and recent
    # importlib_metadata, but needed on Python 3.8 and 3.9.
    return groups.get(group_name, [])
53
+
54
+
55
def find_plugin_lexers():
    """Yield every lexer class registered via the plugin entry point."""
    yield from (ep.load() for ep in iter_entry_points(LEXER_ENTRY_POINT))
58
+
59
+
60
def find_plugin_formatters():
    """Yield ``(name, formatter_class)`` pairs for plugin formatters."""
    yield from ((ep.name, ep.load())
                for ep in iter_entry_points(FORMATTER_ENTRY_POINT))
63
+
64
+
65
def find_plugin_styles():
    """Yield ``(name, style_class)`` pairs for plugin styles."""
    yield from ((ep.name, ep.load())
                for ep in iter_entry_points(STYLE_ENTRY_POINT))
68
+
69
+
70
def find_plugin_filters():
    """Yield ``(name, filter_class)`` pairs for plugin filters."""
    yield from ((ep.name, ep.load())
                for ep in iter_entry_points(FILTER_ENTRY_POINT))
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/regexopt.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.regexopt
3
+ ~~~~~~~~~~~~~~~~~
4
+
5
+ An algorithm that generates optimized regexes for matching long lists of
6
+ literal strings.
7
+
8
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
9
+ :license: BSD, see LICENSE for details.
10
+ """
11
+
12
+ import re
13
+ from re import escape
14
+ from os.path import commonprefix
15
+ from itertools import groupby
16
+ from operator import itemgetter
17
+
18
# Characters that must be backslash-escaped inside a [...] character class.
CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
FIRST_ELEMENT = itemgetter(0)


def make_charset(letters):
    """Return a regex character class ``[...]`` matching any of *letters*."""
    escaped = CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters))
    return '[' + escaped + ']'
24
+
25
+
26
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings.

    *open_paren* is ``'('``, ``'(?:'`` or ``''``; a matching close paren is
    appended when it is non-empty.  *strings* must already be sorted.
    """
    close_paren = open_paren and ')' or ''
    if not strings:
        # Nothing left to match.
        return ''
    first = strings[0]
    if len(strings) == 1:
        # Single alternative: emit it as an escaped literal.
        return open_paren + escape(first) + close_paren
    if not first:
        # Empty string is one alternative: make the rest optional with '?'.
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # Mix of one-char and longer strings: charset | recursion.
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # Only one-char strings left: a single charset suffices.
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # All strings share a common prefix: factor it out and recurse
        # on the remainders.
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # Factor out the shared suffix (found as common prefix of the
        # reversed strings); the stripped strings must be re-sorted.
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # Last resort: partition into runs that do / do not start with the
    # first string's first character and recurse on each group.
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren
80
+
81
+
82
def regex_opt(strings, prefix='', suffix=''):
    """Return an optimized regex *source string* matching any of *strings*.

    The strings to match must be literal strings, not regexes; they are
    regex-escaped.  *prefix* and *suffix* are pre-/appended to the result.
    (Note: the value is a pattern string, not a compiled regex object.)
    """
    return prefix + regex_opt_inner(sorted(strings), '(') + suffix
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/sphinxext.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.sphinxext
3
+ ~~~~~~~~~~~~~~~~~~
4
+
5
+ Sphinx extension to generate automatic documentation of lexers,
6
+ formatters and filters.
7
+
8
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
9
+ :license: BSD, see LICENSE for details.
10
+ """
11
+
12
+ import sys
13
+
14
+ from docutils import nodes
15
+ from docutils.statemachine import ViewList
16
+ from docutils.parsers.rst import Directive
17
+ from sphinx.util.nodes import nested_parse_with_titles
18
+
19
+
20
# reST templates filled in by the PygmentsDoc directive below
# (%-formatting; field lists are indented under the class directive).
MODULEDOC = '''
.. module:: %s

%s
%s
'''

LEXERDOC = '''
.. class:: %s

    :Short names: %s
    :Filenames: %s
    :MIME types: %s

    %s

    %s

'''

FMTERDOC = '''
.. class:: %s

    :Short names: %s
    :Filenames: %s

    %s

'''

FILTERDOC = '''
.. class:: %s

    :Name: %s

    %s

'''
58
+
59
+
60
class PygmentsDoc(Directive):
    """
    A directive to collect all lexers/formatters/filters and generate
    autoclass directives for them.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {}

    def run(self):
        """Generate the requested documentation and parse it as reST."""
        self.filenames = set()
        if self.arguments[0] == 'lexers':
            out = self.document_lexers()
        elif self.arguments[0] == 'formatters':
            out = self.document_formatters()
        elif self.arguments[0] == 'filters':
            out = self.document_filters()
        elif self.arguments[0] == 'lexers_overview':
            out = self.document_lexers_overview()
        else:
            raise Exception('invalid argument for "pygmentsdoc" directive')
        node = nodes.compound()
        vl = ViewList(out.split('\n'), source='')
        nested_parse_with_titles(self.state, vl, node)
        # Record the source files we read so Sphinx rebuilds this page
        # whenever one of them changes.
        for fn in self.filenames:
            self.state.document.settings.record_dependencies.add(fn)
        return node.children

    def document_lexers_overview(self):
        """Generate a tabular overview of all lexers.

        The columns are the lexer name, the extensions handled by this lexer
        (or "None"), the aliases and a link to the lexer class."""
        from pip._vendor.pygments.lexers._mapping import LEXERS
        from pip._vendor.pygments.lexers import find_lexer_class
        out = []

        table = []

        def format_link(name, url):
            # Emit a reST hyperlink when the lexer documents a URL.
            if url:
                return f'`{name} <{url}>`_'
            return name

        for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
            lexer_cls = find_lexer_class(data[1])
            extensions = lexer_cls.filenames + lexer_cls.alias_filenames

            table.append({
                'name': format_link(data[1], lexer_cls.url),
                # Escape reST markup: '_' must become '\_' -- a bare
                # backslash would instead escape (and drop) the next char.
                'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\_') or 'None',
                'aliases': ', '.join(data[2]),
                'class': f'{data[0]}.{classname}'
            })

        column_names = ['name', 'extensions', 'aliases', 'class']
        column_lengths = [max([len(row[column]) for row in table if row[column]])
                          for column in column_names]

        def write_row(*columns):
            """Format a table row"""
            out = []
            for length, col in zip(column_lengths, columns):
                if col:
                    out.append(col.ljust(length))
                else:
                    out.append(' ' * length)

            return ' '.join(out)

        def write_separator():
            """Write a table separator row"""
            sep = ['=' * c for c in column_lengths]
            return write_row(*sep)

        out.append(write_separator())
        out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
        out.append(write_separator())
        for row in table:
            out.append(write_row(
                row['name'],
                row['extensions'],
                row['aliases'],
                f':class:`~{row["class"]}`'))
        out.append(write_separator())

        return '\n'.join(out)

    def document_lexers(self):
        """Generate autoclass documentation for every registered lexer."""
        from pip._vendor.pygments.lexers._mapping import LEXERS
        from pip._vendor import pygments
        import inspect
        import pathlib

        out = []
        modules = {}
        moduledocstrings = {}
        for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
            module = data[0]
            mod = __import__(module, None, None, [classname])
            self.filenames.add(mod.__file__)
            cls = getattr(mod, classname)
            if not cls.__doc__:
                print(f"Warning: {classname} does not have a docstring.")
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')

            # Append the lexer's example file (if any) as a code block.
            example_file = getattr(cls, '_example', None)
            if example_file:
                p = pathlib.Path(inspect.getabsfile(pygments)).parent.parent /\
                    'tests' / 'examplefiles' / example_file
                content = p.read_text(encoding='utf-8')
                if not content:
                    raise Exception(
                        f"Empty example file '{example_file}' for lexer "
                        f"{classname}")

                if data[2]:
                    lexer_name = data[2][0]
                    docstring += '\n\n    .. admonition:: Example\n'
                    docstring += f'\n      .. code-block:: {lexer_name}\n\n'
                    for line in content.splitlines():
                        docstring += f'          {line}\n'

            if cls.version_added:
                version_line = f'.. versionadded:: {cls.version_added}'
            else:
                version_line = ''

            modules.setdefault(module, []).append((
                classname,
                ', '.join(data[2]) or 'None',
                # Escape reST markup: '_' must become '\_' (see overview).
                ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
                ', '.join(data[4]) or 'None',
                docstring,
                version_line))
            if module not in moduledocstrings:
                moddoc = mod.__doc__
                if isinstance(moddoc, bytes):
                    moddoc = moddoc.decode('utf8')
                moduledocstrings[module] = moddoc

        for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
            if moduledocstrings[module] is None:
                raise Exception(f"Missing docstring for {module}")
            # Line 4 of the module docstring holds the human-readable title.
            heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
            out.append(MODULEDOC % (module, heading, '-' * len(heading)))
            for data in lexers:
                out.append(LEXERDOC % data)

        return ''.join(out)

    def document_formatters(self):
        """Generate autoclass documentation for every registered formatter."""
        from pip._vendor.pygments.formatters import FORMATTERS

        out = []
        for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
            module = data[0]
            mod = __import__(module, None, None, [classname])
            self.filenames.add(mod.__file__)
            cls = getattr(mod, classname)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            heading = cls.__name__
            out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
                                   ', '.join(data[3]).replace('*', '\\*') or 'None',
                                   docstring))
        return ''.join(out)

    def document_filters(self):
        """Generate autoclass documentation for every registered filter."""
        from pip._vendor.pygments.filters import FILTERS

        out = []
        for name, cls in FILTERS.items():
            self.filenames.add(sys.modules[cls.__module__].__file__)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            out.append(FILTERDOC % (cls.__name__, name, docstring))
        return ''.join(out)
244
+
245
+
246
def setup(app):
    """Sphinx extension entry point: register the ``pygmentsdoc`` directive."""
    app.add_directive('pygmentsdoc', PygmentsDoc)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/_mapping.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Automatically generated by scripts/gen_mapfiles.py.
# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.

# Maps style class name -> (module path, style alias, extra aliases).
STYLES = {
    'AbapStyle': ('pygments.styles.abap', 'abap', ()),
    'AlgolStyle': ('pygments.styles.algol', 'algol', ()),
    'Algol_NuStyle': ('pygments.styles.algol_nu', 'algol_nu', ()),
    'ArduinoStyle': ('pygments.styles.arduino', 'arduino', ()),
    'AutumnStyle': ('pygments.styles.autumn', 'autumn', ()),
    'BlackWhiteStyle': ('pygments.styles.bw', 'bw', ()),
    'BorlandStyle': ('pygments.styles.borland', 'borland', ()),
    'CoffeeStyle': ('pygments.styles.coffee', 'coffee', ()),
    'ColorfulStyle': ('pygments.styles.colorful', 'colorful', ()),
    'DefaultStyle': ('pygments.styles.default', 'default', ()),
    'DraculaStyle': ('pygments.styles.dracula', 'dracula', ()),
    'EmacsStyle': ('pygments.styles.emacs', 'emacs', ()),
    'FriendlyGrayscaleStyle': ('pygments.styles.friendly_grayscale', 'friendly_grayscale', ()),
    'FriendlyStyle': ('pygments.styles.friendly', 'friendly', ()),
    'FruityStyle': ('pygments.styles.fruity', 'fruity', ()),
    'GhDarkStyle': ('pygments.styles.gh_dark', 'github-dark', ()),
    'GruvboxDarkStyle': ('pygments.styles.gruvbox', 'gruvbox-dark', ()),
    'GruvboxLightStyle': ('pygments.styles.gruvbox', 'gruvbox-light', ()),
    'IgorStyle': ('pygments.styles.igor', 'igor', ()),
    'InkPotStyle': ('pygments.styles.inkpot', 'inkpot', ()),
    'LightbulbStyle': ('pygments.styles.lightbulb', 'lightbulb', ()),
    'LilyPondStyle': ('pygments.styles.lilypond', 'lilypond', ()),
    'LovelaceStyle': ('pygments.styles.lovelace', 'lovelace', ()),
    'ManniStyle': ('pygments.styles.manni', 'manni', ()),
    'MaterialStyle': ('pygments.styles.material', 'material', ()),
    'MonokaiStyle': ('pygments.styles.monokai', 'monokai', ()),
    'MurphyStyle': ('pygments.styles.murphy', 'murphy', ()),
    'NativeStyle': ('pygments.styles.native', 'native', ()),
    'NordDarkerStyle': ('pygments.styles.nord', 'nord-darker', ()),
    'NordStyle': ('pygments.styles.nord', 'nord', ()),
    'OneDarkStyle': ('pygments.styles.onedark', 'one-dark', ()),
    'ParaisoDarkStyle': ('pygments.styles.paraiso_dark', 'paraiso-dark', ()),
    'ParaisoLightStyle': ('pygments.styles.paraiso_light', 'paraiso-light', ()),
    'PastieStyle': ('pygments.styles.pastie', 'pastie', ()),
    'PerldocStyle': ('pygments.styles.perldoc', 'perldoc', ()),
    'RainbowDashStyle': ('pygments.styles.rainbow_dash', 'rainbow_dash', ()),
    'RrtStyle': ('pygments.styles.rrt', 'rrt', ()),
    'SasStyle': ('pygments.styles.sas', 'sas', ()),
    'SolarizedDarkStyle': ('pygments.styles.solarized', 'solarized-dark', ()),
    'SolarizedLightStyle': ('pygments.styles.solarized', 'solarized-light', ()),
    'StarofficeStyle': ('pygments.styles.staroffice', 'staroffice', ()),
    'StataDarkStyle': ('pygments.styles.stata_dark', 'stata-dark', ()),
    'StataLightStyle': ('pygments.styles.stata_light', 'stata-light', ()),
    'TangoStyle': ('pygments.styles.tango', 'tango', ()),
    'TracStyle': ('pygments.styles.trac', 'trac', ()),
    'VimStyle': ('pygments.styles.vim', 'vim', ()),
    'VisualStudioStyle': ('pygments.styles.vs', 'vs', ()),
    'XcodeStyle': ('pygments.styles.xcode', 'xcode', ()),
    'ZenburnStyle': ('pygments.styles.zenburn', 'zenburn', ()),
}
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.util
3
+ ~~~~~~~~~~~~~
4
+
5
+ Utility functions.
6
+
7
+ :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+ from io import TextIOWrapper
13
+
14
+
15
# Splits a shebang line into path components; spaces count as separators
# so interpreter arguments are split off too.
split_path_re = re.compile(r'[/\\ ]')
# Extracts the first part of a <!DOCTYPE ...> declaration (whitespace in
# the pattern is insignificant under re.VERBOSE).
doctype_lookup_re = re.compile(r'''
    <!DOCTYPE\s+(
     [a-zA-Z_][a-zA-Z0-9]*
     (?: \s+ # optional in HTML5
     [a-zA-Z_][a-zA-Z0-9]*\s+
     "[^"]*")?
     )
     [^>]*>
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
# Matches a complete (opening tag ... closing tag) element pair.
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
                    re.IGNORECASE | re.DOTALL | re.MULTILINE)
# Matches an <?xml ...?> declaration at the start of a document.
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
28
+
29
+
30
class ClassNotFound(ValueError):
    """Raised if one of the lookup functions didn't find a matching class."""
32
+
33
+
34
class OptionError(Exception):
    """
    This exception will be raised by all option processing functions if
    the type or value of the argument is not correct.
    """
39
+
40
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
    """
    Return ``options[optname]`` (or *default* when absent) after checking
    that it is one of the values in *allowed*; raise `OptionError` if not.
    Lower-cases the value first when *normcase* is true.
    """
    value = options.get(optname, default)
    if normcase:
        value = value.lower()
    if value not in allowed:
        raise OptionError('Value for option {} must be one of {}'.format(
            optname, ', '.join(map(str, allowed))))
    return value
51
+
52
+
53
def get_bool_opt(options, optname, default=None):
    """
    Intuitively, this is `options.get(optname, default)`, but restricted to
    Boolean value. The Booleans can be represented as string, in order to accept
    Boolean value from the command line arguments. If the key `optname` is
    present in the dictionary `options` and is not associated with a Boolean,
    raise an `OptionError`. If it is absent, `default` is returned instead.

    The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
    ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
    (matched case-insensitively).
    """
    value = options.get(optname, default)
    # Real booleans and ints pass through unchanged / via truthiness.
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return bool(value)
    if not isinstance(value, str):
        raise OptionError(f'Invalid type {value!r} for option {optname}; use '
                          '1/0, yes/no, true/false, on/off')
    lowered = value.lower()
    if lowered in ('1', 'yes', 'true', 'on'):
        return True
    if lowered in ('0', 'no', 'false', 'off'):
        return False
    raise OptionError(f'Invalid value {value!r} for option {optname}; use '
                      '1/0, yes/no, true/false, on/off')
80
+
81
+
82
def get_int_opt(options, optname, default=None):
    """As :func:`get_bool_opt`, but interpret the value as an integer."""
    value = options.get(optname, default)
    try:
        return int(value)
    except TypeError:
        raise OptionError(f'Invalid type {value!r} for option {optname}; you '
                          'must give an integer value')
    except ValueError:
        raise OptionError(f'Invalid value {value!r} for option {optname}; you '
                          'must give an integer value')
93
+
94
def get_list_opt(options, optname, default=None):
    """
    If the key `optname` from the dictionary `options` is a string,
    split it at whitespace and return it. If it is already a list
    or a tuple, it is returned as a list.
    """
    value = options.get(optname, default)
    if isinstance(value, str):
        return value.split()
    if isinstance(value, (list, tuple)):
        return list(value)
    raise OptionError(f'Invalid type {value!r} for option {optname}; you '
                      'must give a list value')
108
+
109
+
110
def docstring_headline(obj):
    """Return the first paragraph of *obj*'s docstring as a single line."""
    doc = obj.__doc__
    if not doc:
        return ''
    pieces = []
    for line in doc.strip().splitlines():
        stripped = line.strip()
        if not stripped:
            break  # stop at the first blank line (end of first paragraph)
        pieces.append(" " + stripped)
    return ''.join(pieces).lstrip()
120
+
121
+
122
def make_analysator(f):
    """Wrap *f* as a static text analyser clamped to the float range [0, 1].

    Any exception from *f*, a falsy result, or an un-floatable result maps
    to ``0.0``.
    """
    def text_analyse(text):
        try:
            result = f(text)
        except Exception:
            return 0.0
        if not result:
            return 0.0
        try:
            # Clamp the score into [0.0, 1.0].
            return min(1.0, max(0.0, float(result)))
        except (ValueError, TypeError):
            return 0.0
    text_analyse.__doc__ = f.__doc__
    return staticmethod(text_analyse)
137
+
138
+
139
def shebang_matches(text, regex):
    r"""Check if the given regular expression matches the last part of the
    shebang if one exists.

    >>> from pygments.util import shebang_matches
    >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
    True
    >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
    True
    >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
    False
    >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
    False
    >>> shebang_matches('#!/usr/bin/startsomethingwith python',
    ...                 r'python(2\.\d)?')
    True

    It also checks for common windows executable file extensions::

        >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
        True

    Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
    the same as ``'perl -e'``)

    Note that this method automatically searches the whole string (eg:
    the regular expression is wrapped in ``'^$'``)
    """
    newline = text.find('\n')
    first_line = (text[:newline] if newline >= 0 else text).lower()
    if not first_line.startswith('#!'):
        return False
    # Keep path/argument components, dropping empty parts and option flags.
    candidates = [part for part in split_path_re.split(first_line[2:].strip())
                  if part and not part.startswith('-')]
    if not candidates:
        return False
    found = candidates[-1]
    # Anchor the pattern and allow Windows executable extensions.
    matcher = re.compile(rf'^{regex}(\.(exe|cmd|bat|bin))?$', re.IGNORECASE)
    return matcher.search(found) is not None
182
+
183
+
184
def doctype_matches(text, regex):
    """Check if the doctype matches a regular expression (if present).

    Note that this method only checks the first part of a DOCTYPE.
    eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
    """
    match = doctype_lookup_re.search(text)
    if match is None:
        return False
    return re.compile(regex, re.I).match(match.group(1).strip()) is not None
195
+
196
+
197
def html_doctype_matches(text):
    """Check if the file looks like it has a html doctype."""
    return doctype_matches(text, r'html')
200
+
201
+
202
# Maps hash(text) -> bool so repeated probes of the same text are cheap.
_looks_like_xml_cache = {}


def looks_like_xml(text):
    """Check if a doctype exists or if we have some tags."""
    if xml_decl_re.match(text):
        return True
    key = hash(text)
    try:
        return _looks_like_xml_cache[key]
    except KeyError:
        # A DOCTYPE is decisive and is not cached; otherwise look for an
        # element pair in the first 1000 characters and cache that answer.
        if doctype_lookup_re.search(text) is not None:
            return True
        result = tag_re.search(text[:1000]) is not None
        _looks_like_xml_cache[key] = result
        return result
219
+
220
+
221
def surrogatepair(c):
    """Given a unicode character code with length greater than 16 bits,
    return the two 16 bit surrogate pair.
    """
    # From example D28 of: http://www.unicode.org/book/ch03.pdf
    high = 0xd7c0 + (c >> 10)
    low = 0xdc00 + (c & 0x3ff)
    return high, low
228
+
229
+
230
def format_lines(var_name, seq, raw=False, indent_level=0):
    """Formats a sequence of strings for output."""
    outer = ' ' * (indent_level * 4)
    inner = ' ' * ((indent_level + 1) * 4)
    lines = [outer + var_name + ' = (']
    for item in seq:
        if raw:
            # Items are preformatted reprs of, say, tuples.
            lines.append(inner + item + ',')
        else:
            # Force single quotes: repr of item+'"' always uses
            # single quotes, then drop the trailing '"' again.
            rep = repr(item + '"')
            lines.append(inner + rep[:-2] + rep[-1] + ',')
    lines.append(outer + ')')
    return '\n'.join(lines)
247
+
248
+
249
def duplicates_removed(it, already_seen=()):
    """
    Returns a list with duplicates removed from the iterable `it`.

    Order is preserved.  Items also present in *already_seen* are dropped.
    """
    result = []
    seen = set()
    for item in it:
        if item in seen or item in already_seen:
            continue
        seen.add(item)
        result.append(item)
    return result
263
+
264
+
265
class Future:
    """Generic class to defer some work.

    Handled specially in RegexLexerMeta, to support regex string construction
    at first use.
    """
    def get(self):
        # Subclasses must compute and return the deferred value.
        raise NotImplementedError
273
+
274
+
275
def guess_decode(text):
    """Decode *text* (bytes) with a guessed encoding.

    First try UTF-8; this should fail for non-UTF-8 encodings.
    Then try the preferred locale encoding.
    Fall back to latin-1, which always works.

    Returns a ``(decoded_text, encoding_name)`` tuple.
    """
    try:
        return text.decode('utf-8'), 'utf-8'
    except UnicodeDecodeError:
        try:
            import locale
            prefencoding = locale.getpreferredencoding()
            # Bug fix: decode with the preferred encoding we just looked
            # up.  A bare .decode() would retry UTF-8 (which already
            # failed), making this branch dead code while still claiming
            # the locale encoding in the return value.
            return text.decode(prefencoding), prefencoding
        except (UnicodeDecodeError, LookupError):
            return text.decode('latin1'), 'latin1'
294
+
295
+
296
def guess_decode_from_terminal(text, term):
    """Decode *text* coming from terminal *term*.

    First try the terminal encoding, if given.
    Then try UTF-8. Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    """
    encoding = getattr(term, 'encoding', None)
    if encoding:
        try:
            return text.decode(encoding), encoding
        except UnicodeDecodeError:
            # Fall through to the generic guessing below with the
            # original, undecoded bytes.
            pass
    return guess_decode(text)
311
+
312
+
313
def terminal_encoding(term):
    """Return our best guess of encoding for the given *term*."""
    encoding = getattr(term, 'encoding', None)
    if encoding:
        return encoding
    # No usable encoding attribute: fall back to the locale's preference.
    import locale
    return locale.getpreferredencoding()
319
+
320
+
321
class UnclosingTextIOWrapper(TextIOWrapper):
    # Don't close underlying buffer on destruction: close() only flushes,
    # so streams owned elsewhere (e.g. sys.stdout's buffer) stay usable.
    def close(self):
        self.flush()
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-311.pyc ADDED
Binary file (608 Bytes). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-311.pyc ADDED
Binary file (30.8 kB). View file
 
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/requests/__pycache__/api.cpython-311.pyc ADDED
Binary file (7.53 kB). View file