ZTWHHH commited on
Commit
039851b
·
verified ·
1 Parent(s): 9bba789

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. mgm/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc +3 -0
  3. mgm/lib/python3.10/site-packages/packaging-24.2.dist-info/INSTALLER +1 -0
  4. mgm/lib/python3.10/site-packages/packaging-24.2.dist-info/LICENSE +3 -0
  5. mgm/lib/python3.10/site-packages/packaging-24.2.dist-info/RECORD +41 -0
  6. mgm/lib/python3.10/site-packages/packaging-24.2.dist-info/REQUESTED +0 -0
  7. mgm/lib/python3.10/site-packages/pygments/__init__.py +82 -0
  8. mgm/lib/python3.10/site-packages/pygments/__main__.py +17 -0
  9. mgm/lib/python3.10/site-packages/pygments/cmdline.py +668 -0
  10. mgm/lib/python3.10/site-packages/pygments/console.py +70 -0
  11. mgm/lib/python3.10/site-packages/pygments/filter.py +70 -0
  12. mgm/lib/python3.10/site-packages/pygments/filters/__init__.py +940 -0
  13. mgm/lib/python3.10/site-packages/pygments/filters/__pycache__/__init__.cpython-310.pyc +0 -0
  14. mgm/lib/python3.10/site-packages/pygments/formatter.py +129 -0
  15. mgm/lib/python3.10/site-packages/pygments/formatters/__init__.py +157 -0
  16. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/__init__.cpython-310.pyc +0 -0
  17. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/_mapping.cpython-310.pyc +0 -0
  18. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/bbcode.cpython-310.pyc +0 -0
  19. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/groff.cpython-310.pyc +0 -0
  20. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/html.cpython-310.pyc +0 -0
  21. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/img.cpython-310.pyc +0 -0
  22. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/irc.cpython-310.pyc +0 -0
  23. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/latex.cpython-310.pyc +0 -0
  24. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/other.cpython-310.pyc +0 -0
  25. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/pangomarkup.cpython-310.pyc +0 -0
  26. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/rtf.cpython-310.pyc +0 -0
  27. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/svg.cpython-310.pyc +0 -0
  28. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/terminal.cpython-310.pyc +0 -0
  29. mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/terminal256.cpython-310.pyc +0 -0
  30. mgm/lib/python3.10/site-packages/pygments/formatters/_mapping.py +23 -0
  31. mgm/lib/python3.10/site-packages/pygments/formatters/bbcode.py +108 -0
  32. mgm/lib/python3.10/site-packages/pygments/formatters/groff.py +170 -0
  33. mgm/lib/python3.10/site-packages/pygments/formatters/html.py +995 -0
  34. mgm/lib/python3.10/site-packages/pygments/formatters/img.py +686 -0
  35. mgm/lib/python3.10/site-packages/pygments/formatters/irc.py +154 -0
  36. mgm/lib/python3.10/site-packages/pygments/formatters/latex.py +518 -0
  37. mgm/lib/python3.10/site-packages/pygments/formatters/other.py +160 -0
  38. mgm/lib/python3.10/site-packages/pygments/formatters/pangomarkup.py +83 -0
  39. mgm/lib/python3.10/site-packages/pygments/formatters/rtf.py +349 -0
  40. mgm/lib/python3.10/site-packages/pygments/formatters/svg.py +185 -0
  41. mgm/lib/python3.10/site-packages/pygments/formatters/terminal.py +127 -0
  42. mgm/lib/python3.10/site-packages/pygments/formatters/terminal256.py +338 -0
  43. mgm/lib/python3.10/site-packages/pygments/lexer.py +961 -0
  44. mgm/lib/python3.10/site-packages/pygments/lexers/_scilab_builtins.py +3093 -0
  45. mgm/lib/python3.10/site-packages/pygments/lexers/algebra.py +299 -0
  46. mgm/lib/python3.10/site-packages/pygments/lexers/apdlexer.py +593 -0
  47. mgm/lib/python3.10/site-packages/pygments/lexers/blueprint.py +173 -0
  48. mgm/lib/python3.10/site-packages/pygments/lexers/devicetree.py +108 -0
  49. mgm/lib/python3.10/site-packages/pygments/lexers/factor.py +363 -0
  50. mgm/lib/python3.10/site-packages/pygments/lexers/futhark.py +105 -0
.gitattributes CHANGED
@@ -1088,3 +1088,4 @@ vila/lib/python3.10/site-packages/opencv_python.libs/libavcodec-402e4b05.so.59.3
1088
  mgm/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1089
  videollama2/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
1090
  mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
1088
  mgm/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1089
  videollama2/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
1090
  mgm/lib/python3.10/site-packages/sklearn/metrics/cluster/_expected_mutual_info_fast.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1091
+ mgm/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
mgm/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af5124faafc65970d6d9627df82aeb6a3d16a4d21da9804a752d74acda4f0a0b
3
+ size 107104
mgm/lib/python3.10/site-packages/packaging-24.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
mgm/lib/python3.10/site-packages/packaging-24.2.dist-info/LICENSE ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ This software is made available under the terms of *either* of the licenses
2
+ found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made
3
+ under the terms of *both* these licenses.
mgm/lib/python3.10/site-packages/packaging-24.2.dist-info/RECORD ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ packaging-24.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ packaging-24.2.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197
3
+ packaging-24.2.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174
4
+ packaging-24.2.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344
5
+ packaging-24.2.dist-info/METADATA,sha256=ohH86s6k5mIfQxY2TS0LcSfADeOFa4BiCC-bxZV-pNs,3204
6
+ packaging-24.2.dist-info/RECORD,,
7
+ packaging-24.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
8
+ packaging-24.2.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
9
+ packaging/__init__.py,sha256=dk4Ta_vmdVJxYHDcfyhvQNw8V3PgSBomKNXqg-D2JDY,494
10
+ packaging/__pycache__/__init__.cpython-310.pyc,,
11
+ packaging/__pycache__/_elffile.cpython-310.pyc,,
12
+ packaging/__pycache__/_manylinux.cpython-310.pyc,,
13
+ packaging/__pycache__/_musllinux.cpython-310.pyc,,
14
+ packaging/__pycache__/_parser.cpython-310.pyc,,
15
+ packaging/__pycache__/_structures.cpython-310.pyc,,
16
+ packaging/__pycache__/_tokenizer.cpython-310.pyc,,
17
+ packaging/__pycache__/markers.cpython-310.pyc,,
18
+ packaging/__pycache__/metadata.cpython-310.pyc,,
19
+ packaging/__pycache__/requirements.cpython-310.pyc,,
20
+ packaging/__pycache__/specifiers.cpython-310.pyc,,
21
+ packaging/__pycache__/tags.cpython-310.pyc,,
22
+ packaging/__pycache__/utils.cpython-310.pyc,,
23
+ packaging/__pycache__/version.cpython-310.pyc,,
24
+ packaging/_elffile.py,sha256=cflAQAkE25tzhYmq_aCi72QfbT_tn891tPzfpbeHOwE,3306
25
+ packaging/_manylinux.py,sha256=vl5OCoz4kx80H5rwXKeXWjl9WNISGmr4ZgTpTP9lU9c,9612
26
+ packaging/_musllinux.py,sha256=p9ZqNYiOItGee8KcZFeHF_YcdhVwGHdK6r-8lgixvGQ,2694
27
+ packaging/_parser.py,sha256=s_TvTvDNK0NrM2QB3VKThdWFM4Nc0P6JnkObkl3MjpM,10236
28
+ packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
29
+ packaging/_tokenizer.py,sha256=J6v5H7Jzvb-g81xp_2QACKwO7LxHQA6ikryMU7zXwN8,5273
30
+ packaging/licenses/__init__.py,sha256=1x5M1nEYjcgwEbLt0dXwz2ukjr18DiCzC0sraQqJ-Ww,5715
31
+ packaging/licenses/__pycache__/__init__.cpython-310.pyc,,
32
+ packaging/licenses/__pycache__/_spdx.cpython-310.pyc,,
33
+ packaging/licenses/_spdx.py,sha256=oAm1ztPFwlsmCKe7lAAsv_OIOfS1cWDu9bNBkeu-2ns,48398
34
+ packaging/markers.py,sha256=c89TNzB7ZdGYhkovm6PYmqGyHxXlYVaLW591PHUNKD8,10561
35
+ packaging/metadata.py,sha256=YJibM7GYe4re8-0a3OlXmGS-XDgTEoO4tlBt2q25Bng,34762
36
+ packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
37
+ packaging/requirements.py,sha256=gYyRSAdbrIyKDY66ugIDUQjRMvxkH2ALioTmX3tnL6o,2947
38
+ packaging/specifiers.py,sha256=GG1wPNMcL0fMJO68vF53wKMdwnfehDcaI-r9NpTfilA,40074
39
+ packaging/tags.py,sha256=CFqrJzAzc2XNGexerH__T-Y5Iwq7WbsYXsiLERLWxY0,21014
40
+ packaging/utils.py,sha256=0F3Hh9OFuRgrhTgGZUl5K22Fv1YP2tZl1z_2gO6kJiA,5050
41
+ packaging/version.py,sha256=olfyuk_DPbflNkJ4wBWetXQ17c74x3DB501degUv7DY,16676
mgm/lib/python3.10/site-packages/packaging-24.2.dist-info/REQUESTED ADDED
File without changes
mgm/lib/python3.10/site-packages/pygments/__init__.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pygments
3
+ ~~~~~~~~
4
+
5
+ Pygments is a syntax highlighting package written in Python.
6
+
7
+ It is a generic syntax highlighter for general use in all kinds of software
8
+ such as forum systems, wikis or other applications that need to prettify
9
+ source code. Highlights are:
10
+
11
+ * a wide range of common languages and markup formats is supported
12
+ * special attention is paid to details, increasing quality by a fair amount
13
+ * support for new languages and formats are added easily
14
+ * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
15
+ formats that PIL supports, and ANSI sequences
16
+ * it is usable as a command-line tool and as a library
17
+ * ... and it highlights even Brainfuck!
18
+
19
+ The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
20
+
21
+ .. _Pygments master branch:
22
+ https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
23
+
24
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
25
+ :license: BSD, see LICENSE for details.
26
+ """
27
+ from io import StringIO, BytesIO
28
+
29
+ __version__ = '2.19.1'
30
+ __docformat__ = 'restructuredtext'
31
+
32
+ __all__ = ['lex', 'format', 'highlight']
33
+
34
+
35
+ def lex(code, lexer):
36
+ """
37
+ Lex `code` with the `lexer` (must be a `Lexer` instance)
38
+ and return an iterable of tokens. Currently, this only calls
39
+ `lexer.get_tokens()`.
40
+ """
41
+ try:
42
+ return lexer.get_tokens(code)
43
+ except TypeError:
44
+ # Heuristic to catch a common mistake.
45
+ from pygments.lexer import RegexLexer
46
+ if isinstance(lexer, type) and issubclass(lexer, RegexLexer):
47
+ raise TypeError('lex() argument must be a lexer instance, '
48
+ 'not a class')
49
+ raise
50
+
51
+
52
+ def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
53
+ """
54
+ Format ``tokens`` (an iterable of tokens) with the formatter ``formatter``
55
+ (a `Formatter` instance).
56
+
57
+ If ``outfile`` is given and a valid file object (an object with a
58
+ ``write`` method), the result will be written to it, otherwise it
59
+ is returned as a string.
60
+ """
61
+ try:
62
+ if not outfile:
63
+ realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
64
+ formatter.format(tokens, realoutfile)
65
+ return realoutfile.getvalue()
66
+ else:
67
+ formatter.format(tokens, outfile)
68
+ except TypeError:
69
+ # Heuristic to catch a common mistake.
70
+ from pygments.formatter import Formatter
71
+ if isinstance(formatter, type) and issubclass(formatter, Formatter):
72
+ raise TypeError('format() argument must be a formatter instance, '
73
+ 'not a class')
74
+ raise
75
+
76
+
77
+ def highlight(code, lexer, formatter, outfile=None):
78
+ """
79
+ This is the most high-level highlighting function. It combines `lex` and
80
+ `format` in one function.
81
+ """
82
+ return format(lex(code, lexer), formatter, outfile)
mgm/lib/python3.10/site-packages/pygments/__main__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.__main__
3
+ ~~~~~~~~~~~~~~~~~
4
+
5
+ Main entry point for ``python -m pygments``.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import sys
12
+ import pygments.cmdline
13
+
14
+ try:
15
+ sys.exit(pygments.cmdline.main(sys.argv))
16
+ except KeyboardInterrupt:
17
+ sys.exit(1)
mgm/lib/python3.10/site-packages/pygments/cmdline.py ADDED
@@ -0,0 +1,668 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.cmdline
3
+ ~~~~~~~~~~~~~~~~
4
+
5
+ Command line interface.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import os
12
+ import sys
13
+ import shutil
14
+ import argparse
15
+ from textwrap import dedent
16
+
17
+ from pygments import __version__, highlight
18
+ from pygments.util import ClassNotFound, OptionError, docstring_headline, \
19
+ guess_decode, guess_decode_from_terminal, terminal_encoding, \
20
+ UnclosingTextIOWrapper
21
+ from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
22
+ load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
23
+ from pygments.lexers.special import TextLexer
24
+ from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
25
+ from pygments.formatters import get_all_formatters, get_formatter_by_name, \
26
+ load_formatter_from_file, get_formatter_for_filename, find_formatter_class
27
+ from pygments.formatters.terminal import TerminalFormatter
28
+ from pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter
29
+ from pygments.filters import get_all_filters, find_filter_class
30
+ from pygments.styles import get_all_styles, get_style_by_name
31
+
32
+
33
+ def _parse_options(o_strs):
34
+ opts = {}
35
+ if not o_strs:
36
+ return opts
37
+ for o_str in o_strs:
38
+ if not o_str.strip():
39
+ continue
40
+ o_args = o_str.split(',')
41
+ for o_arg in o_args:
42
+ o_arg = o_arg.strip()
43
+ try:
44
+ o_key, o_val = o_arg.split('=', 1)
45
+ o_key = o_key.strip()
46
+ o_val = o_val.strip()
47
+ except ValueError:
48
+ opts[o_arg] = True
49
+ else:
50
+ opts[o_key] = o_val
51
+ return opts
52
+
53
+
54
+ def _parse_filters(f_strs):
55
+ filters = []
56
+ if not f_strs:
57
+ return filters
58
+ for f_str in f_strs:
59
+ if ':' in f_str:
60
+ fname, fopts = f_str.split(':', 1)
61
+ filters.append((fname, _parse_options([fopts])))
62
+ else:
63
+ filters.append((f_str, {}))
64
+ return filters
65
+
66
+
67
+ def _print_help(what, name):
68
+ try:
69
+ if what == 'lexer':
70
+ cls = get_lexer_by_name(name)
71
+ print(f"Help on the {cls.name} lexer:")
72
+ print(dedent(cls.__doc__))
73
+ elif what == 'formatter':
74
+ cls = find_formatter_class(name)
75
+ print(f"Help on the {cls.name} formatter:")
76
+ print(dedent(cls.__doc__))
77
+ elif what == 'filter':
78
+ cls = find_filter_class(name)
79
+ print(f"Help on the {name} filter:")
80
+ print(dedent(cls.__doc__))
81
+ return 0
82
+ except (AttributeError, ValueError):
83
+ print(f"{what} not found!", file=sys.stderr)
84
+ return 1
85
+
86
+
87
+ def _print_list(what):
88
+ if what == 'lexer':
89
+ print()
90
+ print("Lexers:")
91
+ print("~~~~~~~")
92
+
93
+ info = []
94
+ for fullname, names, exts, _ in get_all_lexers():
95
+ tup = (', '.join(names)+':', fullname,
96
+ exts and '(filenames ' + ', '.join(exts) + ')' or '')
97
+ info.append(tup)
98
+ info.sort()
99
+ for i in info:
100
+ print(('* {}\n {} {}').format(*i))
101
+
102
+ elif what == 'formatter':
103
+ print()
104
+ print("Formatters:")
105
+ print("~~~~~~~~~~~")
106
+
107
+ info = []
108
+ for cls in get_all_formatters():
109
+ doc = docstring_headline(cls)
110
+ tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
111
+ '(filenames ' + ', '.join(cls.filenames) + ')' or '')
112
+ info.append(tup)
113
+ info.sort()
114
+ for i in info:
115
+ print(('* {}\n {} {}').format(*i))
116
+
117
+ elif what == 'filter':
118
+ print()
119
+ print("Filters:")
120
+ print("~~~~~~~~")
121
+
122
+ for name in get_all_filters():
123
+ cls = find_filter_class(name)
124
+ print("* " + name + ':')
125
+ print(f" {docstring_headline(cls)}")
126
+
127
+ elif what == 'style':
128
+ print()
129
+ print("Styles:")
130
+ print("~~~~~~~")
131
+
132
+ for name in get_all_styles():
133
+ cls = get_style_by_name(name)
134
+ print("* " + name + ':')
135
+ print(f" {docstring_headline(cls)}")
136
+
137
+
138
+ def _print_list_as_json(requested_items):
139
+ import json
140
+ result = {}
141
+ if 'lexer' in requested_items:
142
+ info = {}
143
+ for fullname, names, filenames, mimetypes in get_all_lexers():
144
+ info[fullname] = {
145
+ 'aliases': names,
146
+ 'filenames': filenames,
147
+ 'mimetypes': mimetypes
148
+ }
149
+ result['lexers'] = info
150
+
151
+ if 'formatter' in requested_items:
152
+ info = {}
153
+ for cls in get_all_formatters():
154
+ doc = docstring_headline(cls)
155
+ info[cls.name] = {
156
+ 'aliases': cls.aliases,
157
+ 'filenames': cls.filenames,
158
+ 'doc': doc
159
+ }
160
+ result['formatters'] = info
161
+
162
+ if 'filter' in requested_items:
163
+ info = {}
164
+ for name in get_all_filters():
165
+ cls = find_filter_class(name)
166
+ info[name] = {
167
+ 'doc': docstring_headline(cls)
168
+ }
169
+ result['filters'] = info
170
+
171
+ if 'style' in requested_items:
172
+ info = {}
173
+ for name in get_all_styles():
174
+ cls = get_style_by_name(name)
175
+ info[name] = {
176
+ 'doc': docstring_headline(cls)
177
+ }
178
+ result['styles'] = info
179
+
180
+ json.dump(result, sys.stdout)
181
+
182
+ def main_inner(parser, argns):
183
+ if argns.help:
184
+ parser.print_help()
185
+ return 0
186
+
187
+ if argns.V:
188
+ print(f'Pygments version {__version__}, (c) 2006-2024 by Georg Brandl, Matthäus '
189
+ 'Chajdas and contributors.')
190
+ return 0
191
+
192
+ def is_only_option(opt):
193
+ return not any(v for (k, v) in vars(argns).items() if k != opt)
194
+
195
+ # handle ``pygmentize -L``
196
+ if argns.L is not None:
197
+ arg_set = set()
198
+ for k, v in vars(argns).items():
199
+ if v:
200
+ arg_set.add(k)
201
+
202
+ arg_set.discard('L')
203
+ arg_set.discard('json')
204
+
205
+ if arg_set:
206
+ parser.print_help(sys.stderr)
207
+ return 2
208
+
209
+ # print version
210
+ if not argns.json:
211
+ main(['', '-V'])
212
+ allowed_types = {'lexer', 'formatter', 'filter', 'style'}
213
+ largs = [arg.rstrip('s') for arg in argns.L]
214
+ if any(arg not in allowed_types for arg in largs):
215
+ parser.print_help(sys.stderr)
216
+ return 0
217
+ if not largs:
218
+ largs = allowed_types
219
+ if not argns.json:
220
+ for arg in largs:
221
+ _print_list(arg)
222
+ else:
223
+ _print_list_as_json(largs)
224
+ return 0
225
+
226
+ # handle ``pygmentize -H``
227
+ if argns.H:
228
+ if not is_only_option('H'):
229
+ parser.print_help(sys.stderr)
230
+ return 2
231
+ what, name = argns.H
232
+ if what not in ('lexer', 'formatter', 'filter'):
233
+ parser.print_help(sys.stderr)
234
+ return 2
235
+ return _print_help(what, name)
236
+
237
+ # parse -O options
238
+ parsed_opts = _parse_options(argns.O or [])
239
+
240
+ # parse -P options
241
+ for p_opt in argns.P or []:
242
+ try:
243
+ name, value = p_opt.split('=', 1)
244
+ except ValueError:
245
+ parsed_opts[p_opt] = True
246
+ else:
247
+ parsed_opts[name] = value
248
+
249
+ # encodings
250
+ inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
251
+ outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
252
+
253
+ # handle ``pygmentize -N``
254
+ if argns.N:
255
+ lexer = find_lexer_class_for_filename(argns.N)
256
+ if lexer is None:
257
+ lexer = TextLexer
258
+
259
+ print(lexer.aliases[0])
260
+ return 0
261
+
262
+ # handle ``pygmentize -C``
263
+ if argns.C:
264
+ inp = sys.stdin.buffer.read()
265
+ try:
266
+ lexer = guess_lexer(inp, inencoding=inencoding)
267
+ except ClassNotFound:
268
+ lexer = TextLexer
269
+
270
+ print(lexer.aliases[0])
271
+ return 0
272
+
273
+ # handle ``pygmentize -S``
274
+ S_opt = argns.S
275
+ a_opt = argns.a
276
+ if S_opt is not None:
277
+ f_opt = argns.f
278
+ if not f_opt:
279
+ parser.print_help(sys.stderr)
280
+ return 2
281
+ if argns.l or argns.INPUTFILE:
282
+ parser.print_help(sys.stderr)
283
+ return 2
284
+
285
+ try:
286
+ parsed_opts['style'] = S_opt
287
+ fmter = get_formatter_by_name(f_opt, **parsed_opts)
288
+ except ClassNotFound as err:
289
+ print(err, file=sys.stderr)
290
+ return 1
291
+
292
+ print(fmter.get_style_defs(a_opt or ''))
293
+ return 0
294
+
295
+ # if no -S is given, -a is not allowed
296
+ if argns.a is not None:
297
+ parser.print_help(sys.stderr)
298
+ return 2
299
+
300
+ # parse -F options
301
+ F_opts = _parse_filters(argns.F or [])
302
+
303
+ # -x: allow custom (eXternal) lexers and formatters
304
+ allow_custom_lexer_formatter = bool(argns.x)
305
+
306
+ # select lexer
307
+ lexer = None
308
+
309
+ # given by name?
310
+ lexername = argns.l
311
+ if lexername:
312
+ # custom lexer, located relative to user's cwd
313
+ if allow_custom_lexer_formatter and '.py' in lexername:
314
+ try:
315
+ filename = None
316
+ name = None
317
+ if ':' in lexername:
318
+ filename, name = lexername.rsplit(':', 1)
319
+
320
+ if '.py' in name:
321
+ # This can happen on Windows: If the lexername is
322
+ # C:\lexer.py -- return to normal load path in that case
323
+ name = None
324
+
325
+ if filename and name:
326
+ lexer = load_lexer_from_file(filename, name,
327
+ **parsed_opts)
328
+ else:
329
+ lexer = load_lexer_from_file(lexername, **parsed_opts)
330
+ except ClassNotFound as err:
331
+ print('Error:', err, file=sys.stderr)
332
+ return 1
333
+ else:
334
+ try:
335
+ lexer = get_lexer_by_name(lexername, **parsed_opts)
336
+ except (OptionError, ClassNotFound) as err:
337
+ print('Error:', err, file=sys.stderr)
338
+ return 1
339
+
340
+ # read input code
341
+ code = None
342
+
343
+ if argns.INPUTFILE:
344
+ if argns.s:
345
+ print('Error: -s option not usable when input file specified',
346
+ file=sys.stderr)
347
+ return 2
348
+
349
+ infn = argns.INPUTFILE
350
+ try:
351
+ with open(infn, 'rb') as infp:
352
+ code = infp.read()
353
+ except Exception as err:
354
+ print('Error: cannot read infile:', err, file=sys.stderr)
355
+ return 1
356
+ if not inencoding:
357
+ code, inencoding = guess_decode(code)
358
+
359
+ # do we have to guess the lexer?
360
+ if not lexer:
361
+ try:
362
+ lexer = get_lexer_for_filename(infn, code, **parsed_opts)
363
+ except ClassNotFound as err:
364
+ if argns.g:
365
+ try:
366
+ lexer = guess_lexer(code, **parsed_opts)
367
+ except ClassNotFound:
368
+ lexer = TextLexer(**parsed_opts)
369
+ else:
370
+ print('Error:', err, file=sys.stderr)
371
+ return 1
372
+ except OptionError as err:
373
+ print('Error:', err, file=sys.stderr)
374
+ return 1
375
+
376
+ elif not argns.s: # treat stdin as full file (-s support is later)
377
+ # read code from terminal, always in binary mode since we want to
378
+ # decode ourselves and be tolerant with it
379
+ code = sys.stdin.buffer.read() # use .buffer to get a binary stream
380
+ if not inencoding:
381
+ code, inencoding = guess_decode_from_terminal(code, sys.stdin)
382
+ # else the lexer will do the decoding
383
+ if not lexer:
384
+ try:
385
+ lexer = guess_lexer(code, **parsed_opts)
386
+ except ClassNotFound:
387
+ lexer = TextLexer(**parsed_opts)
388
+
389
+ else: # -s option needs a lexer with -l
390
+ if not lexer:
391
+ print('Error: when using -s a lexer has to be selected with -l',
392
+ file=sys.stderr)
393
+ return 2
394
+
395
+ # process filters
396
+ for fname, fopts in F_opts:
397
+ try:
398
+ lexer.add_filter(fname, **fopts)
399
+ except ClassNotFound as err:
400
+ print('Error:', err, file=sys.stderr)
401
+ return 1
402
+
403
+ # select formatter
404
+ outfn = argns.o
405
+ fmter = argns.f
406
+ if fmter:
407
+ # custom formatter, located relative to user's cwd
408
+ if allow_custom_lexer_formatter and '.py' in fmter:
409
+ try:
410
+ filename = None
411
+ name = None
412
+ if ':' in fmter:
413
+ # Same logic as above for custom lexer
414
+ filename, name = fmter.rsplit(':', 1)
415
+
416
+ if '.py' in name:
417
+ name = None
418
+
419
+ if filename and name:
420
+ fmter = load_formatter_from_file(filename, name,
421
+ **parsed_opts)
422
+ else:
423
+ fmter = load_formatter_from_file(fmter, **parsed_opts)
424
+ except ClassNotFound as err:
425
+ print('Error:', err, file=sys.stderr)
426
+ return 1
427
+ else:
428
+ try:
429
+ fmter = get_formatter_by_name(fmter, **parsed_opts)
430
+ except (OptionError, ClassNotFound) as err:
431
+ print('Error:', err, file=sys.stderr)
432
+ return 1
433
+
434
+ if outfn:
435
+ if not fmter:
436
+ try:
437
+ fmter = get_formatter_for_filename(outfn, **parsed_opts)
438
+ except (OptionError, ClassNotFound) as err:
439
+ print('Error:', err, file=sys.stderr)
440
+ return 1
441
+ try:
442
+ outfile = open(outfn, 'wb')
443
+ except Exception as err:
444
+ print('Error: cannot open outfile:', err, file=sys.stderr)
445
+ return 1
446
+ else:
447
+ if not fmter:
448
+ if os.environ.get('COLORTERM','') in ('truecolor', '24bit'):
449
+ fmter = TerminalTrueColorFormatter(**parsed_opts)
450
+ elif '256' in os.environ.get('TERM', ''):
451
+ fmter = Terminal256Formatter(**parsed_opts)
452
+ else:
453
+ fmter = TerminalFormatter(**parsed_opts)
454
+ outfile = sys.stdout.buffer
455
+
456
+ # determine output encoding if not explicitly selected
457
+ if not outencoding:
458
+ if outfn:
459
+ # output file? use lexer encoding for now (can still be None)
460
+ fmter.encoding = inencoding
461
+ else:
462
+ # else use terminal encoding
463
+ fmter.encoding = terminal_encoding(sys.stdout)
464
+
465
+ # provide coloring under Windows, if possible
466
+ if not outfn and sys.platform in ('win32', 'cygwin') and \
467
+ fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
468
+ # unfortunately colorama doesn't support binary streams on Py3
469
+ outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
470
+ fmter.encoding = None
471
+ try:
472
+ import colorama.initialise
473
+ except ImportError:
474
+ pass
475
+ else:
476
+ outfile = colorama.initialise.wrap_stream(
477
+ outfile, convert=None, strip=None, autoreset=False, wrap=True)
478
+
479
+ # When using the LaTeX formatter and the option `escapeinside` is
480
+ # specified, we need a special lexer which collects escaped text
481
+ # before running the chosen language lexer.
482
+ escapeinside = parsed_opts.get('escapeinside', '')
483
+ if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
484
+ left = escapeinside[0]
485
+ right = escapeinside[1]
486
+ lexer = LatexEmbeddedLexer(left, right, lexer)
487
+
488
+ # ... and do it!
489
+ if not argns.s:
490
+ # process whole input as per normal...
491
+ try:
492
+ highlight(code, lexer, fmter, outfile)
493
+ finally:
494
+ if outfn:
495
+ outfile.close()
496
+ return 0
497
+ else:
498
+ # line by line processing of stdin (eg: for 'tail -f')...
499
+ try:
500
+ while 1:
501
+ line = sys.stdin.buffer.readline()
502
+ if not line:
503
+ break
504
+ if not inencoding:
505
+ line = guess_decode_from_terminal(line, sys.stdin)[0]
506
+ highlight(line, lexer, fmter, outfile)
507
+ if hasattr(outfile, 'flush'):
508
+ outfile.flush()
509
+ return 0
510
+ except KeyboardInterrupt: # pragma: no cover
511
+ return 0
512
+ finally:
513
+ if outfn:
514
+ outfile.close()
515
+
516
+
517
+ class HelpFormatter(argparse.HelpFormatter):
518
+ def __init__(self, prog, indent_increment=2, max_help_position=16, width=None):
519
+ if width is None:
520
+ try:
521
+ width = shutil.get_terminal_size().columns - 2
522
+ except Exception:
523
+ pass
524
+ argparse.HelpFormatter.__init__(self, prog, indent_increment,
525
+ max_help_position, width)
526
+
527
+
528
+ def main(args=sys.argv):
529
+ """
530
+ Main command line entry point.
531
+ """
532
+ desc = "Highlight an input file and write the result to an output file."
533
+ parser = argparse.ArgumentParser(description=desc, add_help=False,
534
+ formatter_class=HelpFormatter)
535
+
536
+ operation = parser.add_argument_group('Main operation')
537
+ lexersel = operation.add_mutually_exclusive_group()
538
+ lexersel.add_argument(
539
+ '-l', metavar='LEXER',
540
+ help='Specify the lexer to use. (Query names with -L.) If not '
541
+ 'given and -g is not present, the lexer is guessed from the filename.')
542
+ lexersel.add_argument(
543
+ '-g', action='store_true',
544
+ help='Guess the lexer from the file contents, or pass through '
545
+ 'as plain text if nothing can be guessed.')
546
+ operation.add_argument(
547
+ '-F', metavar='FILTER[:options]', action='append',
548
+ help='Add a filter to the token stream. (Query names with -L.) '
549
+ 'Filter options are given after a colon if necessary.')
550
+ operation.add_argument(
551
+ '-f', metavar='FORMATTER',
552
+ help='Specify the formatter to use. (Query names with -L.) '
553
+ 'If not given, the formatter is guessed from the output filename, '
554
+ 'and defaults to the terminal formatter if the output is to the '
555
+ 'terminal or an unknown file extension.')
556
+ operation.add_argument(
557
+ '-O', metavar='OPTION=value[,OPTION=value,...]', action='append',
558
+ help='Give options to the lexer and formatter as a comma-separated '
559
+ 'list of key-value pairs. '
560
+ 'Example: `-O bg=light,python=cool`.')
561
+ operation.add_argument(
562
+ '-P', metavar='OPTION=value', action='append',
563
+ help='Give a single option to the lexer and formatter - with this '
564
+ 'you can pass options whose value contains commas and equal signs. '
565
+ 'Example: `-P "heading=Pygments, the Python highlighter"`.')
566
+ operation.add_argument(
567
+ '-o', metavar='OUTPUTFILE',
568
+ help='Where to write the output. Defaults to standard output.')
569
+
570
+ operation.add_argument(
571
+ 'INPUTFILE', nargs='?',
572
+ help='Where to read the input. Defaults to standard input.')
573
+
574
+ flags = parser.add_argument_group('Operation flags')
575
+ flags.add_argument(
576
+ '-v', action='store_true',
577
+ help='Print a detailed traceback on unhandled exceptions, which '
578
+ 'is useful for debugging and bug reports.')
579
+ flags.add_argument(
580
+ '-s', action='store_true',
581
+ help='Process lines one at a time until EOF, rather than waiting to '
582
+ 'process the entire file. This only works for stdin, only for lexers '
583
+ 'with no line-spanning constructs, and is intended for streaming '
584
+ 'input such as you get from `tail -f`. '
585
+ 'Example usage: `tail -f sql.log | pygmentize -s -l sql`.')
586
+ flags.add_argument(
587
+ '-x', action='store_true',
588
+ help='Allow custom lexers and formatters to be loaded from a .py file '
589
+ 'relative to the current working directory. For example, '
590
+ '`-l ./customlexer.py -x`. By default, this option expects a file '
591
+ 'with a class named CustomLexer or CustomFormatter; you can also '
592
+ 'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). '
593
+ 'Users should be very careful not to use this option with untrusted '
594
+ 'files, because it will import and run them.')
595
+ flags.add_argument('--json', help='Output as JSON. This can '
596
+ 'be only used in conjunction with -L.',
597
+ default=False,
598
+ action='store_true')
599
+
600
+ special_modes_group = parser.add_argument_group(
601
+ 'Special modes - do not do any highlighting')
602
+ special_modes = special_modes_group.add_mutually_exclusive_group()
603
+ special_modes.add_argument(
604
+ '-S', metavar='STYLE -f formatter',
605
+ help='Print style definitions for STYLE for a formatter '
606
+ 'given with -f. The argument given by -a is formatter '
607
+ 'dependent.')
608
+ special_modes.add_argument(
609
+ '-L', nargs='*', metavar='WHAT',
610
+ help='List lexers, formatters, styles or filters -- '
611
+ 'give additional arguments for the thing(s) you want to list '
612
+ '(e.g. "styles"), or omit them to list everything.')
613
+ special_modes.add_argument(
614
+ '-N', metavar='FILENAME',
615
+ help='Guess and print out a lexer name based solely on the given '
616
+ 'filename. Does not take input or highlight anything. If no specific '
617
+ 'lexer can be determined, "text" is printed.')
618
+ special_modes.add_argument(
619
+ '-C', action='store_true',
620
+ help='Like -N, but print out a lexer name based solely on '
621
+ 'a given content from standard input.')
622
+ special_modes.add_argument(
623
+ '-H', action='store', nargs=2, metavar=('NAME', 'TYPE'),
624
+ help='Print detailed help for the object <name> of type <type>, '
625
+ 'where <type> is one of "lexer", "formatter" or "filter".')
626
+ special_modes.add_argument(
627
+ '-V', action='store_true',
628
+ help='Print the package version.')
629
+ special_modes.add_argument(
630
+ '-h', '--help', action='store_true',
631
+ help='Print this help.')
632
+ special_modes_group.add_argument(
633
+ '-a', metavar='ARG',
634
+ help='Formatter-specific additional argument for the -S (print '
635
+ 'style sheet) mode.')
636
+
637
+ argns = parser.parse_args(args[1:])
638
+
639
+ try:
640
+ return main_inner(parser, argns)
641
+ except BrokenPipeError:
642
+ # someone closed our stdout, e.g. by quitting a pager.
643
+ return 0
644
+ except Exception:
645
+ if argns.v:
646
+ print(file=sys.stderr)
647
+ print('*' * 65, file=sys.stderr)
648
+ print('An unhandled exception occurred while highlighting.',
649
+ file=sys.stderr)
650
+ print('Please report the whole traceback to the issue tracker at',
651
+ file=sys.stderr)
652
+ print('<https://github.com/pygments/pygments/issues>.',
653
+ file=sys.stderr)
654
+ print('*' * 65, file=sys.stderr)
655
+ print(file=sys.stderr)
656
+ raise
657
+ import traceback
658
+ info = traceback.format_exception(*sys.exc_info())
659
+ msg = info[-1].strip()
660
+ if len(info) >= 3:
661
+ # extract relevant file and position info
662
+ msg += '\n (f{})'.format(info[-2].split('\n')[0].strip()[1:])
663
+ print(file=sys.stderr)
664
+ print('*** Error while highlighting:', file=sys.stderr)
665
+ print(msg, file=sys.stderr)
666
+ print('*** If this is a bug you want to report, please rerun with -v.',
667
+ file=sys.stderr)
668
+ return 1
mgm/lib/python3.10/site-packages/pygments/console.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.console
3
+ ~~~~~~~~~~~~~~~~
4
+
5
+ Format colored console output.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ esc = "\x1b["
12
+
13
+ codes = {}
14
+ codes[""] = ""
15
+ codes["reset"] = esc + "39;49;00m"
16
+
17
+ codes["bold"] = esc + "01m"
18
+ codes["faint"] = esc + "02m"
19
+ codes["standout"] = esc + "03m"
20
+ codes["underline"] = esc + "04m"
21
+ codes["blink"] = esc + "05m"
22
+ codes["overline"] = esc + "06m"
23
+
24
+ dark_colors = ["black", "red", "green", "yellow", "blue",
25
+ "magenta", "cyan", "gray"]
26
+ light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
27
+ "brightmagenta", "brightcyan", "white"]
28
+
29
+ x = 30
30
+ for dark, light in zip(dark_colors, light_colors):
31
+ codes[dark] = esc + "%im" % x
32
+ codes[light] = esc + "%im" % (60 + x)
33
+ x += 1
34
+
35
+ del dark, light, x
36
+
37
+ codes["white"] = codes["bold"]
38
+
39
+
40
+ def reset_color():
41
+ return codes["reset"]
42
+
43
+
44
+ def colorize(color_key, text):
45
+ return codes[color_key] + text + codes["reset"]
46
+
47
+
48
+ def ansiformat(attr, text):
49
+ """
50
+ Format ``text`` with a color and/or some attributes::
51
+
52
+ color normal color
53
+ *color* bold color
54
+ _color_ underlined color
55
+ +color+ blinking color
56
+ """
57
+ result = []
58
+ if attr[:1] == attr[-1:] == '+':
59
+ result.append(codes['blink'])
60
+ attr = attr[1:-1]
61
+ if attr[:1] == attr[-1:] == '*':
62
+ result.append(codes['bold'])
63
+ attr = attr[1:-1]
64
+ if attr[:1] == attr[-1:] == '_':
65
+ result.append(codes['underline'])
66
+ attr = attr[1:-1]
67
+ result.append(codes[attr])
68
+ result.append(text)
69
+ result.append(codes['reset'])
70
+ return ''.join(result)
mgm/lib/python3.10/site-packages/pygments/filter.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.filter
3
+ ~~~~~~~~~~~~~~~
4
+
5
+ Module that implements the default filter.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+
12
+ def apply_filters(stream, filters, lexer=None):
13
+ """
14
+ Use this method to apply an iterable of filters to
15
+ a stream. If lexer is given it's forwarded to the
16
+ filter, otherwise the filter receives `None`.
17
+ """
18
+ def _apply(filter_, stream):
19
+ yield from filter_.filter(lexer, stream)
20
+ for filter_ in filters:
21
+ stream = _apply(filter_, stream)
22
+ return stream
23
+
24
+
25
+ def simplefilter(f):
26
+ """
27
+ Decorator that converts a function into a filter::
28
+
29
+ @simplefilter
30
+ def lowercase(self, lexer, stream, options):
31
+ for ttype, value in stream:
32
+ yield ttype, value.lower()
33
+ """
34
+ return type(f.__name__, (FunctionFilter,), {
35
+ '__module__': getattr(f, '__module__'),
36
+ '__doc__': f.__doc__,
37
+ 'function': f,
38
+ })
39
+
40
+
41
+ class Filter:
42
+ """
43
+ Default filter. Subclass this class or use the `simplefilter`
44
+ decorator to create own filters.
45
+ """
46
+
47
+ def __init__(self, **options):
48
+ self.options = options
49
+
50
+ def filter(self, lexer, stream):
51
+ raise NotImplementedError()
52
+
53
+
54
+ class FunctionFilter(Filter):
55
+ """
56
+ Abstract class used by `simplefilter` to create simple
57
+ function filters on the fly. The `simplefilter` decorator
58
+ automatically creates subclasses of this class for
59
+ functions passed to it.
60
+ """
61
+ function = None
62
+
63
+ def __init__(self, **options):
64
+ if not hasattr(self, 'function'):
65
+ raise TypeError(f'{self.__class__.__name__!r} used without bound function')
66
+ Filter.__init__(self, **options)
67
+
68
+ def filter(self, lexer, stream):
69
+ # pylint: disable=not-callable
70
+ yield from self.function(lexer, stream, self.options)
mgm/lib/python3.10/site-packages/pygments/filters/__init__.py ADDED
@@ -0,0 +1,940 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.filters
3
+ ~~~~~~~~~~~~~~~~
4
+
5
+ Module containing filter lookup functions and default
6
+ filters.
7
+
8
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
9
+ :license: BSD, see LICENSE for details.
10
+ """
11
+
12
+ import re
13
+
14
+ from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
15
+ string_to_tokentype
16
+ from pygments.filter import Filter
17
+ from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
18
+ get_choice_opt, ClassNotFound, OptionError
19
+ from pygments.plugin import find_plugin_filters
20
+
21
+
22
+ def find_filter_class(filtername):
23
+ """Lookup a filter by name. Return None if not found."""
24
+ if filtername in FILTERS:
25
+ return FILTERS[filtername]
26
+ for name, cls in find_plugin_filters():
27
+ if name == filtername:
28
+ return cls
29
+ return None
30
+
31
+
32
+ def get_filter_by_name(filtername, **options):
33
+ """Return an instantiated filter.
34
+
35
+ Options are passed to the filter initializer if wanted.
36
+ Raise a ClassNotFound if not found.
37
+ """
38
+ cls = find_filter_class(filtername)
39
+ if cls:
40
+ return cls(**options)
41
+ else:
42
+ raise ClassNotFound(f'filter {filtername!r} not found')
43
+
44
+
45
+ def get_all_filters():
46
+ """Return a generator of all filter names."""
47
+ yield from FILTERS
48
+ for name, _ in find_plugin_filters():
49
+ yield name
50
+
51
+
52
+ def _replace_special(ttype, value, regex, specialttype,
53
+ replacefunc=lambda x: x):
54
+ last = 0
55
+ for match in regex.finditer(value):
56
+ start, end = match.start(), match.end()
57
+ if start != last:
58
+ yield ttype, value[last:start]
59
+ yield specialttype, replacefunc(value[start:end])
60
+ last = end
61
+ if last != len(value):
62
+ yield ttype, value[last:]
63
+
64
+
65
+ class CodeTagFilter(Filter):
66
+ """Highlight special code tags in comments and docstrings.
67
+
68
+ Options accepted:
69
+
70
+ `codetags` : list of strings
71
+ A list of strings that are flagged as code tags. The default is to
72
+ highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.
73
+
74
+ .. versionchanged:: 2.13
75
+ Now recognizes ``FIXME`` by default.
76
+ """
77
+
78
+ def __init__(self, **options):
79
+ Filter.__init__(self, **options)
80
+ tags = get_list_opt(options, 'codetags',
81
+ ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
82
+ self.tag_re = re.compile(r'\b({})\b'.format('|'.join([
83
+ re.escape(tag) for tag in tags if tag
84
+ ])))
85
+
86
+ def filter(self, lexer, stream):
87
+ regex = self.tag_re
88
+ for ttype, value in stream:
89
+ if ttype in String.Doc or \
90
+ ttype in Comment and \
91
+ ttype not in Comment.Preproc:
92
+ yield from _replace_special(ttype, value, regex, Comment.Special)
93
+ else:
94
+ yield ttype, value
95
+
96
+
97
+ class SymbolFilter(Filter):
98
+ """Convert mathematical symbols such as \\<longrightarrow> in Isabelle
99
+ or \\longrightarrow in LaTeX into Unicode characters.
100
+
101
+ This is mostly useful for HTML or console output when you want to
102
+ approximate the source rendering you'd see in an IDE.
103
+
104
+ Options accepted:
105
+
106
+ `lang` : string
107
+ The symbol language. Must be one of ``'isabelle'`` or
108
+ ``'latex'``. The default is ``'isabelle'``.
109
+ """
110
+
111
+ latex_symbols = {
112
+ '\\alpha' : '\U000003b1',
113
+ '\\beta' : '\U000003b2',
114
+ '\\gamma' : '\U000003b3',
115
+ '\\delta' : '\U000003b4',
116
+ '\\varepsilon' : '\U000003b5',
117
+ '\\zeta' : '\U000003b6',
118
+ '\\eta' : '\U000003b7',
119
+ '\\vartheta' : '\U000003b8',
120
+ '\\iota' : '\U000003b9',
121
+ '\\kappa' : '\U000003ba',
122
+ '\\lambda' : '\U000003bb',
123
+ '\\mu' : '\U000003bc',
124
+ '\\nu' : '\U000003bd',
125
+ '\\xi' : '\U000003be',
126
+ '\\pi' : '\U000003c0',
127
+ '\\varrho' : '\U000003c1',
128
+ '\\sigma' : '\U000003c3',
129
+ '\\tau' : '\U000003c4',
130
+ '\\upsilon' : '\U000003c5',
131
+ '\\varphi' : '\U000003c6',
132
+ '\\chi' : '\U000003c7',
133
+ '\\psi' : '\U000003c8',
134
+ '\\omega' : '\U000003c9',
135
+ '\\Gamma' : '\U00000393',
136
+ '\\Delta' : '\U00000394',
137
+ '\\Theta' : '\U00000398',
138
+ '\\Lambda' : '\U0000039b',
139
+ '\\Xi' : '\U0000039e',
140
+ '\\Pi' : '\U000003a0',
141
+ '\\Sigma' : '\U000003a3',
142
+ '\\Upsilon' : '\U000003a5',
143
+ '\\Phi' : '\U000003a6',
144
+ '\\Psi' : '\U000003a8',
145
+ '\\Omega' : '\U000003a9',
146
+ '\\leftarrow' : '\U00002190',
147
+ '\\longleftarrow' : '\U000027f5',
148
+ '\\rightarrow' : '\U00002192',
149
+ '\\longrightarrow' : '\U000027f6',
150
+ '\\Leftarrow' : '\U000021d0',
151
+ '\\Longleftarrow' : '\U000027f8',
152
+ '\\Rightarrow' : '\U000021d2',
153
+ '\\Longrightarrow' : '\U000027f9',
154
+ '\\leftrightarrow' : '\U00002194',
155
+ '\\longleftrightarrow' : '\U000027f7',
156
+ '\\Leftrightarrow' : '\U000021d4',
157
+ '\\Longleftrightarrow' : '\U000027fa',
158
+ '\\mapsto' : '\U000021a6',
159
+ '\\longmapsto' : '\U000027fc',
160
+ '\\relbar' : '\U00002500',
161
+ '\\Relbar' : '\U00002550',
162
+ '\\hookleftarrow' : '\U000021a9',
163
+ '\\hookrightarrow' : '\U000021aa',
164
+ '\\leftharpoondown' : '\U000021bd',
165
+ '\\rightharpoondown' : '\U000021c1',
166
+ '\\leftharpoonup' : '\U000021bc',
167
+ '\\rightharpoonup' : '\U000021c0',
168
+ '\\rightleftharpoons' : '\U000021cc',
169
+ '\\leadsto' : '\U0000219d',
170
+ '\\downharpoonleft' : '\U000021c3',
171
+ '\\downharpoonright' : '\U000021c2',
172
+ '\\upharpoonleft' : '\U000021bf',
173
+ '\\upharpoonright' : '\U000021be',
174
+ '\\restriction' : '\U000021be',
175
+ '\\uparrow' : '\U00002191',
176
+ '\\Uparrow' : '\U000021d1',
177
+ '\\downarrow' : '\U00002193',
178
+ '\\Downarrow' : '\U000021d3',
179
+ '\\updownarrow' : '\U00002195',
180
+ '\\Updownarrow' : '\U000021d5',
181
+ '\\langle' : '\U000027e8',
182
+ '\\rangle' : '\U000027e9',
183
+ '\\lceil' : '\U00002308',
184
+ '\\rceil' : '\U00002309',
185
+ '\\lfloor' : '\U0000230a',
186
+ '\\rfloor' : '\U0000230b',
187
+ '\\flqq' : '\U000000ab',
188
+ '\\frqq' : '\U000000bb',
189
+ '\\bot' : '\U000022a5',
190
+ '\\top' : '\U000022a4',
191
+ '\\wedge' : '\U00002227',
192
+ '\\bigwedge' : '\U000022c0',
193
+ '\\vee' : '\U00002228',
194
+ '\\bigvee' : '\U000022c1',
195
+ '\\forall' : '\U00002200',
196
+ '\\exists' : '\U00002203',
197
+ '\\nexists' : '\U00002204',
198
+ '\\neg' : '\U000000ac',
199
+ '\\Box' : '\U000025a1',
200
+ '\\Diamond' : '\U000025c7',
201
+ '\\vdash' : '\U000022a2',
202
+ '\\models' : '\U000022a8',
203
+ '\\dashv' : '\U000022a3',
204
+ '\\surd' : '\U0000221a',
205
+ '\\le' : '\U00002264',
206
+ '\\ge' : '\U00002265',
207
+ '\\ll' : '\U0000226a',
208
+ '\\gg' : '\U0000226b',
209
+ '\\lesssim' : '\U00002272',
210
+ '\\gtrsim' : '\U00002273',
211
+ '\\lessapprox' : '\U00002a85',
212
+ '\\gtrapprox' : '\U00002a86',
213
+ '\\in' : '\U00002208',
214
+ '\\notin' : '\U00002209',
215
+ '\\subset' : '\U00002282',
216
+ '\\supset' : '\U00002283',
217
+ '\\subseteq' : '\U00002286',
218
+ '\\supseteq' : '\U00002287',
219
+ '\\sqsubset' : '\U0000228f',
220
+ '\\sqsupset' : '\U00002290',
221
+ '\\sqsubseteq' : '\U00002291',
222
+ '\\sqsupseteq' : '\U00002292',
223
+ '\\cap' : '\U00002229',
224
+ '\\bigcap' : '\U000022c2',
225
+ '\\cup' : '\U0000222a',
226
+ '\\bigcup' : '\U000022c3',
227
+ '\\sqcup' : '\U00002294',
228
+ '\\bigsqcup' : '\U00002a06',
229
+ '\\sqcap' : '\U00002293',
230
+ '\\Bigsqcap' : '\U00002a05',
231
+ '\\setminus' : '\U00002216',
232
+ '\\propto' : '\U0000221d',
233
+ '\\uplus' : '\U0000228e',
234
+ '\\bigplus' : '\U00002a04',
235
+ '\\sim' : '\U0000223c',
236
+ '\\doteq' : '\U00002250',
237
+ '\\simeq' : '\U00002243',
238
+ '\\approx' : '\U00002248',
239
+ '\\asymp' : '\U0000224d',
240
+ '\\cong' : '\U00002245',
241
+ '\\equiv' : '\U00002261',
242
+ '\\Join' : '\U000022c8',
243
+ '\\bowtie' : '\U00002a1d',
244
+ '\\prec' : '\U0000227a',
245
+ '\\succ' : '\U0000227b',
246
+ '\\preceq' : '\U0000227c',
247
+ '\\succeq' : '\U0000227d',
248
+ '\\parallel' : '\U00002225',
249
+ '\\mid' : '\U000000a6',
250
+ '\\pm' : '\U000000b1',
251
+ '\\mp' : '\U00002213',
252
+ '\\times' : '\U000000d7',
253
+ '\\div' : '\U000000f7',
254
+ '\\cdot' : '\U000022c5',
255
+ '\\star' : '\U000022c6',
256
+ '\\circ' : '\U00002218',
257
+ '\\dagger' : '\U00002020',
258
+ '\\ddagger' : '\U00002021',
259
+ '\\lhd' : '\U000022b2',
260
+ '\\rhd' : '\U000022b3',
261
+ '\\unlhd' : '\U000022b4',
262
+ '\\unrhd' : '\U000022b5',
263
+ '\\triangleleft' : '\U000025c3',
264
+ '\\triangleright' : '\U000025b9',
265
+ '\\triangle' : '\U000025b3',
266
+ '\\triangleq' : '\U0000225c',
267
+ '\\oplus' : '\U00002295',
268
+ '\\bigoplus' : '\U00002a01',
269
+ '\\otimes' : '\U00002297',
270
+ '\\bigotimes' : '\U00002a02',
271
+ '\\odot' : '\U00002299',
272
+ '\\bigodot' : '\U00002a00',
273
+ '\\ominus' : '\U00002296',
274
+ '\\oslash' : '\U00002298',
275
+ '\\dots' : '\U00002026',
276
+ '\\cdots' : '\U000022ef',
277
+ '\\sum' : '\U00002211',
278
+ '\\prod' : '\U0000220f',
279
+ '\\coprod' : '\U00002210',
280
+ '\\infty' : '\U0000221e',
281
+ '\\int' : '\U0000222b',
282
+ '\\oint' : '\U0000222e',
283
+ '\\clubsuit' : '\U00002663',
284
+ '\\diamondsuit' : '\U00002662',
285
+ '\\heartsuit' : '\U00002661',
286
+ '\\spadesuit' : '\U00002660',
287
+ '\\aleph' : '\U00002135',
288
+ '\\emptyset' : '\U00002205',
289
+ '\\nabla' : '\U00002207',
290
+ '\\partial' : '\U00002202',
291
+ '\\flat' : '\U0000266d',
292
+ '\\natural' : '\U0000266e',
293
+ '\\sharp' : '\U0000266f',
294
+ '\\angle' : '\U00002220',
295
+ '\\copyright' : '\U000000a9',
296
+ '\\textregistered' : '\U000000ae',
297
+ '\\textonequarter' : '\U000000bc',
298
+ '\\textonehalf' : '\U000000bd',
299
+ '\\textthreequarters' : '\U000000be',
300
+ '\\textordfeminine' : '\U000000aa',
301
+ '\\textordmasculine' : '\U000000ba',
302
+ '\\euro' : '\U000020ac',
303
+ '\\pounds' : '\U000000a3',
304
+ '\\yen' : '\U000000a5',
305
+ '\\textcent' : '\U000000a2',
306
+ '\\textcurrency' : '\U000000a4',
307
+ '\\textdegree' : '\U000000b0',
308
+ }
309
+
310
+ isabelle_symbols = {
311
+ '\\<zero>' : '\U0001d7ec',
312
+ '\\<one>' : '\U0001d7ed',
313
+ '\\<two>' : '\U0001d7ee',
314
+ '\\<three>' : '\U0001d7ef',
315
+ '\\<four>' : '\U0001d7f0',
316
+ '\\<five>' : '\U0001d7f1',
317
+ '\\<six>' : '\U0001d7f2',
318
+ '\\<seven>' : '\U0001d7f3',
319
+ '\\<eight>' : '\U0001d7f4',
320
+ '\\<nine>' : '\U0001d7f5',
321
+ '\\<A>' : '\U0001d49c',
322
+ '\\<B>' : '\U0000212c',
323
+ '\\<C>' : '\U0001d49e',
324
+ '\\<D>' : '\U0001d49f',
325
+ '\\<E>' : '\U00002130',
326
+ '\\<F>' : '\U00002131',
327
+ '\\<G>' : '\U0001d4a2',
328
+ '\\<H>' : '\U0000210b',
329
+ '\\<I>' : '\U00002110',
330
+ '\\<J>' : '\U0001d4a5',
331
+ '\\<K>' : '\U0001d4a6',
332
+ '\\<L>' : '\U00002112',
333
+ '\\<M>' : '\U00002133',
334
+ '\\<N>' : '\U0001d4a9',
335
+ '\\<O>' : '\U0001d4aa',
336
+ '\\<P>' : '\U0001d4ab',
337
+ '\\<Q>' : '\U0001d4ac',
338
+ '\\<R>' : '\U0000211b',
339
+ '\\<S>' : '\U0001d4ae',
340
+ '\\<T>' : '\U0001d4af',
341
+ '\\<U>' : '\U0001d4b0',
342
+ '\\<V>' : '\U0001d4b1',
343
+ '\\<W>' : '\U0001d4b2',
344
+ '\\<X>' : '\U0001d4b3',
345
+ '\\<Y>' : '\U0001d4b4',
346
+ '\\<Z>' : '\U0001d4b5',
347
+ '\\<a>' : '\U0001d5ba',
348
+ '\\<b>' : '\U0001d5bb',
349
+ '\\<c>' : '\U0001d5bc',
350
+ '\\<d>' : '\U0001d5bd',
351
+ '\\<e>' : '\U0001d5be',
352
+ '\\<f>' : '\U0001d5bf',
353
+ '\\<g>' : '\U0001d5c0',
354
+ '\\<h>' : '\U0001d5c1',
355
+ '\\<i>' : '\U0001d5c2',
356
+ '\\<j>' : '\U0001d5c3',
357
+ '\\<k>' : '\U0001d5c4',
358
+ '\\<l>' : '\U0001d5c5',
359
+ '\\<m>' : '\U0001d5c6',
360
+ '\\<n>' : '\U0001d5c7',
361
+ '\\<o>' : '\U0001d5c8',
362
+ '\\<p>' : '\U0001d5c9',
363
+ '\\<q>' : '\U0001d5ca',
364
+ '\\<r>' : '\U0001d5cb',
365
+ '\\<s>' : '\U0001d5cc',
366
+ '\\<t>' : '\U0001d5cd',
367
+ '\\<u>' : '\U0001d5ce',
368
+ '\\<v>' : '\U0001d5cf',
369
+ '\\<w>' : '\U0001d5d0',
370
+ '\\<x>' : '\U0001d5d1',
371
+ '\\<y>' : '\U0001d5d2',
372
+ '\\<z>' : '\U0001d5d3',
373
+ '\\<AA>' : '\U0001d504',
374
+ '\\<BB>' : '\U0001d505',
375
+ '\\<CC>' : '\U0000212d',
376
+ '\\<DD>' : '\U0001d507',
377
+ '\\<EE>' : '\U0001d508',
378
+ '\\<FF>' : '\U0001d509',
379
+ '\\<GG>' : '\U0001d50a',
380
+ '\\<HH>' : '\U0000210c',
381
+ '\\<II>' : '\U00002111',
382
+ '\\<JJ>' : '\U0001d50d',
383
+ '\\<KK>' : '\U0001d50e',
384
+ '\\<LL>' : '\U0001d50f',
385
+ '\\<MM>' : '\U0001d510',
386
+ '\\<NN>' : '\U0001d511',
387
+ '\\<OO>' : '\U0001d512',
388
+ '\\<PP>' : '\U0001d513',
389
+ '\\<QQ>' : '\U0001d514',
390
+ '\\<RR>' : '\U0000211c',
391
+ '\\<SS>' : '\U0001d516',
392
+ '\\<TT>' : '\U0001d517',
393
+ '\\<UU>' : '\U0001d518',
394
+ '\\<VV>' : '\U0001d519',
395
+ '\\<WW>' : '\U0001d51a',
396
+ '\\<XX>' : '\U0001d51b',
397
+ '\\<YY>' : '\U0001d51c',
398
+ '\\<ZZ>' : '\U00002128',
399
+ '\\<aa>' : '\U0001d51e',
400
+ '\\<bb>' : '\U0001d51f',
401
+ '\\<cc>' : '\U0001d520',
402
+ '\\<dd>' : '\U0001d521',
403
+ '\\<ee>' : '\U0001d522',
404
+ '\\<ff>' : '\U0001d523',
405
+ '\\<gg>' : '\U0001d524',
406
+ '\\<hh>' : '\U0001d525',
407
+ '\\<ii>' : '\U0001d526',
408
+ '\\<jj>' : '\U0001d527',
409
+ '\\<kk>' : '\U0001d528',
410
+ '\\<ll>' : '\U0001d529',
411
+ '\\<mm>' : '\U0001d52a',
412
+ '\\<nn>' : '\U0001d52b',
413
+ '\\<oo>' : '\U0001d52c',
414
+ '\\<pp>' : '\U0001d52d',
415
+ '\\<qq>' : '\U0001d52e',
416
+ '\\<rr>' : '\U0001d52f',
417
+ '\\<ss>' : '\U0001d530',
418
+ '\\<tt>' : '\U0001d531',
419
+ '\\<uu>' : '\U0001d532',
420
+ '\\<vv>' : '\U0001d533',
421
+ '\\<ww>' : '\U0001d534',
422
+ '\\<xx>' : '\U0001d535',
423
+ '\\<yy>' : '\U0001d536',
424
+ '\\<zz>' : '\U0001d537',
425
+ '\\<alpha>' : '\U000003b1',
426
+ '\\<beta>' : '\U000003b2',
427
+ '\\<gamma>' : '\U000003b3',
428
+ '\\<delta>' : '\U000003b4',
429
+ '\\<epsilon>' : '\U000003b5',
430
+ '\\<zeta>' : '\U000003b6',
431
+ '\\<eta>' : '\U000003b7',
432
+ '\\<theta>' : '\U000003b8',
433
+ '\\<iota>' : '\U000003b9',
434
+ '\\<kappa>' : '\U000003ba',
435
+ '\\<lambda>' : '\U000003bb',
436
+ '\\<mu>' : '\U000003bc',
437
+ '\\<nu>' : '\U000003bd',
438
+ '\\<xi>' : '\U000003be',
439
+ '\\<pi>' : '\U000003c0',
440
+ '\\<rho>' : '\U000003c1',
441
+ '\\<sigma>' : '\U000003c3',
442
+ '\\<tau>' : '\U000003c4',
443
+ '\\<upsilon>' : '\U000003c5',
444
+ '\\<phi>' : '\U000003c6',
445
+ '\\<chi>' : '\U000003c7',
446
+ '\\<psi>' : '\U000003c8',
447
+ '\\<omega>' : '\U000003c9',
448
+ '\\<Gamma>' : '\U00000393',
449
+ '\\<Delta>' : '\U00000394',
450
+ '\\<Theta>' : '\U00000398',
451
+ '\\<Lambda>' : '\U0000039b',
452
+ '\\<Xi>' : '\U0000039e',
453
+ '\\<Pi>' : '\U000003a0',
454
+ '\\<Sigma>' : '\U000003a3',
455
+ '\\<Upsilon>' : '\U000003a5',
456
+ '\\<Phi>' : '\U000003a6',
457
+ '\\<Psi>' : '\U000003a8',
458
+ '\\<Omega>' : '\U000003a9',
459
+ '\\<bool>' : '\U0001d539',
460
+ '\\<complex>' : '\U00002102',
461
+ '\\<nat>' : '\U00002115',
462
+ '\\<rat>' : '\U0000211a',
463
+ '\\<real>' : '\U0000211d',
464
+ '\\<int>' : '\U00002124',
465
+ '\\<leftarrow>' : '\U00002190',
466
+ '\\<longleftarrow>' : '\U000027f5',
467
+ '\\<rightarrow>' : '\U00002192',
468
+ '\\<longrightarrow>' : '\U000027f6',
469
+ '\\<Leftarrow>' : '\U000021d0',
470
+ '\\<Longleftarrow>' : '\U000027f8',
471
+ '\\<Rightarrow>' : '\U000021d2',
472
+ '\\<Longrightarrow>' : '\U000027f9',
473
+ '\\<leftrightarrow>' : '\U00002194',
474
+ '\\<longleftrightarrow>' : '\U000027f7',
475
+ '\\<Leftrightarrow>' : '\U000021d4',
476
+ '\\<Longleftrightarrow>' : '\U000027fa',
477
+ '\\<mapsto>' : '\U000021a6',
478
+ '\\<longmapsto>' : '\U000027fc',
479
+ '\\<midarrow>' : '\U00002500',
480
+ '\\<Midarrow>' : '\U00002550',
481
+ '\\<hookleftarrow>' : '\U000021a9',
482
+ '\\<hookrightarrow>' : '\U000021aa',
483
+ '\\<leftharpoondown>' : '\U000021bd',
484
+ '\\<rightharpoondown>' : '\U000021c1',
485
+ '\\<leftharpoonup>' : '\U000021bc',
486
+ '\\<rightharpoonup>' : '\U000021c0',
487
+ '\\<rightleftharpoons>' : '\U000021cc',
488
+ '\\<leadsto>' : '\U0000219d',
489
+ '\\<downharpoonleft>' : '\U000021c3',
490
+ '\\<downharpoonright>' : '\U000021c2',
491
+ '\\<upharpoonleft>' : '\U000021bf',
492
+ '\\<upharpoonright>' : '\U000021be',
493
+ '\\<restriction>' : '\U000021be',
494
+ '\\<Colon>' : '\U00002237',
495
+ '\\<up>' : '\U00002191',
496
+ '\\<Up>' : '\U000021d1',
497
+ '\\<down>' : '\U00002193',
498
+ '\\<Down>' : '\U000021d3',
499
+ '\\<updown>' : '\U00002195',
500
+ '\\<Updown>' : '\U000021d5',
501
+ '\\<langle>' : '\U000027e8',
502
+ '\\<rangle>' : '\U000027e9',
503
+ '\\<lceil>' : '\U00002308',
504
+ '\\<rceil>' : '\U00002309',
505
+ '\\<lfloor>' : '\U0000230a',
506
+ '\\<rfloor>' : '\U0000230b',
507
+ '\\<lparr>' : '\U00002987',
508
+ '\\<rparr>' : '\U00002988',
509
+ '\\<lbrakk>' : '\U000027e6',
510
+ '\\<rbrakk>' : '\U000027e7',
511
+ '\\<lbrace>' : '\U00002983',
512
+ '\\<rbrace>' : '\U00002984',
513
+ '\\<guillemotleft>' : '\U000000ab',
514
+ '\\<guillemotright>' : '\U000000bb',
515
+ '\\<bottom>' : '\U000022a5',
516
+ '\\<top>' : '\U000022a4',
517
+ '\\<and>' : '\U00002227',
518
+ '\\<And>' : '\U000022c0',
519
+ '\\<or>' : '\U00002228',
520
+ '\\<Or>' : '\U000022c1',
521
+ '\\<forall>' : '\U00002200',
522
+ '\\<exists>' : '\U00002203',
523
+ '\\<nexists>' : '\U00002204',
524
+ '\\<not>' : '\U000000ac',
525
+ '\\<box>' : '\U000025a1',
526
+ '\\<diamond>' : '\U000025c7',
527
+ '\\<turnstile>' : '\U000022a2',
528
+ '\\<Turnstile>' : '\U000022a8',
529
+ '\\<tturnstile>' : '\U000022a9',
530
+ '\\<TTurnstile>' : '\U000022ab',
531
+ '\\<stileturn>' : '\U000022a3',
532
+ '\\<surd>' : '\U0000221a',
533
+ '\\<le>' : '\U00002264',
534
+ '\\<ge>' : '\U00002265',
535
+ '\\<lless>' : '\U0000226a',
536
+ '\\<ggreater>' : '\U0000226b',
537
+ '\\<lesssim>' : '\U00002272',
538
+ '\\<greatersim>' : '\U00002273',
539
+ '\\<lessapprox>' : '\U00002a85',
540
+ '\\<greaterapprox>' : '\U00002a86',
541
+ '\\<in>' : '\U00002208',
542
+ '\\<notin>' : '\U00002209',
543
+ '\\<subset>' : '\U00002282',
544
+ '\\<supset>' : '\U00002283',
545
+ '\\<subseteq>' : '\U00002286',
546
+ '\\<supseteq>' : '\U00002287',
547
+ '\\<sqsubset>' : '\U0000228f',
548
+ '\\<sqsupset>' : '\U00002290',
549
+ '\\<sqsubseteq>' : '\U00002291',
550
+ '\\<sqsupseteq>' : '\U00002292',
551
+ '\\<inter>' : '\U00002229',
552
+ '\\<Inter>' : '\U000022c2',
553
+ '\\<union>' : '\U0000222a',
554
+ '\\<Union>' : '\U000022c3',
555
+ '\\<squnion>' : '\U00002294',
556
+ '\\<Squnion>' : '\U00002a06',
557
+ '\\<sqinter>' : '\U00002293',
558
+ '\\<Sqinter>' : '\U00002a05',
559
+ '\\<setminus>' : '\U00002216',
560
+ '\\<propto>' : '\U0000221d',
561
+ '\\<uplus>' : '\U0000228e',
562
+ '\\<Uplus>' : '\U00002a04',
563
+ '\\<noteq>' : '\U00002260',
564
+ '\\<sim>' : '\U0000223c',
565
+ '\\<doteq>' : '\U00002250',
566
+ '\\<simeq>' : '\U00002243',
567
+ '\\<approx>' : '\U00002248',
568
+ '\\<asymp>' : '\U0000224d',
569
+ '\\<cong>' : '\U00002245',
570
+ '\\<smile>' : '\U00002323',
571
+ '\\<equiv>' : '\U00002261',
572
+ '\\<frown>' : '\U00002322',
573
+ '\\<Join>' : '\U000022c8',
574
+ '\\<bowtie>' : '\U00002a1d',
575
+ '\\<prec>' : '\U0000227a',
576
+ '\\<succ>' : '\U0000227b',
577
+ '\\<preceq>' : '\U0000227c',
578
+ '\\<succeq>' : '\U0000227d',
579
+ '\\<parallel>' : '\U00002225',
580
+ '\\<bar>' : '\U000000a6',
581
+ '\\<plusminus>' : '\U000000b1',
582
+ '\\<minusplus>' : '\U00002213',
583
+ '\\<times>' : '\U000000d7',
584
+ '\\<div>' : '\U000000f7',
585
+ '\\<cdot>' : '\U000022c5',
586
+ '\\<star>' : '\U000022c6',
587
+ '\\<bullet>' : '\U00002219',
588
+ '\\<circ>' : '\U00002218',
589
+ '\\<dagger>' : '\U00002020',
590
+ '\\<ddagger>' : '\U00002021',
591
+ '\\<lhd>' : '\U000022b2',
592
+ '\\<rhd>' : '\U000022b3',
593
+ '\\<unlhd>' : '\U000022b4',
594
+ '\\<unrhd>' : '\U000022b5',
595
+ '\\<triangleleft>' : '\U000025c3',
596
+ '\\<triangleright>' : '\U000025b9',
597
+ '\\<triangle>' : '\U000025b3',
598
+ '\\<triangleq>' : '\U0000225c',
599
+ '\\<oplus>' : '\U00002295',
600
+ '\\<Oplus>' : '\U00002a01',
601
+ '\\<otimes>' : '\U00002297',
602
+ '\\<Otimes>' : '\U00002a02',
603
+ '\\<odot>' : '\U00002299',
604
+ '\\<Odot>' : '\U00002a00',
605
+ '\\<ominus>' : '\U00002296',
606
+ '\\<oslash>' : '\U00002298',
607
+ '\\<dots>' : '\U00002026',
608
+ '\\<cdots>' : '\U000022ef',
609
+ '\\<Sum>' : '\U00002211',
610
+ '\\<Prod>' : '\U0000220f',
611
+ '\\<Coprod>' : '\U00002210',
612
+ '\\<infinity>' : '\U0000221e',
613
+ '\\<integral>' : '\U0000222b',
614
+ '\\<ointegral>' : '\U0000222e',
615
+ '\\<clubsuit>' : '\U00002663',
616
+ '\\<diamondsuit>' : '\U00002662',
617
+ '\\<heartsuit>' : '\U00002661',
618
+ '\\<spadesuit>' : '\U00002660',
619
+ '\\<aleph>' : '\U00002135',
620
+ '\\<emptyset>' : '\U00002205',
621
+ '\\<nabla>' : '\U00002207',
622
+ '\\<partial>' : '\U00002202',
623
+ '\\<flat>' : '\U0000266d',
624
+ '\\<natural>' : '\U0000266e',
625
+ '\\<sharp>' : '\U0000266f',
626
+ '\\<angle>' : '\U00002220',
627
+ '\\<copyright>' : '\U000000a9',
628
+ '\\<registered>' : '\U000000ae',
629
+ '\\<hyphen>' : '\U000000ad',
630
+ '\\<inverse>' : '\U000000af',
631
+ '\\<onequarter>' : '\U000000bc',
632
+ '\\<onehalf>' : '\U000000bd',
633
+ '\\<threequarters>' : '\U000000be',
634
+ '\\<ordfeminine>' : '\U000000aa',
635
+ '\\<ordmasculine>' : '\U000000ba',
636
+ '\\<section>' : '\U000000a7',
637
+ '\\<paragraph>' : '\U000000b6',
638
+ '\\<exclamdown>' : '\U000000a1',
639
+ '\\<questiondown>' : '\U000000bf',
640
+ '\\<euro>' : '\U000020ac',
641
+ '\\<pounds>' : '\U000000a3',
642
+ '\\<yen>' : '\U000000a5',
643
+ '\\<cent>' : '\U000000a2',
644
+ '\\<currency>' : '\U000000a4',
645
+ '\\<degree>' : '\U000000b0',
646
+ '\\<amalg>' : '\U00002a3f',
647
+ '\\<mho>' : '\U00002127',
648
+ '\\<lozenge>' : '\U000025ca',
649
+ '\\<wp>' : '\U00002118',
650
+ '\\<wrong>' : '\U00002240',
651
+ '\\<struct>' : '\U000022c4',
652
+ '\\<acute>' : '\U000000b4',
653
+ '\\<index>' : '\U00000131',
654
+ '\\<dieresis>' : '\U000000a8',
655
+ '\\<cedilla>' : '\U000000b8',
656
+ '\\<hungarumlaut>' : '\U000002dd',
657
+ '\\<some>' : '\U000003f5',
658
+ '\\<newline>' : '\U000023ce',
659
+ '\\<open>' : '\U00002039',
660
+ '\\<close>' : '\U0000203a',
661
+ '\\<here>' : '\U00002302',
662
+ '\\<^sub>' : '\U000021e9',
663
+ '\\<^sup>' : '\U000021e7',
664
+ '\\<^bold>' : '\U00002759',
665
+ '\\<^bsub>' : '\U000021d8',
666
+ '\\<^esub>' : '\U000021d9',
667
+ '\\<^bsup>' : '\U000021d7',
668
+ '\\<^esup>' : '\U000021d6',
669
+ }
670
+
671
+ lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}
672
+
673
def __init__(self, **options):
    """Initialize the filter and pick the symbol table for the `lang` option."""
    Filter.__init__(self, **options)
    chosen = get_choice_opt(options, 'lang',
                            ['isabelle', 'latex'], 'isabelle')
    # Bind the translation table for the selected language.
    self.symbols = self.lang_map[chosen]
678
+
679
def filter(self, lexer, stream):
    """Replace token values that name a known symbol with their glyph.

    Values not present in the symbol table pass through unchanged.
    """
    lookup = self.symbols.get
    for ttype, value in stream:
        yield ttype, lookup(value, value)
685
+
686
+
687
class KeywordCaseFilter(Filter):
    """Re-case keyword tokens: lowercase, uppercase, or capitalized
    (first letter upper, remainder lower).

    Useful e.g. when highlighting Pascal code whose keyword casing
    should match a styleguide.

    Options accepted:

    `case` : string
        The casing to convert keywords to. Must be one of ``'lower'``,
        ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        chosen = get_choice_opt(options, 'case',
                                ['lower', 'upper', 'capitalize'], 'lower')
        # Bind the matching str method (str.lower / str.upper / str.capitalize).
        self.convert = getattr(str, chosen)

    def filter(self, lexer, stream):
        # Only keyword tokens are re-cased; every other token is untouched.
        for ttype, value in stream:
            yield ttype, (self.convert(value) if ttype in Keyword else value)
713
+
714
+
715
class NameHighlightFilter(Filter):
    """Highlight a normal Name (and Name.*) token with a different token type.

    Example::

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    This would highlight the names "foo", "bar" and "baz"
    as functions. `Name.Function` is the default token type.

    Options accepted:

    `names` : list of strings
        A list of names that should be given the different token type.
        There is no default.
    `tokentype` : TokenType or string
        A token type or a string containing a token type name that is
        used for highlighting the strings in `names`. The default is
        `Name.Function`.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.names = set(get_list_opt(options, 'names', []))
        requested = options.get('tokentype')
        # A string such as 'Name.Function' is resolved to a real token type;
        # a missing or falsy option falls back to Name.Function.
        self.tokentype = string_to_tokentype(requested) if requested else Name.Function

    def filter(self, lexer, stream):
        for ttype, value in stream:
            matched = ttype in Name and value in self.names
            yield (self.tokentype if matched else ttype), value
754
+
755
+
756
class ErrorToken(Exception):
    # Default exception class raised by RaiseOnErrorTokenFilter (below)
    # when the lexer emits an Error token.
    pass
758
+
759
+
760
class RaiseOnErrorTokenFilter(Filter):
    """Raise an exception when the lexer generates an error token.

    Options accepted:

    `excclass` : Exception class
        The exception class to raise.
        The default is `pygments.filters.ErrorToken`.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        # Validate eagerly.  issubclass() raises TypeError for non-class
        # arguments; treat that exactly like a class that is not an Exception.
        try:
            valid = issubclass(self.exception, Exception)
        except TypeError:
            valid = False
        if not valid:
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Error:
                raise self.exception(value)
            yield ttype, value
787
+
788
+
789
class VisibleWhitespaceFilter(Filter):
    """Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    `spaces` : string or bool
        If this is a one-character string, spaces will be replaces by this string.
        If it is another true value, spaces will be replaced by ``·`` (unicode
        MIDDLE DOT). If it is a false value, spaces will not be replaced. The
        default is ``False``.
    `tabs` : string or bool
        The same as for `spaces`, but the default replacement character is ``»``
        (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
        is ``False``. Note: this will not work if the `tabsize` option for the
        lexer is nonzero, as tabs will already have been expanded then.
    `tabsize` : int
        If tabs are to be replaced by this filter (see the `tabs` option), this
        is the total number of characters that a tab should be expanded to.
        The default is ``8``.
    `newlines` : string or bool
        The same as for `spaces`, but the default replacement character is ``¶``
        (unicode PILCROW SIGN). The default value is ``False``.
    `wstokentype` : bool
        If true, give whitespace the special `Whitespace` token type. This allows
        styling the visible whitespace differently (e.g. greyed out), but it can
        disrupt background colors. The default is ``True``.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        # For each kind of whitespace: a single-character string option is used
        # verbatim; any other truthy value selects the default glyph; a falsy
        # value disables replacement (stored as '').
        for name, default in [('spaces', '·'),
                              ('tabs', '»'),
                              ('newlines', '¶')]:
            opt = options.get(name, False)
            if isinstance(opt, str) and len(opt) == 1:
                setattr(self, name, opt)
            else:
                setattr(self, name, (opt and default or ''))
        tabsize = get_int_opt(options, 'tabsize', 8)
        # Pad the tab glyph so a replaced tab still occupies `tabsize` columns.
        if self.tabs:
            self.tabs += ' ' * (tabsize - 1)
        # Keep the real '\n' after the glyph so line structure is preserved.
        if self.newlines:
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            # Retokenize whitespace as Whitespace tokens.  When a replacement
            # is disabled, the original character is still used so the token
            # split happens without changing the text.
            spaces = self.spaces or ' '
            tabs = self.tabs or '\t'
            newlines = self.newlines or '\n'
            regex = re.compile(r'\s')

            def replacefunc(wschar):
                if wschar == ' ':
                    return spaces
                elif wschar == '\t':
                    return tabs
                elif wschar == '\n':
                    return newlines
                return wschar

            # NOTE(review): _replace_special is defined earlier in this module
            # (outside this view); presumably it splits `value` on `regex` and
            # emits matched parts as `Whitespace` tokens — confirm there.
            for ttype, value in stream:
                yield from _replace_special(ttype, value, regex, Whitespace,
                                            replacefunc)
        else:
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            # simpler processing
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value
866
+
867
+
868
class GobbleFilter(Filter):
    """Gobbles source code lines (eats initial characters).

    This filter drops the first ``n`` characters off every line of code. This
    may be useful when the source code fed to the lexer is indented by a fixed
    amount of space that isn't desired in the output.

    Options accepted:

    `n` : int
        The number of characters to gobble.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        # Consume up to `left` characters from `value`; return the remaining
        # text and how many characters are still owed.
        if len(value) > left:
            return value[left:], 0
        return '', left - len(value)

    def filter(self, lexer, stream):
        n = self.n
        left = n  # characters still to gobble on the current physical line
        for ttype, value in stream:
            # The first part continues the current line, so it only owes
            # `left`; each part after a '\n' starts a new line and owes `n`.
            parts = value.split('\n')
            parts[0], left = self.gobble(parts[0], left)
            for idx in range(1, len(parts)):
                parts[idx], left = self.gobble(parts[idx], n)
            joined = '\n'.join(parts)
            if joined:
                yield ttype, joined
905
+
906
+
907
class TokenMergeFilter(Filter):
    """Merges consecutive tokens with the same token type in the output
    stream of a lexer.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # Buffer a run of identically-typed tokens and flush it whenever the
        # type changes (and once more at the end of the stream).
        pending_type = None
        pending_value = None
        for ttype, value in stream:
            if ttype is pending_type:
                pending_value += value
                continue
            if pending_type is not None:
                yield pending_type, pending_value
            pending_type, pending_value = ttype, value
        if pending_type is not None:
            yield pending_type, pending_value
929
+
930
+
931
# Registry of the filters provided by this module, keyed by their public
# filter name.
FILTERS = {
    'codetagify': CodeTagFilter,
    'keywordcase': KeywordCaseFilter,
    'highlight': NameHighlightFilter,
    'raiseonerror': RaiseOnErrorTokenFilter,
    'whitespace': VisibleWhitespaceFilter,
    'gobble': GobbleFilter,
    'tokenmerge': TokenMergeFilter,
    'symbols': SymbolFilter,
}
mgm/lib/python3.10/site-packages/pygments/filters/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (29.5 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatter.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatter
3
+ ~~~~~~~~~~~~~~~~~~
4
+
5
+ Base formatter class.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import codecs
12
+
13
+ from pygments.util import get_bool_opt
14
+ from pygments.styles import get_style_by_name
15
+
16
+ __all__ = ['Formatter']
17
+
18
+
19
+ def _lookup_style(style):
20
+ if isinstance(style, str):
21
+ return get_style_by_name(style)
22
+ return style
23
+
24
+
25
class Formatter:
    """
    Converts a token stream to text.

    Formatters should have attributes to help selecting them. These
    are similar to the corresponding :class:`~pygments.lexer.Lexer`
    attributes.

    .. autoattribute:: name
       :no-value:

    .. autoattribute:: aliases
       :no-value:

    .. autoattribute:: filenames
       :no-value:

    You can pass options as keyword arguments to the constructor.
    All formatters accept these basic options:

    ``style``
        The style to use, can be a string or a Style subclass
        (default: "default"). Not used by e.g. the
        TerminalFormatter.
    ``full``
        Tells the formatter to output a "full" document, i.e.
        a complete self-contained document. This doesn't have
        any effect for some formatters (default: false).
    ``title``
        If ``full`` is true, the title that should be used to
        caption the document (default: '').
    ``encoding``
        If given, must be an encoding name. This will be used to
        convert the Unicode token strings to byte strings in the
        output. If it is "" or None, Unicode strings will be written
        to the output file, which most file-like objects do not
        support (default: None).
    ``outencoding``
        Overrides ``encoding`` if given.

    """

    #: Full name for the formatter, in human-readable form.
    name = None

    #: A list of short, unique identifiers that can be used to lookup
    #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
    aliases = []

    #: A list of fnmatch patterns that match filenames for which this
    #: formatter can produce output. The patterns in this list should be unique
    #: among all formatters.
    filenames = []

    #: If True, this formatter outputs Unicode strings when no encoding
    #: option is given.
    unicodeoutput = True

    def __init__(self, **options):
        """
        As with lexers, this constructor takes arbitrary optional arguments,
        and if you override it, you should first process your own options, then
        call the base class implementation.
        """
        self.style = _lookup_style(options.get('style', 'default'))
        self.full = get_bool_opt(options, 'full', False)
        self.title = options.get('title', '')
        # `or None` normalizes an empty string to None (no encoding).
        self.encoding = options.get('encoding', None) or None
        if self.encoding in ('guess', 'chardet'):
            # can happen for e.g. pygmentize -O encoding=guess
            self.encoding = 'utf-8'
        # outencoding, when given, takes precedence over encoding.
        self.encoding = options.get('outencoding') or self.encoding
        self.options = options

    def get_style_defs(self, arg=''):
        """
        This method must return statements or declarations suitable to define
        the current style for subsequent highlighted text (e.g. CSS classes
        in the `HTMLFormatter`).

        The optional argument `arg` can be used to modify the generation and
        is formatter dependent (it is standardized because it can be given on
        the command line).

        This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
        the `arg` is then given by the ``-a`` option.
        """
        return ''

    def format(self, tokensource, outfile):
        """
        This method must format the tokens from the `tokensource` iterable and
        write the formatted version to the file object `outfile`.

        Formatter options can control how exactly the tokens are converted.
        """
        if self.encoding:
            # wrap the outfile in a StreamWriter
            # (index 3 of the codecs.lookup() CodecInfo tuple is the
            # streamwriter class for the codec)
            outfile = codecs.lookup(self.encoding)[3](outfile)
        return self.format_unencoded(tokensource, outfile)

    # Allow writing Formatter[str] or Formatter[bytes]. That's equivalent to
    # Formatter. This helps when using third-party type stubs from typeshed.
    def __class_getitem__(cls, name):
        return cls
mgm/lib/python3.10/site-packages/pygments/formatters/__init__.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters
3
+ ~~~~~~~~~~~~~~~~~~~
4
+
5
+ Pygments formatters.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+ import sys
13
+ import types
14
+ import fnmatch
15
+ from os.path import basename
16
+
17
+ from pygments.formatters._mapping import FORMATTERS
18
+ from pygments.plugin import find_plugin_formatters
19
+ from pygments.util import ClassNotFound
20
+
21
+ __all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
22
+ 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
23
+
24
# Cache of formatter classes that have already been imported, keyed by
# the formatter's human-readable name.
_formatter_cache = {} # classes by name
# Cache of compiled fnmatch regexes, keyed by the original glob pattern.
_pattern_cache = {}
26
+
27
+
28
def _fn_matches(fn, glob):
    """Return whether the supplied file name fn matches pattern filename."""
    try:
        pattern = _pattern_cache[glob]
    except KeyError:
        # First time we see this glob: translate and cache the regex.
        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
    return pattern.match(fn)
34
+
35
+
36
def _load_formatters(module_name):
    """Load a formatter (and all others in the module too)."""
    module = __import__(module_name, None, None, ['__all__'])
    # Register every formatter class the module exports under its name.
    for exported in module.__all__:
        formatter_cls = getattr(module, exported)
        _formatter_cache[formatter_cls.name] = formatter_cls
42
+
43
+
44
def get_all_formatters():
    """Return a generator for all formatter classes."""
    # NB: this returns formatter classes, not info like get_all_lexers().
    for module_name, class_name, *_ in FORMATTERS.values():
        if class_name not in _formatter_cache:
            _load_formatters(module_name)
        yield _formatter_cache[class_name]
    # Formatters registered through entry-point plugins come last.
    yield from (formatter for _, formatter in find_plugin_formatters())
53
+
54
+
55
def find_formatter_class(alias):
    """Lookup a formatter by alias.

    Returns None if not found.
    """
    # Builtin formatters are consulted first, then plugin formatters.
    for module_name, name, aliases, _, _ in FORMATTERS.values():
        if alias not in aliases:
            continue
        if name not in _formatter_cache:
            _load_formatters(module_name)
        return _formatter_cache[name]
    for _, cls in find_plugin_formatters():
        if alias in cls.aliases:
            return cls
    return None
68
+
69
+
70
def get_formatter_by_name(_alias, **options):
    """
    Return an instance of a :class:`.Formatter` subclass that has `alias` in its
    aliases list. The formatter is given the `options` at its instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
    alias is found.
    """
    formatter_cls = find_formatter_class(_alias)
    if formatter_cls is not None:
        return formatter_cls(**options)
    raise ClassNotFound(f"no formatter found for name {_alias!r}")
82
+
83
+
84
def load_formatter_from_file(filename, formattername="CustomFormatter", **options):
    """
    Return a `Formatter` subclass instance loaded from the provided file, relative
    to the current directory.

    The file is expected to contain a Formatter class named ``formattername``
    (by default, CustomFormatter). Users should be very careful with the input, because
    this method is equivalent to running ``eval()`` on the input file. The formatter is
    given the `options` at its instantiation.

    :exc:`pygments.util.ClassNotFound` is raised if there are any errors loading
    the formatter.

    .. versionadded:: 2.2
    """
    try:
        # This empty dict will contain the namespace for the exec'd file
        custom_namespace = {}
        with open(filename, 'rb') as f:
            exec(f.read(), custom_namespace)
        # Retrieve the class `formattername` from that namespace
        if formattername not in custom_namespace:
            # Include the offending path so the error is diagnosable
            # (the messages previously printed a literal "(unknown)").
            raise ClassNotFound(f'no valid {formattername} class found in {filename}')
        formatter_class = custom_namespace[formattername]
        # And finally instantiate it with the options
        return formatter_class(**options)
    except OSError as err:
        raise ClassNotFound(f'cannot read {filename}: {err}')
    except ClassNotFound:
        # Re-raise our own errors unchanged.
        raise
    except Exception as err:
        # Anything else (syntax error in the file, bad constructor, ...)
        # is folded into ClassNotFound per the documented contract.
        raise ClassNotFound(f'error when loading custom formatter: {err}')
116
+
117
+
118
def get_formatter_for_filename(fn, **options):
    """
    Return a :class:`.Formatter` subclass instance that has a filename pattern
    matching `fn`. The formatter is given the `options` at its instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
    is found.
    """
    fn = basename(fn)
    # Builtin formatters are matched first, then plugin-provided ones.
    for modname, name, _, filenames, _ in FORMATTERS.values():
        if any(_fn_matches(fn, pattern) for pattern in filenames):
            if name not in _formatter_cache:
                _load_formatters(modname)
            return _formatter_cache[name](**options)
    for _name, cls in find_plugin_formatters():
        if any(_fn_matches(fn, pattern) for pattern in cls.filenames):
            return cls(**options)
    raise ClassNotFound(f"no formatter found for file name {fn!r}")
138
+
139
+
140
class _automodule(types.ModuleType):
    """Automatically import formatters."""

    def __getattr__(self, name):
        # Only reached when `name` is not already a real module attribute.
        info = FORMATTERS.get(name)
        if info is None:
            raise AttributeError(name)
        _load_formatters(info[0])
        cls = _formatter_cache[info[1]]
        # Cache the class on the module so future lookups bypass __getattr__.
        setattr(self, name, cls)
        return cls
151
+
152
+
153
# Replace this module object in sys.modules with an _automodule instance so
# that attribute access lazily imports formatter classes on demand.
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
# Remove the bootstrap helpers from the public module namespace.
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.95 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/_mapping.cpython-310.pyc ADDED
Binary file (3.97 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/bbcode.cpython-310.pyc ADDED
Binary file (3.04 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/groff.cpython-310.pyc ADDED
Binary file (4.36 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/html.cpython-310.pyc ADDED
Binary file (29.5 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/img.cpython-310.pyc ADDED
Binary file (18.4 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/irc.cpython-310.pyc ADDED
Binary file (4.03 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/latex.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/other.cpython-310.pyc ADDED
Binary file (4.73 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/pangomarkup.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/rtf.cpython-310.pyc ADDED
Binary file (8.79 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/svg.cpython-310.pyc ADDED
Binary file (6.25 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/terminal.cpython-310.pyc ADDED
Binary file (3.92 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/__pycache__/terminal256.cpython-310.pyc ADDED
Binary file (9.18 kB). View file
 
mgm/lib/python3.10/site-packages/pygments/formatters/_mapping.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Automatically generated by scripts/gen_mapfiles.py.
# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
# Each value is (module name, formatter name, aliases, filename globs, docstring).

FORMATTERS = {
    'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
    'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
    'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option). The ``<div>``'s CSS class can be set by the `cssclass` option."),
    'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
    'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
    'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
    'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
    'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
    'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
    'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
    'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
    'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
    'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
    'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
}
mgm/lib/python3.10/site-packages/pygments/formatters/bbcode.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.bbcode
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ BBcode formatter.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+
12
+ from pygments.formatter import Formatter
13
+ from pygments.util import get_bool_opt
14
+
15
+ __all__ = ['BBCodeFormatter']
16
+
17
+
18
class BBCodeFormatter(Formatter):
    """
    Format tokens with BBcodes. These formatting codes are used by many
    bulletin boards, so you can highlight your sourcecode with pygments before
    posting it there.

    This formatter has no support for background colors and borders, as there
    are no common BBcode tags for that.

    Some board systems (e.g. phpBB) don't support colors in their [code] tag,
    so you can't use the highlighting together with that tag.
    Text in a [code] tag usually is shown with a monospace font (which this
    formatter can do with the ``monofont`` option) and no spaces (which you
    need for indentation) are removed.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `codetag`
        If set to true, put the output into ``[code]`` tags (default:
        ``false``)

    `monofont`
        If set to true, add a tag to show the code with a monospace font
        (default: ``false``).
    """
    name = 'BBCode'
    aliases = ['bbcode', 'bb']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self._code = get_bool_opt(options, 'codetag', False)
        self._mono = get_bool_opt(options, 'monofont', False)

        # Precomputed (start, end) BBCode tag pairs per token type.
        self.styles = {}
        self._make_styles()

    def _make_styles(self):
        # Iterating self.style yields (token type, style definition) pairs;
        # build opening/closing tag strings for each.
        for ttype, ndef in self.style:
            start = end = ''
            if ndef['color']:
                start += '[color=#{}]'.format(ndef['color'])
                end = '[/color]' + end
            if ndef['bold']:
                start += '[b]'
                end = '[/b]' + end
            if ndef['italic']:
                start += '[i]'
                end = '[/i]' + end
            if ndef['underline']:
                start += '[u]'
                end = '[/u]' + end
            # there are no common BBcodes for background-color and border

            self.styles[ttype] = start, end

    def format_unencoded(self, tokensource, outfile):
        if self._code:
            outfile.write('[code]')
        if self._mono:
            outfile.write('[font=monospace]')

        # Buffer consecutive tokens that resolve to the same style so each
        # run is emitted inside a single pair of tags.
        lastval = ''
        lasttype = None

        for ttype, value in tokensource:
            # Walk up the token type hierarchy until a styled type is found.
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == lasttype:
                lastval += value
            else:
                if lastval:
                    start, end = self.styles[lasttype]
                    outfile.write(''.join((start, lastval, end)))
                lastval = value
                lasttype = ttype

        # Flush the final buffered run.
        if lastval:
            start, end = self.styles[lasttype]
            outfile.write(''.join((start, lastval, end)))

        if self._mono:
            outfile.write('[/font]')
        if self._code:
            outfile.write('[/code]')
        if self._code or self._mono:
            outfile.write('\n')
mgm/lib/python3.10/site-packages/pygments/formatters/groff.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.groff
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for groff output.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import math
12
+ from pygments.formatter import Formatter
13
+ from pygments.util import get_bool_opt, get_int_opt
14
+
15
+ __all__ = ['GroffFormatter']
16
+
17
+
18
class GroffFormatter(Formatter):
    """
    Format tokens with groff escapes to change their color and font style.

    .. versionadded:: 2.11

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `monospaced`
        If set to true, monospace font will be used (default: ``true``).

    `linenos`
        If set to true, print the line numbers (default: ``false``).

    `wrap`
        Wrap lines to the specified number of characters. Disabled if set to 0
        (default: ``0``).
    """

    name = 'groff'
    aliases = ['groff','troff','roff']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        self.monospaced = get_bool_opt(options, 'monospaced', True)
        self.linenos = get_bool_opt(options, 'linenos', False)
        self._lineno = 0
        self.wrap = get_int_opt(options, 'wrap', 0)
        self._linelen = 0  # length of the current output line (for wrapping)

        # Precomputed (start, end) escape pairs per token type.
        self.styles = {}
        self._make_styles()


    def _make_styles(self):
        # \f[..] selects a font; the C-prefixed variants are the Courier
        # (monospace) family.
        regular = '\\f[CR]' if self.monospaced else '\\f[R]'
        bold = '\\f[CB]' if self.monospaced else '\\f[B]'
        italic = '\\f[CI]' if self.monospaced else '\\f[I]'

        for ttype, ndef in self.style:
            start = end = ''
            if ndef['color']:
                # \m[..] sets the glyph color, \M[..] the background color.
                start += '\\m[{}]'.format(ndef['color'])
                end = '\\m[]' + end
            if ndef['bold']:
                start += bold
                end = regular + end
            if ndef['italic']:
                start += italic
                end = regular + end
            if ndef['bgcolor']:
                start += '\\M[{}]'.format(ndef['bgcolor'])
                end = '\\M[]' + end

            self.styles[ttype] = start, end


    def _define_colors(self, outfile):
        # Emit one .defcolor request per color used by the style, so the
        # \m[..]/\M[..] escapes below can refer to them by name.
        colors = set()
        for _, ndef in self.style:
            if ndef['color'] is not None:
                colors.add(ndef['color'])

        for color in sorted(colors):
            outfile.write('.defcolor ' + color + ' rgb #' + color + '\n')


    def _write_lineno(self, outfile):
        # "% 4d" right-aligns the number; a leading '\n' is emitted for every
        # line except the very first.
        self._lineno += 1
        outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno))


    def _wrap_line(self, line):
        length = len(line.rstrip('\n'))
        space = ' ' if self.linenos else ''
        newline = ''

        if length > self.wrap:
            # Hard-wrap: split into self.wrap-sized chunks.
            for i in range(0, math.floor(length / self.wrap)):
                chunk = line[i*self.wrap:i*self.wrap+self.wrap]
                newline += (chunk + '\n' + space)
            remainder = length % self.wrap
            if remainder > 0:
                # NOTE(review): takes remainder+1 trailing characters —
                # presumably to include the trailing '\n'; confirm behavior
                # for lines without one.
                newline += line[-remainder-1:]
                self._linelen = remainder
        elif self._linelen + length > self.wrap:
            # The token would overflow the current output line: break first.
            newline = ('\n' + space) + line
            self._linelen = length
        else:
            newline = line
            self._linelen += length

        return newline


    def _escape_chars(self, text):
        # Escape characters groff treats specially, then encode any
        # non-ASCII character as a \[uXXXX] named glyph.
        text = text.replace('\\', '\\[u005C]'). \
                    replace('.', '\\[char46]'). \
                    replace('\'', '\\[u0027]'). \
                    replace('`', '\\[u0060]'). \
                    replace('~', '\\[u007E]')
        copy = text

        for char in copy:
            # len(char) != len(char.encode()) is true for non-ASCII chars.
            if len(char) != len(char.encode()):
                uni = char.encode('unicode_escape') \
                    .decode()[1:] \
                    .replace('x', 'u00') \
                    .upper()
                text = text.replace(char, '\\[u' + uni[1:] + ']')

        return text


    def format_unencoded(self, tokensource, outfile):
        self._define_colors(outfile)

        # .nf disables filling (no-fill mode); start in the regular font.
        outfile.write('.nf\n\\f[CR]\n')

        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            # Walk up the token type hierarchy until a styled type is found.
            while ttype not in self.styles:
                ttype = ttype.parent
            start, end = self.styles[ttype]

            for line in value.splitlines(True):
                if self.wrap > 0:
                    line = self._wrap_line(line)

                if start and end:
                    text = self._escape_chars(line.rstrip('\n'))
                    if text != '':
                        outfile.write(''.join((start, text, end)))
                else:
                    outfile.write(self._escape_chars(line.rstrip('\n')))

                # Line endings were stripped above; re-emit them, writing the
                # next line number where requested.
                if line.endswith('\n'):
                    if self.linenos:
                        self._write_lineno(outfile)
                        self._linelen = 0
                    else:
                        outfile.write('\n')
                        self._linelen = 0

        # .fi restores fill mode.
        outfile.write('\n.fi')
mgm/lib/python3.10/site-packages/pygments/formatters/html.py ADDED
@@ -0,0 +1,995 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.html
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for HTML output.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import functools
12
+ import os
13
+ import sys
14
+ import os.path
15
+ from io import StringIO
16
+
17
+ from pygments.formatter import Formatter
18
+ from pygments.token import Token, Text, STANDARD_TYPES
19
+ from pygments.util import get_bool_opt, get_int_opt, get_list_opt
20
+
21
+ try:
22
+ import ctags
23
+ except ImportError:
24
+ ctags = None
25
+
26
+ __all__ = ['HtmlFormatter']
27
+
28
+
29
+ _escape_html_table = {
30
+ ord('&'): '&amp;',
31
+ ord('<'): '&lt;',
32
+ ord('>'): '&gt;',
33
+ ord('"'): '&quot;',
34
+ ord("'"): '&#39;',
35
+ }
36
+
37
+
38
+ def escape_html(text, table=_escape_html_table):
39
+ """Escape &, <, > as well as single and double quotes for HTML."""
40
+ return text.translate(table)
41
+
42
+
43
+ def webify(color):
44
+ if color.startswith('calc') or color.startswith('var'):
45
+ return color
46
+ else:
47
+ # Check if the color can be shortened from 6 to 3 characters
48
+ color = color.upper()
49
+ if (len(color) == 6 and
50
+ ( color[0] == color[1]
51
+ and color[2] == color[3]
52
+ and color[4] == color[5])):
53
+ return f'#{color[0]}{color[2]}{color[4]}'
54
+ else:
55
+ return f'#{color}'
56
+
57
+
58
+ def _get_ttype_class(ttype):
59
+ fname = STANDARD_TYPES.get(ttype)
60
+ if fname:
61
+ return fname
62
+ aname = ''
63
+ while fname is None:
64
+ aname = '-' + ttype[-1] + aname
65
+ ttype = ttype.parent
66
+ fname = STANDARD_TYPES.get(ttype)
67
+ return fname + aname
68
+
69
+
70
+ CSSFILE_TEMPLATE = '''\
71
+ /*
72
+ generated by Pygments <https://pygments.org/>
73
+ Copyright 2006-2025 by the Pygments team.
74
+ Licensed under the BSD license, see LICENSE for details.
75
+ */
76
+ %(styledefs)s
77
+ '''
78
+
79
+ DOC_HEADER = '''\
80
+ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
81
+ "http://www.w3.org/TR/html4/strict.dtd">
82
+ <!--
83
+ generated by Pygments <https://pygments.org/>
84
+ Copyright 2006-2025 by the Pygments team.
85
+ Licensed under the BSD license, see LICENSE for details.
86
+ -->
87
+ <html>
88
+ <head>
89
+ <title>%(title)s</title>
90
+ <meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
91
+ <style type="text/css">
92
+ ''' + CSSFILE_TEMPLATE + '''
93
+ </style>
94
+ </head>
95
+ <body>
96
+ <h2>%(title)s</h2>
97
+
98
+ '''
99
+
100
+ DOC_HEADER_EXTERNALCSS = '''\
101
+ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
102
+ "http://www.w3.org/TR/html4/strict.dtd">
103
+
104
+ <html>
105
+ <head>
106
+ <title>%(title)s</title>
107
+ <meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
108
+ <link rel="stylesheet" href="%(cssfile)s" type="text/css">
109
+ </head>
110
+ <body>
111
+ <h2>%(title)s</h2>
112
+
113
+ '''
114
+
115
+ DOC_FOOTER = '''\
116
+ </body>
117
+ </html>
118
+ '''
119
+
120
+
121
+ class HtmlFormatter(Formatter):
122
+ r"""
123
+ Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed
124
+ in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option).
125
+ The ``<div>``'s CSS class can be set by the `cssclass` option.
126
+
127
+ If the `linenos` option is set to ``"table"``, the ``<pre>`` is
128
+ additionally wrapped inside a ``<table>`` which has one row and two
129
+ cells: one containing the line numbers and one containing the code.
130
+ Example:
131
+
132
+ .. sourcecode:: html
133
+
134
+ <div class="highlight" >
135
+ <table><tr>
136
+ <td class="linenos" title="click to toggle"
137
+ onclick="with (this.firstChild.style)
138
+ { display = (display == '') ? 'none' : '' }">
139
+ <pre>1
140
+ 2</pre>
141
+ </td>
142
+ <td class="code">
143
+ <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
144
+ <span class="Ke">pass</span>
145
+ </pre>
146
+ </td>
147
+ </tr></table></div>
148
+
149
+ (whitespace added to improve clarity).
150
+
151
+ A list of lines can be specified using the `hl_lines` option to make these
152
+ lines highlighted (as of Pygments 0.11).
153
+
154
+ With the `full` option, a complete HTML 4 document is output, including
155
+ the style definitions inside a ``<style>`` tag, or in a separate file if
156
+ the `cssfile` option is given.
157
+
158
+ When `tagsfile` is set to the path of a ctags index file, it is used to
159
+ generate hyperlinks from names to their definition. You must enable
160
+ `lineanchors` and run ctags with the `-n` option for this to work. The
161
+ `python-ctags` module from PyPI must be installed to use this feature;
162
+ otherwise a `RuntimeError` will be raised.
163
+
164
+ The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
165
+ containing CSS rules for the CSS classes used by the formatter. The
166
+ argument `arg` can be used to specify additional CSS selectors that
167
+ are prepended to the classes. A call `fmter.get_style_defs('td .code')`
168
+ would result in the following CSS classes:
169
+
170
+ .. sourcecode:: css
171
+
172
+ td .code .kw { font-weight: bold; color: #00FF00 }
173
+ td .code .cm { color: #999999 }
174
+ ...
175
+
176
+ If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
177
+ `get_style_defs()` method to request multiple prefixes for the tokens:
178
+
179
+ .. sourcecode:: python
180
+
181
+ formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
182
+
183
+ The output would then look like this:
184
+
185
+ .. sourcecode:: css
186
+
187
+ div.syntax pre .kw,
188
+ pre.syntax .kw { font-weight: bold; color: #00FF00 }
189
+ div.syntax pre .cm,
190
+ pre.syntax .cm { color: #999999 }
191
+ ...
192
+
193
+ Additional options accepted:
194
+
195
+ `nowrap`
196
+ If set to ``True``, don't add a ``<pre>`` and a ``<div>`` tag
197
+ around the tokens. This disables most other options (default: ``False``).
198
+
199
+ `full`
200
+ Tells the formatter to output a "full" document, i.e. a complete
201
+ self-contained document (default: ``False``).
202
+
203
+ `title`
204
+ If `full` is true, the title that should be used to caption the
205
+ document (default: ``''``).
206
+
207
+ `style`
208
+ The style to use, can be a string or a Style subclass (default:
209
+ ``'default'``). This option has no effect if the `cssfile`
210
+ and `noclobber_cssfile` option are given and the file specified in
211
+ `cssfile` exists.
212
+
213
+ `noclasses`
214
+ If set to true, token ``<span>`` tags (as well as line number elements)
215
+ will not use CSS classes, but inline styles. This is not recommended
216
+ for larger pieces of code since it increases output size by quite a bit
217
+ (default: ``False``).
218
+
219
+ `classprefix`
220
+ Since the token types use relatively short class names, they may clash
221
+ with some of your own class names. In this case you can use the
222
+ `classprefix` option to give a string to prepend to all Pygments-generated
223
+ CSS class names for token types.
224
+ Note that this option also affects the output of `get_style_defs()`.
225
+
226
+ `cssclass`
227
+ CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
228
+ If you set this option, the default selector for `get_style_defs()`
229
+ will be this class.
230
+
231
+ .. versionadded:: 0.9
232
+ If you select the ``'table'`` line numbers, the wrapping table will
233
+ have a CSS class of this string plus ``'table'``, the default is
234
+ accordingly ``'highlighttable'``.
235
+
236
+ `cssstyles`
237
+ Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
238
+
239
+ `prestyles`
240
+ Inline CSS styles for the ``<pre>`` tag (default: ``''``).
241
+
242
+ .. versionadded:: 0.11
243
+
244
+ `cssfile`
245
+ If the `full` option is true and this option is given, it must be the
246
+ name of an external file. If the filename does not include an absolute
247
+ path, the file's path will be assumed to be relative to the main output
248
+ file's path, if the latter can be found. The stylesheet is then written
249
+ to this file instead of the HTML file.
250
+
251
+ .. versionadded:: 0.6
252
+
253
+ `noclobber_cssfile`
254
+ If `cssfile` is given and the specified file exists, the css file will
255
+ not be overwritten. This allows the use of the `full` option in
256
+ combination with a user specified css file. Default is ``False``.
257
+
258
+ .. versionadded:: 1.1
259
+
260
+ `linenos`
261
+ If set to ``'table'``, output line numbers as a table with two cells,
262
+ one containing the line numbers, the other the whole code. This is
263
+ copy-and-paste-friendly, but may cause alignment problems with some
264
+ browsers or fonts. If set to ``'inline'``, the line numbers will be
265
+ integrated in the ``<pre>`` tag that contains the code (that setting
266
+ is *new in Pygments 0.8*).
267
+
268
+ For compatibility with Pygments 0.7 and earlier, every true value
269
+ except ``'inline'`` means the same as ``'table'`` (in particular, that
270
+ means also ``True``).
271
+
272
+ The default value is ``False``, which means no line numbers at all.
273
+
274
+ **Note:** with the default ("table") line number mechanism, the line
275
+ numbers and code can have different line heights in Internet Explorer
276
+ unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
277
+ CSS property (you get the default line spacing with ``line-height:
278
+ 125%``).
279
+
280
+ `hl_lines`
281
+ Specify a list of lines to be highlighted. The line numbers are always
282
+ relative to the input (i.e. the first line is line 1) and are
283
+ independent of `linenostart`.
284
+
285
+ .. versionadded:: 0.11
286
+
287
+ `linenostart`
288
+ The line number for the first line (default: ``1``).
289
+
290
+ `linenostep`
291
+ If set to a number n > 1, only every nth line number is printed.
292
+
293
+ `linenospecial`
294
+ If set to a number n > 0, every nth line number is given the CSS
295
+ class ``"special"`` (default: ``0``).
296
+
297
+ `nobackground`
298
+ If set to ``True``, the formatter won't output the background color
299
+ for the wrapping element (this automatically defaults to ``False``
300
+ when there is no wrapping element [eg: no argument for the
301
+ `get_syntax_defs` method given]) (default: ``False``).
302
+
303
+ .. versionadded:: 0.6
304
+
305
+ `lineseparator`
306
+ This string is output between lines of code. It defaults to ``"\n"``,
307
+ which is enough to break a line inside ``<pre>`` tags, but you can
308
+ e.g. set it to ``"<br>"`` to get HTML line breaks.
309
+
310
+ .. versionadded:: 0.7
311
+
312
+ `lineanchors`
313
+ If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
314
+ output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``.
315
+ This allows easy linking to certain lines.
316
+
317
+ .. versionadded:: 0.9
318
+
319
+ `linespans`
320
+ If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
321
+ output line in a span tag with an ``id`` of ``foo-linenumber``.
322
+ This allows easy access to lines via javascript.
323
+
324
+ .. versionadded:: 1.6
325
+
326
+ `anchorlinenos`
327
+ If set to `True`, will wrap line numbers in <a> tags. Used in
328
+ combination with `linenos` and `lineanchors`.
329
+
330
+ `tagsfile`
331
+ If set to the path of a ctags file, wrap names in anchor tags that
332
+ link to their definitions. `lineanchors` should be used, and the
333
+ tags file should specify line numbers (see the `-n` option to ctags).
334
+ The tags file is assumed to be encoded in UTF-8.
335
+
336
+ .. versionadded:: 1.6
337
+
338
+ `tagurlformat`
339
+ A string formatting pattern used to generate links to ctags definitions.
340
+ Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
341
+ Defaults to an empty string, resulting in just `#prefix-number` links.
342
+
343
+ .. versionadded:: 1.6
344
+
345
+ `filename`
346
+ A string used to generate a filename when rendering ``<pre>`` blocks,
347
+ for example if displaying source code. If `linenos` is set to
348
+ ``'table'`` then the filename will be rendered in an initial row
349
+ containing a single `<th>` which spans both columns.
350
+
351
+ .. versionadded:: 2.1
352
+
353
+ `wrapcode`
354
+ Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
355
+ by the HTML5 specification.
356
+
357
+ .. versionadded:: 2.4
358
+
359
+ `debug_token_types`
360
+ Add ``title`` attributes to all token ``<span>`` tags that show the
361
+ name of the token.
362
+
363
+ .. versionadded:: 2.10
364
+
365
+
366
+ **Subclassing the HTML formatter**
367
+
368
+ .. versionadded:: 0.7
369
+
370
+ The HTML formatter is now built in a way that allows easy subclassing, thus
371
+ customizing the output HTML code. The `format()` method calls
372
+ `self._format_lines()` which returns a generator that yields tuples of ``(1,
373
+ line)``, where the ``1`` indicates that the ``line`` is a line of the
374
+ formatted source code.
375
+
376
+ If the `nowrap` option is set, the generator is the iterated over and the
377
+ resulting HTML is output.
378
+
379
+ Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
380
+ other generators. These may add some HTML code to the one generated by
381
+ `_format_lines()`, either by modifying the lines generated by the latter,
382
+ then yielding them again with ``(1, line)``, and/or by yielding other HTML
383
+ code before or after the lines, with ``(0, html)``. The distinction between
384
+ source lines and other code makes it possible to wrap the generator multiple
385
+ times.
386
+
387
+ The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
388
+
389
+ A custom `HtmlFormatter` subclass could look like this:
390
+
391
+ .. sourcecode:: python
392
+
393
+ class CodeHtmlFormatter(HtmlFormatter):
394
+
395
+ def wrap(self, source, *, include_div):
396
+ return self._wrap_code(source)
397
+
398
+ def _wrap_code(self, source):
399
+ yield 0, '<code>'
400
+ for i, t in source:
401
+ if i == 1:
402
+ # it's a line of formatted code
403
+ t += '<br>'
404
+ yield i, t
405
+ yield 0, '</code>'
406
+
407
+ This results in wrapping the formatted lines with a ``<code>`` tag, where the
408
+ source lines are broken using ``<br>`` tags.
409
+
410
+ After calling `wrap()`, the `format()` method also adds the "line numbers"
411
+ and/or "full document" wrappers if the respective options are set. Then, all
412
+ HTML yielded by the wrapped generator is output.
413
+ """
414
+
415
+ name = 'HTML'
416
+ aliases = ['html']
417
+ filenames = ['*.html', '*.htm']
418
+
419
+ def __init__(self, **options):
420
+ Formatter.__init__(self, **options)
421
+ self.title = self._decodeifneeded(self.title)
422
+ self.nowrap = get_bool_opt(options, 'nowrap', False)
423
+ self.noclasses = get_bool_opt(options, 'noclasses', False)
424
+ self.classprefix = options.get('classprefix', '')
425
+ self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
426
+ self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
427
+ self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
428
+ self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
429
+ self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
430
+ self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
431
+ self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
432
+ self.filename = self._decodeifneeded(options.get('filename', ''))
433
+ self.wrapcode = get_bool_opt(options, 'wrapcode', False)
434
+ self.span_element_openers = {}
435
+ self.debug_token_types = get_bool_opt(options, 'debug_token_types', False)
436
+
437
+ if self.tagsfile:
438
+ if not ctags:
439
+ raise RuntimeError('The "ctags" package must to be installed '
440
+ 'to be able to use the "tagsfile" feature.')
441
+ self._ctags = ctags.CTags(self.tagsfile)
442
+
443
+ linenos = options.get('linenos', False)
444
+ if linenos == 'inline':
445
+ self.linenos = 2
446
+ elif linenos:
447
+ # compatibility with <= 0.7
448
+ self.linenos = 1
449
+ else:
450
+ self.linenos = 0
451
+ self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
452
+ self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
453
+ self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
454
+ self.nobackground = get_bool_opt(options, 'nobackground', False)
455
+ self.lineseparator = options.get('lineseparator', '\n')
456
+ self.lineanchors = options.get('lineanchors', '')
457
+ self.linespans = options.get('linespans', '')
458
+ self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False)
459
+ self.hl_lines = set()
460
+ for lineno in get_list_opt(options, 'hl_lines', []):
461
+ try:
462
+ self.hl_lines.add(int(lineno))
463
+ except ValueError:
464
+ pass
465
+
466
+ self._create_stylesheet()
467
+
468
+ def _get_css_class(self, ttype):
469
+ """Return the css class of this token type prefixed with
470
+ the classprefix option."""
471
+ ttypeclass = _get_ttype_class(ttype)
472
+ if ttypeclass:
473
+ return self.classprefix + ttypeclass
474
+ return ''
475
+
476
+ def _get_css_classes(self, ttype):
477
+ """Return the CSS classes of this token type prefixed with the classprefix option."""
478
+ cls = self._get_css_class(ttype)
479
+ while ttype not in STANDARD_TYPES:
480
+ ttype = ttype.parent
481
+ cls = self._get_css_class(ttype) + ' ' + cls
482
+ return cls or ''
483
+
484
+ def _get_css_inline_styles(self, ttype):
485
+ """Return the inline CSS styles for this token type."""
486
+ cclass = self.ttype2class.get(ttype)
487
+ while cclass is None:
488
+ ttype = ttype.parent
489
+ cclass = self.ttype2class.get(ttype)
490
+ return cclass or ''
491
+
492
+ def _create_stylesheet(self):
493
+ t2c = self.ttype2class = {Token: ''}
494
+ c2s = self.class2style = {}
495
+ for ttype, ndef in self.style:
496
+ name = self._get_css_class(ttype)
497
+ style = ''
498
+ if ndef['color']:
499
+ style += 'color: {}; '.format(webify(ndef['color']))
500
+ if ndef['bold']:
501
+ style += 'font-weight: bold; '
502
+ if ndef['italic']:
503
+ style += 'font-style: italic; '
504
+ if ndef['underline']:
505
+ style += 'text-decoration: underline; '
506
+ if ndef['bgcolor']:
507
+ style += 'background-color: {}; '.format(webify(ndef['bgcolor']))
508
+ if ndef['border']:
509
+ style += 'border: 1px solid {}; '.format(webify(ndef['border']))
510
+ if style:
511
+ t2c[ttype] = name
512
+ # save len(ttype) to enable ordering the styles by
513
+ # hierarchy (necessary for CSS cascading rules!)
514
+ c2s[name] = (style[:-2], ttype, len(ttype))
515
+
516
+ def get_style_defs(self, arg=None):
517
+ """
518
+ Return CSS style definitions for the classes produced by the current
519
+ highlighting style. ``arg`` can be a string or list of selectors to
520
+ insert before the token type classes.
521
+ """
522
+ style_lines = []
523
+
524
+ style_lines.extend(self.get_linenos_style_defs())
525
+ style_lines.extend(self.get_background_style_defs(arg))
526
+ style_lines.extend(self.get_token_style_defs(arg))
527
+
528
+ return '\n'.join(style_lines)
529
+
530
+ def get_token_style_defs(self, arg=None):
531
+ prefix = self.get_css_prefix(arg)
532
+
533
+ styles = [
534
+ (level, ttype, cls, style)
535
+ for cls, (style, ttype, level) in self.class2style.items()
536
+ if cls and style
537
+ ]
538
+ styles.sort()
539
+
540
+ lines = [
541
+ f'{prefix(cls)} {{ {style} }} /* {repr(ttype)[6:]} */'
542
+ for (level, ttype, cls, style) in styles
543
+ ]
544
+
545
+ return lines
546
+
547
+ def get_background_style_defs(self, arg=None):
548
+ prefix = self.get_css_prefix(arg)
549
+ bg_color = self.style.background_color
550
+ hl_color = self.style.highlight_color
551
+
552
+ lines = []
553
+
554
+ if arg and not self.nobackground and bg_color is not None:
555
+ text_style = ''
556
+ if Text in self.ttype2class:
557
+ text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
558
+ lines.insert(
559
+ 0, '{}{{ background: {};{} }}'.format(
560
+ prefix(''), bg_color, text_style
561
+ )
562
+ )
563
+ if hl_color is not None:
564
+ lines.insert(
565
+ 0, '{} {{ background-color: {} }}'.format(prefix('hll'), hl_color)
566
+ )
567
+
568
+ return lines
569
+
570
+ def get_linenos_style_defs(self):
571
+ lines = [
572
+ f'pre {{ {self._pre_style} }}',
573
+ f'td.linenos .normal {{ {self._linenos_style} }}',
574
+ f'span.linenos {{ {self._linenos_style} }}',
575
+ f'td.linenos .special {{ {self._linenos_special_style} }}',
576
+ f'span.linenos.special {{ {self._linenos_special_style} }}',
577
+ ]
578
+
579
+ return lines
580
+
581
+ def get_css_prefix(self, arg):
582
+ if arg is None:
583
+ arg = ('cssclass' in self.options and '.'+self.cssclass or '')
584
+ if isinstance(arg, str):
585
+ args = [arg]
586
+ else:
587
+ args = list(arg)
588
+
589
+ def prefix(cls):
590
+ if cls:
591
+ cls = '.' + cls
592
+ tmp = []
593
+ for arg in args:
594
+ tmp.append((arg and arg + ' ' or '') + cls)
595
+ return ', '.join(tmp)
596
+
597
+ return prefix
598
+
599
+ @property
600
+ def _pre_style(self):
601
+ return 'line-height: 125%;'
602
+
603
+ @property
604
+ def _linenos_style(self):
605
+ color = self.style.line_number_color
606
+ background_color = self.style.line_number_background_color
607
+ return f'color: {color}; background-color: {background_color}; padding-left: 5px; padding-right: 5px;'
608
+
609
+ @property
610
+ def _linenos_special_style(self):
611
+ color = self.style.line_number_special_color
612
+ background_color = self.style.line_number_special_background_color
613
+ return f'color: {color}; background-color: {background_color}; padding-left: 5px; padding-right: 5px;'
614
+
615
+ def _decodeifneeded(self, value):
616
+ if isinstance(value, bytes):
617
+ if self.encoding:
618
+ return value.decode(self.encoding)
619
+ return value.decode()
620
+ return value
621
+
622
+ def _wrap_full(self, inner, outfile):
623
+ if self.cssfile:
624
+ if os.path.isabs(self.cssfile):
625
+ # it's an absolute filename
626
+ cssfilename = self.cssfile
627
+ else:
628
+ try:
629
+ filename = outfile.name
630
+ if not filename or filename[0] == '<':
631
+ # pseudo files, e.g. name == '<fdopen>'
632
+ raise AttributeError
633
+ cssfilename = os.path.join(os.path.dirname(filename),
634
+ self.cssfile)
635
+ except AttributeError:
636
+ print('Note: Cannot determine output file name, '
637
+ 'using current directory as base for the CSS file name',
638
+ file=sys.stderr)
639
+ cssfilename = self.cssfile
640
+ # write CSS file only if noclobber_cssfile isn't given as an option.
641
+ try:
642
+ if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
643
+ with open(cssfilename, "w", encoding="utf-8") as cf:
644
+ cf.write(CSSFILE_TEMPLATE %
645
+ {'styledefs': self.get_style_defs('body')})
646
+ except OSError as err:
647
+ err.strerror = 'Error writing CSS file: ' + err.strerror
648
+ raise
649
+
650
+ yield 0, (DOC_HEADER_EXTERNALCSS %
651
+ dict(title=self.title,
652
+ cssfile=self.cssfile,
653
+ encoding=self.encoding))
654
+ else:
655
+ yield 0, (DOC_HEADER %
656
+ dict(title=self.title,
657
+ styledefs=self.get_style_defs('body'),
658
+ encoding=self.encoding))
659
+
660
+ yield from inner
661
+ yield 0, DOC_FOOTER
662
+
663
+ def _wrap_tablelinenos(self, inner):
664
+ dummyoutfile = StringIO()
665
+ lncount = 0
666
+ for t, line in inner:
667
+ if t:
668
+ lncount += 1
669
+ dummyoutfile.write(line)
670
+
671
+ fl = self.linenostart
672
+ mw = len(str(lncount + fl - 1))
673
+ sp = self.linenospecial
674
+ st = self.linenostep
675
+ anchor_name = self.lineanchors or self.linespans
676
+ aln = self.anchorlinenos
677
+ nocls = self.noclasses
678
+
679
+ lines = []
680
+
681
+ for i in range(fl, fl+lncount):
682
+ print_line = i % st == 0
683
+ special_line = sp and i % sp == 0
684
+
685
+ if print_line:
686
+ line = '%*d' % (mw, i)
687
+ if aln:
688
+ line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
689
+ else:
690
+ line = ' ' * mw
691
+
692
+ if nocls:
693
+ if special_line:
694
+ style = f' style="{self._linenos_special_style}"'
695
+ else:
696
+ style = f' style="{self._linenos_style}"'
697
+ else:
698
+ if special_line:
699
+ style = ' class="special"'
700
+ else:
701
+ style = ' class="normal"'
702
+
703
+ if style:
704
+ line = f'<span{style}>{line}</span>'
705
+
706
+ lines.append(line)
707
+
708
+ ls = '\n'.join(lines)
709
+
710
+ # If a filename was specified, we can't put it into the code table as it
711
+ # would misalign the line numbers. Hence we emit a separate row for it.
712
+ filename_tr = ""
713
+ if self.filename:
714
+ filename_tr = (
715
+ '<tr><th colspan="2" class="filename">'
716
+ '<span class="filename">' + self.filename + '</span>'
717
+ '</th></tr>')
718
+
719
+ # in case you wonder about the seemingly redundant <div> here: since the
720
+ # content in the other cell also is wrapped in a div, some browsers in
721
+ # some configurations seem to mess up the formatting...
722
+ yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
723
+ '<tr><td class="linenos"><div class="linenodiv"><pre>' +
724
+ ls + '</pre></div></td><td class="code">')
725
+ yield 0, '<div>'
726
+ yield 0, dummyoutfile.getvalue()
727
+ yield 0, '</div>'
728
+ yield 0, '</td></tr></table>'
729
+
730
+
731
+ def _wrap_inlinelinenos(self, inner):
732
+ # need a list of lines since we need the width of a single number :(
733
+ inner_lines = list(inner)
734
+ sp = self.linenospecial
735
+ st = self.linenostep
736
+ num = self.linenostart
737
+ mw = len(str(len(inner_lines) + num - 1))
738
+ anchor_name = self.lineanchors or self.linespans
739
+ aln = self.anchorlinenos
740
+ nocls = self.noclasses
741
+
742
+ for _, inner_line in inner_lines:
743
+ print_line = num % st == 0
744
+ special_line = sp and num % sp == 0
745
+
746
+ if print_line:
747
+ line = '%*d' % (mw, num)
748
+ else:
749
+ line = ' ' * mw
750
+
751
+ if nocls:
752
+ if special_line:
753
+ style = f' style="{self._linenos_special_style}"'
754
+ else:
755
+ style = f' style="{self._linenos_style}"'
756
+ else:
757
+ if special_line:
758
+ style = ' class="linenos special"'
759
+ else:
760
+ style = ' class="linenos"'
761
+
762
+ if style:
763
+ linenos = f'<span{style}>{line}</span>'
764
+ else:
765
+ linenos = line
766
+
767
+ if aln:
768
+ yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
769
+ inner_line)
770
+ else:
771
+ yield 1, linenos + inner_line
772
+ num += 1
773
+
774
+ def _wrap_lineanchors(self, inner):
775
+ s = self.lineanchors
776
+ # subtract 1 since we have to increment i *before* yielding
777
+ i = self.linenostart - 1
778
+ for t, line in inner:
779
+ if t:
780
+ i += 1
781
+ href = "" if self.linenos else ' href="#%s-%d"' % (s, i)
782
+ yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line
783
+ else:
784
+ yield 0, line
785
+
786
+ def _wrap_linespans(self, inner):
787
+ s = self.linespans
788
+ i = self.linenostart - 1
789
+ for t, line in inner:
790
+ if t:
791
+ i += 1
792
+ yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
793
+ else:
794
+ yield 0, line
795
+
796
+ def _wrap_div(self, inner):
797
+ style = []
798
+ if (self.noclasses and not self.nobackground and
799
+ self.style.background_color is not None):
800
+ style.append(f'background: {self.style.background_color}')
801
+ if self.cssstyles:
802
+ style.append(self.cssstyles)
803
+ style = '; '.join(style)
804
+
805
+ yield 0, ('<div' + (self.cssclass and f' class="{self.cssclass}"') +
806
+ (style and (f' style="{style}"')) + '>')
807
+ yield from inner
808
+ yield 0, '</div>\n'
809
+
810
+ def _wrap_pre(self, inner):
811
+ style = []
812
+ if self.prestyles:
813
+ style.append(self.prestyles)
814
+ if self.noclasses:
815
+ style.append(self._pre_style)
816
+ style = '; '.join(style)
817
+
818
+ if self.filename and self.linenos != 1:
819
+ yield 0, ('<span class="filename">' + self.filename + '</span>')
820
+
821
+ # the empty span here is to keep leading empty lines from being
822
+ # ignored by HTML parsers
823
+ yield 0, ('<pre' + (style and f' style="{style}"') + '><span></span>')
824
+ yield from inner
825
+ yield 0, '</pre>'
826
+
827
+ def _wrap_code(self, inner):
828
+ yield 0, '<code>'
829
+ yield from inner
830
+ yield 0, '</code>'
831
+
832
+ @functools.lru_cache(maxsize=100)
833
+ def _translate_parts(self, value):
834
+ """HTML-escape a value and split it by newlines."""
835
+ return value.translate(_escape_html_table).split('\n')
836
+
837
+ def _format_lines(self, tokensource):
838
+ """
839
+ Just format the tokens, without any wrapping tags.
840
+ Yield individual lines.
841
+ """
842
+ nocls = self.noclasses
843
+ lsep = self.lineseparator
844
+ tagsfile = self.tagsfile
845
+
846
+ lspan = ''
847
+ line = []
848
+ for ttype, value in tokensource:
849
+ try:
850
+ cspan = self.span_element_openers[ttype]
851
+ except KeyError:
852
+ title = ' title="{}"'.format('.'.join(ttype)) if self.debug_token_types else ''
853
+ if nocls:
854
+ css_style = self._get_css_inline_styles(ttype)
855
+ if css_style:
856
+ css_style = self.class2style[css_style][0]
857
+ cspan = f'<span style="{css_style}"{title}>'
858
+ else:
859
+ cspan = ''
860
+ else:
861
+ css_class = self._get_css_classes(ttype)
862
+ if css_class:
863
+ cspan = f'<span class="{css_class}"{title}>'
864
+ else:
865
+ cspan = ''
866
+ self.span_element_openers[ttype] = cspan
867
+
868
+ parts = self._translate_parts(value)
869
+
870
+ if tagsfile and ttype in Token.Name:
871
+ filename, linenumber = self._lookup_ctag(value)
872
+ if linenumber:
873
+ base, filename = os.path.split(filename)
874
+ if base:
875
+ base += '/'
876
+ filename, extension = os.path.splitext(filename)
877
+ url = self.tagurlformat % {'path': base, 'fname': filename,
878
+ 'fext': extension}
879
+ parts[0] = "<a href=\"%s#%s-%d\">%s" % \
880
+ (url, self.lineanchors, linenumber, parts[0])
881
+ parts[-1] = parts[-1] + "</a>"
882
+
883
+ # for all but the last line
884
+ for part in parts[:-1]:
885
+ if line:
886
+ # Also check for part being non-empty, so we avoid creating
887
+ # empty <span> tags
888
+ if lspan != cspan and part:
889
+ line.extend(((lspan and '</span>'), cspan, part,
890
+ (cspan and '</span>'), lsep))
891
+ else: # both are the same, or the current part was empty
892
+ line.extend((part, (lspan and '</span>'), lsep))
893
+ yield 1, ''.join(line)
894
+ line = []
895
+ elif part:
896
+ yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
897
+ else:
898
+ yield 1, lsep
899
+ # for the last line
900
+ if line and parts[-1]:
901
+ if lspan != cspan:
902
+ line.extend(((lspan and '</span>'), cspan, parts[-1]))
903
+ lspan = cspan
904
+ else:
905
+ line.append(parts[-1])
906
+ elif parts[-1]:
907
+ line = [cspan, parts[-1]]
908
+ lspan = cspan
909
+ # else we neither have to open a new span nor set lspan
910
+
911
+ if line:
912
+ line.extend(((lspan and '</span>'), lsep))
913
+ yield 1, ''.join(line)
914
+
915
+ def _lookup_ctag(self, token):
916
+ entry = ctags.TagEntry()
917
+ if self._ctags.find(entry, token.encode(), 0):
918
+ return entry['file'].decode(), entry['lineNumber']
919
+ else:
920
+ return None, None
921
+
922
+ def _highlight_lines(self, tokensource):
923
+ """
924
+ Highlighted the lines specified in the `hl_lines` option by
925
+ post-processing the token stream coming from `_format_lines`.
926
+ """
927
+ hls = self.hl_lines
928
+
929
+ for i, (t, value) in enumerate(tokensource):
930
+ if t != 1:
931
+ yield t, value
932
+ if i + 1 in hls: # i + 1 because Python indexes start at 0
933
+ if self.noclasses:
934
+ style = ''
935
+ if self.style.highlight_color is not None:
936
+ style = (f' style="background-color: {self.style.highlight_color}"')
937
+ yield 1, f'<span{style}>{value}</span>'
938
+ else:
939
+ yield 1, f'<span class="hll">{value}</span>'
940
+ else:
941
+ yield 1, value
942
+
943
+ def wrap(self, source):
944
+ """
945
+ Wrap the ``source``, which is a generator yielding
946
+ individual lines, in custom generators. See docstring
947
+ for `format`. Can be overridden.
948
+ """
949
+
950
+ output = source
951
+ if self.wrapcode:
952
+ output = self._wrap_code(output)
953
+
954
+ output = self._wrap_pre(output)
955
+
956
+ return output
957
+
958
+ def format_unencoded(self, tokensource, outfile):
959
+ """
960
+ The formatting process uses several nested generators; which of
961
+ them are used is determined by the user's options.
962
+
963
+ Each generator should take at least one argument, ``inner``,
964
+ and wrap the pieces of text generated by this.
965
+
966
+ Always yield 2-tuples: (code, text). If "code" is 1, the text
967
+ is part of the original tokensource being highlighted, if it's
968
+ 0, the text is some piece of wrapping. This makes it possible to
969
+ use several different wrappers that process the original source
970
+ linewise, e.g. line number generators.
971
+ """
972
+ source = self._format_lines(tokensource)
973
+
974
+ # As a special case, we wrap line numbers before line highlighting
975
+ # so the line numbers get wrapped in the highlighting tag.
976
+ if not self.nowrap and self.linenos == 2:
977
+ source = self._wrap_inlinelinenos(source)
978
+
979
+ if self.hl_lines:
980
+ source = self._highlight_lines(source)
981
+
982
+ if not self.nowrap:
983
+ if self.lineanchors:
984
+ source = self._wrap_lineanchors(source)
985
+ if self.linespans:
986
+ source = self._wrap_linespans(source)
987
+ source = self.wrap(source)
988
+ if self.linenos == 1:
989
+ source = self._wrap_tablelinenos(source)
990
+ source = self._wrap_div(source)
991
+ if self.full:
992
+ source = self._wrap_full(source, outfile)
993
+
994
+ for t, piece in source:
995
+ outfile.write(piece)
mgm/lib/python3.10/site-packages/pygments/formatters/img.py ADDED
@@ -0,0 +1,686 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.img
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for Pixmap output.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+ import os
11
+ import sys
12
+
13
+ from pygments.formatter import Formatter
14
+ from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
15
+ get_choice_opt
16
+
17
+ import subprocess
18
+
19
+ # Import this carefully
20
+ try:
21
+ from PIL import Image, ImageDraw, ImageFont
22
+ pil_available = True
23
+ except ImportError:
24
+ pil_available = False
25
+
26
+ try:
27
+ import _winreg
28
+ except ImportError:
29
+ try:
30
+ import winreg as _winreg
31
+ except ImportError:
32
+ _winreg = None
33
+
34
+ __all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
35
+ 'BmpImageFormatter']
36
+
37
+
38
+ # For some unknown reason every font calls it something different
39
+ STYLES = {
40
+ 'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
41
+ 'ITALIC': ['Oblique', 'Italic'],
42
+ 'BOLD': ['Bold'],
43
+ 'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
44
+ }
45
+
46
+ # A sane default for modern systems
47
+ DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
48
+ DEFAULT_FONT_NAME_WIN = 'Courier New'
49
+ DEFAULT_FONT_NAME_MAC = 'Menlo'
50
+
51
+
52
+ class PilNotAvailable(ImportError):
53
+ """When Python imaging library is not available"""
54
+
55
+
56
+ class FontNotFound(Exception):
57
+ """When there are no usable fonts specified"""
58
+
59
+
60
+ class FontManager:
61
+ """
62
+ Manages a set of fonts: normal, italic, bold, etc...
63
+ """
64
+
65
+ def __init__(self, font_name, font_size=14):
66
+ self.font_name = font_name
67
+ self.font_size = font_size
68
+ self.fonts = {}
69
+ self.encoding = None
70
+ self.variable = False
71
+ if hasattr(font_name, 'read') or os.path.isfile(font_name):
72
+ font = ImageFont.truetype(font_name, self.font_size)
73
+ self.variable = True
74
+ for style in STYLES:
75
+ self.fonts[style] = font
76
+
77
+ return
78
+
79
+ if sys.platform.startswith('win'):
80
+ if not font_name:
81
+ self.font_name = DEFAULT_FONT_NAME_WIN
82
+ self._create_win()
83
+ elif sys.platform.startswith('darwin'):
84
+ if not font_name:
85
+ self.font_name = DEFAULT_FONT_NAME_MAC
86
+ self._create_mac()
87
+ else:
88
+ if not font_name:
89
+ self.font_name = DEFAULT_FONT_NAME_NIX
90
+ self._create_nix()
91
+
92
+ def _get_nix_font_path(self, name, style):
93
+ proc = subprocess.Popen(['fc-list', f"{name}:style={style}", 'file'],
94
+ stdout=subprocess.PIPE, stderr=None)
95
+ stdout, _ = proc.communicate()
96
+ if proc.returncode == 0:
97
+ lines = stdout.splitlines()
98
+ for line in lines:
99
+ if line.startswith(b'Fontconfig warning:'):
100
+ continue
101
+ path = line.decode().strip().strip(':')
102
+ if path:
103
+ return path
104
+ return None
105
+
106
+ def _create_nix(self):
107
+ for name in STYLES['NORMAL']:
108
+ path = self._get_nix_font_path(self.font_name, name)
109
+ if path is not None:
110
+ self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
111
+ break
112
+ else:
113
+ raise FontNotFound(f'No usable fonts named: "{self.font_name}"')
114
+ for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
115
+ for stylename in STYLES[style]:
116
+ path = self._get_nix_font_path(self.font_name, stylename)
117
+ if path is not None:
118
+ self.fonts[style] = ImageFont.truetype(path, self.font_size)
119
+ break
120
+ else:
121
+ if style == 'BOLDITALIC':
122
+ self.fonts[style] = self.fonts['BOLD']
123
+ else:
124
+ self.fonts[style] = self.fonts['NORMAL']
125
+
126
+ def _get_mac_font_path(self, font_map, name, style):
127
+ return font_map.get((name + ' ' + style).strip().lower())
128
+
129
+ def _create_mac(self):
130
+ font_map = {}
131
+ for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
132
+ '/Library/Fonts/', '/System/Library/Fonts/'):
133
+ font_map.update(
134
+ (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
135
+ for _, _, files in os.walk(font_dir)
136
+ for f in files
137
+ if f.lower().endswith(('ttf', 'ttc')))
138
+
139
+ for name in STYLES['NORMAL']:
140
+ path = self._get_mac_font_path(font_map, self.font_name, name)
141
+ if path is not None:
142
+ self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
143
+ break
144
+ else:
145
+ raise FontNotFound(f'No usable fonts named: "{self.font_name}"')
146
+ for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
147
+ for stylename in STYLES[style]:
148
+ path = self._get_mac_font_path(font_map, self.font_name, stylename)
149
+ if path is not None:
150
+ self.fonts[style] = ImageFont.truetype(path, self.font_size)
151
+ break
152
+ else:
153
+ if style == 'BOLDITALIC':
154
+ self.fonts[style] = self.fonts['BOLD']
155
+ else:
156
+ self.fonts[style] = self.fonts['NORMAL']
157
+
158
+ def _lookup_win(self, key, basename, styles, fail=False):
159
+ for suffix in ('', ' (TrueType)'):
160
+ for style in styles:
161
+ try:
162
+ valname = '{}{}{}'.format(basename, style and ' '+style, suffix)
163
+ val, _ = _winreg.QueryValueEx(key, valname)
164
+ return val
165
+ except OSError:
166
+ continue
167
+ else:
168
+ if fail:
169
+ raise FontNotFound(f'Font {basename} ({styles[0]}) not found in registry')
170
+ return None
171
+
172
+ def _create_win(self):
173
+ lookuperror = None
174
+ keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
175
+ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
176
+ (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
177
+ (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
178
+ for keyname in keynames:
179
+ try:
180
+ key = _winreg.OpenKey(*keyname)
181
+ try:
182
+ path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
183
+ self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
184
+ for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
185
+ path = self._lookup_win(key, self.font_name, STYLES[style])
186
+ if path:
187
+ self.fonts[style] = ImageFont.truetype(path, self.font_size)
188
+ else:
189
+ if style == 'BOLDITALIC':
190
+ self.fonts[style] = self.fonts['BOLD']
191
+ else:
192
+ self.fonts[style] = self.fonts['NORMAL']
193
+ return
194
+ except FontNotFound as err:
195
+ lookuperror = err
196
+ finally:
197
+ _winreg.CloseKey(key)
198
+ except OSError:
199
+ pass
200
+ else:
201
+ # If we get here, we checked all registry keys and had no luck
202
+ # We can be in one of two situations now:
203
+ # * All key lookups failed. In this case lookuperror is None and we
204
+ # will raise a generic error
205
+ # * At least one lookup failed with a FontNotFound error. In this
206
+ # case, we will raise that as a more specific error
207
+ if lookuperror:
208
+ raise lookuperror
209
+ raise FontNotFound('Can\'t open Windows font registry key')
210
+
211
+ def get_char_size(self):
212
+ """
213
+ Get the character size.
214
+ """
215
+ return self.get_text_size('M')
216
+
217
+ def get_text_size(self, text):
218
+ """
219
+ Get the text size (width, height).
220
+ """
221
+ font = self.fonts['NORMAL']
222
+ if hasattr(font, 'getbbox'): # Pillow >= 9.2.0
223
+ return font.getbbox(text)[2:4]
224
+ else:
225
+ return font.getsize(text)
226
+
227
+ def get_font(self, bold, oblique):
228
+ """
229
+ Get the font based on bold and italic flags.
230
+ """
231
+ if bold and oblique:
232
+ if self.variable:
233
+ return self.get_style('BOLDITALIC')
234
+
235
+ return self.fonts['BOLDITALIC']
236
+ elif bold:
237
+ if self.variable:
238
+ return self.get_style('BOLD')
239
+
240
+ return self.fonts['BOLD']
241
+ elif oblique:
242
+ if self.variable:
243
+ return self.get_style('ITALIC')
244
+
245
+ return self.fonts['ITALIC']
246
+ else:
247
+ if self.variable:
248
+ return self.get_style('NORMAL')
249
+
250
+ return self.fonts['NORMAL']
251
+
252
+ def get_style(self, style):
253
+ """
254
+ Get the specified style of the font if it is a variable font.
255
+ If not found, return the normal font.
256
+ """
257
+ font = self.fonts[style]
258
+ for style_name in STYLES[style]:
259
+ try:
260
+ font.set_variation_by_name(style_name)
261
+ return font
262
+ except ValueError:
263
+ pass
264
+ except OSError:
265
+ return font
266
+
267
+ return font
268
+
269
+
270
+ class ImageFormatter(Formatter):
271
+ """
272
+ Create a PNG image from source code. This uses the Python Imaging Library to
273
+ generate a pixmap from the source code.
274
+
275
+ .. versionadded:: 0.10
276
+
277
+ Additional options accepted:
278
+
279
+ `image_format`
280
+ An image format to output to that is recognised by PIL, these include:
281
+
282
+ * "PNG" (default)
283
+ * "JPEG"
284
+ * "BMP"
285
+ * "GIF"
286
+
287
+ `line_pad`
288
+ The extra spacing (in pixels) between each line of text.
289
+
290
+ Default: 2
291
+
292
+ `font_name`
293
+ The font name to be used as the base font from which others, such as
294
+ bold and italic fonts will be generated. This really should be a
295
+ monospace font to look sane.
296
+ If a filename or a file-like object is specified, the user must
297
+ provide different styles of the font.
298
+
299
+ Default: "Courier New" on Windows, "Menlo" on Mac OS, and
300
+ "DejaVu Sans Mono" on \\*nix
301
+
302
+ `font_size`
303
+ The font size in points to be used.
304
+
305
+ Default: 14
306
+
307
+ `image_pad`
308
+ The padding, in pixels to be used at each edge of the resulting image.
309
+
310
+ Default: 10
311
+
312
+ `line_numbers`
313
+ Whether line numbers should be shown: True/False
314
+
315
+ Default: True
316
+
317
+ `line_number_start`
318
+ The line number of the first line.
319
+
320
+ Default: 1
321
+
322
+ `line_number_step`
323
+ The step used when printing line numbers.
324
+
325
+ Default: 1
326
+
327
+ `line_number_bg`
328
+ The background colour (in "#123456" format) of the line number bar, or
329
+ None to use the style background color.
330
+
331
+ Default: "#eed"
332
+
333
+ `line_number_fg`
334
+ The text color of the line numbers (in "#123456"-like format).
335
+
336
+ Default: "#886"
337
+
338
+ `line_number_chars`
339
+ The number of columns of line numbers allowable in the line number
340
+ margin.
341
+
342
+ Default: 2
343
+
344
+ `line_number_bold`
345
+ Whether line numbers will be bold: True/False
346
+
347
+ Default: False
348
+
349
+ `line_number_italic`
350
+ Whether line numbers will be italicized: True/False
351
+
352
+ Default: False
353
+
354
+ `line_number_separator`
355
+ Whether a line will be drawn between the line number area and the
356
+ source code area: True/False
357
+
358
+ Default: True
359
+
360
+ `line_number_pad`
361
+ The horizontal padding (in pixels) between the line number margin, and
362
+ the source code area.
363
+
364
+ Default: 6
365
+
366
+ `hl_lines`
367
+ Specify a list of lines to be highlighted.
368
+
369
+ .. versionadded:: 1.2
370
+
371
+ Default: empty list
372
+
373
+ `hl_color`
374
+ Specify the color for highlighting lines.
375
+
376
+ .. versionadded:: 1.2
377
+
378
+ Default: highlight color of the selected style
379
+ """
380
+
381
+ # Required by the pygments mapper
382
+ name = 'img'
383
+ aliases = ['img', 'IMG', 'png']
384
+ filenames = ['*.png']
385
+
386
+ unicodeoutput = False
387
+
388
+ default_image_format = 'png'
389
+
390
+ def __init__(self, **options):
391
+ """
392
+ See the class docstring for explanation of options.
393
+ """
394
+ if not pil_available:
395
+ raise PilNotAvailable(
396
+ 'Python Imaging Library is required for this formatter')
397
+ Formatter.__init__(self, **options)
398
+ self.encoding = 'latin1' # let pygments.format() do the right thing
399
+ # Read the style
400
+ self.styles = dict(self.style)
401
+ if self.style.background_color is None:
402
+ self.background_color = '#fff'
403
+ else:
404
+ self.background_color = self.style.background_color
405
+ # Image options
406
+ self.image_format = get_choice_opt(
407
+ options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
408
+ self.default_image_format, normcase=True)
409
+ self.image_pad = get_int_opt(options, 'image_pad', 10)
410
+ self.line_pad = get_int_opt(options, 'line_pad', 2)
411
+ # The fonts
412
+ fontsize = get_int_opt(options, 'font_size', 14)
413
+ self.fonts = FontManager(options.get('font_name', ''), fontsize)
414
+ self.fontw, self.fonth = self.fonts.get_char_size()
415
+ # Line number options
416
+ self.line_number_fg = options.get('line_number_fg', '#886')
417
+ self.line_number_bg = options.get('line_number_bg', '#eed')
418
+ self.line_number_chars = get_int_opt(options,
419
+ 'line_number_chars', 2)
420
+ self.line_number_bold = get_bool_opt(options,
421
+ 'line_number_bold', False)
422
+ self.line_number_italic = get_bool_opt(options,
423
+ 'line_number_italic', False)
424
+ self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
425
+ self.line_numbers = get_bool_opt(options, 'line_numbers', True)
426
+ self.line_number_separator = get_bool_opt(options,
427
+ 'line_number_separator', True)
428
+ self.line_number_step = get_int_opt(options, 'line_number_step', 1)
429
+ self.line_number_start = get_int_opt(options, 'line_number_start', 1)
430
+ if self.line_numbers:
431
+ self.line_number_width = (self.fontw * self.line_number_chars +
432
+ self.line_number_pad * 2)
433
+ else:
434
+ self.line_number_width = 0
435
+ self.hl_lines = []
436
+ hl_lines_str = get_list_opt(options, 'hl_lines', [])
437
+ for line in hl_lines_str:
438
+ try:
439
+ self.hl_lines.append(int(line))
440
+ except ValueError:
441
+ pass
442
+ self.hl_color = options.get('hl_color',
443
+ self.style.highlight_color) or '#f90'
444
+ self.drawables = []
445
+
446
+ def get_style_defs(self, arg=''):
447
+ raise NotImplementedError('The -S option is meaningless for the image '
448
+ 'formatter. Use -O style=<stylename> instead.')
449
+
450
+ def _get_line_height(self):
451
+ """
452
+ Get the height of a line.
453
+ """
454
+ return self.fonth + self.line_pad
455
+
456
+ def _get_line_y(self, lineno):
457
+ """
458
+ Get the Y coordinate of a line number.
459
+ """
460
+ return lineno * self._get_line_height() + self.image_pad
461
+
462
+ def _get_char_width(self):
463
+ """
464
+ Get the width of a character.
465
+ """
466
+ return self.fontw
467
+
468
+ def _get_char_x(self, linelength):
469
+ """
470
+ Get the X coordinate of a character position.
471
+ """
472
+ return linelength + self.image_pad + self.line_number_width
473
+
474
+ def _get_text_pos(self, linelength, lineno):
475
+ """
476
+ Get the actual position for a character and line position.
477
+ """
478
+ return self._get_char_x(linelength), self._get_line_y(lineno)
479
+
480
+ def _get_linenumber_pos(self, lineno):
481
+ """
482
+ Get the actual position for the start of a line number.
483
+ """
484
+ return (self.image_pad, self._get_line_y(lineno))
485
+
486
+ def _get_text_color(self, style):
487
+ """
488
+ Get the correct color for the token from the style.
489
+ """
490
+ if style['color'] is not None:
491
+ fill = '#' + style['color']
492
+ else:
493
+ fill = '#000'
494
+ return fill
495
+
496
+ def _get_text_bg_color(self, style):
497
+ """
498
+ Get the correct background color for the token from the style.
499
+ """
500
+ if style['bgcolor'] is not None:
501
+ bg_color = '#' + style['bgcolor']
502
+ else:
503
+ bg_color = None
504
+ return bg_color
505
+
506
+ def _get_style_font(self, style):
507
+ """
508
+ Get the correct font for the style.
509
+ """
510
+ return self.fonts.get_font(style['bold'], style['italic'])
511
+
512
+ def _get_image_size(self, maxlinelength, maxlineno):
513
+ """
514
+ Get the required image size.
515
+ """
516
+ return (self._get_char_x(maxlinelength) + self.image_pad,
517
+ self._get_line_y(maxlineno + 0) + self.image_pad)
518
+
519
+ def _draw_linenumber(self, posno, lineno):
520
+ """
521
+ Remember a line number drawable to paint later.
522
+ """
523
+ self._draw_text(
524
+ self._get_linenumber_pos(posno),
525
+ str(lineno).rjust(self.line_number_chars),
526
+ font=self.fonts.get_font(self.line_number_bold,
527
+ self.line_number_italic),
528
+ text_fg=self.line_number_fg,
529
+ text_bg=None,
530
+ )
531
+
532
+ def _draw_text(self, pos, text, font, text_fg, text_bg):
533
+ """
534
+ Remember a single drawable tuple to paint later.
535
+ """
536
+ self.drawables.append((pos, text, font, text_fg, text_bg))
537
+
538
+ def _create_drawables(self, tokensource):
539
+ """
540
+ Create drawables for the token content.
541
+ """
542
+ lineno = charno = maxcharno = 0
543
+ maxlinelength = linelength = 0
544
+ for ttype, value in tokensource:
545
+ while ttype not in self.styles:
546
+ ttype = ttype.parent
547
+ style = self.styles[ttype]
548
+ # TODO: make sure tab expansion happens earlier in the chain. It
549
+ # really ought to be done on the input, as to do it right here is
550
+ # quite complex.
551
+ value = value.expandtabs(4)
552
+ lines = value.splitlines(True)
553
+ # print lines
554
+ for i, line in enumerate(lines):
555
+ temp = line.rstrip('\n')
556
+ if temp:
557
+ self._draw_text(
558
+ self._get_text_pos(linelength, lineno),
559
+ temp,
560
+ font = self._get_style_font(style),
561
+ text_fg = self._get_text_color(style),
562
+ text_bg = self._get_text_bg_color(style),
563
+ )
564
+ temp_width, _ = self.fonts.get_text_size(temp)
565
+ linelength += temp_width
566
+ maxlinelength = max(maxlinelength, linelength)
567
+ charno += len(temp)
568
+ maxcharno = max(maxcharno, charno)
569
+ if line.endswith('\n'):
570
+ # add a line for each extra line in the value
571
+ linelength = 0
572
+ charno = 0
573
+ lineno += 1
574
+ self.maxlinelength = maxlinelength
575
+ self.maxcharno = maxcharno
576
+ self.maxlineno = lineno
577
+
578
+ def _draw_line_numbers(self):
579
+ """
580
+ Create drawables for the line numbers.
581
+ """
582
+ if not self.line_numbers:
583
+ return
584
+ for p in range(self.maxlineno):
585
+ n = p + self.line_number_start
586
+ if (n % self.line_number_step) == 0:
587
+ self._draw_linenumber(p, n)
588
+
589
+ def _paint_line_number_bg(self, im):
590
+ """
591
+ Paint the line number background on the image.
592
+ """
593
+ if not self.line_numbers:
594
+ return
595
+ if self.line_number_fg is None:
596
+ return
597
+ draw = ImageDraw.Draw(im)
598
+ recth = im.size[-1]
599
+ rectw = self.image_pad + self.line_number_width - self.line_number_pad
600
+ draw.rectangle([(0, 0), (rectw, recth)],
601
+ fill=self.line_number_bg)
602
+ if self.line_number_separator:
603
+ draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
604
+ del draw
605
+
606
+ def format(self, tokensource, outfile):
607
+ """
608
+ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
609
+ tuples and write it into ``outfile``.
610
+
611
+ This implementation calculates where it should draw each token on the
612
+ pixmap, then calculates the required pixmap size and draws the items.
613
+ """
614
+ self._create_drawables(tokensource)
615
+ self._draw_line_numbers()
616
+ im = Image.new(
617
+ 'RGB',
618
+ self._get_image_size(self.maxlinelength, self.maxlineno),
619
+ self.background_color
620
+ )
621
+ self._paint_line_number_bg(im)
622
+ draw = ImageDraw.Draw(im)
623
+ # Highlight
624
+ if self.hl_lines:
625
+ x = self.image_pad + self.line_number_width - self.line_number_pad + 1
626
+ recth = self._get_line_height()
627
+ rectw = im.size[0] - x
628
+ for linenumber in self.hl_lines:
629
+ y = self._get_line_y(linenumber - 1)
630
+ draw.rectangle([(x, y), (x + rectw, y + recth)],
631
+ fill=self.hl_color)
632
+ for pos, value, font, text_fg, text_bg in self.drawables:
633
+ if text_bg:
634
+ # see deprecations https://pillow.readthedocs.io/en/stable/releasenotes/9.2.0.html#font-size-and-offset-methods
635
+ if hasattr(draw, 'textsize'):
636
+ text_size = draw.textsize(text=value, font=font)
637
+ else:
638
+ text_size = font.getbbox(value)[2:]
639
+ draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
640
+ draw.text(pos, value, font=font, fill=text_fg)
641
+ im.save(outfile, self.image_format.upper())
642
+
643
+
644
+ # Add one formatter per format, so that the "-f gif" option gives the correct result
645
+ # when used in pygmentize.
646
+
647
+ class GifImageFormatter(ImageFormatter):
648
+ """
649
+ Create a GIF image from source code. This uses the Python Imaging Library to
650
+ generate a pixmap from the source code.
651
+
652
+ .. versionadded:: 1.0
653
+ """
654
+
655
+ name = 'img_gif'
656
+ aliases = ['gif']
657
+ filenames = ['*.gif']
658
+ default_image_format = 'gif'
659
+
660
+
661
+ class JpgImageFormatter(ImageFormatter):
662
+ """
663
+ Create a JPEG image from source code. This uses the Python Imaging Library to
664
+ generate a pixmap from the source code.
665
+
666
+ .. versionadded:: 1.0
667
+ """
668
+
669
+ name = 'img_jpg'
670
+ aliases = ['jpg', 'jpeg']
671
+ filenames = ['*.jpg']
672
+ default_image_format = 'jpeg'
673
+
674
+
675
+ class BmpImageFormatter(ImageFormatter):
676
+ """
677
+ Create a bitmap image from source code. This uses the Python Imaging Library to
678
+ generate a pixmap from the source code.
679
+
680
+ .. versionadded:: 1.0
681
+ """
682
+
683
+ name = 'img_bmp'
684
+ aliases = ['bmp', 'bitmap']
685
+ filenames = ['*.bmp']
686
+ default_image_format = 'bmp'
mgm/lib/python3.10/site-packages/pygments/formatters/irc.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.irc
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for IRC output
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.formatter import Formatter
12
+ from pygments.token import Keyword, Name, Comment, String, Error, \
13
+ Number, Operator, Generic, Token, Whitespace
14
+ from pygments.util import get_choice_opt
15
+
16
+
17
+ __all__ = ['IRCFormatter']
18
+
19
+
20
+ #: Map token types to a tuple of color values for light and dark
21
+ #: backgrounds.
22
+ IRC_COLORS = {
23
+ Token: ('', ''),
24
+
25
+ Whitespace: ('gray', 'brightblack'),
26
+ Comment: ('gray', 'brightblack'),
27
+ Comment.Preproc: ('cyan', 'brightcyan'),
28
+ Keyword: ('blue', 'brightblue'),
29
+ Keyword.Type: ('cyan', 'brightcyan'),
30
+ Operator.Word: ('magenta', 'brightcyan'),
31
+ Name.Builtin: ('cyan', 'brightcyan'),
32
+ Name.Function: ('green', 'brightgreen'),
33
+ Name.Namespace: ('_cyan_', '_brightcyan_'),
34
+ Name.Class: ('_green_', '_brightgreen_'),
35
+ Name.Exception: ('cyan', 'brightcyan'),
36
+ Name.Decorator: ('brightblack', 'gray'),
37
+ Name.Variable: ('red', 'brightred'),
38
+ Name.Constant: ('red', 'brightred'),
39
+ Name.Attribute: ('cyan', 'brightcyan'),
40
+ Name.Tag: ('brightblue', 'brightblue'),
41
+ String: ('yellow', 'yellow'),
42
+ Number: ('blue', 'brightblue'),
43
+
44
+ Generic.Deleted: ('brightred', 'brightred'),
45
+ Generic.Inserted: ('green', 'brightgreen'),
46
+ Generic.Heading: ('**', '**'),
47
+ Generic.Subheading: ('*magenta*', '*brightmagenta*'),
48
+ Generic.Error: ('brightred', 'brightred'),
49
+
50
+ Error: ('_brightred_', '_brightred_'),
51
+ }
52
+
53
+
54
+ IRC_COLOR_MAP = {
55
+ 'white': 0,
56
+ 'black': 1,
57
+ 'blue': 2,
58
+ 'brightgreen': 3,
59
+ 'brightred': 4,
60
+ 'yellow': 5,
61
+ 'magenta': 6,
62
+ 'orange': 7,
63
+ 'green': 7, #compat w/ ansi
64
+ 'brightyellow': 8,
65
+ 'lightgreen': 9,
66
+ 'brightcyan': 9, # compat w/ ansi
67
+ 'cyan': 10,
68
+ 'lightblue': 11,
69
+ 'red': 11, # compat w/ ansi
70
+ 'brightblue': 12,
71
+ 'brightmagenta': 13,
72
+ 'brightblack': 14,
73
+ 'gray': 15,
74
+ }
75
+
76
+ def ircformat(color, text):
77
+ if len(color) < 1:
78
+ return text
79
+ add = sub = ''
80
+ if '_' in color: # italic
81
+ add += '\x1D'
82
+ sub = '\x1D' + sub
83
+ color = color.strip('_')
84
+ if '*' in color: # bold
85
+ add += '\x02'
86
+ sub = '\x02' + sub
87
+ color = color.strip('*')
88
+ # underline (\x1F) not supported
89
+ # backgrounds (\x03FF,BB) not supported
90
+ if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
91
+ add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
92
+ sub = '\x03' + sub
93
+ return add + text + sub
94
+ return '<'+add+'>'+text+'</'+sub+'>'
95
+
96
+
97
+ class IRCFormatter(Formatter):
98
+ r"""
99
+ Format tokens with IRC color sequences
100
+
101
+ The `get_style_defs()` method doesn't do anything special since there is
102
+ no support for common styles.
103
+
104
+ Options accepted:
105
+
106
+ `bg`
107
+ Set to ``"light"`` or ``"dark"`` depending on the terminal's background
108
+ (default: ``"light"``).
109
+
110
+ `colorscheme`
111
+ A dictionary mapping token types to (lightbg, darkbg) color names or
112
+ ``None`` (default: ``None`` = use builtin colorscheme).
113
+
114
+ `linenos`
115
+ Set to ``True`` to have line numbers in the output as well
116
+ (default: ``False`` = no line numbers).
117
+ """
118
+ name = 'IRC'
119
+ aliases = ['irc', 'IRC']
120
+ filenames = []
121
+
122
+ def __init__(self, **options):
123
+ Formatter.__init__(self, **options)
124
+ self.darkbg = get_choice_opt(options, 'bg',
125
+ ['light', 'dark'], 'light') == 'dark'
126
+ self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
127
+ self.linenos = options.get('linenos', False)
128
+ self._lineno = 0
129
+
130
+ def _write_lineno(self, outfile):
131
+ if self.linenos:
132
+ self._lineno += 1
133
+ outfile.write("%04d: " % self._lineno)
134
+
135
+ def format_unencoded(self, tokensource, outfile):
136
+ self._write_lineno(outfile)
137
+
138
+ for ttype, value in tokensource:
139
+ color = self.colorscheme.get(ttype)
140
+ while color is None:
141
+ ttype = ttype[:-1]
142
+ color = self.colorscheme.get(ttype)
143
+ if color:
144
+ color = color[self.darkbg]
145
+ spl = value.split('\n')
146
+ for line in spl[:-1]:
147
+ if line:
148
+ outfile.write(ircformat(color, line))
149
+ outfile.write('\n')
150
+ self._write_lineno(outfile)
151
+ if spl[-1]:
152
+ outfile.write(ircformat(color, spl[-1]))
153
+ else:
154
+ outfile.write(value)
mgm/lib/python3.10/site-packages/pygments/formatters/latex.py ADDED
@@ -0,0 +1,518 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.latex
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for LaTeX fancyvrb output.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from io import StringIO
12
+
13
+ from pygments.formatter import Formatter
14
+ from pygments.lexer import Lexer, do_insertions
15
+ from pygments.token import Token, STANDARD_TYPES
16
+ from pygments.util import get_bool_opt, get_int_opt
17
+
18
+
19
+ __all__ = ['LatexFormatter']
20
+
21
+
22
+ def escape_tex(text, commandprefix):
23
+ return text.replace('\\', '\x00'). \
24
+ replace('{', '\x01'). \
25
+ replace('}', '\x02'). \
26
+ replace('\x00', rf'\{commandprefix}Zbs{{}}'). \
27
+ replace('\x01', rf'\{commandprefix}Zob{{}}'). \
28
+ replace('\x02', rf'\{commandprefix}Zcb{{}}'). \
29
+ replace('^', rf'\{commandprefix}Zca{{}}'). \
30
+ replace('_', rf'\{commandprefix}Zus{{}}'). \
31
+ replace('&', rf'\{commandprefix}Zam{{}}'). \
32
+ replace('<', rf'\{commandprefix}Zlt{{}}'). \
33
+ replace('>', rf'\{commandprefix}Zgt{{}}'). \
34
+ replace('#', rf'\{commandprefix}Zsh{{}}'). \
35
+ replace('%', rf'\{commandprefix}Zpc{{}}'). \
36
+ replace('$', rf'\{commandprefix}Zdl{{}}'). \
37
+ replace('-', rf'\{commandprefix}Zhy{{}}'). \
38
+ replace("'", rf'\{commandprefix}Zsq{{}}'). \
39
+ replace('"', rf'\{commandprefix}Zdq{{}}'). \
40
+ replace('~', rf'\{commandprefix}Zti{{}}')
41
+
42
+
43
+ DOC_TEMPLATE = r'''
44
+ \documentclass{%(docclass)s}
45
+ \usepackage{fancyvrb}
46
+ \usepackage{color}
47
+ \usepackage[%(encoding)s]{inputenc}
48
+ %(preamble)s
49
+
50
+ %(styledefs)s
51
+
52
+ \begin{document}
53
+
54
+ \section*{%(title)s}
55
+
56
+ %(code)s
57
+ \end{document}
58
+ '''
59
+
60
+ ## Small explanation of the mess below :)
61
+ #
62
+ # The previous version of the LaTeX formatter just assigned a command to
63
+ # each token type defined in the current style. That obviously is
64
+ # problematic if the highlighted code is produced for a different style
65
+ # than the style commands themselves.
66
+ #
67
+ # This version works much like the HTML formatter which assigns multiple
68
+ # CSS classes to each <span> tag, from the most specific to the least
69
+ # specific token type, thus falling back to the parent token type if one
70
+ # is not defined. Here, the classes are there too and use the same short
71
+ # forms given in token.STANDARD_TYPES.
72
+ #
73
+ # Highlighted code now only uses one custom command, which by default is
74
+ # \PY and selectable by the commandprefix option (and in addition the
75
+ # escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
76
+ # backwards compatibility purposes).
77
+ #
78
+ # \PY has two arguments: the classes, separated by +, and the text to
79
+ # render in that style. The classes are resolved into the respective
80
+ # style commands by magic, which serves to ignore unknown classes.
81
+ #
82
+ # The magic macros are:
83
+ # * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
84
+ # to render in \PY@do. Their definition determines the style.
85
+ # * \PY@reset resets \PY@it etc. to do nothing.
86
+ # * \PY@toks parses the list of classes, using magic inspired by the
87
+ # keyval package (but modified to use plusses instead of commas
88
+ # because fancyvrb redefines commas inside its environments).
89
+ # * \PY@tok processes one class, calling the \PY@tok@classname command
90
+ # if it exists.
91
+ # * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
92
+ # for its class.
93
+ # * \PY resets the style, parses the classnames and then calls \PY@do.
94
+ #
95
+ # Tip: to read this code, print it out in substituted form using e.g.
96
+ # >>> print STYLE_TEMPLATE % {'cp': 'PY'}
97
+
98
+ STYLE_TEMPLATE = r'''
99
+ \makeatletter
100
+ \def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
101
+ \let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
102
+ \let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
103
+ \def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
104
+ \def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
105
+ \%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
106
+ \def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
107
+ \%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
108
+ \def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
109
+
110
+ %(styles)s
111
+
112
+ \def\%(cp)sZbs{\char`\\}
113
+ \def\%(cp)sZus{\char`\_}
114
+ \def\%(cp)sZob{\char`\{}
115
+ \def\%(cp)sZcb{\char`\}}
116
+ \def\%(cp)sZca{\char`\^}
117
+ \def\%(cp)sZam{\char`\&}
118
+ \def\%(cp)sZlt{\char`\<}
119
+ \def\%(cp)sZgt{\char`\>}
120
+ \def\%(cp)sZsh{\char`\#}
121
+ \def\%(cp)sZpc{\char`\%%}
122
+ \def\%(cp)sZdl{\char`\$}
123
+ \def\%(cp)sZhy{\char`\-}
124
+ \def\%(cp)sZsq{\char`\'}
125
+ \def\%(cp)sZdq{\char`\"}
126
+ \def\%(cp)sZti{\char`\~}
127
+ %% for compatibility with earlier versions
128
+ \def\%(cp)sZat{@}
129
+ \def\%(cp)sZlb{[}
130
+ \def\%(cp)sZrb{]}
131
+ \makeatother
132
+ '''
133
+
134
+
135
+ def _get_ttype_name(ttype):
136
+ fname = STANDARD_TYPES.get(ttype)
137
+ if fname:
138
+ return fname
139
+ aname = ''
140
+ while fname is None:
141
+ aname = ttype[-1] + aname
142
+ ttype = ttype.parent
143
+ fname = STANDARD_TYPES.get(ttype)
144
+ return fname + aname
145
+
146
+
147
+ class LatexFormatter(Formatter):
148
+ r"""
149
+ Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
150
+ standard packages.
151
+
152
+ Without the `full` option, code is formatted as one ``Verbatim``
153
+ environment, like this:
154
+
155
+ .. sourcecode:: latex
156
+
157
+ \begin{Verbatim}[commandchars=\\\{\}]
158
+ \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
159
+ \PY{k}{pass}
160
+ \end{Verbatim}
161
+
162
+ Wrapping can be disabled using the `nowrap` option.
163
+
164
+ The special command used here (``\PY``) and all the other macros it needs
165
+ are output by the `get_style_defs` method.
166
+
167
+ With the `full` option, a complete LaTeX document is output, including
168
+ the command definitions in the preamble.
169
+
170
+ The `get_style_defs()` method of a `LatexFormatter` returns a string
171
+ containing ``\def`` commands defining the macros needed inside the
172
+ ``Verbatim`` environments.
173
+
174
+ Additional options accepted:
175
+
176
+ `nowrap`
177
+ If set to ``True``, don't wrap the tokens at all, not even inside a
178
+ ``\begin{Verbatim}`` environment. This disables most other options
179
+ (default: ``False``).
180
+
181
+ `style`
182
+ The style to use, can be a string or a Style subclass (default:
183
+ ``'default'``).
184
+
185
+ `full`
186
+ Tells the formatter to output a "full" document, i.e. a complete
187
+ self-contained document (default: ``False``).
188
+
189
+ `title`
190
+ If `full` is true, the title that should be used to caption the
191
+ document (default: ``''``).
192
+
193
+ `docclass`
194
+ If the `full` option is enabled, this is the document class to use
195
+ (default: ``'article'``).
196
+
197
+ `preamble`
198
+ If the `full` option is enabled, this can be further preamble commands,
199
+ e.g. ``\usepackage`` (default: ``''``).
200
+
201
+ `linenos`
202
+ If set to ``True``, output line numbers (default: ``False``).
203
+
204
+ `linenostart`
205
+ The line number for the first line (default: ``1``).
206
+
207
+ `linenostep`
208
+ If set to a number n > 1, only every nth line number is printed.
209
+
210
+ `verboptions`
211
+ Additional options given to the Verbatim environment (see the *fancyvrb*
212
+ docs for possible values) (default: ``''``).
213
+
214
+ `commandprefix`
215
+ The LaTeX commands used to produce colored output are constructed
216
+ using this prefix and some letters (default: ``'PY'``).
217
+
218
+ .. versionadded:: 0.7
219
+ .. versionchanged:: 0.10
220
+ The default is now ``'PY'`` instead of ``'C'``.
221
+
222
+ `texcomments`
223
+ If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
224
+ in comment tokens is not escaped so that LaTeX can render it (default:
225
+ ``False``).
226
+
227
+ .. versionadded:: 1.2
228
+
229
+ `mathescape`
230
+ If set to ``True``, enables LaTeX math mode escape in comments. That
231
+ is, ``'$...$'`` inside a comment will trigger math mode (default:
232
+ ``False``).
233
+
234
+ .. versionadded:: 1.2
235
+
236
+ `escapeinside`
237
+ If set to a string of length 2, enables escaping to LaTeX. Text
238
+ delimited by these 2 characters is read as LaTeX code and
239
+ typeset accordingly. It has no effect in string literals. It has
240
+ no effect in comments if `texcomments` or `mathescape` is
241
+ set. (default: ``''``).
242
+
243
+ .. versionadded:: 2.0
244
+
245
+ `envname`
246
+ Allows you to pick an alternative environment name replacing Verbatim.
247
+ The alternate environment still has to support Verbatim's option syntax.
248
+ (default: ``'Verbatim'``).
249
+
250
+ .. versionadded:: 2.0
251
+ """
252
+ name = 'LaTeX'
253
+ aliases = ['latex', 'tex']
254
+ filenames = ['*.tex']
255
+
256
+ def __init__(self, **options):
257
+ Formatter.__init__(self, **options)
258
+ self.nowrap = get_bool_opt(options, 'nowrap', False)
259
+ self.docclass = options.get('docclass', 'article')
260
+ self.preamble = options.get('preamble', '')
261
+ self.linenos = get_bool_opt(options, 'linenos', False)
262
+ self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
263
+ self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
264
+ self.verboptions = options.get('verboptions', '')
265
+ self.nobackground = get_bool_opt(options, 'nobackground', False)
266
+ self.commandprefix = options.get('commandprefix', 'PY')
267
+ self.texcomments = get_bool_opt(options, 'texcomments', False)
268
+ self.mathescape = get_bool_opt(options, 'mathescape', False)
269
+ self.escapeinside = options.get('escapeinside', '')
270
+ if len(self.escapeinside) == 2:
271
+ self.left = self.escapeinside[0]
272
+ self.right = self.escapeinside[1]
273
+ else:
274
+ self.escapeinside = ''
275
+ self.envname = options.get('envname', 'Verbatim')
276
+
277
+ self._create_stylesheet()
278
+
279
+ def _create_stylesheet(self):
280
+ t2n = self.ttype2name = {Token: ''}
281
+ c2d = self.cmd2def = {}
282
+ cp = self.commandprefix
283
+
284
+ def rgbcolor(col):
285
+ if col:
286
+ return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
287
+ for i in (0, 2, 4)])
288
+ else:
289
+ return '1,1,1'
290
+
291
+ for ttype, ndef in self.style:
292
+ name = _get_ttype_name(ttype)
293
+ cmndef = ''
294
+ if ndef['bold']:
295
+ cmndef += r'\let\$$@bf=\textbf'
296
+ if ndef['italic']:
297
+ cmndef += r'\let\$$@it=\textit'
298
+ if ndef['underline']:
299
+ cmndef += r'\let\$$@ul=\underline'
300
+ if ndef['roman']:
301
+ cmndef += r'\let\$$@ff=\textrm'
302
+ if ndef['sans']:
303
+ cmndef += r'\let\$$@ff=\textsf'
304
+ if ndef['mono']:
305
+ cmndef += r'\let\$$@ff=\textsf'
306
+ if ndef['color']:
307
+ cmndef += (r'\def\$$@tc##1{{\textcolor[rgb]{{{}}}{{##1}}}}'.format(rgbcolor(ndef['color'])))
308
+ if ndef['border']:
309
+ cmndef += (r'\def\$$@bc##1{{{{\setlength{{\fboxsep}}{{\string -\fboxrule}}'
310
+ r'\fcolorbox[rgb]{{{}}}{{{}}}{{\strut ##1}}}}}}'.format(rgbcolor(ndef['border']),
311
+ rgbcolor(ndef['bgcolor'])))
312
+ elif ndef['bgcolor']:
313
+ cmndef += (r'\def\$$@bc##1{{{{\setlength{{\fboxsep}}{{0pt}}'
314
+ r'\colorbox[rgb]{{{}}}{{\strut ##1}}}}}}'.format(rgbcolor(ndef['bgcolor'])))
315
+ if cmndef == '':
316
+ continue
317
+ cmndef = cmndef.replace('$$', cp)
318
+ t2n[ttype] = name
319
+ c2d[name] = cmndef
320
+
321
+ def get_style_defs(self, arg=''):
322
+ """
323
+ Return the command sequences needed to define the commands
324
+ used to format text in the verbatim environment. ``arg`` is ignored.
325
+ """
326
+ cp = self.commandprefix
327
+ styles = []
328
+ for name, definition in self.cmd2def.items():
329
+ styles.append(rf'\@namedef{{{cp}@tok@{name}}}{{{definition}}}')
330
+ return STYLE_TEMPLATE % {'cp': self.commandprefix,
331
+ 'styles': '\n'.join(styles)}
332
+
333
+ def format_unencoded(self, tokensource, outfile):
334
+ # TODO: add support for background colors
335
+ t2n = self.ttype2name
336
+ cp = self.commandprefix
337
+
338
+ if self.full:
339
+ realoutfile = outfile
340
+ outfile = StringIO()
341
+
342
+ if not self.nowrap:
343
+ outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
344
+ if self.linenos:
345
+ start, step = self.linenostart, self.linenostep
346
+ outfile.write(',numbers=left' +
347
+ (start and ',firstnumber=%d' % start or '') +
348
+ (step and ',stepnumber=%d' % step or ''))
349
+ if self.mathescape or self.texcomments or self.escapeinside:
350
+ outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
351
+ '\\catcode`\\_=8\\relax}')
352
+ if self.verboptions:
353
+ outfile.write(',' + self.verboptions)
354
+ outfile.write(']\n')
355
+
356
+ for ttype, value in tokensource:
357
+ if ttype in Token.Comment:
358
+ if self.texcomments:
359
+ # Try to guess comment starting lexeme and escape it ...
360
+ start = value[0:1]
361
+ for i in range(1, len(value)):
362
+ if start[0] != value[i]:
363
+ break
364
+ start += value[i]
365
+
366
+ value = value[len(start):]
367
+ start = escape_tex(start, cp)
368
+
369
+ # ... but do not escape inside comment.
370
+ value = start + value
371
+ elif self.mathescape:
372
+ # Only escape parts not inside a math environment.
373
+ parts = value.split('$')
374
+ in_math = False
375
+ for i, part in enumerate(parts):
376
+ if not in_math:
377
+ parts[i] = escape_tex(part, cp)
378
+ in_math = not in_math
379
+ value = '$'.join(parts)
380
+ elif self.escapeinside:
381
+ text = value
382
+ value = ''
383
+ while text:
384
+ a, sep1, text = text.partition(self.left)
385
+ if sep1:
386
+ b, sep2, text = text.partition(self.right)
387
+ if sep2:
388
+ value += escape_tex(a, cp) + b
389
+ else:
390
+ value += escape_tex(a + sep1 + b, cp)
391
+ else:
392
+ value += escape_tex(a, cp)
393
+ else:
394
+ value = escape_tex(value, cp)
395
+ elif ttype not in Token.Escape:
396
+ value = escape_tex(value, cp)
397
+ styles = []
398
+ while ttype is not Token:
399
+ try:
400
+ styles.append(t2n[ttype])
401
+ except KeyError:
402
+ # not in current style
403
+ styles.append(_get_ttype_name(ttype))
404
+ ttype = ttype.parent
405
+ styleval = '+'.join(reversed(styles))
406
+ if styleval:
407
+ spl = value.split('\n')
408
+ for line in spl[:-1]:
409
+ if line:
410
+ outfile.write(f"\\{cp}{{{styleval}}}{{{line}}}")
411
+ outfile.write('\n')
412
+ if spl[-1]:
413
+ outfile.write(f"\\{cp}{{{styleval}}}{{{spl[-1]}}}")
414
+ else:
415
+ outfile.write(value)
416
+
417
+ if not self.nowrap:
418
+ outfile.write('\\end{' + self.envname + '}\n')
419
+
420
+ if self.full:
421
+ encoding = self.encoding or 'utf8'
422
+ # map known existings encodings from LaTeX distribution
423
+ encoding = {
424
+ 'utf_8': 'utf8',
425
+ 'latin_1': 'latin1',
426
+ 'iso_8859_1': 'latin1',
427
+ }.get(encoding.replace('-', '_'), encoding)
428
+ realoutfile.write(DOC_TEMPLATE %
429
+ dict(docclass = self.docclass,
430
+ preamble = self.preamble,
431
+ title = self.title,
432
+ encoding = encoding,
433
+ styledefs = self.get_style_defs(),
434
+ code = outfile.getvalue()))
435
+
436
+
437
+ class LatexEmbeddedLexer(Lexer):
438
+ """
439
+ This lexer takes one lexer as argument, the lexer for the language
440
+ being formatted, and the left and right delimiters for escaped text.
441
+
442
+ First everything is scanned using the language lexer to obtain
443
+ strings and comments. All other consecutive tokens are merged and
444
+ the resulting text is scanned for escaped segments, which are given
445
+ the Token.Escape type. Finally text that is not escaped is scanned
446
+ again with the language lexer.
447
+ """
448
+ def __init__(self, left, right, lang, **options):
449
+ self.left = left
450
+ self.right = right
451
+ self.lang = lang
452
+ Lexer.__init__(self, **options)
453
+
454
+ def get_tokens_unprocessed(self, text):
455
+ # find and remove all the escape tokens (replace with an empty string)
456
+ # this is very similar to DelegatingLexer.get_tokens_unprocessed.
457
+ buffered = ''
458
+ insertions = []
459
+ insertion_buf = []
460
+ for i, t, v in self._find_safe_escape_tokens(text):
461
+ if t is None:
462
+ if insertion_buf:
463
+ insertions.append((len(buffered), insertion_buf))
464
+ insertion_buf = []
465
+ buffered += v
466
+ else:
467
+ insertion_buf.append((i, t, v))
468
+ if insertion_buf:
469
+ insertions.append((len(buffered), insertion_buf))
470
+ return do_insertions(insertions,
471
+ self.lang.get_tokens_unprocessed(buffered))
472
+
473
+ def _find_safe_escape_tokens(self, text):
474
+ """ find escape tokens that are not in strings or comments """
475
+ for i, t, v in self._filter_to(
476
+ self.lang.get_tokens_unprocessed(text),
477
+ lambda t: t in Token.Comment or t in Token.String
478
+ ):
479
+ if t is None:
480
+ for i2, t2, v2 in self._find_escape_tokens(v):
481
+ yield i + i2, t2, v2
482
+ else:
483
+ yield i, None, v
484
+
485
+ def _filter_to(self, it, pred):
486
+ """ Keep only the tokens that match `pred`, merge the others together """
487
+ buf = ''
488
+ idx = 0
489
+ for i, t, v in it:
490
+ if pred(t):
491
+ if buf:
492
+ yield idx, None, buf
493
+ buf = ''
494
+ yield i, t, v
495
+ else:
496
+ if not buf:
497
+ idx = i
498
+ buf += v
499
+ if buf:
500
+ yield idx, None, buf
501
+
502
+ def _find_escape_tokens(self, text):
503
+ """ Find escape tokens within text, give token=None otherwise """
504
+ index = 0
505
+ while text:
506
+ a, sep1, text = text.partition(self.left)
507
+ if a:
508
+ yield index, None, a
509
+ index += len(a)
510
+ if sep1:
511
+ b, sep2, text = text.partition(self.right)
512
+ if sep2:
513
+ yield index + len(sep1), Token.Escape, b
514
+ index += len(sep1) + len(b) + len(sep2)
515
+ else:
516
+ yield index, Token.Error, sep1
517
+ index += len(sep1)
518
+ text = b
mgm/lib/python3.10/site-packages/pygments/formatters/other.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.other
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Other formatters: NullFormatter, RawTokenFormatter.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.formatter import Formatter
12
+ from pygments.util import get_choice_opt
13
+ from pygments.token import Token
14
+ from pygments.console import colorize
15
+
16
+ __all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
17
+
18
+
19
+ class NullFormatter(Formatter):
20
+ """
21
+ Output the text unchanged without any formatting.
22
+ """
23
+ name = 'Text only'
24
+ aliases = ['text', 'null']
25
+ filenames = ['*.txt']
26
+
27
+ def format(self, tokensource, outfile):
28
+ enc = self.encoding
29
+ for ttype, value in tokensource:
30
+ if enc:
31
+ outfile.write(value.encode(enc))
32
+ else:
33
+ outfile.write(value)
34
+
35
+
36
+ class RawTokenFormatter(Formatter):
37
+ r"""
38
+ Format tokens as a raw representation for storing token streams.
39
+
40
+ The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
41
+ be converted to a token stream with the `RawTokenLexer`, described in the
42
+ :doc:`lexer list <lexers>`.
43
+
44
+ Only two options are accepted:
45
+
46
+ `compress`
47
+ If set to ``'gz'`` or ``'bz2'``, compress the output with the given
48
+ compression algorithm after encoding (default: ``''``).
49
+ `error_color`
50
+ If set to a color name, highlight error tokens using that color. If
51
+ set but with no value, defaults to ``'red'``.
52
+
53
+ .. versionadded:: 0.11
54
+
55
+ """
56
+ name = 'Raw tokens'
57
+ aliases = ['raw', 'tokens']
58
+ filenames = ['*.raw']
59
+
60
+ unicodeoutput = False
61
+
62
+ def __init__(self, **options):
63
+ Formatter.__init__(self, **options)
64
+ # We ignore self.encoding if it is set, since it gets set for lexer
65
+ # and formatter if given with -Oencoding on the command line.
66
+ # The RawTokenFormatter outputs only ASCII. Override here.
67
+ self.encoding = 'ascii' # let pygments.format() do the right thing
68
+ self.compress = get_choice_opt(options, 'compress',
69
+ ['', 'none', 'gz', 'bz2'], '')
70
+ self.error_color = options.get('error_color', None)
71
+ if self.error_color is True:
72
+ self.error_color = 'red'
73
+ if self.error_color is not None:
74
+ try:
75
+ colorize(self.error_color, '')
76
+ except KeyError:
77
+ raise ValueError(f"Invalid color {self.error_color!r} specified")
78
+
79
+ def format(self, tokensource, outfile):
80
+ try:
81
+ outfile.write(b'')
82
+ except TypeError:
83
+ raise TypeError('The raw tokens formatter needs a binary '
84
+ 'output file')
85
+ if self.compress == 'gz':
86
+ import gzip
87
+ outfile = gzip.GzipFile('', 'wb', 9, outfile)
88
+
89
+ write = outfile.write
90
+ flush = outfile.close
91
+ elif self.compress == 'bz2':
92
+ import bz2
93
+ compressor = bz2.BZ2Compressor(9)
94
+
95
+ def write(text):
96
+ outfile.write(compressor.compress(text))
97
+
98
+ def flush():
99
+ outfile.write(compressor.flush())
100
+ outfile.flush()
101
+ else:
102
+ write = outfile.write
103
+ flush = outfile.flush
104
+
105
+ if self.error_color:
106
+ for ttype, value in tokensource:
107
+ line = b"%r\t%r\n" % (ttype, value)
108
+ if ttype is Token.Error:
109
+ write(colorize(self.error_color, line))
110
+ else:
111
+ write(line)
112
+ else:
113
+ for ttype, value in tokensource:
114
+ write(b"%r\t%r\n" % (ttype, value))
115
+ flush()
116
+
117
+
118
+ TESTCASE_BEFORE = '''\
119
+ def testNeedsName(lexer):
120
+ fragment = %r
121
+ tokens = [
122
+ '''
123
+ TESTCASE_AFTER = '''\
124
+ ]
125
+ assert list(lexer.get_tokens(fragment)) == tokens
126
+ '''
127
+
128
+
129
+ class TestcaseFormatter(Formatter):
130
+ """
131
+ Format tokens as appropriate for a new testcase.
132
+
133
+ .. versionadded:: 2.0
134
+ """
135
+ name = 'Testcase'
136
+ aliases = ['testcase']
137
+
138
+ def __init__(self, **options):
139
+ Formatter.__init__(self, **options)
140
+ if self.encoding is not None and self.encoding != 'utf-8':
141
+ raise ValueError("Only None and utf-8 are allowed encodings.")
142
+
143
+ def format(self, tokensource, outfile):
144
+ indentation = ' ' * 12
145
+ rawbuf = []
146
+ outbuf = []
147
+ for ttype, value in tokensource:
148
+ rawbuf.append(value)
149
+ outbuf.append(f'{indentation}({ttype}, {value!r}),\n')
150
+
151
+ before = TESTCASE_BEFORE % (''.join(rawbuf),)
152
+ during = ''.join(outbuf)
153
+ after = TESTCASE_AFTER
154
+ if self.encoding is None:
155
+ outfile.write(before + during + after)
156
+ else:
157
+ outfile.write(before.encode('utf-8'))
158
+ outfile.write(during.encode('utf-8'))
159
+ outfile.write(after.encode('utf-8'))
160
+ outfile.flush()
mgm/lib/python3.10/site-packages/pygments/formatters/pangomarkup.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.pangomarkup
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for Pango markup output.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.formatter import Formatter
12
+
13
+
14
+ __all__ = ['PangoMarkupFormatter']
15
+
16
+
17
+ _escape_table = {
18
+ ord('&'): '&amp;',
19
+ ord('<'): '&lt;',
20
+ }
21
+
22
+
23
+ def escape_special_chars(text, table=_escape_table):
24
+ """Escape & and < for Pango Markup."""
25
+ return text.translate(table)
26
+
27
+
28
+ class PangoMarkupFormatter(Formatter):
29
+ """
30
+ Format tokens as Pango Markup code. It can then be rendered to an SVG.
31
+
32
+ .. versionadded:: 2.9
33
+ """
34
+
35
+ name = 'Pango Markup'
36
+ aliases = ['pango', 'pangomarkup']
37
+ filenames = []
38
+
39
+ def __init__(self, **options):
40
+ Formatter.__init__(self, **options)
41
+
42
+ self.styles = {}
43
+
44
+ for token, style in self.style:
45
+ start = ''
46
+ end = ''
47
+ if style['color']:
48
+ start += '<span fgcolor="#{}">'.format(style['color'])
49
+ end = '</span>' + end
50
+ if style['bold']:
51
+ start += '<b>'
52
+ end = '</b>' + end
53
+ if style['italic']:
54
+ start += '<i>'
55
+ end = '</i>' + end
56
+ if style['underline']:
57
+ start += '<u>'
58
+ end = '</u>' + end
59
+ self.styles[token] = (start, end)
60
+
61
+ def format_unencoded(self, tokensource, outfile):
62
+ lastval = ''
63
+ lasttype = None
64
+
65
+ outfile.write('<tt>')
66
+
67
+ for ttype, value in tokensource:
68
+ while ttype not in self.styles:
69
+ ttype = ttype.parent
70
+ if ttype == lasttype:
71
+ lastval += escape_special_chars(value)
72
+ else:
73
+ if lastval:
74
+ stylebegin, styleend = self.styles[lasttype]
75
+ outfile.write(stylebegin + lastval + styleend)
76
+ lastval = escape_special_chars(value)
77
+ lasttype = ttype
78
+
79
+ if lastval:
80
+ stylebegin, styleend = self.styles[lasttype]
81
+ outfile.write(stylebegin + lastval + styleend)
82
+
83
+ outfile.write('</tt>')
mgm/lib/python3.10/site-packages/pygments/formatters/rtf.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.rtf
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ A formatter that generates RTF files.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from collections import OrderedDict
12
+ from pygments.formatter import Formatter
13
+ from pygments.style import _ansimap
14
+ from pygments.util import get_bool_opt, get_int_opt, get_list_opt, surrogatepair
15
+
16
+
17
+ __all__ = ['RtfFormatter']
18
+
19
+
20
+ class RtfFormatter(Formatter):
21
+ """
22
+ Format tokens as RTF markup. This formatter automatically outputs full RTF
23
+ documents with color information and other useful stuff. Perfect for Copy and
24
+ Paste into Microsoft(R) Word(R) documents.
25
+
26
+ Please note that ``encoding`` and ``outencoding`` options are ignored.
27
+ The RTF format is ASCII natively, but handles unicode characters correctly
28
+ thanks to escape sequences.
29
+
30
+ .. versionadded:: 0.6
31
+
32
+ Additional options accepted:
33
+
34
+ `style`
35
+ The style to use, can be a string or a Style subclass (default:
36
+ ``'default'``).
37
+
38
+ `fontface`
39
+ The used font family, for example ``Bitstream Vera Sans``. Defaults to
40
+ some generic font which is supposed to have fixed width.
41
+
42
+ `fontsize`
43
+ Size of the font used. Size is specified in half points. The
44
+ default is 24 half-points, giving a size 12 font.
45
+
46
+ .. versionadded:: 2.0
47
+
48
+ `linenos`
49
+ Turn on line numbering (default: ``False``).
50
+
51
+ .. versionadded:: 2.18
52
+
53
+ `lineno_fontsize`
54
+ Font size for line numbers. Size is specified in half points
55
+ (default: `fontsize`).
56
+
57
+ .. versionadded:: 2.18
58
+
59
+ `lineno_padding`
60
+ Number of spaces between the (inline) line numbers and the
61
+ source code (default: ``2``).
62
+
63
+ .. versionadded:: 2.18
64
+
65
+ `linenostart`
66
+ The line number for the first line (default: ``1``).
67
+
68
+ .. versionadded:: 2.18
69
+
70
+ `linenostep`
71
+ If set to a number n > 1, only every nth line number is printed.
72
+
73
+ .. versionadded:: 2.18
74
+
75
+ `lineno_color`
76
+ Color for line numbers specified as a hex triplet, e.g. ``'5e5e5e'``.
77
+ Defaults to the style's line number color if it is a hex triplet,
78
+ otherwise ansi bright black.
79
+
80
+ .. versionadded:: 2.18
81
+
82
+ `hl_lines`
83
+ Specify a list of lines to be highlighted, as line numbers separated by
84
+ spaces, e.g. ``'3 7 8'``. The line numbers are relative to the input
85
+ (i.e. the first line is line 1) unless `hl_linenostart` is set.
86
+
87
+ .. versionadded:: 2.18
88
+
89
+ `hl_color`
90
+ Color for highlighting the lines specified in `hl_lines`, specified as
91
+ a hex triplet (default: style's `highlight_color`).
92
+
93
+ .. versionadded:: 2.18
94
+
95
+ `hl_linenostart`
96
+ If set to ``True`` line numbers in `hl_lines` are specified
97
+ relative to `linenostart` (default ``False``).
98
+
99
+ .. versionadded:: 2.18
100
+ """
101
+ name = 'RTF'
102
+ aliases = ['rtf']
103
+ filenames = ['*.rtf']
104
+
105
+ def __init__(self, **options):
106
+ r"""
107
+ Additional options accepted:
108
+
109
+ ``fontface``
110
+ Name of the font used. Could for example be ``'Courier New'``
111
+ to further specify the default which is ``'\fmodern'``. The RTF
112
+ specification claims that ``\fmodern`` are "Fixed-pitch serif
113
+ and sans serif fonts". Hope every RTF implementation thinks
114
+ the same about modern...
115
+
116
+ """
117
+ Formatter.__init__(self, **options)
118
+ self.fontface = options.get('fontface') or ''
119
+ self.fontsize = get_int_opt(options, 'fontsize', 0)
120
+ self.linenos = get_bool_opt(options, 'linenos', False)
121
+ self.lineno_fontsize = get_int_opt(options, 'lineno_fontsize',
122
+ self.fontsize)
123
+ self.lineno_padding = get_int_opt(options, 'lineno_padding', 2)
124
+ self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
125
+ self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
126
+ self.hl_linenostart = get_bool_opt(options, 'hl_linenostart', False)
127
+
128
+ self.hl_color = options.get('hl_color', '')
129
+ if not self.hl_color:
130
+ self.hl_color = self.style.highlight_color
131
+
132
+ self.hl_lines = []
133
+ for lineno in get_list_opt(options, 'hl_lines', []):
134
+ try:
135
+ lineno = int(lineno)
136
+ if self.hl_linenostart:
137
+ lineno = lineno - self.linenostart + 1
138
+ self.hl_lines.append(lineno)
139
+ except ValueError:
140
+ pass
141
+
142
+ self.lineno_color = options.get('lineno_color', '')
143
+ if not self.lineno_color:
144
+ if self.style.line_number_color == 'inherit':
145
+ # style color is the css value 'inherit'
146
+ # default to ansi bright-black
147
+ self.lineno_color = _ansimap['ansibrightblack']
148
+ else:
149
+ # style color is assumed to be a hex triplet as other
150
+ # colors in pygments/style.py
151
+ self.lineno_color = self.style.line_number_color
152
+
153
+ self.color_mapping = self._create_color_mapping()
154
+
155
+ def _escape(self, text):
156
+ return text.replace('\\', '\\\\') \
157
+ .replace('{', '\\{') \
158
+ .replace('}', '\\}')
159
+
160
+ def _escape_text(self, text):
161
+ # empty strings, should give a small performance improvement
162
+ if not text:
163
+ return ''
164
+
165
+ # escape text
166
+ text = self._escape(text)
167
+
168
+ buf = []
169
+ for c in text:
170
+ cn = ord(c)
171
+ if cn < (2**7):
172
+ # ASCII character
173
+ buf.append(str(c))
174
+ elif (2**7) <= cn < (2**16):
175
+ # single unicode escape sequence
176
+ buf.append('{\\u%d}' % cn)
177
+ elif (2**16) <= cn:
178
+ # RTF limits unicode to 16 bits.
179
+ # Force surrogate pairs
180
+ buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
181
+
182
+ return ''.join(buf).replace('\n', '\\par')
183
+
184
+ @staticmethod
185
+ def hex_to_rtf_color(hex_color):
186
+ if hex_color[0] == "#":
187
+ hex_color = hex_color[1:]
188
+
189
+ return '\\red%d\\green%d\\blue%d;' % (
190
+ int(hex_color[0:2], 16),
191
+ int(hex_color[2:4], 16),
192
+ int(hex_color[4:6], 16)
193
+ )
194
+
195
+ def _split_tokens_on_newlines(self, tokensource):
196
+ """
197
+ Split tokens containing newline characters into multiple token
198
+ each representing a line of the input file. Needed for numbering
199
+ lines of e.g. multiline comments.
200
+ """
201
+ for ttype, value in tokensource:
202
+ if value == '\n':
203
+ yield (ttype, value)
204
+ elif "\n" in value:
205
+ lines = value.split("\n")
206
+ for line in lines[:-1]:
207
+ yield (ttype, line+"\n")
208
+ if lines[-1]:
209
+ yield (ttype, lines[-1])
210
+ else:
211
+ yield (ttype, value)
212
+
213
+ def _create_color_mapping(self):
214
+ """
215
+ Create a mapping of style hex colors to index/offset in
216
+ the RTF color table.
217
+ """
218
+ color_mapping = OrderedDict()
219
+ offset = 1
220
+
221
+ if self.linenos:
222
+ color_mapping[self.lineno_color] = offset
223
+ offset += 1
224
+
225
+ if self.hl_lines:
226
+ color_mapping[self.hl_color] = offset
227
+ offset += 1
228
+
229
+ for _, style in self.style:
230
+ for color in style['color'], style['bgcolor'], style['border']:
231
+ if color and color not in color_mapping:
232
+ color_mapping[color] = offset
233
+ offset += 1
234
+
235
+ return color_mapping
236
+
237
+ @property
238
+ def _lineno_template(self):
239
+ if self.lineno_fontsize != self.fontsize:
240
+ return '{{\\fs{} \\cf{} %s{}}}'.format(self.lineno_fontsize,
241
+ self.color_mapping[self.lineno_color],
242
+ " " * self.lineno_padding)
243
+
244
+ return '{{\\cf{} %s{}}}'.format(self.color_mapping[self.lineno_color],
245
+ " " * self.lineno_padding)
246
+
247
+ @property
248
+ def _hl_open_str(self):
249
+ return rf'{{\highlight{self.color_mapping[self.hl_color]} '
250
+
251
+ @property
252
+ def _rtf_header(self):
253
+ lines = []
254
+ # rtf 1.8 header
255
+ lines.append('{\\rtf1\\ansi\\uc0\\deff0'
256
+ '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
257
+ % (self.fontface and ' '
258
+ + self._escape(self.fontface) or ''))
259
+
260
+ # color table
261
+ lines.append('{\\colortbl;')
262
+ for color, _ in self.color_mapping.items():
263
+ lines.append(self.hex_to_rtf_color(color))
264
+ lines.append('}')
265
+
266
+ # font and fontsize
267
+ lines.append('\\f0\\sa0')
268
+ if self.fontsize:
269
+ lines.append('\\fs%d' % self.fontsize)
270
+
271
+ # ensure Libre Office Writer imports and renders consecutive
272
+ # space characters the same width, needed for line numbering.
273
+ # https://bugs.documentfoundation.org/show_bug.cgi?id=144050
274
+ lines.append('\\dntblnsbdb')
275
+
276
+ return lines
277
+
278
+ def format_unencoded(self, tokensource, outfile):
279
+ for line in self._rtf_header:
280
+ outfile.write(line + "\n")
281
+
282
+ tokensource = self._split_tokens_on_newlines(tokensource)
283
+
284
+ # first pass of tokens to count lines, needed for line numbering
285
+ if self.linenos:
286
+ line_count = 0
287
+ tokens = [] # for copying the token source generator
288
+ for ttype, value in tokensource:
289
+ tokens.append((ttype, value))
290
+ if value.endswith("\n"):
291
+ line_count += 1
292
+
293
+ # width of line number strings (for padding with spaces)
294
+ linenos_width = len(str(line_count+self.linenostart-1))
295
+
296
+ tokensource = tokens
297
+
298
+ # highlight stream
299
+ lineno = 1
300
+ start_new_line = True
301
+ for ttype, value in tokensource:
302
+ if start_new_line and lineno in self.hl_lines:
303
+ outfile.write(self._hl_open_str)
304
+
305
+ if start_new_line and self.linenos:
306
+ if (lineno-self.linenostart+1)%self.linenostep == 0:
307
+ current_lineno = lineno + self.linenostart - 1
308
+ lineno_str = str(current_lineno).rjust(linenos_width)
309
+ else:
310
+ lineno_str = "".rjust(linenos_width)
311
+ outfile.write(self._lineno_template % lineno_str)
312
+
313
+ while not self.style.styles_token(ttype) and ttype.parent:
314
+ ttype = ttype.parent
315
+ style = self.style.style_for_token(ttype)
316
+ buf = []
317
+ if style['bgcolor']:
318
+ buf.append('\\cb%d' % self.color_mapping[style['bgcolor']])
319
+ if style['color']:
320
+ buf.append('\\cf%d' % self.color_mapping[style['color']])
321
+ if style['bold']:
322
+ buf.append('\\b')
323
+ if style['italic']:
324
+ buf.append('\\i')
325
+ if style['underline']:
326
+ buf.append('\\ul')
327
+ if style['border']:
328
+ buf.append('\\chbrdr\\chcfpat%d' %
329
+ self.color_mapping[style['border']])
330
+ start = ''.join(buf)
331
+ if start:
332
+ outfile.write(f'{{{start} ')
333
+ outfile.write(self._escape_text(value))
334
+ if start:
335
+ outfile.write('}')
336
+ start_new_line = False
337
+
338
+ # complete line of input
339
+ if value.endswith("\n"):
340
+ # close line highlighting
341
+ if lineno in self.hl_lines:
342
+ outfile.write('}')
343
+ # newline in RTF file after closing }
344
+ outfile.write("\n")
345
+
346
+ start_new_line = True
347
+ lineno += 1
348
+
349
+ outfile.write('}\n')
mgm/lib/python3.10/site-packages/pygments/formatters/svg.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.svg
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for SVG output.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.formatter import Formatter
12
+ from pygments.token import Comment
13
+ from pygments.util import get_bool_opt, get_int_opt
14
+
15
+ __all__ = ['SvgFormatter']
16
+
17
+
18
def escape_html(text):
    """Return *text* with the characters ``&``, ``<``, ``>``, ``"`` and
    ``'`` replaced by their HTML character references."""
    # '&' must be handled first so that references produced by the later
    # replacements are not themselves escaped again.
    for char, ref in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                      ('"', '&quot;'), ("'", '&#39;')):
        text = text.replace(char, ref)
    return text
25
+
26
+
27
# NOTE(review): this module-level dict is never read or written anywhere in
# this file — presumably a leftover; confirm against external users before
# removing.
class2style = {}
28
+
29
class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file.  This formatter is still experimental.
    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
    coordinates containing ``<tspan>`` elements with the individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    .. versionadded:: 0.9

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype.  If true, the `fontfamily`
        and `fontsize` options are ignored.  Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `linenos`
        If ``True``, add line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `linenowidth`
        Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
        for up to 4-digit line numbers.  Increase width for longer code blocks).

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is given
        in pixels, or ``20`` else.  (This is necessary since text coordinates
        refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line.  This should
        roughly be the text size plus 5.  It defaults to that value if the text
        size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces.  SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is.  However, many current SVG
        viewers don't obey that rule, so this option is provided as a workaround
        and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        # Derive a numeric font size so that yoffset/ystep defaults can be
        # computed; non-pixel sizes fall back to 20 (-> ystep default 25).
        fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except ValueError:
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        self.linenos = get_bool_opt(options,'linenos',False)
        self.linenostart = get_int_opt(options,'linenostart',1)
        self.linenostep = get_int_opt(options,'linenostep',1)
        self.linenowidth = get_int_opt(options,'linenowidth', 3*self.ystep)
        # Cache of token type -> SVG attribute string, filled by _get_style().
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            # Emit XML declaration, doctype and the wrapping <svg><g> pair.
            if self.encoding:
                outfile.write(f'<?xml version="1.0" encoding="{self.encoding}"?>\n')
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write(f'<g font-family="{self.fontfamily}" font-size="{self.fontsize}">\n')

        # Line numbers are rendered right-aligned in the Comment style,
        # shifting the code text to the right by linenowidth + ystep.
        counter = self.linenostart
        counter_step = self.linenostep
        counter_style = self._get_style(Comment)
        line_x = x

        if self.linenos:
            if counter % counter_step == 0:
                outfile.write(f'<text x="{x+self.linenowidth}" y="{y}" {counter_style} text-anchor="end">{counter}</text>')
            line_x += self.linenowidth + self.ystep
            counter += 1

        outfile.write(f'<text x="{line_x}" y="{y}" xml:space="preserve">')
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = style and '<tspan' + style + '>' or ''
            tspanend = tspan and '</tspan>' or ''
            value = escape_html(value)
            if self.spacehack:
                value = value.expandtabs().replace(' ', '&#160;')
            # Each newline inside the token closes the current <text> line
            # and opens a new one at the next y position.
            parts = value.split('\n')
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n')
                if self.linenos and counter % counter_step == 0:
                    outfile.write(f'<text x="{x+self.linenowidth}" y="{y}" text-anchor="end" {counter_style}>{counter}</text>')

                counter += 1
                outfile.write(f'<text x="{line_x}" y="{y}" ' 'xml:space="preserve">')
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')

        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        """Return the SVG presentation attributes (fill/weight/style) for a
        token type, walking up the token hierarchy until a styled type is
        found.  Results are memoized in ``self._stylecache``."""
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
mgm/lib/python3.10/site-packages/pygments/formatters/terminal.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.terminal
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for terminal output with ANSI sequences.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.formatter import Formatter
12
+ from pygments.token import Keyword, Name, Comment, String, Error, \
13
+ Number, Operator, Generic, Token, Whitespace
14
+ from pygments.console import ansiformat
15
+ from pygments.util import get_choice_opt
16
+
17
+
18
+ __all__ = ['TerminalFormatter']
19
+
20
+
21
#: Map token types to a tuple of color values for light and dark
#: backgrounds.  Looked up via ``TerminalFormatter._get_color``, which walks
#: from a concrete token type up through its parents; the base ``Token``
#: entry (empty strings = no color) guarantees that walk terminates.
TERMINAL_COLORS = {
    Token:              ('',            ''),

    Whitespace:         ('gray',   'brightblack'),
    Comment:            ('gray',   'brightblack'),
    Comment.Preproc:    ('cyan',        'brightcyan'),
    Keyword:            ('blue',    'brightblue'),
    Keyword.Type:       ('cyan',        'brightcyan'),
    Operator.Word:      ('magenta',      'brightmagenta'),
    Name.Builtin:       ('cyan',        'brightcyan'),
    Name.Function:      ('green',   'brightgreen'),
    Name.Namespace:     ('_cyan_',      '_brightcyan_'),
    Name.Class:         ('_green_', '_brightgreen_'),
    Name.Exception:     ('cyan',        'brightcyan'),
    Name.Decorator:     ('brightblack',    'gray'),
    Name.Variable:      ('red',     'brightred'),
    Name.Constant:      ('red',     'brightred'),
    Name.Attribute:     ('cyan',        'brightcyan'),
    Name.Tag:           ('brightblue',        'brightblue'),
    String:             ('yellow',       'yellow'),
    Number:             ('blue',    'brightblue'),

    Generic.Deleted:    ('brightred',        'brightred'),
    Generic.Inserted:   ('green',  'brightgreen'),
    Generic.Heading:    ('**',         '**'),
    Generic.Subheading: ('*magenta*',   '*brightmagenta*'),
    Generic.Prompt:     ('**',         '**'),
    Generic.Error:      ('brightred',        'brightred'),

    Error:              ('_brightred_',      '_brightred_'),
}
54
+
55
+
56
class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.

    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.

    Options accepted:

    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).

    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # darkbg is a plain bool; it doubles as the index into the
        # (light, dark) tuples of the colorscheme below.
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def format(self, tokensource, outfile):
        # Explicit override that simply delegates to the base implementation.
        return Formatter.format(self, tokensource, outfile)

    def _write_lineno(self, outfile):
        # Writes "NNNN: " for the next line; every call except the first is
        # preceded by the newline that ends the previous output line.
        self._lineno += 1
        outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))

    def _get_color(self, ttype):
        # self.colorscheme is a dict containing usually generic types, so we
        # have to walk the tree of dots.  The base Token type must be a key,
        # even if it's empty string, as in the default above.
        colors = self.colorscheme.get(ttype)
        while colors is None:
            ttype = ttype.parent
            colors = self.colorscheme.get(ttype)
        return colors[self.darkbg]

    def format_unencoded(self, tokensource, outfile):
        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            color = self._get_color(ttype)

            for line in value.splitlines(True):
                # The newline is stripped before coloring so that the ANSI
                # sequence never spans a line break, then re-emitted below
                # (either bare or as part of the line-number prefix).
                if color:
                    outfile.write(ansiformat(color, line.rstrip('\n')))
                else:
                    outfile.write(line.rstrip('\n'))
                if line.endswith('\n'):
                    if self.linenos:
                        self._write_lineno(outfile)
                    else:
                        outfile.write('\n')

        if self.linenos:
            outfile.write("\n")
mgm/lib/python3.10/site-packages/pygments/formatters/terminal256.py ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.formatters.terminal256
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Formatter for 256-color terminal output with ANSI sequences.
6
+
7
+ RGB-to-XTERM color conversion routines adapted from xterm256-conv
8
+ tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
9
+ by Wolfgang Frisch.
10
+
11
+ Formatter version 1.
12
+
13
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
14
+ :license: BSD, see LICENSE for details.
15
+ """
16
+
17
+ # TODO:
18
+ # - Options to map style's bold/underline/italic/border attributes
19
+ # to some ANSI attrbutes (something like 'italic=underline')
20
+ # - An option to output "style RGB to xterm RGB/index" conversion table
21
+ # - An option to indicate that we are running in "reverse background"
22
+ # xterm. This means that default colors are white-on-black, not
23
+ # black-on-while, so colors like "white background" need to be converted
24
+ # to "white background, black foreground", etc...
25
+
26
+ from pygments.formatter import Formatter
27
+ from pygments.console import codes
28
+ from pygments.style import ansicolors
29
+
30
+
31
+ __all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
32
+
33
+
34
+ class EscapeSequence:
35
+ def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
36
+ self.fg = fg
37
+ self.bg = bg
38
+ self.bold = bold
39
+ self.underline = underline
40
+ self.italic = italic
41
+
42
+ def escape(self, attrs):
43
+ if len(attrs):
44
+ return "\x1b[" + ";".join(attrs) + "m"
45
+ return ""
46
+
47
+ def color_string(self):
48
+ attrs = []
49
+ if self.fg is not None:
50
+ if self.fg in ansicolors:
51
+ esc = codes[self.fg.replace('ansi','')]
52
+ if ';01m' in esc:
53
+ self.bold = True
54
+ # extract fg color code.
55
+ attrs.append(esc[2:4])
56
+ else:
57
+ attrs.extend(("38", "5", "%i" % self.fg))
58
+ if self.bg is not None:
59
+ if self.bg in ansicolors:
60
+ esc = codes[self.bg.replace('ansi','')]
61
+ # extract fg color code, add 10 for bg.
62
+ attrs.append(str(int(esc[2:4])+10))
63
+ else:
64
+ attrs.extend(("48", "5", "%i" % self.bg))
65
+ if self.bold:
66
+ attrs.append("01")
67
+ if self.underline:
68
+ attrs.append("04")
69
+ if self.italic:
70
+ attrs.append("03")
71
+ return self.escape(attrs)
72
+
73
+ def true_color_string(self):
74
+ attrs = []
75
+ if self.fg:
76
+ attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
77
+ if self.bg:
78
+ attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
79
+ if self.bold:
80
+ attrs.append("01")
81
+ if self.underline:
82
+ attrs.append("04")
83
+ if self.italic:
84
+ attrs.append("03")
85
+ return self.escape(attrs)
86
+
87
+ def reset_string(self):
88
+ attrs = []
89
+ if self.fg is not None:
90
+ attrs.append("39")
91
+ if self.bg is not None:
92
+ attrs.append("49")
93
+ if self.bold or self.underline or self.italic:
94
+ attrs.append("00")
95
+ return self.escape(attrs)
96
+
97
+
98
class Terminal256Formatter(Formatter):
    """
    Format tokens with ANSI color sequences, for output in a 256-color
    terminal or console.  Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    The formatter takes colors from a style defined by the `style` option
    and converts them to nearest ANSI 256-color escape sequences. Bold and
    underline attributes from the style are preserved (and displayed).

    .. versionadded:: 0.9

    .. versionchanged:: 2.2
       If the used style defines foreground colors in the form ``#ansi*``, then
       `Terminal256Formatter` will map these to non extended foreground color.
       See :ref:`AnsiTerminalStyle` for more information.

    .. versionchanged:: 2.4
       The ANSI color names have been updated with names that are easier to
       understand and align with colornames of other projects and terminals.
       See :ref:`this table <new-ansi-color-names>` for more information.


    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `linenos`
        Set to ``True`` to have line numbers on the terminal output as well
        (default: ``False`` = no line numbers).
    """
    name = 'Terminal256'
    aliases = ['terminal256', 'console256', '256']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        # RGB palette (filled by _build_color_table), memoized color lookups,
        # and the per-token-type (start, reset) escape strings.
        self.xterm_colors = []
        self.best_match = {}
        self.style_string = {}

        # Presence of the option key disables the attribute, regardless of
        # its value.
        self.usebold = 'nobold' not in options
        self.useunderline = 'nounderline' not in options
        self.useitalic = 'noitalic' not in options

        self._build_color_table()  # build an RGB-to-256 color conversion table
        self._setup_styles()  # convert selected style's colors to term. colors

        self.linenos = options.get('linenos', False)
        self._lineno = 0

    def _build_color_table(self):
        """Populate ``self.xterm_colors`` with the RGB values of the xterm
        256-color palette (254 entries in this adaptation)."""
        # colors 0..15: 16 basic colors

        self.xterm_colors.append((0x00, 0x00, 0x00))  # 0
        self.xterm_colors.append((0xcd, 0x00, 0x00))  # 1
        self.xterm_colors.append((0x00, 0xcd, 0x00))  # 2
        self.xterm_colors.append((0xcd, 0xcd, 0x00))  # 3
        self.xterm_colors.append((0x00, 0x00, 0xee))  # 4
        self.xterm_colors.append((0xcd, 0x00, 0xcd))  # 5
        self.xterm_colors.append((0x00, 0xcd, 0xcd))  # 6
        self.xterm_colors.append((0xe5, 0xe5, 0xe5))  # 7
        self.xterm_colors.append((0x7f, 0x7f, 0x7f))  # 8
        self.xterm_colors.append((0xff, 0x00, 0x00))  # 9
        self.xterm_colors.append((0x00, 0xff, 0x00))  # 10
        self.xterm_colors.append((0xff, 0xff, 0x00))  # 11
        self.xterm_colors.append((0x5c, 0x5c, 0xff))  # 12
        self.xterm_colors.append((0xff, 0x00, 0xff))  # 13
        self.xterm_colors.append((0x00, 0xff, 0xff))  # 14
        self.xterm_colors.append((0xff, 0xff, 0xff))  # 15

        # colors 16..232: the 6x6x6 color cube

        valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)

        # NOTE(review): 217 iterations yield one entry more than the canonical
        # 216-entry cube (indexes 16..232 instead of 16..231); this matches
        # the xterm256-conv tool this code was adapted from — confirm before
        # changing.
        for i in range(217):
            r = valuerange[(i // 36) % 6]
            g = valuerange[(i // 6) % 6]
            b = valuerange[i % 6]
            self.xterm_colors.append((r, g, b))

        # colors 233..253: grayscale

        for i in range(1, 22):
            v = 8 + i * 10
            self.xterm_colors.append((v, v, v))

    def _closest_color(self, r, g, b):
        """Return the palette index with the smallest squared RGB distance
        to (r, g, b), searching all 254 table entries."""
        distance = 257*257*3  # "infinity" (>distance from #000000 to #ffffff)
        match = 0

        for i in range(0, 254):
            values = self.xterm_colors[i]

            rd = r - values[0]
            gd = g - values[1]
            bd = b - values[2]
            d = rd*rd + gd*gd + bd*bd

            if d < distance:
                match = i
                distance = d
        return match

    def _color_index(self, color):
        """Map a style color (an ``ansi*`` name or an ``RRGGBB`` hex string)
        to a terminal color, memoizing the result in ``self.best_match``.
        Unparsable hex strings fall back to 0 (i.e. black)."""
        index = self.best_match.get(color, None)
        if color in ansicolors:
            # strip the `ansi/#ansi` part and look up code
            index = color
            self.best_match[color] = index
        if index is None:
            try:
                rgb = int(str(color), 16)
            except ValueError:
                rgb = 0

            r = (rgb >> 16) & 0xff
            g = (rgb >> 8) & 0xff
            b = rgb & 0xff
            index = self._closest_color(r, g, b)
            self.best_match[color] = index
        return index

    def _setup_styles(self):
        """Precompute the (start, reset) escape pair for every token type of
        the selected style, keyed by ``str(ttype)``."""
        for ttype, ndef in self.style:
            escape = EscapeSequence()
            # get foreground from ansicolor if set
            if ndef['ansicolor']:
                escape.fg = self._color_index(ndef['ansicolor'])
            elif ndef['color']:
                escape.fg = self._color_index(ndef['color'])
            if ndef['bgansicolor']:
                escape.bg = self._color_index(ndef['bgansicolor'])
            elif ndef['bgcolor']:
                escape.bg = self._color_index(ndef['bgcolor'])
            if self.usebold and ndef['bold']:
                escape.bold = True
            if self.useunderline and ndef['underline']:
                escape.underline = True
            if self.useitalic and ndef['italic']:
                escape.italic = True
            self.style_string[str(ttype)] = (escape.color_string(),
                                             escape.reset_string())

    def _write_lineno(self, outfile):
        # Writes "NNNN: "; every call except the first is preceded by the
        # newline that terminates the previous output line.
        self._lineno += 1
        outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))

    def format(self, tokensource, outfile):
        # Explicit override that simply delegates to the base implementation.
        return Formatter.format(self, tokensource, outfile)

    def format_unencoded(self, tokensource, outfile):
        if self.linenos:
            self._write_lineno(outfile)

        for ttype, value in tokensource:
            # Walk up the token hierarchy until a type with a precomputed
            # escape pair is found; emit the value unstyled if none is.
            not_found = True
            while ttype and not_found:
                try:
                    # outfile.write( "<" + str(ttype) + ">" )
                    on, off = self.style_string[str(ttype)]

                    # Like TerminalFormatter, add "reset colors" escape sequence
                    # on newline.
                    spl = value.split('\n')
                    for line in spl[:-1]:
                        if line:
                            outfile.write(on + line + off)
                        if self.linenos:
                            self._write_lineno(outfile)
                        else:
                            outfile.write('\n')

                    if spl[-1]:
                        outfile.write(on + spl[-1] + off)

                    not_found = False
                    # outfile.write( '#' + str(ttype) + '#' )

                except KeyError:
                    # ottype = ttype
                    ttype = ttype.parent
                    # outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )

            if not_found:
                outfile.write(value)

        if self.linenos:
            outfile.write("\n")
290
+
291
+
292
+
293
class TerminalTrueColorFormatter(Terminal256Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a true-color
    terminal or console. Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    .. versionadded:: 2.1

    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).
    """
    name = 'TerminalTrueColor'
    aliases = ['terminal16m', 'console16m', '16m']
    filenames = []

    def _build_color_table(self):
        # No quantization table is needed: style colors are emitted verbatim
        # as 24-bit escape sequences.
        pass

    def _color_tuple(self, color):
        """Parse an ``RRGGBB`` hex string into an ``(r, g, b)`` tuple, or
        return ``None`` if it is not valid hexadecimal."""
        try:
            numeric = int(str(color), 16)
        except ValueError:
            return None
        return ((numeric >> 16) & 0xff, (numeric >> 8) & 0xff, numeric & 0xff)

    def _setup_styles(self):
        # Precompute the (start, reset) escape pair for every token type,
        # keyed by str(ttype), using true-color sequences.
        for ttype, ndef in self.style:
            sequence = EscapeSequence(
                fg=self._color_tuple(ndef['color']) if ndef['color'] else None,
                bg=self._color_tuple(ndef['bgcolor']) if ndef['bgcolor'] else None,
                bold=bool(self.usebold and ndef['bold']),
                underline=bool(self.useunderline and ndef['underline']),
                italic=bool(self.useitalic and ndef['italic']),
            )
            self.style_string[str(ttype)] = (sequence.true_color_string(),
                                             sequence.reset_string())
mgm/lib/python3.10/site-packages/pygments/lexer.py ADDED
@@ -0,0 +1,961 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexer
3
+ ~~~~~~~~~~~~~~
4
+
5
+ Base lexer classes.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+ import sys
13
+ import time
14
+
15
+ from pygments.filter import apply_filters, Filter
16
+ from pygments.filters import get_filter_by_name
17
+ from pygments.token import Error, Text, Other, Whitespace, _TokenType
18
+ from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
19
+ make_analysator, Future, guess_decode
20
+ from pygments.regexopt import regex_opt
21
+
22
+ __all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
23
+ 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
24
+ 'default', 'words', 'line_re']
25
+
26
# Matches one line of input including its trailing newline (non-greedy, so
# it never spans more than one line).
line_re = re.compile('.*?\n')

# BOM byte signatures mapped to the codec used to decode the rest of the
# input.  Order matters: the 4-byte UTF-32 BOMs must be tested before the
# UTF-16 BOMs they start with.
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
                 (b'\xff\xfe\0\0', 'utf-32'),
                 (b'\0\0\xfe\xff', 'utf-32be'),
                 (b'\xff\xfe', 'utf-16'),
                 (b'\xfe\xff', 'utf-16be')]

# Fallback ``analyse_text`` implementation: never claims the input.
_default_analyse = staticmethod(lambda x: 0.0)
35
+
36
+
37
class LexerMeta(type):
    """
    Metaclass for all lexers: any ``analyse_text`` method found in the class
    body is wrapped with `make_analysator`, turning it into a static method
    whose return value is always converted to a float.
    """

    def __new__(mcs, name, bases, d):
        try:
            analyser = d['analyse_text']
        except KeyError:
            pass
        else:
            d['analyse_text'] = make_analysator(analyser)
        return type.__new__(mcs, name, bases, d)
47
+
48
+
49
+ class Lexer(metaclass=LexerMeta):
50
+ """
51
+ Lexer for a specific language.
52
+
53
+ See also :doc:`lexerdevelopment`, a high-level guide to writing
54
+ lexers.
55
+
56
+ Lexer classes have attributes used for choosing the most appropriate
57
+ lexer based on various criteria.
58
+
59
+ .. autoattribute:: name
60
+ :no-value:
61
+ .. autoattribute:: aliases
62
+ :no-value:
63
+ .. autoattribute:: filenames
64
+ :no-value:
65
+ .. autoattribute:: alias_filenames
66
+ .. autoattribute:: mimetypes
67
+ :no-value:
68
+ .. autoattribute:: priority
69
+
70
+ Lexers included in Pygments should have two additional attributes:
71
+
72
+ .. autoattribute:: url
73
+ :no-value:
74
+ .. autoattribute:: version_added
75
+ :no-value:
76
+
77
+ Lexers included in Pygments may have additional attributes:
78
+
79
+ .. autoattribute:: _example
80
+ :no-value:
81
+
82
+ You can pass options to the constructor. The basic options recognized
83
+ by all lexers and processed by the base `Lexer` class are:
84
+
85
+ ``stripnl``
86
+ Strip leading and trailing newlines from the input (default: True).
87
+ ``stripall``
88
+ Strip all leading and trailing whitespace from the input
89
+ (default: False).
90
+ ``ensurenl``
91
+ Make sure that the input ends with a newline (default: True). This
92
+ is required for some lexers that consume input linewise.
93
+
94
+ .. versionadded:: 1.3
95
+
96
+ ``tabsize``
97
+ If given and greater than 0, expand tabs in the input (default: 0).
98
+ ``encoding``
99
+ If given, must be an encoding name. This encoding will be used to
100
+ convert the input string to Unicode, if it is not already a Unicode
101
+ string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
102
+ Latin1 detection. Can also be ``'chardet'`` to use the chardet
103
+ library, if it is installed.
104
+ ``inencoding``
105
+ Overrides the ``encoding`` if given.
106
+ """
107
+
108
+ #: Full name of the lexer, in human-readable form
109
+ name = None
110
+
111
+ #: A list of short, unique identifiers that can be used to look
112
+ #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
113
+ aliases = []
114
+
115
+ #: A list of `fnmatch` patterns that match filenames which contain
116
+ #: content for this lexer. The patterns in this list should be unique among
117
+ #: all lexers.
118
+ filenames = []
119
+
120
+ #: A list of `fnmatch` patterns that match filenames which may or may not
121
+ #: contain content for this lexer. This list is used by the
122
+ #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
123
+ #: are then included in guessing the correct one. That means that
124
+ #: e.g. every lexer for HTML and a template language should include
125
+ #: ``\*.html`` in this list.
126
+ alias_filenames = []
127
+
128
+ #: A list of MIME types for content that can be lexed with this lexer.
129
+ mimetypes = []
130
+
131
+ #: Priority, should multiple lexers match and no content is provided
132
+ priority = 0
133
+
134
+ #: URL of the language specification/definition. Used in the Pygments
135
+ #: documentation. Set to an empty string to disable.
136
+ url = None
137
+
138
+ #: Version of Pygments in which the lexer was added.
139
+ version_added = None
140
+
141
+ #: Example file name. Relative to the ``tests/examplefiles`` directory.
142
+ #: This is used by the documentation generator to show an example.
143
+ _example = None
144
+
145
+ def __init__(self, **options):
146
+ """
147
+ This constructor takes arbitrary options as keyword arguments.
148
+ Every subclass must first process its own options and then call
149
+ the `Lexer` constructor, since it processes the basic
150
+ options like `stripnl`.
151
+
152
+ An example looks like this:
153
+
154
+ .. sourcecode:: python
155
+
156
+ def __init__(self, **options):
157
+ self.compress = options.get('compress', '')
158
+ Lexer.__init__(self, **options)
159
+
160
+ As these options must all be specifiable as strings (due to the
161
+ command line usage), there are various utility functions
162
+ available to help with that, see `Utilities`_.
163
+ """
164
+ self.options = options
165
+ self.stripnl = get_bool_opt(options, 'stripnl', True)
166
+ self.stripall = get_bool_opt(options, 'stripall', False)
167
+ self.ensurenl = get_bool_opt(options, 'ensurenl', True)
168
+ self.tabsize = get_int_opt(options, 'tabsize', 0)
169
+ self.encoding = options.get('encoding', 'guess')
170
+ self.encoding = options.get('inencoding') or self.encoding
171
+ self.filters = []
172
+ for filter_ in get_list_opt(options, 'filters', ()):
173
+ self.add_filter(filter_)
174
+
175
+ def __repr__(self):
176
+ if self.options:
177
+ return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>'
178
+ else:
179
+ return f'<pygments.lexers.{self.__class__.__name__}>'
180
+
181
+ def add_filter(self, filter_, **options):
182
+ """
183
+ Add a new stream filter to this lexer.
184
+ """
185
+ if not isinstance(filter_, Filter):
186
+ filter_ = get_filter_by_name(filter_, **options)
187
+ self.filters.append(filter_)
188
+
189
+ def analyse_text(text):
190
+ """
191
+ A static method which is called for lexer guessing.
192
+
193
+ It should analyse the text and return a float in the range
194
+ from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
195
+ will not be selected as the most probable one, if it returns
196
+ ``1.0``, it will be selected immediately. This is used by
197
+ `guess_lexer`.
198
+
199
+ The `LexerMeta` metaclass automatically wraps this function so
200
+ that it works like a static method (no ``self`` or ``cls``
201
+ parameter) and the return value is automatically converted to
202
+ `float`. If the return value is an object that is boolean `False`
203
+ it's the same as if the return values was ``0.0``.
204
+ """
205
+
206
+ def _preprocess_lexer_input(self, text):
207
+ """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""
208
+
209
+ if not isinstance(text, str):
210
+ if self.encoding == 'guess':
211
+ text, _ = guess_decode(text)
212
+ elif self.encoding == 'chardet':
213
+ try:
214
+ import chardet
215
+ except ImportError as e:
216
+ raise ImportError('To enable chardet encoding guessing, '
217
+ 'please install the chardet library '
218
+ 'from http://chardet.feedparser.org/') from e
219
+ # check for BOM first
220
+ decoded = None
221
+ for bom, encoding in _encoding_map:
222
+ if text.startswith(bom):
223
+ decoded = text[len(bom):].decode(encoding, 'replace')
224
+ break
225
+ # no BOM found, so use chardet
226
+ if decoded is None:
227
+ enc = chardet.detect(text[:1024]) # Guess using first 1KB
228
+ decoded = text.decode(enc.get('encoding') or 'utf-8',
229
+ 'replace')
230
+ text = decoded
231
+ else:
232
+ text = text.decode(self.encoding)
233
+ if text.startswith('\ufeff'):
234
+ text = text[len('\ufeff'):]
235
+ else:
236
+ if text.startswith('\ufeff'):
237
+ text = text[len('\ufeff'):]
238
+
239
+ # text now *is* a unicode string
240
+ text = text.replace('\r\n', '\n')
241
+ text = text.replace('\r', '\n')
242
+ if self.stripall:
243
+ text = text.strip()
244
+ elif self.stripnl:
245
+ text = text.strip('\n')
246
+ if self.tabsize > 0:
247
+ text = text.expandtabs(self.tabsize)
248
+ if self.ensurenl and not text.endswith('\n'):
249
+ text += '\n'
250
+
251
+ return text
252
+
253
+ def get_tokens(self, text, unfiltered=False):
254
+ """
255
+ This method is the basic interface of a lexer. It is called by
256
+ the `highlight()` function. It must process the text and return an
257
+ iterable of ``(tokentype, value)`` pairs from `text`.
258
+
259
+ Normally, you don't need to override this method. The default
260
+ implementation processes the options recognized by all lexers
261
+ (`stripnl`, `stripall` and so on), and then yields all tokens
262
+ from `get_tokens_unprocessed()`, with the ``index`` dropped.
263
+
264
+ If `unfiltered` is set to `True`, the filtering mechanism is
265
+ bypassed even if filters are defined.
266
+ """
267
+ text = self._preprocess_lexer_input(text)
268
+
269
+ def streamer():
270
+ for _, t, v in self.get_tokens_unprocessed(text):
271
+ yield t, v
272
+ stream = streamer()
273
+ if not unfiltered:
274
+ stream = apply_filters(stream, self.filters, self)
275
+ return stream
276
+
277
+ def get_tokens_unprocessed(self, text):
278
+ """
279
+ This method should process the text and return an iterable of
280
+ ``(index, tokentype, value)`` tuples where ``index`` is the starting
281
+ position of the token within the input text.
282
+
283
+ It must be overridden by subclasses. It is recommended to
284
+ implement it as a generator to maximize effectiveness.
285
+ """
286
+ raise NotImplementedError
287
+
288
+
289
+ class DelegatingLexer(Lexer):
290
+ """
291
+ This lexer takes two lexer as arguments. A root lexer and
292
+ a language lexer. First everything is scanned using the language
293
+ lexer, afterwards all ``Other`` tokens are lexed using the root
294
+ lexer.
295
+
296
+ The lexers from the ``template`` lexer package use this base lexer.
297
+ """
298
+
299
+ def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
300
+ self.root_lexer = _root_lexer(**options)
301
+ self.language_lexer = _language_lexer(**options)
302
+ self.needle = _needle
303
+ Lexer.__init__(self, **options)
304
+
305
+ def get_tokens_unprocessed(self, text):
306
+ buffered = ''
307
+ insertions = []
308
+ lng_buffer = []
309
+ for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
310
+ if t is self.needle:
311
+ if lng_buffer:
312
+ insertions.append((len(buffered), lng_buffer))
313
+ lng_buffer = []
314
+ buffered += v
315
+ else:
316
+ lng_buffer.append((i, t, v))
317
+ if lng_buffer:
318
+ insertions.append((len(buffered), lng_buffer))
319
+ return do_insertions(insertions,
320
+ self.root_lexer.get_tokens_unprocessed(buffered))
321
+
322
+
323
+ # ------------------------------------------------------------------------------
324
+ # RegexLexer and ExtendedRegexLexer
325
+ #
326
+
327
+
328
+ class include(str): # pylint: disable=invalid-name
329
+ """
330
+ Indicates that a state should include rules from another state.
331
+ """
332
+ pass
333
+
334
+
335
+ class _inherit:
336
+ """
337
+ Indicates the a state should inherit from its superclass.
338
+ """
339
+ def __repr__(self):
340
+ return 'inherit'
341
+
342
+ inherit = _inherit() # pylint: disable=invalid-name
343
+
344
+
345
+ class combined(tuple): # pylint: disable=invalid-name
346
+ """
347
+ Indicates a state combined from multiple states.
348
+ """
349
+
350
+ def __new__(cls, *args):
351
+ return tuple.__new__(cls, args)
352
+
353
+ def __init__(self, *args):
354
+ # tuple.__init__ doesn't do anything
355
+ pass
356
+
357
+
358
+ class _PseudoMatch:
359
+ """
360
+ A pseudo match object constructed from a string.
361
+ """
362
+
363
+ def __init__(self, start, text):
364
+ self._text = text
365
+ self._start = start
366
+
367
+ def start(self, arg=None):
368
+ return self._start
369
+
370
+ def end(self, arg=None):
371
+ return self._start + len(self._text)
372
+
373
+ def group(self, arg=None):
374
+ if arg:
375
+ raise IndexError('No such group')
376
+ return self._text
377
+
378
+ def groups(self):
379
+ return (self._text,)
380
+
381
+ def groupdict(self):
382
+ return {}
383
+
384
+
385
+ def bygroups(*args):
386
+ """
387
+ Callback that yields multiple actions for each group in the match.
388
+ """
389
+ def callback(lexer, match, ctx=None):
390
+ for i, action in enumerate(args):
391
+ if action is None:
392
+ continue
393
+ elif type(action) is _TokenType:
394
+ data = match.group(i + 1)
395
+ if data:
396
+ yield match.start(i + 1), action, data
397
+ else:
398
+ data = match.group(i + 1)
399
+ if data is not None:
400
+ if ctx:
401
+ ctx.pos = match.start(i + 1)
402
+ for item in action(lexer,
403
+ _PseudoMatch(match.start(i + 1), data), ctx):
404
+ if item:
405
+ yield item
406
+ if ctx:
407
+ ctx.pos = match.end()
408
+ return callback
409
+
410
+
411
+ class _This:
412
+ """
413
+ Special singleton used for indicating the caller class.
414
+ Used by ``using``.
415
+ """
416
+
417
+ this = _This()
418
+
419
+
420
+ def using(_other, **kwargs):
421
+ """
422
+ Callback that processes the match with a different lexer.
423
+
424
+ The keyword arguments are forwarded to the lexer, except `state` which
425
+ is handled separately.
426
+
427
+ `state` specifies the state that the new lexer will start in, and can
428
+ be an enumerable such as ('root', 'inline', 'string') or a simple
429
+ string which is assumed to be on top of the root state.
430
+
431
+ Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
432
+ """
433
+ gt_kwargs = {}
434
+ if 'state' in kwargs:
435
+ s = kwargs.pop('state')
436
+ if isinstance(s, (list, tuple)):
437
+ gt_kwargs['stack'] = s
438
+ else:
439
+ gt_kwargs['stack'] = ('root', s)
440
+
441
+ if _other is this:
442
+ def callback(lexer, match, ctx=None):
443
+ # if keyword arguments are given the callback
444
+ # function has to create a new lexer instance
445
+ if kwargs:
446
+ # XXX: cache that somehow
447
+ kwargs.update(lexer.options)
448
+ lx = lexer.__class__(**kwargs)
449
+ else:
450
+ lx = lexer
451
+ s = match.start()
452
+ for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
453
+ yield i + s, t, v
454
+ if ctx:
455
+ ctx.pos = match.end()
456
+ else:
457
+ def callback(lexer, match, ctx=None):
458
+ # XXX: cache that somehow
459
+ kwargs.update(lexer.options)
460
+ lx = _other(**kwargs)
461
+
462
+ s = match.start()
463
+ for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
464
+ yield i + s, t, v
465
+ if ctx:
466
+ ctx.pos = match.end()
467
+ return callback
468
+
469
+
470
+ class default:
471
+ """
472
+ Indicates a state or state action (e.g. #pop) to apply.
473
+ For example default('#pop') is equivalent to ('', Token, '#pop')
474
+ Note that state tuples may be used as well.
475
+
476
+ .. versionadded:: 2.0
477
+ """
478
+ def __init__(self, state):
479
+ self.state = state
480
+
481
+
482
+ class words(Future):
483
+ """
484
+ Indicates a list of literal words that is transformed into an optimized
485
+ regex that matches any of the words.
486
+
487
+ .. versionadded:: 2.0
488
+ """
489
+ def __init__(self, words, prefix='', suffix=''):
490
+ self.words = words
491
+ self.prefix = prefix
492
+ self.suffix = suffix
493
+
494
+ def get(self):
495
+ return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
496
+
497
+
498
+ class RegexLexerMeta(LexerMeta):
499
+ """
500
+ Metaclass for RegexLexer, creates the self._tokens attribute from
501
+ self.tokens on the first instantiation.
502
+ """
503
+
504
+ def _process_regex(cls, regex, rflags, state):
505
+ """Preprocess the regular expression component of a token definition."""
506
+ if isinstance(regex, Future):
507
+ regex = regex.get()
508
+ return re.compile(regex, rflags).match
509
+
510
+ def _process_token(cls, token):
511
+ """Preprocess the token component of a token definition."""
512
+ assert type(token) is _TokenType or callable(token), \
513
+ f'token type must be simple type or callable, not {token!r}'
514
+ return token
515
+
516
+ def _process_new_state(cls, new_state, unprocessed, processed):
517
+ """Preprocess the state transition action of a token definition."""
518
+ if isinstance(new_state, str):
519
+ # an existing state
520
+ if new_state == '#pop':
521
+ return -1
522
+ elif new_state in unprocessed:
523
+ return (new_state,)
524
+ elif new_state == '#push':
525
+ return new_state
526
+ elif new_state[:5] == '#pop:':
527
+ return -int(new_state[5:])
528
+ else:
529
+ assert False, f'unknown new state {new_state!r}'
530
+ elif isinstance(new_state, combined):
531
+ # combine a new state from existing ones
532
+ tmp_state = '_tmp_%d' % cls._tmpname
533
+ cls._tmpname += 1
534
+ itokens = []
535
+ for istate in new_state:
536
+ assert istate != new_state, f'circular state ref {istate!r}'
537
+ itokens.extend(cls._process_state(unprocessed,
538
+ processed, istate))
539
+ processed[tmp_state] = itokens
540
+ return (tmp_state,)
541
+ elif isinstance(new_state, tuple):
542
+ # push more than one state
543
+ for istate in new_state:
544
+ assert (istate in unprocessed or
545
+ istate in ('#pop', '#push')), \
546
+ 'unknown new state ' + istate
547
+ return new_state
548
+ else:
549
+ assert False, f'unknown new state def {new_state!r}'
550
+
551
+ def _process_state(cls, unprocessed, processed, state):
552
+ """Preprocess a single state definition."""
553
+ assert isinstance(state, str), f"wrong state name {state!r}"
554
+ assert state[0] != '#', f"invalid state name {state!r}"
555
+ if state in processed:
556
+ return processed[state]
557
+ tokens = processed[state] = []
558
+ rflags = cls.flags
559
+ for tdef in unprocessed[state]:
560
+ if isinstance(tdef, include):
561
+ # it's a state reference
562
+ assert tdef != state, f"circular state reference {state!r}"
563
+ tokens.extend(cls._process_state(unprocessed, processed,
564
+ str(tdef)))
565
+ continue
566
+ if isinstance(tdef, _inherit):
567
+ # should be processed already, but may not in the case of:
568
+ # 1. the state has no counterpart in any parent
569
+ # 2. the state includes more than one 'inherit'
570
+ continue
571
+ if isinstance(tdef, default):
572
+ new_state = cls._process_new_state(tdef.state, unprocessed, processed)
573
+ tokens.append((re.compile('').match, None, new_state))
574
+ continue
575
+
576
+ assert type(tdef) is tuple, f"wrong rule def {tdef!r}"
577
+
578
+ try:
579
+ rex = cls._process_regex(tdef[0], rflags, state)
580
+ except Exception as err:
581
+ raise ValueError(f"uncompilable regex {tdef[0]!r} in state {state!r} of {cls!r}: {err}") from err
582
+
583
+ token = cls._process_token(tdef[1])
584
+
585
+ if len(tdef) == 2:
586
+ new_state = None
587
+ else:
588
+ new_state = cls._process_new_state(tdef[2],
589
+ unprocessed, processed)
590
+
591
+ tokens.append((rex, token, new_state))
592
+ return tokens
593
+
594
+ def process_tokendef(cls, name, tokendefs=None):
595
+ """Preprocess a dictionary of token definitions."""
596
+ processed = cls._all_tokens[name] = {}
597
+ tokendefs = tokendefs or cls.tokens[name]
598
+ for state in list(tokendefs):
599
+ cls._process_state(tokendefs, processed, state)
600
+ return processed
601
+
602
+ def get_tokendefs(cls):
603
+ """
604
+ Merge tokens from superclasses in MRO order, returning a single tokendef
605
+ dictionary.
606
+
607
+ Any state that is not defined by a subclass will be inherited
608
+ automatically. States that *are* defined by subclasses will, by
609
+ default, override that state in the superclass. If a subclass wishes to
610
+ inherit definitions from a superclass, it can use the special value
611
+ "inherit", which will cause the superclass' state definition to be
612
+ included at that point in the state.
613
+ """
614
+ tokens = {}
615
+ inheritable = {}
616
+ for c in cls.__mro__:
617
+ toks = c.__dict__.get('tokens', {})
618
+
619
+ for state, items in toks.items():
620
+ curitems = tokens.get(state)
621
+ if curitems is None:
622
+ # N.b. because this is assigned by reference, sufficiently
623
+ # deep hierarchies are processed incrementally (e.g. for
624
+ # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
625
+ # will not see any inherits in B).
626
+ tokens[state] = items
627
+ try:
628
+ inherit_ndx = items.index(inherit)
629
+ except ValueError:
630
+ continue
631
+ inheritable[state] = inherit_ndx
632
+ continue
633
+
634
+ inherit_ndx = inheritable.pop(state, None)
635
+ if inherit_ndx is None:
636
+ continue
637
+
638
+ # Replace the "inherit" value with the items
639
+ curitems[inherit_ndx:inherit_ndx+1] = items
640
+ try:
641
+ # N.b. this is the index in items (that is, the superclass
642
+ # copy), so offset required when storing below.
643
+ new_inh_ndx = items.index(inherit)
644
+ except ValueError:
645
+ pass
646
+ else:
647
+ inheritable[state] = inherit_ndx + new_inh_ndx
648
+
649
+ return tokens
650
+
651
+ def __call__(cls, *args, **kwds):
652
+ """Instantiate cls after preprocessing its token definitions."""
653
+ if '_tokens' not in cls.__dict__:
654
+ cls._all_tokens = {}
655
+ cls._tmpname = 0
656
+ if hasattr(cls, 'token_variants') and cls.token_variants:
657
+ # don't process yet
658
+ pass
659
+ else:
660
+ cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
661
+
662
+ return type.__call__(cls, *args, **kwds)
663
+
664
+
665
+ class RegexLexer(Lexer, metaclass=RegexLexerMeta):
666
+ """
667
+ Base for simple stateful regular expression-based lexers.
668
+ Simplifies the lexing process so that you need only
669
+ provide a list of states and regular expressions.
670
+ """
671
+
672
+ #: Flags for compiling the regular expressions.
673
+ #: Defaults to MULTILINE.
674
+ flags = re.MULTILINE
675
+
676
+ #: At all time there is a stack of states. Initially, the stack contains
677
+ #: a single state 'root'. The top of the stack is called "the current state".
678
+ #:
679
+ #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
680
+ #:
681
+ #: ``new_state`` can be omitted to signify no state transition.
682
+ #: If ``new_state`` is a string, it is pushed on the stack. This ensure
683
+ #: the new current state is ``new_state``.
684
+ #: If ``new_state`` is a tuple of strings, all of those strings are pushed
685
+ #: on the stack and the current state will be the last element of the list.
686
+ #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
687
+ #: to signify a new, anonymous state combined from the rules of two
688
+ #: or more existing ones.
689
+ #: Furthermore, it can be '#pop' to signify going back one step in
690
+ #: the state stack, or '#push' to push the current state on the stack
691
+ #: again. Note that if you push while in a combined state, the combined
692
+ #: state itself is pushed, and not only the state in which the rule is
693
+ #: defined.
694
+ #:
695
+ #: The tuple can also be replaced with ``include('state')``, in which
696
+ #: case the rules from the state named by the string are included in the
697
+ #: current one.
698
+ tokens = {}
699
+
700
+ def get_tokens_unprocessed(self, text, stack=('root',)):
701
+ """
702
+ Split ``text`` into (tokentype, text) pairs.
703
+
704
+ ``stack`` is the initial stack (default: ``['root']``)
705
+ """
706
+ pos = 0
707
+ tokendefs = self._tokens
708
+ statestack = list(stack)
709
+ statetokens = tokendefs[statestack[-1]]
710
+ while 1:
711
+ for rexmatch, action, new_state in statetokens:
712
+ m = rexmatch(text, pos)
713
+ if m:
714
+ if action is not None:
715
+ if type(action) is _TokenType:
716
+ yield pos, action, m.group()
717
+ else:
718
+ yield from action(self, m)
719
+ pos = m.end()
720
+ if new_state is not None:
721
+ # state transition
722
+ if isinstance(new_state, tuple):
723
+ for state in new_state:
724
+ if state == '#pop':
725
+ if len(statestack) > 1:
726
+ statestack.pop()
727
+ elif state == '#push':
728
+ statestack.append(statestack[-1])
729
+ else:
730
+ statestack.append(state)
731
+ elif isinstance(new_state, int):
732
+ # pop, but keep at least one state on the stack
733
+ # (random code leading to unexpected pops should
734
+ # not allow exceptions)
735
+ if abs(new_state) >= len(statestack):
736
+ del statestack[1:]
737
+ else:
738
+ del statestack[new_state:]
739
+ elif new_state == '#push':
740
+ statestack.append(statestack[-1])
741
+ else:
742
+ assert False, f"wrong state def: {new_state!r}"
743
+ statetokens = tokendefs[statestack[-1]]
744
+ break
745
+ else:
746
+ # We are here only if all state tokens have been considered
747
+ # and there was not a match on any of them.
748
+ try:
749
+ if text[pos] == '\n':
750
+ # at EOL, reset state to "root"
751
+ statestack = ['root']
752
+ statetokens = tokendefs['root']
753
+ yield pos, Whitespace, '\n'
754
+ pos += 1
755
+ continue
756
+ yield pos, Error, text[pos]
757
+ pos += 1
758
+ except IndexError:
759
+ break
760
+
761
+
762
+ class LexerContext:
763
+ """
764
+ A helper object that holds lexer position data.
765
+ """
766
+
767
+ def __init__(self, text, pos, stack=None, end=None):
768
+ self.text = text
769
+ self.pos = pos
770
+ self.end = end or len(text) # end=0 not supported ;-)
771
+ self.stack = stack or ['root']
772
+
773
+ def __repr__(self):
774
+ return f'LexerContext({self.text!r}, {self.pos!r}, {self.stack!r})'
775
+
776
+
777
+ class ExtendedRegexLexer(RegexLexer):
778
+ """
779
+ A RegexLexer that uses a context object to store its state.
780
+ """
781
+
782
+ def get_tokens_unprocessed(self, text=None, context=None):
783
+ """
784
+ Split ``text`` into (tokentype, text) pairs.
785
+ If ``context`` is given, use this lexer context instead.
786
+ """
787
+ tokendefs = self._tokens
788
+ if not context:
789
+ ctx = LexerContext(text, 0)
790
+ statetokens = tokendefs['root']
791
+ else:
792
+ ctx = context
793
+ statetokens = tokendefs[ctx.stack[-1]]
794
+ text = ctx.text
795
+ while 1:
796
+ for rexmatch, action, new_state in statetokens:
797
+ m = rexmatch(text, ctx.pos, ctx.end)
798
+ if m:
799
+ if action is not None:
800
+ if type(action) is _TokenType:
801
+ yield ctx.pos, action, m.group()
802
+ ctx.pos = m.end()
803
+ else:
804
+ yield from action(self, m, ctx)
805
+ if not new_state:
806
+ # altered the state stack?
807
+ statetokens = tokendefs[ctx.stack[-1]]
808
+ # CAUTION: callback must set ctx.pos!
809
+ if new_state is not None:
810
+ # state transition
811
+ if isinstance(new_state, tuple):
812
+ for state in new_state:
813
+ if state == '#pop':
814
+ if len(ctx.stack) > 1:
815
+ ctx.stack.pop()
816
+ elif state == '#push':
817
+ ctx.stack.append(ctx.stack[-1])
818
+ else:
819
+ ctx.stack.append(state)
820
+ elif isinstance(new_state, int):
821
+ # see RegexLexer for why this check is made
822
+ if abs(new_state) >= len(ctx.stack):
823
+ del ctx.stack[1:]
824
+ else:
825
+ del ctx.stack[new_state:]
826
+ elif new_state == '#push':
827
+ ctx.stack.append(ctx.stack[-1])
828
+ else:
829
+ assert False, f"wrong state def: {new_state!r}"
830
+ statetokens = tokendefs[ctx.stack[-1]]
831
+ break
832
+ else:
833
+ try:
834
+ if ctx.pos >= ctx.end:
835
+ break
836
+ if text[ctx.pos] == '\n':
837
+ # at EOL, reset state to "root"
838
+ ctx.stack = ['root']
839
+ statetokens = tokendefs['root']
840
+ yield ctx.pos, Text, '\n'
841
+ ctx.pos += 1
842
+ continue
843
+ yield ctx.pos, Error, text[ctx.pos]
844
+ ctx.pos += 1
845
+ except IndexError:
846
+ break
847
+
848
+
849
+ def do_insertions(insertions, tokens):
850
+ """
851
+ Helper for lexers which must combine the results of several
852
+ sublexers.
853
+
854
+ ``insertions`` is a list of ``(index, itokens)`` pairs.
855
+ Each ``itokens`` iterable should be inserted at position
856
+ ``index`` into the token stream given by the ``tokens``
857
+ argument.
858
+
859
+ The result is a combined token stream.
860
+
861
+ TODO: clean up the code here.
862
+ """
863
+ insertions = iter(insertions)
864
+ try:
865
+ index, itokens = next(insertions)
866
+ except StopIteration:
867
+ # no insertions
868
+ yield from tokens
869
+ return
870
+
871
+ realpos = None
872
+ insleft = True
873
+
874
+ # iterate over the token stream where we want to insert
875
+ # the tokens from the insertion list.
876
+ for i, t, v in tokens:
877
+ # first iteration. store the position of first item
878
+ if realpos is None:
879
+ realpos = i
880
+ oldi = 0
881
+ while insleft and i + len(v) >= index:
882
+ tmpval = v[oldi:index - i]
883
+ if tmpval:
884
+ yield realpos, t, tmpval
885
+ realpos += len(tmpval)
886
+ for it_index, it_token, it_value in itokens:
887
+ yield realpos, it_token, it_value
888
+ realpos += len(it_value)
889
+ oldi = index - i
890
+ try:
891
+ index, itokens = next(insertions)
892
+ except StopIteration:
893
+ insleft = False
894
+ break # not strictly necessary
895
+ if oldi < len(v):
896
+ yield realpos, t, v[oldi:]
897
+ realpos += len(v) - oldi
898
+
899
+ # leftover tokens
900
+ while insleft:
901
+ # no normal tokens, set realpos to zero
902
+ realpos = realpos or 0
903
+ for p, t, v in itokens:
904
+ yield realpos, t, v
905
+ realpos += len(v)
906
+ try:
907
+ index, itokens = next(insertions)
908
+ except StopIteration:
909
+ insleft = False
910
+ break # not strictly necessary
911
+
912
+
913
+ class ProfilingRegexLexerMeta(RegexLexerMeta):
914
+ """Metaclass for ProfilingRegexLexer, collects regex timing info."""
915
+
916
+ def _process_regex(cls, regex, rflags, state):
917
+ if isinstance(regex, words):
918
+ rex = regex_opt(regex.words, prefix=regex.prefix,
919
+ suffix=regex.suffix)
920
+ else:
921
+ rex = regex
922
+ compiled = re.compile(rex, rflags)
923
+
924
+ def match_func(text, pos, endpos=sys.maxsize):
925
+ info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
926
+ t0 = time.time()
927
+ res = compiled.match(text, pos, endpos)
928
+ t1 = time.time()
929
+ info[0] += 1
930
+ info[1] += t1 - t0
931
+ return res
932
+ return match_func
933
+
934
+
935
+ class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
936
+ """Drop-in replacement for RegexLexer that does profiling of its regexes."""
937
+
938
+ _prof_data = []
939
+ _prof_sort_index = 4 # defaults to time per call
940
+
941
+ def get_tokens_unprocessed(self, text, stack=('root',)):
942
+ # this needs to be a stack, since using(this) will produce nested calls
943
+ self.__class__._prof_data.append({})
944
+ yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
945
+ rawdata = self.__class__._prof_data.pop()
946
+ data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
947
+ n, 1000 * t, 1000 * t / n)
948
+ for ((s, r), (n, t)) in rawdata.items()),
949
+ key=lambda x: x[self._prof_sort_index],
950
+ reverse=True)
951
+ sum_total = sum(x[3] for x in data)
952
+
953
+ print()
954
+ print('Profiling result for %s lexing %d chars in %.3f ms' %
955
+ (self.__class__.__name__, len(text), sum_total))
956
+ print('=' * 110)
957
+ print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
958
+ print('-' * 110)
959
+ for d in data:
960
+ print('%-20s %-65s %5d %8.4f %8.4f' % d)
961
+ print('=' * 110)
mgm/lib/python3.10/site-packages/pygments/lexers/_scilab_builtins.py ADDED
@@ -0,0 +1,3093 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers._scilab_builtins
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Builtin list for the ScilabLexer.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ # Autogenerated
12
+
13
+ commands_kw = (
14
+ 'abort',
15
+ 'apropos',
16
+ 'break',
17
+ 'case',
18
+ 'catch',
19
+ 'continue',
20
+ 'do',
21
+ 'else',
22
+ 'elseif',
23
+ 'end',
24
+ 'endfunction',
25
+ 'for',
26
+ 'function',
27
+ 'help',
28
+ 'if',
29
+ 'pause',
30
+ 'quit',
31
+ 'select',
32
+ 'then',
33
+ 'try',
34
+ 'while',
35
+ )
36
+
37
+ functions_kw = (
38
+ '!!_invoke_',
39
+ '%H5Object_e',
40
+ '%H5Object_fieldnames',
41
+ '%H5Object_p',
42
+ '%XMLAttr_6',
43
+ '%XMLAttr_e',
44
+ '%XMLAttr_i_XMLElem',
45
+ '%XMLAttr_length',
46
+ '%XMLAttr_p',
47
+ '%XMLAttr_size',
48
+ '%XMLDoc_6',
49
+ '%XMLDoc_e',
50
+ '%XMLDoc_i_XMLList',
51
+ '%XMLDoc_p',
52
+ '%XMLElem_6',
53
+ '%XMLElem_e',
54
+ '%XMLElem_i_XMLDoc',
55
+ '%XMLElem_i_XMLElem',
56
+ '%XMLElem_i_XMLList',
57
+ '%XMLElem_p',
58
+ '%XMLList_6',
59
+ '%XMLList_e',
60
+ '%XMLList_i_XMLElem',
61
+ '%XMLList_i_XMLList',
62
+ '%XMLList_length',
63
+ '%XMLList_p',
64
+ '%XMLList_size',
65
+ '%XMLNs_6',
66
+ '%XMLNs_e',
67
+ '%XMLNs_i_XMLElem',
68
+ '%XMLNs_p',
69
+ '%XMLSet_6',
70
+ '%XMLSet_e',
71
+ '%XMLSet_length',
72
+ '%XMLSet_p',
73
+ '%XMLSet_size',
74
+ '%XMLValid_p',
75
+ '%_EClass_6',
76
+ '%_EClass_e',
77
+ '%_EClass_p',
78
+ '%_EObj_0',
79
+ '%_EObj_1__EObj',
80
+ '%_EObj_1_b',
81
+ '%_EObj_1_c',
82
+ '%_EObj_1_i',
83
+ '%_EObj_1_s',
84
+ '%_EObj_2__EObj',
85
+ '%_EObj_2_b',
86
+ '%_EObj_2_c',
87
+ '%_EObj_2_i',
88
+ '%_EObj_2_s',
89
+ '%_EObj_3__EObj',
90
+ '%_EObj_3_b',
91
+ '%_EObj_3_c',
92
+ '%_EObj_3_i',
93
+ '%_EObj_3_s',
94
+ '%_EObj_4__EObj',
95
+ '%_EObj_4_b',
96
+ '%_EObj_4_c',
97
+ '%_EObj_4_i',
98
+ '%_EObj_4_s',
99
+ '%_EObj_5',
100
+ '%_EObj_6',
101
+ '%_EObj_a__EObj',
102
+ '%_EObj_a_b',
103
+ '%_EObj_a_c',
104
+ '%_EObj_a_i',
105
+ '%_EObj_a_s',
106
+ '%_EObj_d__EObj',
107
+ '%_EObj_d_b',
108
+ '%_EObj_d_c',
109
+ '%_EObj_d_i',
110
+ '%_EObj_d_s',
111
+ '%_EObj_disp',
112
+ '%_EObj_e',
113
+ '%_EObj_g__EObj',
114
+ '%_EObj_g_b',
115
+ '%_EObj_g_c',
116
+ '%_EObj_g_i',
117
+ '%_EObj_g_s',
118
+ '%_EObj_h__EObj',
119
+ '%_EObj_h_b',
120
+ '%_EObj_h_c',
121
+ '%_EObj_h_i',
122
+ '%_EObj_h_s',
123
+ '%_EObj_i__EObj',
124
+ '%_EObj_j__EObj',
125
+ '%_EObj_j_b',
126
+ '%_EObj_j_c',
127
+ '%_EObj_j_i',
128
+ '%_EObj_j_s',
129
+ '%_EObj_k__EObj',
130
+ '%_EObj_k_b',
131
+ '%_EObj_k_c',
132
+ '%_EObj_k_i',
133
+ '%_EObj_k_s',
134
+ '%_EObj_l__EObj',
135
+ '%_EObj_l_b',
136
+ '%_EObj_l_c',
137
+ '%_EObj_l_i',
138
+ '%_EObj_l_s',
139
+ '%_EObj_m__EObj',
140
+ '%_EObj_m_b',
141
+ '%_EObj_m_c',
142
+ '%_EObj_m_i',
143
+ '%_EObj_m_s',
144
+ '%_EObj_n__EObj',
145
+ '%_EObj_n_b',
146
+ '%_EObj_n_c',
147
+ '%_EObj_n_i',
148
+ '%_EObj_n_s',
149
+ '%_EObj_o__EObj',
150
+ '%_EObj_o_b',
151
+ '%_EObj_o_c',
152
+ '%_EObj_o_i',
153
+ '%_EObj_o_s',
154
+ '%_EObj_p',
155
+ '%_EObj_p__EObj',
156
+ '%_EObj_p_b',
157
+ '%_EObj_p_c',
158
+ '%_EObj_p_i',
159
+ '%_EObj_p_s',
160
+ '%_EObj_q__EObj',
161
+ '%_EObj_q_b',
162
+ '%_EObj_q_c',
163
+ '%_EObj_q_i',
164
+ '%_EObj_q_s',
165
+ '%_EObj_r__EObj',
166
+ '%_EObj_r_b',
167
+ '%_EObj_r_c',
168
+ '%_EObj_r_i',
169
+ '%_EObj_r_s',
170
+ '%_EObj_s__EObj',
171
+ '%_EObj_s_b',
172
+ '%_EObj_s_c',
173
+ '%_EObj_s_i',
174
+ '%_EObj_s_s',
175
+ '%_EObj_t',
176
+ '%_EObj_x__EObj',
177
+ '%_EObj_x_b',
178
+ '%_EObj_x_c',
179
+ '%_EObj_x_i',
180
+ '%_EObj_x_s',
181
+ '%_EObj_y__EObj',
182
+ '%_EObj_y_b',
183
+ '%_EObj_y_c',
184
+ '%_EObj_y_i',
185
+ '%_EObj_y_s',
186
+ '%_EObj_z__EObj',
187
+ '%_EObj_z_b',
188
+ '%_EObj_z_c',
189
+ '%_EObj_z_i',
190
+ '%_EObj_z_s',
191
+ '%_eigs',
192
+ '%_load',
193
+ '%b_1__EObj',
194
+ '%b_2__EObj',
195
+ '%b_3__EObj',
196
+ '%b_4__EObj',
197
+ '%b_a__EObj',
198
+ '%b_d__EObj',
199
+ '%b_g__EObj',
200
+ '%b_h__EObj',
201
+ '%b_i_XMLList',
202
+ '%b_i__EObj',
203
+ '%b_j__EObj',
204
+ '%b_k__EObj',
205
+ '%b_l__EObj',
206
+ '%b_m__EObj',
207
+ '%b_n__EObj',
208
+ '%b_o__EObj',
209
+ '%b_p__EObj',
210
+ '%b_q__EObj',
211
+ '%b_r__EObj',
212
+ '%b_s__EObj',
213
+ '%b_x__EObj',
214
+ '%b_y__EObj',
215
+ '%b_z__EObj',
216
+ '%c_1__EObj',
217
+ '%c_2__EObj',
218
+ '%c_3__EObj',
219
+ '%c_4__EObj',
220
+ '%c_a__EObj',
221
+ '%c_d__EObj',
222
+ '%c_g__EObj',
223
+ '%c_h__EObj',
224
+ '%c_i_XMLAttr',
225
+ '%c_i_XMLDoc',
226
+ '%c_i_XMLElem',
227
+ '%c_i_XMLList',
228
+ '%c_i__EObj',
229
+ '%c_j__EObj',
230
+ '%c_k__EObj',
231
+ '%c_l__EObj',
232
+ '%c_m__EObj',
233
+ '%c_n__EObj',
234
+ '%c_o__EObj',
235
+ '%c_p__EObj',
236
+ '%c_q__EObj',
237
+ '%c_r__EObj',
238
+ '%c_s__EObj',
239
+ '%c_x__EObj',
240
+ '%c_y__EObj',
241
+ '%c_z__EObj',
242
+ '%ce_i_XMLList',
243
+ '%fptr_i_XMLList',
244
+ '%h_i_XMLList',
245
+ '%hm_i_XMLList',
246
+ '%i_1__EObj',
247
+ '%i_2__EObj',
248
+ '%i_3__EObj',
249
+ '%i_4__EObj',
250
+ '%i_a__EObj',
251
+ '%i_abs',
252
+ '%i_cumprod',
253
+ '%i_cumsum',
254
+ '%i_d__EObj',
255
+ '%i_diag',
256
+ '%i_g__EObj',
257
+ '%i_h__EObj',
258
+ '%i_i_XMLList',
259
+ '%i_i__EObj',
260
+ '%i_j__EObj',
261
+ '%i_k__EObj',
262
+ '%i_l__EObj',
263
+ '%i_m__EObj',
264
+ '%i_matrix',
265
+ '%i_max',
266
+ '%i_maxi',
267
+ '%i_min',
268
+ '%i_mini',
269
+ '%i_mput',
270
+ '%i_n__EObj',
271
+ '%i_o__EObj',
272
+ '%i_p',
273
+ '%i_p__EObj',
274
+ '%i_prod',
275
+ '%i_q__EObj',
276
+ '%i_r__EObj',
277
+ '%i_s__EObj',
278
+ '%i_sum',
279
+ '%i_tril',
280
+ '%i_triu',
281
+ '%i_x__EObj',
282
+ '%i_y__EObj',
283
+ '%i_z__EObj',
284
+ '%ip_i_XMLList',
285
+ '%l_i_XMLList',
286
+ '%l_i__EObj',
287
+ '%lss_i_XMLList',
288
+ '%mc_i_XMLList',
289
+ '%msp_full',
290
+ '%msp_i_XMLList',
291
+ '%msp_spget',
292
+ '%p_i_XMLList',
293
+ '%ptr_i_XMLList',
294
+ '%r_i_XMLList',
295
+ '%s_1__EObj',
296
+ '%s_2__EObj',
297
+ '%s_3__EObj',
298
+ '%s_4__EObj',
299
+ '%s_a__EObj',
300
+ '%s_d__EObj',
301
+ '%s_g__EObj',
302
+ '%s_h__EObj',
303
+ '%s_i_XMLList',
304
+ '%s_i__EObj',
305
+ '%s_j__EObj',
306
+ '%s_k__EObj',
307
+ '%s_l__EObj',
308
+ '%s_m__EObj',
309
+ '%s_n__EObj',
310
+ '%s_o__EObj',
311
+ '%s_p__EObj',
312
+ '%s_q__EObj',
313
+ '%s_r__EObj',
314
+ '%s_s__EObj',
315
+ '%s_x__EObj',
316
+ '%s_y__EObj',
317
+ '%s_z__EObj',
318
+ '%sp_i_XMLList',
319
+ '%spb_i_XMLList',
320
+ '%st_i_XMLList',
321
+ 'Calendar',
322
+ 'ClipBoard',
323
+ 'Matplot',
324
+ 'Matplot1',
325
+ 'PlaySound',
326
+ 'TCL_DeleteInterp',
327
+ 'TCL_DoOneEvent',
328
+ 'TCL_EvalFile',
329
+ 'TCL_EvalStr',
330
+ 'TCL_ExistArray',
331
+ 'TCL_ExistInterp',
332
+ 'TCL_ExistVar',
333
+ 'TCL_GetVar',
334
+ 'TCL_GetVersion',
335
+ 'TCL_SetVar',
336
+ 'TCL_UnsetVar',
337
+ 'TCL_UpVar',
338
+ '_',
339
+ '_code2str',
340
+ '_d',
341
+ '_str2code',
342
+ 'about',
343
+ 'abs',
344
+ 'acos',
345
+ 'addModulePreferences',
346
+ 'addcolor',
347
+ 'addf',
348
+ 'addhistory',
349
+ 'addinter',
350
+ 'addlocalizationdomain',
351
+ 'amell',
352
+ 'and',
353
+ 'argn',
354
+ 'arl2_ius',
355
+ 'ascii',
356
+ 'asin',
357
+ 'atan',
358
+ 'backslash',
359
+ 'balanc',
360
+ 'banner',
361
+ 'base2dec',
362
+ 'basename',
363
+ 'bdiag',
364
+ 'beep',
365
+ 'besselh',
366
+ 'besseli',
367
+ 'besselj',
368
+ 'besselk',
369
+ 'bessely',
370
+ 'beta',
371
+ 'bezout',
372
+ 'bfinit',
373
+ 'blkfc1i',
374
+ 'blkslvi',
375
+ 'bool2s',
376
+ 'browsehistory',
377
+ 'browsevar',
378
+ 'bsplin3val',
379
+ 'buildDoc',
380
+ 'buildouttb',
381
+ 'bvode',
382
+ 'c_link',
383
+ 'call',
384
+ 'callblk',
385
+ 'captions',
386
+ 'cd',
387
+ 'cdfbet',
388
+ 'cdfbin',
389
+ 'cdfchi',
390
+ 'cdfchn',
391
+ 'cdff',
392
+ 'cdffnc',
393
+ 'cdfgam',
394
+ 'cdfnbn',
395
+ 'cdfnor',
396
+ 'cdfpoi',
397
+ 'cdft',
398
+ 'ceil',
399
+ 'champ',
400
+ 'champ1',
401
+ 'chdir',
402
+ 'chol',
403
+ 'clc',
404
+ 'clean',
405
+ 'clear',
406
+ 'clearfun',
407
+ 'clearglobal',
408
+ 'closeEditor',
409
+ 'closeEditvar',
410
+ 'closeXcos',
411
+ 'code2str',
412
+ 'coeff',
413
+ 'color',
414
+ 'comp',
415
+ 'completion',
416
+ 'conj',
417
+ 'contour2di',
418
+ 'contr',
419
+ 'conv2',
420
+ 'convstr',
421
+ 'copy',
422
+ 'copyfile',
423
+ 'corr',
424
+ 'cos',
425
+ 'coserror',
426
+ 'createdir',
427
+ 'cshep2d',
428
+ 'csvDefault',
429
+ 'csvIsnum',
430
+ 'csvRead',
431
+ 'csvStringToDouble',
432
+ 'csvTextScan',
433
+ 'csvWrite',
434
+ 'ctree2',
435
+ 'ctree3',
436
+ 'ctree4',
437
+ 'cumprod',
438
+ 'cumsum',
439
+ 'curblock',
440
+ 'curblockc',
441
+ 'daskr',
442
+ 'dasrt',
443
+ 'dassl',
444
+ 'data2sig',
445
+ 'datatipCreate',
446
+ 'datatipManagerMode',
447
+ 'datatipMove',
448
+ 'datatipRemove',
449
+ 'datatipSetDisplay',
450
+ 'datatipSetInterp',
451
+ 'datatipSetOrientation',
452
+ 'datatipSetStyle',
453
+ 'datatipToggle',
454
+ 'dawson',
455
+ 'dct',
456
+ 'debug',
457
+ 'dec2base',
458
+ 'deff',
459
+ 'definedfields',
460
+ 'degree',
461
+ 'delbpt',
462
+ 'delete',
463
+ 'deletefile',
464
+ 'delip',
465
+ 'delmenu',
466
+ 'det',
467
+ 'dgettext',
468
+ 'dhinf',
469
+ 'diag',
470
+ 'diary',
471
+ 'diffobjs',
472
+ 'disp',
473
+ 'dispbpt',
474
+ 'displayhistory',
475
+ 'disposefftwlibrary',
476
+ 'dlgamma',
477
+ 'dnaupd',
478
+ 'dneupd',
479
+ 'double',
480
+ 'drawaxis',
481
+ 'drawlater',
482
+ 'drawnow',
483
+ 'driver',
484
+ 'dsaupd',
485
+ 'dsearch',
486
+ 'dseupd',
487
+ 'dst',
488
+ 'duplicate',
489
+ 'editvar',
490
+ 'emptystr',
491
+ 'end_scicosim',
492
+ 'ereduc',
493
+ 'erf',
494
+ 'erfc',
495
+ 'erfcx',
496
+ 'erfi',
497
+ 'errcatch',
498
+ 'errclear',
499
+ 'error',
500
+ 'eval_cshep2d',
501
+ 'exec',
502
+ 'execstr',
503
+ 'exists',
504
+ 'exit',
505
+ 'exp',
506
+ 'expm',
507
+ 'exportUI',
508
+ 'export_to_hdf5',
509
+ 'eye',
510
+ 'fadj2sp',
511
+ 'fec',
512
+ 'feval',
513
+ 'fft',
514
+ 'fftw',
515
+ 'fftw_flags',
516
+ 'fftw_forget_wisdom',
517
+ 'fftwlibraryisloaded',
518
+ 'figure',
519
+ 'file',
520
+ 'filebrowser',
521
+ 'fileext',
522
+ 'fileinfo',
523
+ 'fileparts',
524
+ 'filesep',
525
+ 'find',
526
+ 'findBD',
527
+ 'findfiles',
528
+ 'fire_closing_finished',
529
+ 'floor',
530
+ 'format',
531
+ 'fort',
532
+ 'fprintfMat',
533
+ 'freq',
534
+ 'frexp',
535
+ 'fromc',
536
+ 'fromjava',
537
+ 'fscanfMat',
538
+ 'fsolve',
539
+ 'fstair',
540
+ 'full',
541
+ 'fullpath',
542
+ 'funcprot',
543
+ 'funptr',
544
+ 'gamma',
545
+ 'gammaln',
546
+ 'geom3d',
547
+ 'get',
548
+ 'getURL',
549
+ 'get_absolute_file_path',
550
+ 'get_fftw_wisdom',
551
+ 'getblocklabel',
552
+ 'getcallbackobject',
553
+ 'getdate',
554
+ 'getdebuginfo',
555
+ 'getdefaultlanguage',
556
+ 'getdrives',
557
+ 'getdynlibext',
558
+ 'getenv',
559
+ 'getfield',
560
+ 'gethistory',
561
+ 'gethistoryfile',
562
+ 'getinstalledlookandfeels',
563
+ 'getio',
564
+ 'getlanguage',
565
+ 'getlongpathname',
566
+ 'getlookandfeel',
567
+ 'getmd5',
568
+ 'getmemory',
569
+ 'getmodules',
570
+ 'getos',
571
+ 'getpid',
572
+ 'getrelativefilename',
573
+ 'getscicosvars',
574
+ 'getscilabmode',
575
+ 'getshortpathname',
576
+ 'gettext',
577
+ 'getvariablesonstack',
578
+ 'getversion',
579
+ 'glist',
580
+ 'global',
581
+ 'glue',
582
+ 'grand',
583
+ 'graphicfunction',
584
+ 'grayplot',
585
+ 'grep',
586
+ 'gsort',
587
+ 'gstacksize',
588
+ 'h5attr',
589
+ 'h5close',
590
+ 'h5cp',
591
+ 'h5dataset',
592
+ 'h5dump',
593
+ 'h5exists',
594
+ 'h5flush',
595
+ 'h5get',
596
+ 'h5group',
597
+ 'h5isArray',
598
+ 'h5isAttr',
599
+ 'h5isCompound',
600
+ 'h5isFile',
601
+ 'h5isGroup',
602
+ 'h5isList',
603
+ 'h5isRef',
604
+ 'h5isSet',
605
+ 'h5isSpace',
606
+ 'h5isType',
607
+ 'h5isVlen',
608
+ 'h5label',
609
+ 'h5ln',
610
+ 'h5ls',
611
+ 'h5mount',
612
+ 'h5mv',
613
+ 'h5open',
614
+ 'h5read',
615
+ 'h5readattr',
616
+ 'h5rm',
617
+ 'h5umount',
618
+ 'h5write',
619
+ 'h5writeattr',
620
+ 'havewindow',
621
+ 'helpbrowser',
622
+ 'hess',
623
+ 'hinf',
624
+ 'historymanager',
625
+ 'historysize',
626
+ 'host',
627
+ 'htmlDump',
628
+ 'htmlRead',
629
+ 'htmlReadStr',
630
+ 'htmlWrite',
631
+ 'iconvert',
632
+ 'ieee',
633
+ 'ilib_verbose',
634
+ 'imag',
635
+ 'impl',
636
+ 'import_from_hdf5',
637
+ 'imult',
638
+ 'inpnvi',
639
+ 'int',
640
+ 'int16',
641
+ 'int2d',
642
+ 'int32',
643
+ 'int3d',
644
+ 'int8',
645
+ 'interp',
646
+ 'interp2d',
647
+ 'interp3d',
648
+ 'intg',
649
+ 'intppty',
650
+ 'inttype',
651
+ 'inv',
652
+ 'invoke_lu',
653
+ 'is_handle_valid',
654
+ 'is_hdf5_file',
655
+ 'isalphanum',
656
+ 'isascii',
657
+ 'isdef',
658
+ 'isdigit',
659
+ 'isdir',
660
+ 'isequal',
661
+ 'isequalbitwise',
662
+ 'iserror',
663
+ 'isfile',
664
+ 'isglobal',
665
+ 'isletter',
666
+ 'isnum',
667
+ 'isreal',
668
+ 'iswaitingforinput',
669
+ 'jallowClassReloading',
670
+ 'jarray',
671
+ 'jautoTranspose',
672
+ 'jautoUnwrap',
673
+ 'javaclasspath',
674
+ 'javalibrarypath',
675
+ 'jcast',
676
+ 'jcompile',
677
+ 'jconvMatrixMethod',
678
+ 'jcreatejar',
679
+ 'jdeff',
680
+ 'jdisableTrace',
681
+ 'jenableTrace',
682
+ 'jexists',
683
+ 'jgetclassname',
684
+ 'jgetfield',
685
+ 'jgetfields',
686
+ 'jgetinfo',
687
+ 'jgetmethods',
688
+ 'jimport',
689
+ 'jinvoke',
690
+ 'jinvoke_db',
691
+ 'jnewInstance',
692
+ 'jremove',
693
+ 'jsetfield',
694
+ 'junwrap',
695
+ 'junwraprem',
696
+ 'jwrap',
697
+ 'jwrapinfloat',
698
+ 'kron',
699
+ 'lasterror',
700
+ 'ldiv',
701
+ 'ldivf',
702
+ 'legendre',
703
+ 'length',
704
+ 'lib',
705
+ 'librarieslist',
706
+ 'libraryinfo',
707
+ 'light',
708
+ 'linear_interpn',
709
+ 'lines',
710
+ 'link',
711
+ 'linmeq',
712
+ 'list',
713
+ 'listvar_in_hdf5',
714
+ 'load',
715
+ 'loadGui',
716
+ 'loadScicos',
717
+ 'loadXcos',
718
+ 'loadfftwlibrary',
719
+ 'loadhistory',
720
+ 'log',
721
+ 'log1p',
722
+ 'lsq',
723
+ 'lsq_splin',
724
+ 'lsqrsolve',
725
+ 'lsslist',
726
+ 'lstcat',
727
+ 'lstsize',
728
+ 'ltitr',
729
+ 'lu',
730
+ 'ludel',
731
+ 'lufact',
732
+ 'luget',
733
+ 'lusolve',
734
+ 'macr2lst',
735
+ 'macr2tree',
736
+ 'matfile_close',
737
+ 'matfile_listvar',
738
+ 'matfile_open',
739
+ 'matfile_varreadnext',
740
+ 'matfile_varwrite',
741
+ 'matrix',
742
+ 'max',
743
+ 'maxfiles',
744
+ 'mclearerr',
745
+ 'mclose',
746
+ 'meof',
747
+ 'merror',
748
+ 'messagebox',
749
+ 'mfprintf',
750
+ 'mfscanf',
751
+ 'mget',
752
+ 'mgeti',
753
+ 'mgetl',
754
+ 'mgetstr',
755
+ 'min',
756
+ 'mlist',
757
+ 'mode',
758
+ 'model2blk',
759
+ 'mopen',
760
+ 'move',
761
+ 'movefile',
762
+ 'mprintf',
763
+ 'mput',
764
+ 'mputl',
765
+ 'mputstr',
766
+ 'mscanf',
767
+ 'mseek',
768
+ 'msprintf',
769
+ 'msscanf',
770
+ 'mtell',
771
+ 'mtlb_mode',
772
+ 'mtlb_sparse',
773
+ 'mucomp',
774
+ 'mulf',
775
+ 'name2rgb',
776
+ 'nearfloat',
777
+ 'newaxes',
778
+ 'newest',
779
+ 'newfun',
780
+ 'nnz',
781
+ 'norm',
782
+ 'notify',
783
+ 'number_properties',
784
+ 'ode',
785
+ 'odedc',
786
+ 'ones',
787
+ 'openged',
788
+ 'opentk',
789
+ 'optim',
790
+ 'or',
791
+ 'ordmmd',
792
+ 'parallel_concurrency',
793
+ 'parallel_run',
794
+ 'param3d',
795
+ 'param3d1',
796
+ 'part',
797
+ 'pathconvert',
798
+ 'pathsep',
799
+ 'phase_simulation',
800
+ 'plot2d',
801
+ 'plot2d1',
802
+ 'plot2d2',
803
+ 'plot2d3',
804
+ 'plot2d4',
805
+ 'plot3d',
806
+ 'plot3d1',
807
+ 'plotbrowser',
808
+ 'pointer_xproperty',
809
+ 'poly',
810
+ 'ppol',
811
+ 'pppdiv',
812
+ 'predef',
813
+ 'preferences',
814
+ 'print',
815
+ 'printf',
816
+ 'printfigure',
817
+ 'printsetupbox',
818
+ 'prod',
819
+ 'progressionbar',
820
+ 'prompt',
821
+ 'pwd',
822
+ 'qld',
823
+ 'qp_solve',
824
+ 'qr',
825
+ 'raise_window',
826
+ 'rand',
827
+ 'rankqr',
828
+ 'rat',
829
+ 'rcond',
830
+ 'rdivf',
831
+ 'read',
832
+ 'read4b',
833
+ 'read_csv',
834
+ 'readb',
835
+ 'readgateway',
836
+ 'readmps',
837
+ 'real',
838
+ 'realtime',
839
+ 'realtimeinit',
840
+ 'regexp',
841
+ 'relocate_handle',
842
+ 'remez',
843
+ 'removeModulePreferences',
844
+ 'removedir',
845
+ 'removelinehistory',
846
+ 'res_with_prec',
847
+ 'resethistory',
848
+ 'residu',
849
+ 'resume',
850
+ 'return',
851
+ 'ricc',
852
+ 'rlist',
853
+ 'roots',
854
+ 'rotate_axes',
855
+ 'round',
856
+ 'rpem',
857
+ 'rtitr',
858
+ 'rubberbox',
859
+ 'save',
860
+ 'saveGui',
861
+ 'saveafterncommands',
862
+ 'saveconsecutivecommands',
863
+ 'savehistory',
864
+ 'schur',
865
+ 'sci_haltscicos',
866
+ 'sci_tree2',
867
+ 'sci_tree3',
868
+ 'sci_tree4',
869
+ 'sciargs',
870
+ 'scicos_debug',
871
+ 'scicos_debug_count',
872
+ 'scicos_time',
873
+ 'scicosim',
874
+ 'scinotes',
875
+ 'sctree',
876
+ 'semidef',
877
+ 'set',
878
+ 'set_blockerror',
879
+ 'set_fftw_wisdom',
880
+ 'set_xproperty',
881
+ 'setbpt',
882
+ 'setdefaultlanguage',
883
+ 'setenv',
884
+ 'setfield',
885
+ 'sethistoryfile',
886
+ 'setlanguage',
887
+ 'setlookandfeel',
888
+ 'setmenu',
889
+ 'sfact',
890
+ 'sfinit',
891
+ 'show_window',
892
+ 'sident',
893
+ 'sig2data',
894
+ 'sign',
895
+ 'simp',
896
+ 'simp_mode',
897
+ 'sin',
898
+ 'size',
899
+ 'slash',
900
+ 'sleep',
901
+ 'sorder',
902
+ 'sparse',
903
+ 'spchol',
904
+ 'spcompack',
905
+ 'spec',
906
+ 'spget',
907
+ 'splin',
908
+ 'splin2d',
909
+ 'splin3d',
910
+ 'splitURL',
911
+ 'spones',
912
+ 'sprintf',
913
+ 'sqrt',
914
+ 'stacksize',
915
+ 'str2code',
916
+ 'strcat',
917
+ 'strchr',
918
+ 'strcmp',
919
+ 'strcspn',
920
+ 'strindex',
921
+ 'string',
922
+ 'stringbox',
923
+ 'stripblanks',
924
+ 'strncpy',
925
+ 'strrchr',
926
+ 'strrev',
927
+ 'strsplit',
928
+ 'strspn',
929
+ 'strstr',
930
+ 'strsubst',
931
+ 'strtod',
932
+ 'strtok',
933
+ 'subf',
934
+ 'sum',
935
+ 'svd',
936
+ 'swap_handles',
937
+ 'symfcti',
938
+ 'syredi',
939
+ 'system_getproperty',
940
+ 'system_setproperty',
941
+ 'ta2lpd',
942
+ 'tan',
943
+ 'taucs_chdel',
944
+ 'taucs_chfact',
945
+ 'taucs_chget',
946
+ 'taucs_chinfo',
947
+ 'taucs_chsolve',
948
+ 'tempname',
949
+ 'testmatrix',
950
+ 'timer',
951
+ 'tlist',
952
+ 'tohome',
953
+ 'tokens',
954
+ 'toolbar',
955
+ 'toprint',
956
+ 'tr_zer',
957
+ 'tril',
958
+ 'triu',
959
+ 'type',
960
+ 'typename',
961
+ 'uiDisplayTree',
962
+ 'uicontextmenu',
963
+ 'uicontrol',
964
+ 'uigetcolor',
965
+ 'uigetdir',
966
+ 'uigetfile',
967
+ 'uigetfont',
968
+ 'uimenu',
969
+ 'uint16',
970
+ 'uint32',
971
+ 'uint8',
972
+ 'uipopup',
973
+ 'uiputfile',
974
+ 'uiwait',
975
+ 'ulink',
976
+ 'umf_ludel',
977
+ 'umf_lufact',
978
+ 'umf_luget',
979
+ 'umf_luinfo',
980
+ 'umf_lusolve',
981
+ 'umfpack',
982
+ 'unglue',
983
+ 'unix',
984
+ 'unsetmenu',
985
+ 'unzoom',
986
+ 'updatebrowsevar',
987
+ 'usecanvas',
988
+ 'useeditor',
989
+ 'user',
990
+ 'var2vec',
991
+ 'varn',
992
+ 'vec2var',
993
+ 'waitbar',
994
+ 'warnBlockByUID',
995
+ 'warning',
996
+ 'what',
997
+ 'where',
998
+ 'whereis',
999
+ 'who',
1000
+ 'winsid',
1001
+ 'with_module',
1002
+ 'writb',
1003
+ 'write',
1004
+ 'write4b',
1005
+ 'write_csv',
1006
+ 'x_choose',
1007
+ 'x_choose_modeless',
1008
+ 'x_dialog',
1009
+ 'x_mdialog',
1010
+ 'xarc',
1011
+ 'xarcs',
1012
+ 'xarrows',
1013
+ 'xchange',
1014
+ 'xchoicesi',
1015
+ 'xclick',
1016
+ 'xcos',
1017
+ 'xcosAddToolsMenu',
1018
+ 'xcosConfigureXmlFile',
1019
+ 'xcosDiagramToScilab',
1020
+ 'xcosPalCategoryAdd',
1021
+ 'xcosPalDelete',
1022
+ 'xcosPalDisable',
1023
+ 'xcosPalEnable',
1024
+ 'xcosPalGenerateIcon',
1025
+ 'xcosPalGet',
1026
+ 'xcosPalLoad',
1027
+ 'xcosPalMove',
1028
+ 'xcosSimulationStarted',
1029
+ 'xcosUpdateBlock',
1030
+ 'xdel',
1031
+ 'xend',
1032
+ 'xfarc',
1033
+ 'xfarcs',
1034
+ 'xfpoly',
1035
+ 'xfpolys',
1036
+ 'xfrect',
1037
+ 'xget',
1038
+ 'xgetmouse',
1039
+ 'xgraduate',
1040
+ 'xgrid',
1041
+ 'xinit',
1042
+ 'xlfont',
1043
+ 'xls_open',
1044
+ 'xls_read',
1045
+ 'xmlAddNs',
1046
+ 'xmlAppend',
1047
+ 'xmlAsNumber',
1048
+ 'xmlAsText',
1049
+ 'xmlDTD',
1050
+ 'xmlDelete',
1051
+ 'xmlDocument',
1052
+ 'xmlDump',
1053
+ 'xmlElement',
1054
+ 'xmlFormat',
1055
+ 'xmlGetNsByHref',
1056
+ 'xmlGetNsByPrefix',
1057
+ 'xmlGetOpenDocs',
1058
+ 'xmlIsValidObject',
1059
+ 'xmlName',
1060
+ 'xmlNs',
1061
+ 'xmlRead',
1062
+ 'xmlReadStr',
1063
+ 'xmlRelaxNG',
1064
+ 'xmlRemove',
1065
+ 'xmlSchema',
1066
+ 'xmlSetAttributes',
1067
+ 'xmlValidate',
1068
+ 'xmlWrite',
1069
+ 'xmlXPath',
1070
+ 'xname',
1071
+ 'xpause',
1072
+ 'xpoly',
1073
+ 'xpolys',
1074
+ 'xrect',
1075
+ 'xrects',
1076
+ 'xs2bmp',
1077
+ 'xs2emf',
1078
+ 'xs2eps',
1079
+ 'xs2gif',
1080
+ 'xs2jpg',
1081
+ 'xs2pdf',
1082
+ 'xs2png',
1083
+ 'xs2ppm',
1084
+ 'xs2ps',
1085
+ 'xs2svg',
1086
+ 'xsegs',
1087
+ 'xset',
1088
+ 'xstring',
1089
+ 'xstringb',
1090
+ 'xtitle',
1091
+ 'zeros',
1092
+ 'znaupd',
1093
+ 'zneupd',
1094
+ 'zoom_rect',
1095
+ )
1096
+
1097
+ macros_kw = (
1098
+ '!_deff_wrapper',
1099
+ '%0_i_st',
1100
+ '%3d_i_h',
1101
+ '%Block_xcosUpdateBlock',
1102
+ '%TNELDER_p',
1103
+ '%TNELDER_string',
1104
+ '%TNMPLOT_p',
1105
+ '%TNMPLOT_string',
1106
+ '%TOPTIM_p',
1107
+ '%TOPTIM_string',
1108
+ '%TSIMPLEX_p',
1109
+ '%TSIMPLEX_string',
1110
+ '%_EVoid_p',
1111
+ '%_gsort',
1112
+ '%_listvarinfile',
1113
+ '%_rlist',
1114
+ '%_save',
1115
+ '%_sodload',
1116
+ '%_strsplit',
1117
+ '%_unwrap',
1118
+ '%ar_p',
1119
+ '%asn',
1120
+ '%b_a_b',
1121
+ '%b_a_s',
1122
+ '%b_c_s',
1123
+ '%b_c_spb',
1124
+ '%b_cumprod',
1125
+ '%b_cumsum',
1126
+ '%b_d_s',
1127
+ '%b_diag',
1128
+ '%b_e',
1129
+ '%b_f_s',
1130
+ '%b_f_spb',
1131
+ '%b_g_s',
1132
+ '%b_g_spb',
1133
+ '%b_grand',
1134
+ '%b_h_s',
1135
+ '%b_h_spb',
1136
+ '%b_i_b',
1137
+ '%b_i_ce',
1138
+ '%b_i_h',
1139
+ '%b_i_hm',
1140
+ '%b_i_s',
1141
+ '%b_i_sp',
1142
+ '%b_i_spb',
1143
+ '%b_i_st',
1144
+ '%b_iconvert',
1145
+ '%b_l_b',
1146
+ '%b_l_s',
1147
+ '%b_m_b',
1148
+ '%b_m_s',
1149
+ '%b_matrix',
1150
+ '%b_n_hm',
1151
+ '%b_o_hm',
1152
+ '%b_p_s',
1153
+ '%b_prod',
1154
+ '%b_r_b',
1155
+ '%b_r_s',
1156
+ '%b_s_b',
1157
+ '%b_s_s',
1158
+ '%b_string',
1159
+ '%b_sum',
1160
+ '%b_tril',
1161
+ '%b_triu',
1162
+ '%b_x_b',
1163
+ '%b_x_s',
1164
+ '%bicg',
1165
+ '%bicgstab',
1166
+ '%c_a_c',
1167
+ '%c_b_c',
1168
+ '%c_b_s',
1169
+ '%c_diag',
1170
+ '%c_dsearch',
1171
+ '%c_e',
1172
+ '%c_eye',
1173
+ '%c_f_s',
1174
+ '%c_grand',
1175
+ '%c_i_c',
1176
+ '%c_i_ce',
1177
+ '%c_i_h',
1178
+ '%c_i_hm',
1179
+ '%c_i_lss',
1180
+ '%c_i_r',
1181
+ '%c_i_s',
1182
+ '%c_i_st',
1183
+ '%c_matrix',
1184
+ '%c_n_l',
1185
+ '%c_n_st',
1186
+ '%c_o_l',
1187
+ '%c_o_st',
1188
+ '%c_ones',
1189
+ '%c_rand',
1190
+ '%c_tril',
1191
+ '%c_triu',
1192
+ '%cblock_c_cblock',
1193
+ '%cblock_c_s',
1194
+ '%cblock_e',
1195
+ '%cblock_f_cblock',
1196
+ '%cblock_p',
1197
+ '%cblock_size',
1198
+ '%ce_6',
1199
+ '%ce_c_ce',
1200
+ '%ce_e',
1201
+ '%ce_f_ce',
1202
+ '%ce_i_ce',
1203
+ '%ce_i_s',
1204
+ '%ce_i_st',
1205
+ '%ce_matrix',
1206
+ '%ce_p',
1207
+ '%ce_size',
1208
+ '%ce_string',
1209
+ '%ce_t',
1210
+ '%cgs',
1211
+ '%champdat_i_h',
1212
+ '%choose',
1213
+ '%diagram_xcos',
1214
+ '%dir_p',
1215
+ '%fptr_i_st',
1216
+ '%grand_perm',
1217
+ '%grayplot_i_h',
1218
+ '%h_i_st',
1219
+ '%hmS_k_hmS_generic',
1220
+ '%hm_1_hm',
1221
+ '%hm_1_s',
1222
+ '%hm_2_hm',
1223
+ '%hm_2_s',
1224
+ '%hm_3_hm',
1225
+ '%hm_3_s',
1226
+ '%hm_4_hm',
1227
+ '%hm_4_s',
1228
+ '%hm_5',
1229
+ '%hm_a_hm',
1230
+ '%hm_a_r',
1231
+ '%hm_a_s',
1232
+ '%hm_abs',
1233
+ '%hm_and',
1234
+ '%hm_bool2s',
1235
+ '%hm_c_hm',
1236
+ '%hm_ceil',
1237
+ '%hm_conj',
1238
+ '%hm_cos',
1239
+ '%hm_cumprod',
1240
+ '%hm_cumsum',
1241
+ '%hm_d_hm',
1242
+ '%hm_d_s',
1243
+ '%hm_degree',
1244
+ '%hm_dsearch',
1245
+ '%hm_e',
1246
+ '%hm_exp',
1247
+ '%hm_eye',
1248
+ '%hm_f_hm',
1249
+ '%hm_find',
1250
+ '%hm_floor',
1251
+ '%hm_g_hm',
1252
+ '%hm_grand',
1253
+ '%hm_gsort',
1254
+ '%hm_h_hm',
1255
+ '%hm_i_b',
1256
+ '%hm_i_ce',
1257
+ '%hm_i_h',
1258
+ '%hm_i_hm',
1259
+ '%hm_i_i',
1260
+ '%hm_i_p',
1261
+ '%hm_i_r',
1262
+ '%hm_i_s',
1263
+ '%hm_i_st',
1264
+ '%hm_iconvert',
1265
+ '%hm_imag',
1266
+ '%hm_int',
1267
+ '%hm_isnan',
1268
+ '%hm_isreal',
1269
+ '%hm_j_hm',
1270
+ '%hm_j_s',
1271
+ '%hm_k_hm',
1272
+ '%hm_k_s',
1273
+ '%hm_log',
1274
+ '%hm_m_p',
1275
+ '%hm_m_r',
1276
+ '%hm_m_s',
1277
+ '%hm_matrix',
1278
+ '%hm_max',
1279
+ '%hm_mean',
1280
+ '%hm_median',
1281
+ '%hm_min',
1282
+ '%hm_n_b',
1283
+ '%hm_n_c',
1284
+ '%hm_n_hm',
1285
+ '%hm_n_i',
1286
+ '%hm_n_p',
1287
+ '%hm_n_s',
1288
+ '%hm_o_b',
1289
+ '%hm_o_c',
1290
+ '%hm_o_hm',
1291
+ '%hm_o_i',
1292
+ '%hm_o_p',
1293
+ '%hm_o_s',
1294
+ '%hm_ones',
1295
+ '%hm_or',
1296
+ '%hm_p',
1297
+ '%hm_prod',
1298
+ '%hm_q_hm',
1299
+ '%hm_r_s',
1300
+ '%hm_rand',
1301
+ '%hm_real',
1302
+ '%hm_round',
1303
+ '%hm_s',
1304
+ '%hm_s_hm',
1305
+ '%hm_s_r',
1306
+ '%hm_s_s',
1307
+ '%hm_sign',
1308
+ '%hm_sin',
1309
+ '%hm_size',
1310
+ '%hm_sqrt',
1311
+ '%hm_stdev',
1312
+ '%hm_string',
1313
+ '%hm_sum',
1314
+ '%hm_x_hm',
1315
+ '%hm_x_p',
1316
+ '%hm_x_s',
1317
+ '%hm_zeros',
1318
+ '%i_1_s',
1319
+ '%i_2_s',
1320
+ '%i_3_s',
1321
+ '%i_4_s',
1322
+ '%i_Matplot',
1323
+ '%i_a_i',
1324
+ '%i_a_s',
1325
+ '%i_and',
1326
+ '%i_ascii',
1327
+ '%i_b_s',
1328
+ '%i_bezout',
1329
+ '%i_champ',
1330
+ '%i_champ1',
1331
+ '%i_contour',
1332
+ '%i_contour2d',
1333
+ '%i_d_i',
1334
+ '%i_d_s',
1335
+ '%i_dsearch',
1336
+ '%i_e',
1337
+ '%i_fft',
1338
+ '%i_g_i',
1339
+ '%i_gcd',
1340
+ '%i_grand',
1341
+ '%i_h_i',
1342
+ '%i_i_ce',
1343
+ '%i_i_h',
1344
+ '%i_i_hm',
1345
+ '%i_i_i',
1346
+ '%i_i_s',
1347
+ '%i_i_st',
1348
+ '%i_j_i',
1349
+ '%i_j_s',
1350
+ '%i_l_s',
1351
+ '%i_lcm',
1352
+ '%i_length',
1353
+ '%i_m_i',
1354
+ '%i_m_s',
1355
+ '%i_mfprintf',
1356
+ '%i_mprintf',
1357
+ '%i_msprintf',
1358
+ '%i_n_s',
1359
+ '%i_o_s',
1360
+ '%i_or',
1361
+ '%i_p_i',
1362
+ '%i_p_s',
1363
+ '%i_plot2d',
1364
+ '%i_plot2d1',
1365
+ '%i_plot2d2',
1366
+ '%i_q_s',
1367
+ '%i_r_i',
1368
+ '%i_r_s',
1369
+ '%i_round',
1370
+ '%i_s_i',
1371
+ '%i_s_s',
1372
+ '%i_sign',
1373
+ '%i_string',
1374
+ '%i_x_i',
1375
+ '%i_x_s',
1376
+ '%ip_a_s',
1377
+ '%ip_i_st',
1378
+ '%ip_m_s',
1379
+ '%ip_n_ip',
1380
+ '%ip_o_ip',
1381
+ '%ip_p',
1382
+ '%ip_part',
1383
+ '%ip_s_s',
1384
+ '%ip_string',
1385
+ '%k',
1386
+ '%l_i_h',
1387
+ '%l_i_s',
1388
+ '%l_i_st',
1389
+ '%l_isequal',
1390
+ '%l_n_c',
1391
+ '%l_n_l',
1392
+ '%l_n_m',
1393
+ '%l_n_p',
1394
+ '%l_n_s',
1395
+ '%l_n_st',
1396
+ '%l_o_c',
1397
+ '%l_o_l',
1398
+ '%l_o_m',
1399
+ '%l_o_p',
1400
+ '%l_o_s',
1401
+ '%l_o_st',
1402
+ '%lss_a_lss',
1403
+ '%lss_a_p',
1404
+ '%lss_a_r',
1405
+ '%lss_a_s',
1406
+ '%lss_c_lss',
1407
+ '%lss_c_p',
1408
+ '%lss_c_r',
1409
+ '%lss_c_s',
1410
+ '%lss_e',
1411
+ '%lss_eye',
1412
+ '%lss_f_lss',
1413
+ '%lss_f_p',
1414
+ '%lss_f_r',
1415
+ '%lss_f_s',
1416
+ '%lss_i_ce',
1417
+ '%lss_i_lss',
1418
+ '%lss_i_p',
1419
+ '%lss_i_r',
1420
+ '%lss_i_s',
1421
+ '%lss_i_st',
1422
+ '%lss_inv',
1423
+ '%lss_l_lss',
1424
+ '%lss_l_p',
1425
+ '%lss_l_r',
1426
+ '%lss_l_s',
1427
+ '%lss_m_lss',
1428
+ '%lss_m_p',
1429
+ '%lss_m_r',
1430
+ '%lss_m_s',
1431
+ '%lss_n_lss',
1432
+ '%lss_n_p',
1433
+ '%lss_n_r',
1434
+ '%lss_n_s',
1435
+ '%lss_norm',
1436
+ '%lss_o_lss',
1437
+ '%lss_o_p',
1438
+ '%lss_o_r',
1439
+ '%lss_o_s',
1440
+ '%lss_ones',
1441
+ '%lss_r_lss',
1442
+ '%lss_r_p',
1443
+ '%lss_r_r',
1444
+ '%lss_r_s',
1445
+ '%lss_rand',
1446
+ '%lss_s',
1447
+ '%lss_s_lss',
1448
+ '%lss_s_p',
1449
+ '%lss_s_r',
1450
+ '%lss_s_s',
1451
+ '%lss_size',
1452
+ '%lss_t',
1453
+ '%lss_v_lss',
1454
+ '%lss_v_p',
1455
+ '%lss_v_r',
1456
+ '%lss_v_s',
1457
+ '%lt_i_s',
1458
+ '%m_n_l',
1459
+ '%m_o_l',
1460
+ '%mc_i_h',
1461
+ '%mc_i_s',
1462
+ '%mc_i_st',
1463
+ '%mc_n_st',
1464
+ '%mc_o_st',
1465
+ '%mc_string',
1466
+ '%mps_p',
1467
+ '%mps_string',
1468
+ '%msp_a_s',
1469
+ '%msp_abs',
1470
+ '%msp_e',
1471
+ '%msp_find',
1472
+ '%msp_i_s',
1473
+ '%msp_i_st',
1474
+ '%msp_length',
1475
+ '%msp_m_s',
1476
+ '%msp_maxi',
1477
+ '%msp_n_msp',
1478
+ '%msp_nnz',
1479
+ '%msp_o_msp',
1480
+ '%msp_p',
1481
+ '%msp_sparse',
1482
+ '%msp_spones',
1483
+ '%msp_t',
1484
+ '%p_a_lss',
1485
+ '%p_a_r',
1486
+ '%p_c_lss',
1487
+ '%p_c_r',
1488
+ '%p_cumprod',
1489
+ '%p_cumsum',
1490
+ '%p_d_p',
1491
+ '%p_d_r',
1492
+ '%p_d_s',
1493
+ '%p_det',
1494
+ '%p_e',
1495
+ '%p_f_lss',
1496
+ '%p_f_r',
1497
+ '%p_grand',
1498
+ '%p_i_ce',
1499
+ '%p_i_h',
1500
+ '%p_i_hm',
1501
+ '%p_i_lss',
1502
+ '%p_i_p',
1503
+ '%p_i_r',
1504
+ '%p_i_s',
1505
+ '%p_i_st',
1506
+ '%p_inv',
1507
+ '%p_j_s',
1508
+ '%p_k_p',
1509
+ '%p_k_r',
1510
+ '%p_k_s',
1511
+ '%p_l_lss',
1512
+ '%p_l_p',
1513
+ '%p_l_r',
1514
+ '%p_l_s',
1515
+ '%p_m_hm',
1516
+ '%p_m_lss',
1517
+ '%p_m_r',
1518
+ '%p_matrix',
1519
+ '%p_n_l',
1520
+ '%p_n_lss',
1521
+ '%p_n_r',
1522
+ '%p_o_l',
1523
+ '%p_o_lss',
1524
+ '%p_o_r',
1525
+ '%p_o_sp',
1526
+ '%p_p_s',
1527
+ '%p_part',
1528
+ '%p_prod',
1529
+ '%p_q_p',
1530
+ '%p_q_r',
1531
+ '%p_q_s',
1532
+ '%p_r_lss',
1533
+ '%p_r_p',
1534
+ '%p_r_r',
1535
+ '%p_r_s',
1536
+ '%p_s_lss',
1537
+ '%p_s_r',
1538
+ '%p_simp',
1539
+ '%p_string',
1540
+ '%p_sum',
1541
+ '%p_v_lss',
1542
+ '%p_v_p',
1543
+ '%p_v_r',
1544
+ '%p_v_s',
1545
+ '%p_x_hm',
1546
+ '%p_x_r',
1547
+ '%p_y_p',
1548
+ '%p_y_r',
1549
+ '%p_y_s',
1550
+ '%p_z_p',
1551
+ '%p_z_r',
1552
+ '%p_z_s',
1553
+ '%pcg',
1554
+ '%plist_p',
1555
+ '%plist_string',
1556
+ '%r_0',
1557
+ '%r_a_hm',
1558
+ '%r_a_lss',
1559
+ '%r_a_p',
1560
+ '%r_a_r',
1561
+ '%r_a_s',
1562
+ '%r_c_lss',
1563
+ '%r_c_p',
1564
+ '%r_c_r',
1565
+ '%r_c_s',
1566
+ '%r_clean',
1567
+ '%r_cumprod',
1568
+ '%r_cumsum',
1569
+ '%r_d_p',
1570
+ '%r_d_r',
1571
+ '%r_d_s',
1572
+ '%r_det',
1573
+ '%r_diag',
1574
+ '%r_e',
1575
+ '%r_eye',
1576
+ '%r_f_lss',
1577
+ '%r_f_p',
1578
+ '%r_f_r',
1579
+ '%r_f_s',
1580
+ '%r_i_ce',
1581
+ '%r_i_hm',
1582
+ '%r_i_lss',
1583
+ '%r_i_p',
1584
+ '%r_i_r',
1585
+ '%r_i_s',
1586
+ '%r_i_st',
1587
+ '%r_inv',
1588
+ '%r_j_s',
1589
+ '%r_k_p',
1590
+ '%r_k_r',
1591
+ '%r_k_s',
1592
+ '%r_l_lss',
1593
+ '%r_l_p',
1594
+ '%r_l_r',
1595
+ '%r_l_s',
1596
+ '%r_m_hm',
1597
+ '%r_m_lss',
1598
+ '%r_m_p',
1599
+ '%r_m_r',
1600
+ '%r_m_s',
1601
+ '%r_matrix',
1602
+ '%r_n_lss',
1603
+ '%r_n_p',
1604
+ '%r_n_r',
1605
+ '%r_n_s',
1606
+ '%r_norm',
1607
+ '%r_o_lss',
1608
+ '%r_o_p',
1609
+ '%r_o_r',
1610
+ '%r_o_s',
1611
+ '%r_ones',
1612
+ '%r_p',
1613
+ '%r_p_s',
1614
+ '%r_prod',
1615
+ '%r_q_p',
1616
+ '%r_q_r',
1617
+ '%r_q_s',
1618
+ '%r_r_lss',
1619
+ '%r_r_p',
1620
+ '%r_r_r',
1621
+ '%r_r_s',
1622
+ '%r_rand',
1623
+ '%r_s',
1624
+ '%r_s_hm',
1625
+ '%r_s_lss',
1626
+ '%r_s_p',
1627
+ '%r_s_r',
1628
+ '%r_s_s',
1629
+ '%r_simp',
1630
+ '%r_size',
1631
+ '%r_string',
1632
+ '%r_sum',
1633
+ '%r_t',
1634
+ '%r_tril',
1635
+ '%r_triu',
1636
+ '%r_v_lss',
1637
+ '%r_v_p',
1638
+ '%r_v_r',
1639
+ '%r_v_s',
1640
+ '%r_varn',
1641
+ '%r_x_p',
1642
+ '%r_x_r',
1643
+ '%r_x_s',
1644
+ '%r_y_p',
1645
+ '%r_y_r',
1646
+ '%r_y_s',
1647
+ '%r_z_p',
1648
+ '%r_z_r',
1649
+ '%r_z_s',
1650
+ '%s_1_hm',
1651
+ '%s_1_i',
1652
+ '%s_2_hm',
1653
+ '%s_2_i',
1654
+ '%s_3_hm',
1655
+ '%s_3_i',
1656
+ '%s_4_hm',
1657
+ '%s_4_i',
1658
+ '%s_5',
1659
+ '%s_a_b',
1660
+ '%s_a_hm',
1661
+ '%s_a_i',
1662
+ '%s_a_ip',
1663
+ '%s_a_lss',
1664
+ '%s_a_msp',
1665
+ '%s_a_r',
1666
+ '%s_a_sp',
1667
+ '%s_and',
1668
+ '%s_b_i',
1669
+ '%s_b_s',
1670
+ '%s_bezout',
1671
+ '%s_c_b',
1672
+ '%s_c_cblock',
1673
+ '%s_c_lss',
1674
+ '%s_c_r',
1675
+ '%s_c_sp',
1676
+ '%s_d_b',
1677
+ '%s_d_i',
1678
+ '%s_d_p',
1679
+ '%s_d_r',
1680
+ '%s_d_sp',
1681
+ '%s_e',
1682
+ '%s_f_b',
1683
+ '%s_f_cblock',
1684
+ '%s_f_lss',
1685
+ '%s_f_r',
1686
+ '%s_f_sp',
1687
+ '%s_g_b',
1688
+ '%s_g_s',
1689
+ '%s_gcd',
1690
+ '%s_grand',
1691
+ '%s_h_b',
1692
+ '%s_h_s',
1693
+ '%s_i_b',
1694
+ '%s_i_c',
1695
+ '%s_i_ce',
1696
+ '%s_i_h',
1697
+ '%s_i_hm',
1698
+ '%s_i_i',
1699
+ '%s_i_lss',
1700
+ '%s_i_p',
1701
+ '%s_i_r',
1702
+ '%s_i_s',
1703
+ '%s_i_sp',
1704
+ '%s_i_spb',
1705
+ '%s_i_st',
1706
+ '%s_j_i',
1707
+ '%s_k_hm',
1708
+ '%s_k_p',
1709
+ '%s_k_r',
1710
+ '%s_k_sp',
1711
+ '%s_l_b',
1712
+ '%s_l_hm',
1713
+ '%s_l_i',
1714
+ '%s_l_lss',
1715
+ '%s_l_p',
1716
+ '%s_l_r',
1717
+ '%s_l_s',
1718
+ '%s_l_sp',
1719
+ '%s_lcm',
1720
+ '%s_m_b',
1721
+ '%s_m_hm',
1722
+ '%s_m_i',
1723
+ '%s_m_ip',
1724
+ '%s_m_lss',
1725
+ '%s_m_msp',
1726
+ '%s_m_r',
1727
+ '%s_matrix',
1728
+ '%s_n_hm',
1729
+ '%s_n_i',
1730
+ '%s_n_l',
1731
+ '%s_n_lss',
1732
+ '%s_n_r',
1733
+ '%s_n_st',
1734
+ '%s_o_hm',
1735
+ '%s_o_i',
1736
+ '%s_o_l',
1737
+ '%s_o_lss',
1738
+ '%s_o_r',
1739
+ '%s_o_st',
1740
+ '%s_or',
1741
+ '%s_p_b',
1742
+ '%s_p_i',
1743
+ '%s_pow',
1744
+ '%s_q_hm',
1745
+ '%s_q_i',
1746
+ '%s_q_p',
1747
+ '%s_q_r',
1748
+ '%s_q_sp',
1749
+ '%s_r_b',
1750
+ '%s_r_i',
1751
+ '%s_r_lss',
1752
+ '%s_r_p',
1753
+ '%s_r_r',
1754
+ '%s_r_s',
1755
+ '%s_r_sp',
1756
+ '%s_s_b',
1757
+ '%s_s_hm',
1758
+ '%s_s_i',
1759
+ '%s_s_ip',
1760
+ '%s_s_lss',
1761
+ '%s_s_r',
1762
+ '%s_s_sp',
1763
+ '%s_simp',
1764
+ '%s_v_lss',
1765
+ '%s_v_p',
1766
+ '%s_v_r',
1767
+ '%s_v_s',
1768
+ '%s_x_b',
1769
+ '%s_x_hm',
1770
+ '%s_x_i',
1771
+ '%s_x_r',
1772
+ '%s_y_p',
1773
+ '%s_y_r',
1774
+ '%s_y_sp',
1775
+ '%s_z_p',
1776
+ '%s_z_r',
1777
+ '%s_z_sp',
1778
+ '%sn',
1779
+ '%sp_a_s',
1780
+ '%sp_a_sp',
1781
+ '%sp_and',
1782
+ '%sp_c_s',
1783
+ '%sp_ceil',
1784
+ '%sp_conj',
1785
+ '%sp_cos',
1786
+ '%sp_cumprod',
1787
+ '%sp_cumsum',
1788
+ '%sp_d_s',
1789
+ '%sp_d_sp',
1790
+ '%sp_det',
1791
+ '%sp_diag',
1792
+ '%sp_e',
1793
+ '%sp_exp',
1794
+ '%sp_f_s',
1795
+ '%sp_floor',
1796
+ '%sp_grand',
1797
+ '%sp_gsort',
1798
+ '%sp_i_ce',
1799
+ '%sp_i_h',
1800
+ '%sp_i_s',
1801
+ '%sp_i_sp',
1802
+ '%sp_i_st',
1803
+ '%sp_int',
1804
+ '%sp_inv',
1805
+ '%sp_k_s',
1806
+ '%sp_k_sp',
1807
+ '%sp_l_s',
1808
+ '%sp_l_sp',
1809
+ '%sp_length',
1810
+ '%sp_max',
1811
+ '%sp_min',
1812
+ '%sp_norm',
1813
+ '%sp_or',
1814
+ '%sp_p_s',
1815
+ '%sp_prod',
1816
+ '%sp_q_s',
1817
+ '%sp_q_sp',
1818
+ '%sp_r_s',
1819
+ '%sp_r_sp',
1820
+ '%sp_round',
1821
+ '%sp_s_s',
1822
+ '%sp_s_sp',
1823
+ '%sp_sin',
1824
+ '%sp_sqrt',
1825
+ '%sp_string',
1826
+ '%sp_sum',
1827
+ '%sp_tril',
1828
+ '%sp_triu',
1829
+ '%sp_y_s',
1830
+ '%sp_y_sp',
1831
+ '%sp_z_s',
1832
+ '%sp_z_sp',
1833
+ '%spb_and',
1834
+ '%spb_c_b',
1835
+ '%spb_cumprod',
1836
+ '%spb_cumsum',
1837
+ '%spb_diag',
1838
+ '%spb_e',
1839
+ '%spb_f_b',
1840
+ '%spb_g_b',
1841
+ '%spb_g_spb',
1842
+ '%spb_h_b',
1843
+ '%spb_h_spb',
1844
+ '%spb_i_b',
1845
+ '%spb_i_ce',
1846
+ '%spb_i_h',
1847
+ '%spb_i_st',
1848
+ '%spb_or',
1849
+ '%spb_prod',
1850
+ '%spb_sum',
1851
+ '%spb_tril',
1852
+ '%spb_triu',
1853
+ '%st_6',
1854
+ '%st_c_st',
1855
+ '%st_e',
1856
+ '%st_f_st',
1857
+ '%st_i_b',
1858
+ '%st_i_c',
1859
+ '%st_i_fptr',
1860
+ '%st_i_h',
1861
+ '%st_i_i',
1862
+ '%st_i_ip',
1863
+ '%st_i_lss',
1864
+ '%st_i_msp',
1865
+ '%st_i_p',
1866
+ '%st_i_r',
1867
+ '%st_i_s',
1868
+ '%st_i_sp',
1869
+ '%st_i_spb',
1870
+ '%st_i_st',
1871
+ '%st_matrix',
1872
+ '%st_n_c',
1873
+ '%st_n_l',
1874
+ '%st_n_mc',
1875
+ '%st_n_p',
1876
+ '%st_n_s',
1877
+ '%st_o_c',
1878
+ '%st_o_l',
1879
+ '%st_o_mc',
1880
+ '%st_o_p',
1881
+ '%st_o_s',
1882
+ '%st_o_tl',
1883
+ '%st_p',
1884
+ '%st_size',
1885
+ '%st_string',
1886
+ '%st_t',
1887
+ '%ticks_i_h',
1888
+ '%xls_e',
1889
+ '%xls_p',
1890
+ '%xlssheet_e',
1891
+ '%xlssheet_p',
1892
+ '%xlssheet_size',
1893
+ '%xlssheet_string',
1894
+ 'DominationRank',
1895
+ 'G_make',
1896
+ 'IsAScalar',
1897
+ 'NDcost',
1898
+ 'OS_Version',
1899
+ 'PlotSparse',
1900
+ 'ReadHBSparse',
1901
+ 'TCL_CreateSlave',
1902
+ 'abcd',
1903
+ 'abinv',
1904
+ 'accept_func_default',
1905
+ 'accept_func_vfsa',
1906
+ 'acf',
1907
+ 'acosd',
1908
+ 'acosh',
1909
+ 'acoshm',
1910
+ 'acosm',
1911
+ 'acot',
1912
+ 'acotd',
1913
+ 'acoth',
1914
+ 'acsc',
1915
+ 'acscd',
1916
+ 'acsch',
1917
+ 'add_demo',
1918
+ 'add_help_chapter',
1919
+ 'add_module_help_chapter',
1920
+ 'add_param',
1921
+ 'add_profiling',
1922
+ 'adj2sp',
1923
+ 'aff2ab',
1924
+ 'ana_style',
1925
+ 'analpf',
1926
+ 'analyze',
1927
+ 'aplat',
1928
+ 'arhnk',
1929
+ 'arl2',
1930
+ 'arma2p',
1931
+ 'arma2ss',
1932
+ 'armac',
1933
+ 'armax',
1934
+ 'armax1',
1935
+ 'arobasestring2strings',
1936
+ 'arsimul',
1937
+ 'ascii2string',
1938
+ 'asciimat',
1939
+ 'asec',
1940
+ 'asecd',
1941
+ 'asech',
1942
+ 'asind',
1943
+ 'asinh',
1944
+ 'asinhm',
1945
+ 'asinm',
1946
+ 'assert_checkalmostequal',
1947
+ 'assert_checkequal',
1948
+ 'assert_checkerror',
1949
+ 'assert_checkfalse',
1950
+ 'assert_checkfilesequal',
1951
+ 'assert_checktrue',
1952
+ 'assert_comparecomplex',
1953
+ 'assert_computedigits',
1954
+ 'assert_cond2reltol',
1955
+ 'assert_cond2reqdigits',
1956
+ 'assert_generror',
1957
+ 'atand',
1958
+ 'atanh',
1959
+ 'atanhm',
1960
+ 'atanm',
1961
+ 'atomsAutoload',
1962
+ 'atomsAutoloadAdd',
1963
+ 'atomsAutoloadDel',
1964
+ 'atomsAutoloadList',
1965
+ 'atomsCategoryList',
1966
+ 'atomsCheckModule',
1967
+ 'atomsDepTreeShow',
1968
+ 'atomsGetConfig',
1969
+ 'atomsGetInstalled',
1970
+ 'atomsGetInstalledPath',
1971
+ 'atomsGetLoaded',
1972
+ 'atomsGetLoadedPath',
1973
+ 'atomsInstall',
1974
+ 'atomsIsInstalled',
1975
+ 'atomsIsLoaded',
1976
+ 'atomsList',
1977
+ 'atomsLoad',
1978
+ 'atomsQuit',
1979
+ 'atomsRemove',
1980
+ 'atomsRepositoryAdd',
1981
+ 'atomsRepositoryDel',
1982
+ 'atomsRepositoryList',
1983
+ 'atomsRestoreConfig',
1984
+ 'atomsSaveConfig',
1985
+ 'atomsSearch',
1986
+ 'atomsSetConfig',
1987
+ 'atomsShow',
1988
+ 'atomsSystemInit',
1989
+ 'atomsSystemUpdate',
1990
+ 'atomsTest',
1991
+ 'atomsUpdate',
1992
+ 'atomsVersion',
1993
+ 'augment',
1994
+ 'auread',
1995
+ 'auwrite',
1996
+ 'balreal',
1997
+ 'bench_run',
1998
+ 'bilin',
1999
+ 'bilt',
2000
+ 'bin2dec',
2001
+ 'binomial',
2002
+ 'bitand',
2003
+ 'bitcmp',
2004
+ 'bitget',
2005
+ 'bitor',
2006
+ 'bitset',
2007
+ 'bitxor',
2008
+ 'black',
2009
+ 'blanks',
2010
+ 'bloc2exp',
2011
+ 'bloc2ss',
2012
+ 'block_parameter_error',
2013
+ 'bode',
2014
+ 'bode_asymp',
2015
+ 'bstap',
2016
+ 'buttmag',
2017
+ 'bvodeS',
2018
+ 'bytecode',
2019
+ 'bytecodewalk',
2020
+ 'cainv',
2021
+ 'calendar',
2022
+ 'calerf',
2023
+ 'calfrq',
2024
+ 'canon',
2025
+ 'casc',
2026
+ 'cat',
2027
+ 'cat_code',
2028
+ 'cb_m2sci_gui',
2029
+ 'ccontrg',
2030
+ 'cell',
2031
+ 'cell2mat',
2032
+ 'cellstr',
2033
+ 'center',
2034
+ 'cepstrum',
2035
+ 'cfspec',
2036
+ 'char',
2037
+ 'chart',
2038
+ 'cheb1mag',
2039
+ 'cheb2mag',
2040
+ 'check_gateways',
2041
+ 'check_modules_xml',
2042
+ 'check_versions',
2043
+ 'chepol',
2044
+ 'chfact',
2045
+ 'chsolve',
2046
+ 'classmarkov',
2047
+ 'clean_help',
2048
+ 'clock',
2049
+ 'cls2dls',
2050
+ 'cmb_lin',
2051
+ 'cmndred',
2052
+ 'cmoment',
2053
+ 'coding_ga_binary',
2054
+ 'coding_ga_identity',
2055
+ 'coff',
2056
+ 'coffg',
2057
+ 'colcomp',
2058
+ 'colcompr',
2059
+ 'colinout',
2060
+ 'colregul',
2061
+ 'companion',
2062
+ 'complex',
2063
+ 'compute_initial_temp',
2064
+ 'cond',
2065
+ 'cond2sp',
2066
+ 'condestsp',
2067
+ 'configure_msifort',
2068
+ 'configure_msvc',
2069
+ 'conjgrad',
2070
+ 'cont_frm',
2071
+ 'cont_mat',
2072
+ 'contrss',
2073
+ 'conv',
2074
+ 'convert_to_float',
2075
+ 'convertindex',
2076
+ 'convol',
2077
+ 'convol2d',
2078
+ 'copfac',
2079
+ 'correl',
2080
+ 'cosd',
2081
+ 'cosh',
2082
+ 'coshm',
2083
+ 'cosm',
2084
+ 'cotd',
2085
+ 'cotg',
2086
+ 'coth',
2087
+ 'cothm',
2088
+ 'cov',
2089
+ 'covar',
2090
+ 'createXConfiguration',
2091
+ 'createfun',
2092
+ 'createstruct',
2093
+ 'cross',
2094
+ 'crossover_ga_binary',
2095
+ 'crossover_ga_default',
2096
+ 'csc',
2097
+ 'cscd',
2098
+ 'csch',
2099
+ 'csgn',
2100
+ 'csim',
2101
+ 'cspect',
2102
+ 'ctr_gram',
2103
+ 'czt',
2104
+ 'dae',
2105
+ 'daeoptions',
2106
+ 'damp',
2107
+ 'datafit',
2108
+ 'date',
2109
+ 'datenum',
2110
+ 'datevec',
2111
+ 'dbphi',
2112
+ 'dcf',
2113
+ 'ddp',
2114
+ 'dec2bin',
2115
+ 'dec2hex',
2116
+ 'dec2oct',
2117
+ 'del_help_chapter',
2118
+ 'del_module_help_chapter',
2119
+ 'demo_begin',
2120
+ 'demo_choose',
2121
+ 'demo_compiler',
2122
+ 'demo_end',
2123
+ 'demo_file_choice',
2124
+ 'demo_folder_choice',
2125
+ 'demo_function_choice',
2126
+ 'demo_gui',
2127
+ 'demo_run',
2128
+ 'demo_viewCode',
2129
+ 'denom',
2130
+ 'derivat',
2131
+ 'derivative',
2132
+ 'des2ss',
2133
+ 'des2tf',
2134
+ 'detectmsifort64tools',
2135
+ 'detectmsvc64tools',
2136
+ 'determ',
2137
+ 'detr',
2138
+ 'detrend',
2139
+ 'devtools_run_builder',
2140
+ 'dhnorm',
2141
+ 'diff',
2142
+ 'diophant',
2143
+ 'dir',
2144
+ 'dirname',
2145
+ 'dispfiles',
2146
+ 'dllinfo',
2147
+ 'dscr',
2148
+ 'dsimul',
2149
+ 'dt_ility',
2150
+ 'dtsi',
2151
+ 'edit',
2152
+ 'edit_error',
2153
+ 'editor',
2154
+ 'eigenmarkov',
2155
+ 'eigs',
2156
+ 'ell1mag',
2157
+ 'enlarge_shape',
2158
+ 'entropy',
2159
+ 'eomday',
2160
+ 'epred',
2161
+ 'eqfir',
2162
+ 'eqiir',
2163
+ 'equil',
2164
+ 'equil1',
2165
+ 'erfinv',
2166
+ 'etime',
2167
+ 'eval',
2168
+ 'evans',
2169
+ 'evstr',
2170
+ 'example_run',
2171
+ 'expression2code',
2172
+ 'extract_help_examples',
2173
+ 'factor',
2174
+ 'factorial',
2175
+ 'factors',
2176
+ 'faurre',
2177
+ 'ffilt',
2178
+ 'fft2',
2179
+ 'fftshift',
2180
+ 'fieldnames',
2181
+ 'filt_sinc',
2182
+ 'filter',
2183
+ 'findABCD',
2184
+ 'findAC',
2185
+ 'findBDK',
2186
+ 'findR',
2187
+ 'find_freq',
2188
+ 'find_links',
2189
+ 'find_scicos_version',
2190
+ 'findm',
2191
+ 'findmsifortcompiler',
2192
+ 'findmsvccompiler',
2193
+ 'findx0BD',
2194
+ 'firstnonsingleton',
2195
+ 'fix',
2196
+ 'fixedpointgcd',
2197
+ 'flipdim',
2198
+ 'flts',
2199
+ 'fminsearch',
2200
+ 'formatBlackTip',
2201
+ 'formatBodeMagTip',
2202
+ 'formatBodePhaseTip',
2203
+ 'formatGainplotTip',
2204
+ 'formatHallModuleTip',
2205
+ 'formatHallPhaseTip',
2206
+ 'formatNicholsGainTip',
2207
+ 'formatNicholsPhaseTip',
2208
+ 'formatNyquistTip',
2209
+ 'formatPhaseplotTip',
2210
+ 'formatSgridDampingTip',
2211
+ 'formatSgridFreqTip',
2212
+ 'formatZgridDampingTip',
2213
+ 'formatZgridFreqTip',
2214
+ 'format_txt',
2215
+ 'fourplan',
2216
+ 'frep2tf',
2217
+ 'freson',
2218
+ 'frfit',
2219
+ 'frmag',
2220
+ 'fseek_origin',
2221
+ 'fsfirlin',
2222
+ 'fspec',
2223
+ 'fspecg',
2224
+ 'fstabst',
2225
+ 'ftest',
2226
+ 'ftuneq',
2227
+ 'fullfile',
2228
+ 'fullrf',
2229
+ 'fullrfk',
2230
+ 'fun2string',
2231
+ 'g_margin',
2232
+ 'gainplot',
2233
+ 'gamitg',
2234
+ 'gcare',
2235
+ 'gcd',
2236
+ 'gencompilationflags_unix',
2237
+ 'generateBlockImage',
2238
+ 'generateBlockImages',
2239
+ 'generic_i_ce',
2240
+ 'generic_i_h',
2241
+ 'generic_i_hm',
2242
+ 'generic_i_s',
2243
+ 'generic_i_st',
2244
+ 'genlib',
2245
+ 'genmarkov',
2246
+ 'geomean',
2247
+ 'getDiagramVersion',
2248
+ 'getModelicaPath',
2249
+ 'getPreferencesValue',
2250
+ 'get_file_path',
2251
+ 'get_function_path',
2252
+ 'get_param',
2253
+ 'get_profile',
2254
+ 'get_scicos_version',
2255
+ 'getd',
2256
+ 'getscilabkeywords',
2257
+ 'getshell',
2258
+ 'gettklib',
2259
+ 'gfare',
2260
+ 'gfrancis',
2261
+ 'givens',
2262
+ 'glever',
2263
+ 'gmres',
2264
+ 'group',
2265
+ 'gschur',
2266
+ 'gspec',
2267
+ 'gtild',
2268
+ 'h2norm',
2269
+ 'h_cl',
2270
+ 'h_inf',
2271
+ 'h_inf_st',
2272
+ 'h_norm',
2273
+ 'hallchart',
2274
+ 'halt',
2275
+ 'hank',
2276
+ 'hankelsv',
2277
+ 'harmean',
2278
+ 'haveacompiler',
2279
+ 'head_comments',
2280
+ 'help_from_sci',
2281
+ 'help_skeleton',
2282
+ 'hermit',
2283
+ 'hex2dec',
2284
+ 'hilb',
2285
+ 'hilbert',
2286
+ 'histc',
2287
+ 'horner',
2288
+ 'householder',
2289
+ 'hrmt',
2290
+ 'htrianr',
2291
+ 'hypermat',
2292
+ 'idct',
2293
+ 'idst',
2294
+ 'ifft',
2295
+ 'ifftshift',
2296
+ 'iir',
2297
+ 'iirgroup',
2298
+ 'iirlp',
2299
+ 'iirmod',
2300
+ 'ilib_build',
2301
+ 'ilib_build_jar',
2302
+ 'ilib_compile',
2303
+ 'ilib_for_link',
2304
+ 'ilib_gen_Make',
2305
+ 'ilib_gen_Make_unix',
2306
+ 'ilib_gen_cleaner',
2307
+ 'ilib_gen_gateway',
2308
+ 'ilib_gen_loader',
2309
+ 'ilib_include_flag',
2310
+ 'ilib_mex_build',
2311
+ 'im_inv',
2312
+ 'importScicosDiagram',
2313
+ 'importScicosPal',
2314
+ 'importXcosDiagram',
2315
+ 'imrep2ss',
2316
+ 'ind2sub',
2317
+ 'inistate',
2318
+ 'init_ga_default',
2319
+ 'init_param',
2320
+ 'initial_scicos_tables',
2321
+ 'input',
2322
+ 'instruction2code',
2323
+ 'intc',
2324
+ 'intdec',
2325
+ 'integrate',
2326
+ 'interp1',
2327
+ 'interpln',
2328
+ 'intersect',
2329
+ 'intl',
2330
+ 'intsplin',
2331
+ 'inttrap',
2332
+ 'inv_coeff',
2333
+ 'invr',
2334
+ 'invrs',
2335
+ 'invsyslin',
2336
+ 'iqr',
2337
+ 'isLeapYear',
2338
+ 'is_absolute_path',
2339
+ 'is_param',
2340
+ 'iscell',
2341
+ 'iscellstr',
2342
+ 'iscolumn',
2343
+ 'isempty',
2344
+ 'isfield',
2345
+ 'isinf',
2346
+ 'ismatrix',
2347
+ 'isnan',
2348
+ 'isrow',
2349
+ 'isscalar',
2350
+ 'issparse',
2351
+ 'issquare',
2352
+ 'isstruct',
2353
+ 'isvector',
2354
+ 'jmat',
2355
+ 'justify',
2356
+ 'kalm',
2357
+ 'karmarkar',
2358
+ 'kernel',
2359
+ 'kpure',
2360
+ 'krac2',
2361
+ 'kroneck',
2362
+ 'lattn',
2363
+ 'lattp',
2364
+ 'launchtest',
2365
+ 'lcf',
2366
+ 'lcm',
2367
+ 'lcmdiag',
2368
+ 'leastsq',
2369
+ 'leqe',
2370
+ 'leqr',
2371
+ 'lev',
2372
+ 'levin',
2373
+ 'lex_sort',
2374
+ 'lft',
2375
+ 'lin',
2376
+ 'lin2mu',
2377
+ 'lincos',
2378
+ 'lindquist',
2379
+ 'linf',
2380
+ 'linfn',
2381
+ 'linsolve',
2382
+ 'linspace',
2383
+ 'list2vec',
2384
+ 'list_param',
2385
+ 'listfiles',
2386
+ 'listfunctions',
2387
+ 'listvarinfile',
2388
+ 'lmisolver',
2389
+ 'lmitool',
2390
+ 'loadXcosLibs',
2391
+ 'loadmatfile',
2392
+ 'loadwave',
2393
+ 'log10',
2394
+ 'log2',
2395
+ 'logm',
2396
+ 'logspace',
2397
+ 'lqe',
2398
+ 'lqg',
2399
+ 'lqg2stan',
2400
+ 'lqg_ltr',
2401
+ 'lqr',
2402
+ 'ls',
2403
+ 'lyap',
2404
+ 'm2sci_gui',
2405
+ 'm_circle',
2406
+ 'macglov',
2407
+ 'macrovar',
2408
+ 'mad',
2409
+ 'makecell',
2410
+ 'manedit',
2411
+ 'mapsound',
2412
+ 'markp2ss',
2413
+ 'matfile2sci',
2414
+ 'mdelete',
2415
+ 'mean',
2416
+ 'meanf',
2417
+ 'median',
2418
+ 'members',
2419
+ 'mese',
2420
+ 'meshgrid',
2421
+ 'mfft',
2422
+ 'mfile2sci',
2423
+ 'minreal',
2424
+ 'minss',
2425
+ 'mkdir',
2426
+ 'modulo',
2427
+ 'moment',
2428
+ 'mrfit',
2429
+ 'msd',
2430
+ 'mstr2sci',
2431
+ 'mtlb',
2432
+ 'mtlb_0',
2433
+ 'mtlb_a',
2434
+ 'mtlb_all',
2435
+ 'mtlb_any',
2436
+ 'mtlb_axes',
2437
+ 'mtlb_axis',
2438
+ 'mtlb_beta',
2439
+ 'mtlb_box',
2440
+ 'mtlb_choices',
2441
+ 'mtlb_close',
2442
+ 'mtlb_colordef',
2443
+ 'mtlb_cond',
2444
+ 'mtlb_cov',
2445
+ 'mtlb_cumprod',
2446
+ 'mtlb_cumsum',
2447
+ 'mtlb_dec2hex',
2448
+ 'mtlb_delete',
2449
+ 'mtlb_diag',
2450
+ 'mtlb_diff',
2451
+ 'mtlb_dir',
2452
+ 'mtlb_double',
2453
+ 'mtlb_e',
2454
+ 'mtlb_echo',
2455
+ 'mtlb_error',
2456
+ 'mtlb_eval',
2457
+ 'mtlb_exist',
2458
+ 'mtlb_eye',
2459
+ 'mtlb_false',
2460
+ 'mtlb_fft',
2461
+ 'mtlb_fftshift',
2462
+ 'mtlb_filter',
2463
+ 'mtlb_find',
2464
+ 'mtlb_findstr',
2465
+ 'mtlb_fliplr',
2466
+ 'mtlb_fopen',
2467
+ 'mtlb_format',
2468
+ 'mtlb_fprintf',
2469
+ 'mtlb_fread',
2470
+ 'mtlb_fscanf',
2471
+ 'mtlb_full',
2472
+ 'mtlb_fwrite',
2473
+ 'mtlb_get',
2474
+ 'mtlb_grid',
2475
+ 'mtlb_hold',
2476
+ 'mtlb_i',
2477
+ 'mtlb_ifft',
2478
+ 'mtlb_image',
2479
+ 'mtlb_imp',
2480
+ 'mtlb_int16',
2481
+ 'mtlb_int32',
2482
+ 'mtlb_int8',
2483
+ 'mtlb_is',
2484
+ 'mtlb_isa',
2485
+ 'mtlb_isfield',
2486
+ 'mtlb_isletter',
2487
+ 'mtlb_isspace',
2488
+ 'mtlb_l',
2489
+ 'mtlb_legendre',
2490
+ 'mtlb_linspace',
2491
+ 'mtlb_logic',
2492
+ 'mtlb_logical',
2493
+ 'mtlb_loglog',
2494
+ 'mtlb_lower',
2495
+ 'mtlb_max',
2496
+ 'mtlb_mean',
2497
+ 'mtlb_median',
2498
+ 'mtlb_mesh',
2499
+ 'mtlb_meshdom',
2500
+ 'mtlb_min',
2501
+ 'mtlb_more',
2502
+ 'mtlb_num2str',
2503
+ 'mtlb_ones',
2504
+ 'mtlb_pcolor',
2505
+ 'mtlb_plot',
2506
+ 'mtlb_prod',
2507
+ 'mtlb_qr',
2508
+ 'mtlb_qz',
2509
+ 'mtlb_rand',
2510
+ 'mtlb_randn',
2511
+ 'mtlb_rcond',
2512
+ 'mtlb_realmax',
2513
+ 'mtlb_realmin',
2514
+ 'mtlb_s',
2515
+ 'mtlb_semilogx',
2516
+ 'mtlb_semilogy',
2517
+ 'mtlb_setstr',
2518
+ 'mtlb_size',
2519
+ 'mtlb_sort',
2520
+ 'mtlb_sortrows',
2521
+ 'mtlb_sprintf',
2522
+ 'mtlb_sscanf',
2523
+ 'mtlb_std',
2524
+ 'mtlb_strcmp',
2525
+ 'mtlb_strcmpi',
2526
+ 'mtlb_strfind',
2527
+ 'mtlb_strrep',
2528
+ 'mtlb_subplot',
2529
+ 'mtlb_sum',
2530
+ 'mtlb_t',
2531
+ 'mtlb_toeplitz',
2532
+ 'mtlb_tril',
2533
+ 'mtlb_triu',
2534
+ 'mtlb_true',
2535
+ 'mtlb_type',
2536
+ 'mtlb_uint16',
2537
+ 'mtlb_uint32',
2538
+ 'mtlb_uint8',
2539
+ 'mtlb_upper',
2540
+ 'mtlb_var',
2541
+ 'mtlb_zeros',
2542
+ 'mu2lin',
2543
+ 'mutation_ga_binary',
2544
+ 'mutation_ga_default',
2545
+ 'mvcorrel',
2546
+ 'mvvacov',
2547
+ 'nancumsum',
2548
+ 'nand2mean',
2549
+ 'nanmax',
2550
+ 'nanmean',
2551
+ 'nanmeanf',
2552
+ 'nanmedian',
2553
+ 'nanmin',
2554
+ 'nanreglin',
2555
+ 'nanstdev',
2556
+ 'nansum',
2557
+ 'narsimul',
2558
+ 'ndgrid',
2559
+ 'ndims',
2560
+ 'nehari',
2561
+ 'neigh_func_csa',
2562
+ 'neigh_func_default',
2563
+ 'neigh_func_fsa',
2564
+ 'neigh_func_vfsa',
2565
+ 'neldermead_cget',
2566
+ 'neldermead_configure',
2567
+ 'neldermead_costf',
2568
+ 'neldermead_defaultoutput',
2569
+ 'neldermead_destroy',
2570
+ 'neldermead_function',
2571
+ 'neldermead_get',
2572
+ 'neldermead_log',
2573
+ 'neldermead_new',
2574
+ 'neldermead_restart',
2575
+ 'neldermead_search',
2576
+ 'neldermead_updatesimp',
2577
+ 'nextpow2',
2578
+ 'nfreq',
2579
+ 'nicholschart',
2580
+ 'nlev',
2581
+ 'nmplot_cget',
2582
+ 'nmplot_configure',
2583
+ 'nmplot_contour',
2584
+ 'nmplot_destroy',
2585
+ 'nmplot_function',
2586
+ 'nmplot_get',
2587
+ 'nmplot_historyplot',
2588
+ 'nmplot_log',
2589
+ 'nmplot_new',
2590
+ 'nmplot_outputcmd',
2591
+ 'nmplot_restart',
2592
+ 'nmplot_search',
2593
+ 'nmplot_simplexhistory',
2594
+ 'noisegen',
2595
+ 'nonreg_test_run',
2596
+ 'now',
2597
+ 'nthroot',
2598
+ 'null',
2599
+ 'num2cell',
2600
+ 'numderivative',
2601
+ 'numdiff',
2602
+ 'numer',
2603
+ 'nyquist',
2604
+ 'nyquistfrequencybounds',
2605
+ 'obs_gram',
2606
+ 'obscont',
2607
+ 'observer',
2608
+ 'obsv_mat',
2609
+ 'obsvss',
2610
+ 'oct2dec',
2611
+ 'odeoptions',
2612
+ 'optim_ga',
2613
+ 'optim_moga',
2614
+ 'optim_nsga',
2615
+ 'optim_nsga2',
2616
+ 'optim_sa',
2617
+ 'optimbase_cget',
2618
+ 'optimbase_checkbounds',
2619
+ 'optimbase_checkcostfun',
2620
+ 'optimbase_checkx0',
2621
+ 'optimbase_configure',
2622
+ 'optimbase_destroy',
2623
+ 'optimbase_function',
2624
+ 'optimbase_get',
2625
+ 'optimbase_hasbounds',
2626
+ 'optimbase_hasconstraints',
2627
+ 'optimbase_hasnlcons',
2628
+ 'optimbase_histget',
2629
+ 'optimbase_histset',
2630
+ 'optimbase_incriter',
2631
+ 'optimbase_isfeasible',
2632
+ 'optimbase_isinbounds',
2633
+ 'optimbase_isinnonlincons',
2634
+ 'optimbase_log',
2635
+ 'optimbase_logshutdown',
2636
+ 'optimbase_logstartup',
2637
+ 'optimbase_new',
2638
+ 'optimbase_outputcmd',
2639
+ 'optimbase_outstruct',
2640
+ 'optimbase_proj2bnds',
2641
+ 'optimbase_set',
2642
+ 'optimbase_stoplog',
2643
+ 'optimbase_terminate',
2644
+ 'optimget',
2645
+ 'optimplotfunccount',
2646
+ 'optimplotfval',
2647
+ 'optimplotx',
2648
+ 'optimset',
2649
+ 'optimsimplex_center',
2650
+ 'optimsimplex_check',
2651
+ 'optimsimplex_compsomefv',
2652
+ 'optimsimplex_computefv',
2653
+ 'optimsimplex_deltafv',
2654
+ 'optimsimplex_deltafvmax',
2655
+ 'optimsimplex_destroy',
2656
+ 'optimsimplex_dirmat',
2657
+ 'optimsimplex_fvmean',
2658
+ 'optimsimplex_fvstdev',
2659
+ 'optimsimplex_fvvariance',
2660
+ 'optimsimplex_getall',
2661
+ 'optimsimplex_getallfv',
2662
+ 'optimsimplex_getallx',
2663
+ 'optimsimplex_getfv',
2664
+ 'optimsimplex_getn',
2665
+ 'optimsimplex_getnbve',
2666
+ 'optimsimplex_getve',
2667
+ 'optimsimplex_getx',
2668
+ 'optimsimplex_gradientfv',
2669
+ 'optimsimplex_log',
2670
+ 'optimsimplex_new',
2671
+ 'optimsimplex_reflect',
2672
+ 'optimsimplex_setall',
2673
+ 'optimsimplex_setallfv',
2674
+ 'optimsimplex_setallx',
2675
+ 'optimsimplex_setfv',
2676
+ 'optimsimplex_setn',
2677
+ 'optimsimplex_setnbve',
2678
+ 'optimsimplex_setve',
2679
+ 'optimsimplex_setx',
2680
+ 'optimsimplex_shrink',
2681
+ 'optimsimplex_size',
2682
+ 'optimsimplex_sort',
2683
+ 'optimsimplex_xbar',
2684
+ 'orth',
2685
+ 'output_ga_default',
2686
+ 'output_moga_default',
2687
+ 'output_nsga2_default',
2688
+ 'output_nsga_default',
2689
+ 'p_margin',
2690
+ 'pack',
2691
+ 'pareto_filter',
2692
+ 'parrot',
2693
+ 'pbig',
2694
+ 'pca',
2695
+ 'pcg',
2696
+ 'pdiv',
2697
+ 'pen2ea',
2698
+ 'pencan',
2699
+ 'pencost',
2700
+ 'penlaur',
2701
+ 'perctl',
2702
+ 'perl',
2703
+ 'perms',
2704
+ 'permute',
2705
+ 'pertrans',
2706
+ 'pfactors',
2707
+ 'pfss',
2708
+ 'phasemag',
2709
+ 'phaseplot',
2710
+ 'phc',
2711
+ 'pinv',
2712
+ 'playsnd',
2713
+ 'plotprofile',
2714
+ 'plzr',
2715
+ 'pmodulo',
2716
+ 'pol2des',
2717
+ 'pol2str',
2718
+ 'polar',
2719
+ 'polfact',
2720
+ 'prbs_a',
2721
+ 'prettyprint',
2722
+ 'primes',
2723
+ 'princomp',
2724
+ 'profile',
2725
+ 'proj',
2726
+ 'projsl',
2727
+ 'projspec',
2728
+ 'psmall',
2729
+ 'pspect',
2730
+ 'qmr',
2731
+ 'qpsolve',
2732
+ 'quart',
2733
+ 'quaskro',
2734
+ 'rafiter',
2735
+ 'randpencil',
2736
+ 'range',
2737
+ 'rank',
2738
+ 'readxls',
2739
+ 'recompilefunction',
2740
+ 'recons',
2741
+ 'reglin',
2742
+ 'regress',
2743
+ 'remezb',
2744
+ 'remove_param',
2745
+ 'remove_profiling',
2746
+ 'repfreq',
2747
+ 'replace_Ix_by_Fx',
2748
+ 'repmat',
2749
+ 'reset_profiling',
2750
+ 'resize_matrix',
2751
+ 'returntoscilab',
2752
+ 'rhs2code',
2753
+ 'ric_desc',
2754
+ 'riccati',
2755
+ 'rmdir',
2756
+ 'routh_t',
2757
+ 'rowcomp',
2758
+ 'rowcompr',
2759
+ 'rowinout',
2760
+ 'rowregul',
2761
+ 'rowshuff',
2762
+ 'rref',
2763
+ 'sample',
2764
+ 'samplef',
2765
+ 'samwr',
2766
+ 'savematfile',
2767
+ 'savewave',
2768
+ 'scanf',
2769
+ 'sci2exp',
2770
+ 'sciGUI_init',
2771
+ 'sci_sparse',
2772
+ 'scicos_getvalue',
2773
+ 'scicos_simulate',
2774
+ 'scicos_workspace_init',
2775
+ 'scisptdemo',
2776
+ 'scitest',
2777
+ 'sdiff',
2778
+ 'sec',
2779
+ 'secd',
2780
+ 'sech',
2781
+ 'selection_ga_elitist',
2782
+ 'selection_ga_random',
2783
+ 'sensi',
2784
+ 'setPreferencesValue',
2785
+ 'set_param',
2786
+ 'setdiff',
2787
+ 'sgrid',
2788
+ 'show_margins',
2789
+ 'show_pca',
2790
+ 'showprofile',
2791
+ 'signm',
2792
+ 'sinc',
2793
+ 'sincd',
2794
+ 'sind',
2795
+ 'sinh',
2796
+ 'sinhm',
2797
+ 'sinm',
2798
+ 'sm2des',
2799
+ 'sm2ss',
2800
+ 'smga',
2801
+ 'smooth',
2802
+ 'solve',
2803
+ 'sound',
2804
+ 'soundsec',
2805
+ 'sp2adj',
2806
+ 'spaninter',
2807
+ 'spanplus',
2808
+ 'spantwo',
2809
+ 'specfact',
2810
+ 'speye',
2811
+ 'sprand',
2812
+ 'spzeros',
2813
+ 'sqroot',
2814
+ 'sqrtm',
2815
+ 'squarewave',
2816
+ 'squeeze',
2817
+ 'srfaur',
2818
+ 'srkf',
2819
+ 'ss2des',
2820
+ 'ss2ss',
2821
+ 'ss2tf',
2822
+ 'sskf',
2823
+ 'ssprint',
2824
+ 'ssrand',
2825
+ 'st_deviation',
2826
+ 'st_i_generic',
2827
+ 'st_ility',
2828
+ 'stabil',
2829
+ 'statgain',
2830
+ 'stdev',
2831
+ 'stdevf',
2832
+ 'steadycos',
2833
+ 'strange',
2834
+ 'strcmpi',
2835
+ 'struct',
2836
+ 'sub2ind',
2837
+ 'sva',
2838
+ 'svplot',
2839
+ 'sylm',
2840
+ 'sylv',
2841
+ 'sysconv',
2842
+ 'sysdiag',
2843
+ 'sysfact',
2844
+ 'syslin',
2845
+ 'syssize',
2846
+ 'system',
2847
+ 'systmat',
2848
+ 'tabul',
2849
+ 'tand',
2850
+ 'tanh',
2851
+ 'tanhm',
2852
+ 'tanm',
2853
+ 'tbx_build_blocks',
2854
+ 'tbx_build_cleaner',
2855
+ 'tbx_build_gateway',
2856
+ 'tbx_build_gateway_clean',
2857
+ 'tbx_build_gateway_loader',
2858
+ 'tbx_build_help',
2859
+ 'tbx_build_help_loader',
2860
+ 'tbx_build_loader',
2861
+ 'tbx_build_localization',
2862
+ 'tbx_build_macros',
2863
+ 'tbx_build_pal_loader',
2864
+ 'tbx_build_src',
2865
+ 'tbx_builder',
2866
+ 'tbx_builder_gateway',
2867
+ 'tbx_builder_gateway_lang',
2868
+ 'tbx_builder_help',
2869
+ 'tbx_builder_help_lang',
2870
+ 'tbx_builder_macros',
2871
+ 'tbx_builder_src',
2872
+ 'tbx_builder_src_lang',
2873
+ 'tbx_generate_pofile',
2874
+ 'temp_law_csa',
2875
+ 'temp_law_default',
2876
+ 'temp_law_fsa',
2877
+ 'temp_law_huang',
2878
+ 'temp_law_vfsa',
2879
+ 'test_clean',
2880
+ 'test_on_columns',
2881
+ 'test_run',
2882
+ 'test_run_level',
2883
+ 'testexamples',
2884
+ 'tf2des',
2885
+ 'tf2ss',
2886
+ 'thrownan',
2887
+ 'tic',
2888
+ 'time_id',
2889
+ 'toc',
2890
+ 'toeplitz',
2891
+ 'tokenpos',
2892
+ 'toolboxes',
2893
+ 'trace',
2894
+ 'trans',
2895
+ 'translatepaths',
2896
+ 'tree2code',
2897
+ 'trfmod',
2898
+ 'trianfml',
2899
+ 'trimmean',
2900
+ 'trisolve',
2901
+ 'trzeros',
2902
+ 'typeof',
2903
+ 'ui_observer',
2904
+ 'union',
2905
+ 'unique',
2906
+ 'unit_test_run',
2907
+ 'unix_g',
2908
+ 'unix_s',
2909
+ 'unix_w',
2910
+ 'unix_x',
2911
+ 'unobs',
2912
+ 'unpack',
2913
+ 'unwrap',
2914
+ 'variance',
2915
+ 'variancef',
2916
+ 'vec2list',
2917
+ 'vectorfind',
2918
+ 'ver',
2919
+ 'warnobsolete',
2920
+ 'wavread',
2921
+ 'wavwrite',
2922
+ 'wcenter',
2923
+ 'weekday',
2924
+ 'wfir',
2925
+ 'wfir_gui',
2926
+ 'whereami',
2927
+ 'who_user',
2928
+ 'whos',
2929
+ 'wiener',
2930
+ 'wigner',
2931
+ 'window',
2932
+ 'winlist',
2933
+ 'with_javasci',
2934
+ 'with_macros_source',
2935
+ 'with_modelica_compiler',
2936
+ 'with_tk',
2937
+ 'xcorr',
2938
+ 'xcosBlockEval',
2939
+ 'xcosBlockInterface',
2940
+ 'xcosCodeGeneration',
2941
+ 'xcosConfigureModelica',
2942
+ 'xcosPal',
2943
+ 'xcosPalAdd',
2944
+ 'xcosPalAddBlock',
2945
+ 'xcosPalExport',
2946
+ 'xcosPalGenerateAllIcons',
2947
+ 'xcosShowBlockWarning',
2948
+ 'xcosValidateBlockSet',
2949
+ 'xcosValidateCompareBlock',
2950
+ 'xcos_compile',
2951
+ 'xcos_debug_gui',
2952
+ 'xcos_run',
2953
+ 'xcos_simulate',
2954
+ 'xcov',
2955
+ 'xmltochm',
2956
+ 'xmltoformat',
2957
+ 'xmltohtml',
2958
+ 'xmltojar',
2959
+ 'xmltopdf',
2960
+ 'xmltops',
2961
+ 'xmltoweb',
2962
+ 'yulewalk',
2963
+ 'zeropen',
2964
+ 'zgrid',
2965
+ 'zpbutt',
2966
+ 'zpch1',
2967
+ 'zpch2',
2968
+ 'zpell',
2969
+ )
2970
+
2971
+ variables_kw = (
2972
+ '$',
2973
+ '%F',
2974
+ '%T',
2975
+ '%e',
2976
+ '%eps',
2977
+ '%f',
2978
+ '%fftw',
2979
+ '%gui',
2980
+ '%i',
2981
+ '%inf',
2982
+ '%io',
2983
+ '%modalWarning',
2984
+ '%nan',
2985
+ '%pi',
2986
+ '%s',
2987
+ '%t',
2988
+ '%tk',
2989
+ '%toolboxes',
2990
+ '%toolboxes_dir',
2991
+ '%z',
2992
+ 'PWD',
2993
+ 'SCI',
2994
+ 'SCIHOME',
2995
+ 'TMPDIR',
2996
+ 'arnoldilib',
2997
+ 'assertlib',
2998
+ 'atomslib',
2999
+ 'cacsdlib',
3000
+ 'compatibility_functilib',
3001
+ 'corelib',
3002
+ 'data_structureslib',
3003
+ 'demo_toolslib',
3004
+ 'development_toolslib',
3005
+ 'differential_equationlib',
3006
+ 'dynamic_linklib',
3007
+ 'elementary_functionslib',
3008
+ 'enull',
3009
+ 'evoid',
3010
+ 'external_objectslib',
3011
+ 'fd',
3012
+ 'fileiolib',
3013
+ 'functionslib',
3014
+ 'genetic_algorithmslib',
3015
+ 'helptoolslib',
3016
+ 'home',
3017
+ 'integerlib',
3018
+ 'interpolationlib',
3019
+ 'iolib',
3020
+ 'jnull',
3021
+ 'jvoid',
3022
+ 'linear_algebralib',
3023
+ 'm2scilib',
3024
+ 'matiolib',
3025
+ 'modules_managerlib',
3026
+ 'neldermeadlib',
3027
+ 'optimbaselib',
3028
+ 'optimizationlib',
3029
+ 'optimsimplexlib',
3030
+ 'output_streamlib',
3031
+ 'overloadinglib',
3032
+ 'parameterslib',
3033
+ 'polynomialslib',
3034
+ 'preferenceslib',
3035
+ 'randliblib',
3036
+ 'scicos_autolib',
3037
+ 'scicos_utilslib',
3038
+ 'scinoteslib',
3039
+ 'signal_processinglib',
3040
+ 'simulated_annealinglib',
3041
+ 'soundlib',
3042
+ 'sparselib',
3043
+ 'special_functionslib',
3044
+ 'spreadsheetlib',
3045
+ 'statisticslib',
3046
+ 'stringlib',
3047
+ 'tclscilib',
3048
+ 'timelib',
3049
+ 'umfpacklib',
3050
+ 'xcoslib',
3051
+ )
3052
+
3053
+
3054
+ if __name__ == '__main__': # pragma: no cover
3055
+ import subprocess
3056
+ from pygments.util import format_lines, duplicates_removed
3057
+
3058
+ mapping = {'variables': 'builtin'}
3059
+
3060
+ def extract_completion(var_type):
3061
+ s = subprocess.Popen(['scilab', '-nwni'], stdin=subprocess.PIPE,
3062
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3063
+ output = s.communicate(f'''\
3064
+ fd = mopen("/dev/stderr", "wt");
3065
+ mputl(strcat(completion("", "{var_type}"), "||"), fd);
3066
+ mclose(fd)\n''')
3067
+ if '||' not in output[1]:
3068
+ raise Exception(output[0])
3069
+ # Invalid DISPLAY causes this to be output:
3070
+ text = output[1].strip()
3071
+ if text.startswith('Error: unable to open display \n'):
3072
+ text = text[len('Error: unable to open display \n'):]
3073
+ return text.split('||')
3074
+
3075
+ new_data = {}
3076
+ seen = set() # only keep first type for a given word
3077
+ for t in ('functions', 'commands', 'macros', 'variables'):
3078
+ new_data[t] = duplicates_removed(extract_completion(t), seen)
3079
+ seen.update(set(new_data[t]))
3080
+
3081
+
3082
+ with open(__file__, encoding='utf-8') as f:
3083
+ content = f.read()
3084
+
3085
+ header = content[:content.find('# Autogenerated')]
3086
+ footer = content[content.find("if __name__ == '__main__':"):]
3087
+
3088
+ with open(__file__, 'w', encoding='utf-8') as f:
3089
+ f.write(header)
3090
+ f.write('# Autogenerated\n\n')
3091
+ for k, v in sorted(new_data.items()):
3092
+ f.write(format_lines(k + '_kw', v) + '\n\n')
3093
+ f.write(footer)
mgm/lib/python3.10/site-packages/pygments/lexers/algebra.py ADDED
@@ -0,0 +1,299 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers.algebra
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Lexers for computer algebra systems.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+
13
+ from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, words
14
+ from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
15
+ Number, Punctuation, Generic, Whitespace
16
+
17
+ __all__ = ['GAPLexer', 'GAPConsoleLexer', 'MathematicaLexer', 'MuPADLexer',
18
+ 'BCLexer']
19
+
20
+
21
+ class GAPLexer(RegexLexer):
22
+ """
23
+ For GAP source code.
24
+ """
25
+ name = 'GAP'
26
+ url = 'https://www.gap-system.org'
27
+ aliases = ['gap']
28
+ filenames = ['*.g', '*.gd', '*.gi', '*.gap']
29
+ version_added = '2.0'
30
+
31
+ tokens = {
32
+ 'root': [
33
+ (r'#.*$', Comment.Single),
34
+ (r'"(?:[^"\\]|\\.)*"', String),
35
+ (r'\(|\)|\[|\]|\{|\}', Punctuation),
36
+ (r'''(?x)\b(?:
37
+ if|then|elif|else|fi|
38
+ for|while|do|od|
39
+ repeat|until|
40
+ break|continue|
41
+ function|local|return|end|
42
+ rec|
43
+ quit|QUIT|
44
+ IsBound|Unbind|
45
+ TryNextMethod|
46
+ Info|Assert
47
+ )\b''', Keyword),
48
+ (r'''(?x)\b(?:
49
+ true|false|fail|infinity
50
+ )\b''',
51
+ Name.Constant),
52
+ (r'''(?x)\b(?:
53
+ (Declare|Install)([A-Z][A-Za-z]+)|
54
+ BindGlobal|BIND_GLOBAL
55
+ )\b''',
56
+ Name.Builtin),
57
+ (r'\.|,|:=|;|=|\+|-|\*|/|\^|>|<', Operator),
58
+ (r'''(?x)\b(?:
59
+ and|or|not|mod|in
60
+ )\b''',
61
+ Operator.Word),
62
+ (r'''(?x)
63
+ (?:\w+|`[^`]*`)
64
+ (?:::\w+|`[^`]*`)*''', Name.Variable),
65
+ (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
66
+ (r'\.[0-9]+(?:e[0-9]+)?', Number),
67
+ (r'.', Text)
68
+ ],
69
+ }
70
+
71
+ def analyse_text(text):
72
+ score = 0.0
73
+
74
+ # Declaration part
75
+ if re.search(
76
+ r"(InstallTrueMethod|Declare(Attribute|Category|Filter|Operation" +
77
+ r"|GlobalFunction|Synonym|SynonymAttr|Property))", text
78
+ ):
79
+ score += 0.7
80
+
81
+ # Implementation part
82
+ if re.search(
83
+ r"(DeclareRepresentation|Install(GlobalFunction|Method|" +
84
+ r"ImmediateMethod|OtherMethod)|New(Family|Type)|Objectify)", text
85
+ ):
86
+ score += 0.7
87
+
88
+ return min(score, 1.0)
89
+
90
+
91
+ class GAPConsoleLexer(Lexer):
92
+ """
93
+ For GAP console sessions. Modeled after JuliaConsoleLexer.
94
+ """
95
+ name = 'GAP session'
96
+ aliases = ['gap-console', 'gap-repl']
97
+ filenames = ['*.tst']
98
+ url = 'https://www.gap-system.org'
99
+ version_added = '2.14'
100
+ _example = "gap-repl/euclidean.tst"
101
+
102
+ def get_tokens_unprocessed(self, text):
103
+ gaplexer = GAPLexer(**self.options)
104
+ start = 0
105
+ curcode = ''
106
+ insertions = []
107
+ output = False
108
+ error = False
109
+
110
+ for line in text.splitlines(keepends=True):
111
+ if line.startswith('gap> ') or line.startswith('brk> '):
112
+ insertions.append((len(curcode), [(0, Generic.Prompt, line[:5])]))
113
+ curcode += line[5:]
114
+ output = False
115
+ error = False
116
+ elif not output and line.startswith('> '):
117
+ insertions.append((len(curcode), [(0, Generic.Prompt, line[:2])]))
118
+ curcode += line[2:]
119
+ else:
120
+ if curcode:
121
+ yield from do_insertions(
122
+ insertions, gaplexer.get_tokens_unprocessed(curcode))
123
+ curcode = ''
124
+ insertions = []
125
+ if line.startswith('Error, ') or error:
126
+ yield start, Generic.Error, line
127
+ error = True
128
+ else:
129
+ yield start, Generic.Output, line
130
+ output = True
131
+ start += len(line)
132
+
133
+ if curcode:
134
+ yield from do_insertions(
135
+ insertions, gaplexer.get_tokens_unprocessed(curcode))
136
+
137
+ # the following is needed to distinguish Scilab and GAP .tst files
138
+ def analyse_text(text):
139
+ # GAP prompts are a dead give away, although hypothetical;y a
140
+ # file in another language could be trying to compare a variable
141
+ # "gap" as in "gap> 0.1". But that this should happen at the
142
+ # start of a line seems unlikely...
143
+ if re.search(r"^gap> ", text):
144
+ return 0.9
145
+ else:
146
+ return 0.0
147
+
148
+
149
+ class MathematicaLexer(RegexLexer):
150
+ """
151
+ Lexer for Mathematica source code.
152
+ """
153
+ name = 'Mathematica'
154
+ url = 'http://www.wolfram.com/mathematica/'
155
+ aliases = ['mathematica', 'mma', 'nb']
156
+ filenames = ['*.nb', '*.cdf', '*.nbp', '*.ma']
157
+ mimetypes = ['application/mathematica',
158
+ 'application/vnd.wolfram.mathematica',
159
+ 'application/vnd.wolfram.mathematica.package',
160
+ 'application/vnd.wolfram.cdf']
161
+ version_added = '2.0'
162
+
163
+ # http://reference.wolfram.com/mathematica/guide/Syntax.html
164
+ operators = (
165
+ ";;", "=", "=.", "!=" "==", ":=", "->", ":>", "/.", "+", "-", "*", "/",
166
+ "^", "&&", "||", "!", "<>", "|", "/;", "?", "@", "//", "/@", "@@",
167
+ "@@@", "~~", "===", "&", "<", ">", "<=", ">=",
168
+ )
169
+
170
+ punctuation = (",", ";", "(", ")", "[", "]", "{", "}")
171
+
172
+ def _multi_escape(entries):
173
+ return '({})'.format('|'.join(re.escape(entry) for entry in entries))
174
+
175
+ tokens = {
176
+ 'root': [
177
+ (r'(?s)\(\*.*?\*\)', Comment),
178
+
179
+ (r'([a-zA-Z]+[A-Za-z0-9]*`)', Name.Namespace),
180
+ (r'([A-Za-z0-9]*_+[A-Za-z0-9]*)', Name.Variable),
181
+ (r'#\d*', Name.Variable),
182
+ (r'([a-zA-Z]+[a-zA-Z0-9]*)', Name),
183
+
184
+ (r'-?\d+\.\d*', Number.Float),
185
+ (r'-?\d*\.\d+', Number.Float),
186
+ (r'-?\d+', Number.Integer),
187
+
188
+ (words(operators), Operator),
189
+ (words(punctuation), Punctuation),
190
+ (r'".*?"', String),
191
+ (r'\s+', Text.Whitespace),
192
+ ],
193
+ }
194
+
195
+
196
+ class MuPADLexer(RegexLexer):
197
+ """
198
+ A MuPAD lexer.
199
+ Contributed by Christopher Creutzig <christopher@creutzig.de>.
200
+ """
201
+ name = 'MuPAD'
202
+ url = 'http://www.mupad.com'
203
+ aliases = ['mupad']
204
+ filenames = ['*.mu']
205
+ version_added = '0.8'
206
+
207
+ tokens = {
208
+ 'root': [
209
+ (r'//.*?$', Comment.Single),
210
+ (r'/\*', Comment.Multiline, 'comment'),
211
+ (r'"(?:[^"\\]|\\.)*"', String),
212
+ (r'\(|\)|\[|\]|\{|\}', Punctuation),
213
+ (r'''(?x)\b(?:
214
+ next|break|end|
215
+ axiom|end_axiom|category|end_category|domain|end_domain|inherits|
216
+ if|%if|then|elif|else|end_if|
217
+ case|of|do|otherwise|end_case|
218
+ while|end_while|
219
+ repeat|until|end_repeat|
220
+ for|from|to|downto|step|end_for|
221
+ proc|local|option|save|begin|end_proc|
222
+ delete|frame
223
+ )\b''', Keyword),
224
+ (r'''(?x)\b(?:
225
+ DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
226
+ DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
227
+ DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
228
+ DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
229
+ )\b''', Name.Class),
230
+ (r'''(?x)\b(?:
231
+ PI|EULER|E|CATALAN|
232
+ NIL|FAIL|undefined|infinity|
233
+ TRUE|FALSE|UNKNOWN
234
+ )\b''',
235
+ Name.Constant),
236
+ (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
237
+ (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
238
+ (r'''(?x)\b(?:
239
+ and|or|not|xor|
240
+ assuming|
241
+ div|mod|
242
+ union|minus|intersect|in|subset
243
+ )\b''',
244
+ Operator.Word),
245
+ (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
246
+ # (r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
247
+ (r'''(?x)
248
+ ((?:[a-zA-Z_#][\w#]*|`[^`]*`)
249
+ (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*)(\s*)([(])''',
250
+ bygroups(Name.Function, Text, Punctuation)),
251
+ (r'''(?x)
252
+ (?:[a-zA-Z_#][\w#]*|`[^`]*`)
253
+ (?:::[a-zA-Z_#][\w#]*|`[^`]*`)*''', Name.Variable),
254
+ (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
255
+ (r'\.[0-9]+(?:e[0-9]+)?', Number),
256
+ (r'\s+', Whitespace),
257
+ (r'.', Text)
258
+ ],
259
+ 'comment': [
260
+ (r'[^/*]+', Comment.Multiline),
261
+ (r'/\*', Comment.Multiline, '#push'),
262
+ (r'\*/', Comment.Multiline, '#pop'),
263
+ (r'[*/]', Comment.Multiline)
264
+ ],
265
+ }
266
+
267
+
268
+ class BCLexer(RegexLexer):
269
+ """
270
+ A BC lexer.
271
+ """
272
+ name = 'BC'
273
+ url = 'https://www.gnu.org/software/bc/'
274
+ aliases = ['bc']
275
+ filenames = ['*.bc']
276
+ version_added = '2.1'
277
+
278
+ tokens = {
279
+ 'root': [
280
+ (r'/\*', Comment.Multiline, 'comment'),
281
+ (r'"(?:[^"\\]|\\.)*"', String),
282
+ (r'[{}();,]', Punctuation),
283
+ (words(('if', 'else', 'while', 'for', 'break', 'continue',
284
+ 'halt', 'return', 'define', 'auto', 'print', 'read',
285
+ 'length', 'scale', 'sqrt', 'limits', 'quit',
286
+ 'warranty'), suffix=r'\b'), Keyword),
287
+ (r'\+\+|--|\|\||&&|'
288
+ r'([-<>+*%\^/!=])=?', Operator),
289
+ # bc doesn't support exponential
290
+ (r'[0-9]+(\.[0-9]*)?', Number),
291
+ (r'\.[0-9]+', Number),
292
+ (r'.', Text)
293
+ ],
294
+ 'comment': [
295
+ (r'[^*/]+', Comment.Multiline),
296
+ (r'\*/', Comment.Multiline, '#pop'),
297
+ (r'[*/]', Comment.Multiline)
298
+ ],
299
+ }
mgm/lib/python3.10/site-packages/pygments/lexers/apdlexer.py ADDED
@@ -0,0 +1,593 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers.apdlexer
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Lexers for ANSYS Parametric Design Language.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+
13
+ from pygments.lexer import RegexLexer, include, words, default
14
+ from pygments.token import Comment, Keyword, Name, Number, Operator, \
15
+ String, Generic, Punctuation, Whitespace, Escape
16
+
17
+ __all__ = ['apdlexer']
18
+
19
+
20
+ class apdlexer(RegexLexer):
21
+ """
22
+ For APDL source code.
23
+ """
24
+ name = 'ANSYS parametric design language'
25
+ aliases = ['ansys', 'apdl']
26
+ filenames = ['*.ans']
27
+ url = 'https://www.ansys.com'
28
+ version_added = '2.9'
29
+
30
+ flags = re.IGNORECASE
31
+
32
+ # list of elements
33
+ elafunb = ("SURF152", "SURF153", "SURF154", "SURF156", "SHELL157",
34
+ "SURF159", "LINK160", "BEAM161", "PLANE162",
35
+ "SHELL163", "SOLID164", "COMBI165", "MASS166",
36
+ "LINK167", "SOLID168", "TARGE169", "TARGE170",
37
+ "CONTA171", "CONTA172", "CONTA173", "CONTA174",
38
+ "CONTA175", "CONTA176", "CONTA177", "CONTA178",
39
+ "PRETS179", "LINK180", "SHELL181", "PLANE182",
40
+ "PLANE183", "MPC184", "SOLID185", "SOLID186",
41
+ "SOLID187", "BEAM188", "BEAM189", "SOLSH190",
42
+ "INTER192", "INTER193", "INTER194", "INTER195",
43
+ "MESH200", "FOLLW201", "INTER202", "INTER203",
44
+ "INTER204", "INTER205", "SHELL208", "SHELL209",
45
+ "CPT212", "CPT213", "COMBI214", "CPT215", "CPT216",
46
+ "CPT217", "FLUID220", "FLUID221", "PLANE223",
47
+ "SOLID226", "SOLID227", "PLANE230", "SOLID231",
48
+ "SOLID232", "PLANE233", "SOLID236", "SOLID237",
49
+ "PLANE238", "SOLID239", "SOLID240", "HSFLD241",
50
+ "HSFLD242", "SURF251", "SURF252", "REINF263",
51
+ "REINF264", "REINF265", "SOLID272", "SOLID273",
52
+ "SOLID278", "SOLID279", "SHELL281", "SOLID285",
53
+ "PIPE288", "PIPE289", "ELBOW290", "USER300", "BEAM3",
54
+ "BEAM4", "BEAM23", "BEAM24", "BEAM44", "BEAM54",
55
+ "COMBIN7", "FLUID79", "FLUID80", "FLUID81", "FLUID141",
56
+ "FLUID142", "INFIN9", "INFIN47", "PLANE13", "PLANE25",
57
+ "PLANE42", "PLANE53", "PLANE67", "PLANE82", "PLANE83",
58
+ "PLANE145", "PLANE146", "CONTAC12", "CONTAC52",
59
+ "LINK1", "LINK8", "LINK10", "LINK32", "PIPE16",
60
+ "PIPE17", "PIPE18", "PIPE20", "PIPE59", "PIPE60",
61
+ "SHELL41", "SHELL43", "SHELL57", "SHELL63", "SHELL91",
62
+ "SHELL93", "SHELL99", "SHELL150", "SOLID5", "SOLID45",
63
+ "SOLID46", "SOLID65", "SOLID69", "SOLID92", "SOLID95",
64
+ "SOLID117", "SOLID127", "SOLID128", "SOLID147",
65
+ "SOLID148", "SOLID191", "VISCO88", "VISCO89",
66
+ "VISCO106", "VISCO107", "VISCO108", "TRANS109")
67
+
68
+ elafunc = ("PGRAPH", "/VT", "VTIN", "VTRFIL", "VTTEMP", "PGRSET",
69
+ "VTCLR", "VTMETH", "VTRSLT", "VTVMOD", "PGSELE",
70
+ "VTDISC", "VTMP", "VTSEC", "PGWRITE", "VTEVAL", "VTOP",
71
+ "VTSFE", "POUTRES", "VTFREQ", "VTPOST", "VTSL",
72
+ "FLDATA1-40", "HFPCSWP", "MSDATA", "MSVARY", "QFACT",
73
+ "FLOCHECK", "HFPOWER", "MSMASS", "PERI", "SPADP",
74
+ "FLREAD", "HFPORT", "MSMETH", "PLFSS", "SPARM",
75
+ "FLOTRAN", "HFSCAT", "MSMIR", "PLSCH", "SPFSS",
76
+ "HFADP", "ICE", "MSNOMF", "PLSYZ", "SPICE", "HFARRAY",
77
+ "ICEDELE", "MSPROP", "PLTD", "SPSCAN", "HFDEEM",
78
+ "ICELIST", "MSQUAD", "PLTLINE", "SPSWP", "HFEIGOPT",
79
+ "ICVFRC", "MSRELAX", "PLVFRC", "HFEREFINE", "LPRT",
80
+ "MSSOLU", "/PICE", "HFMODPRT", "MSADV", "MSSPEC",
81
+ "PLWAVE", "HFPA", "MSCAP", "MSTERM", "PRSYZ")
82
+
83
+ elafund = ("*VOPER", "VOVLAP", "*VPLOT", "VPLOT", "VPTN", "*VPUT",
84
+ "VPUT", "*VREAD", "VROTAT", "VSBA", "VSBV", "VSBW",
85
+ "/VSCALE", "*VSCFUN", "VSEL", "VSLA", "*VSTAT", "VSUM",
86
+ "VSWEEP", "VSYMM", "VTRAN", "VTYPE", "/VUP", "*VWRITE",
87
+ "/WAIT", "WAVES", "WERASE", "WFRONT", "/WINDOW",
88
+ "WMID", "WMORE", "WPAVE", "WPCSYS", "WPLANE", "WPOFFS",
89
+ "WPROTA", "WPSTYL", "WRFULL", "WRITE", "WRITEMAP",
90
+ "*WRK", "WSORT", "WSPRINGS", "WSTART", "WTBCREATE",
91
+ "XFDATA", "XFENRICH", "XFLIST", "/XFRM", "/XRANGE",
92
+ "XVAR", "/YRANGE", "/ZOOM", "/WB", "XMLO", "/XML",
93
+ "CNTR", "EBLOCK", "CMBLOCK", "NBLOCK", "/TRACK",
94
+ "CWZPLOT", "~EUI", "NELE", "EALL", "NALL", "FLITEM",
95
+ "LSLN", "PSOLVE", "ASLN", "/VERIFY", "/SSS", "~CFIN",
96
+ "*EVAL", "*MOONEY", "/RUNSTAT", "ALPFILL",
97
+ "ARCOLLAPSE", "ARDETACH", "ARFILL", "ARMERGE",
98
+ "ARSPLIT", "FIPLOT", "GAPFINISH", "GAPLIST",
99
+ "GAPMERGE", "GAPOPT", "GAPPLOT", "LNCOLLAPSE",
100
+ "LNDETACH", "LNFILL", "LNMERGE", "LNSPLIT", "PCONV",
101
+ "PLCONV", "PEMOPTS", "PEXCLUDE", "PINCLUDE", "PMETH",
102
+ "/PMETH", "PMOPTS", "PPLOT", "PPRANGE", "PRCONV",
103
+ "PRECISION", "RALL", "RFILSZ", "RITER", "RMEMRY",
104
+ "RSPEED", "RSTAT", "RTIMST", "/RUNST", "RWFRNT",
105
+ "SARPLOT", "SHSD", "SLPPLOT", "SLSPLOT", "VCVFILL",
106
+ "/OPT", "OPEQN", "OPFACT", "OPFRST", "OPGRAD",
107
+ "OPKEEP", "OPLOOP", "OPPRNT", "OPRAND", "OPSUBP",
108
+ "OPSWEEP", "OPTYPE", "OPUSER", "OPVAR", "OPADD",
109
+ "OPCLR", "OPDEL", "OPMAKE", "OPSEL", "OPANL", "OPDATA",
110
+ "OPRESU", "OPSAVE", "OPEXE", "OPLFA", "OPLGR",
111
+ "OPLIST", "OPLSW", "OPRFA", "OPRGR", "OPRSW",
112
+ "PILECALC", "PILEDISPSET", "PILEGEN", "PILELOAD",
113
+ "PILEMASS", "PILERUN", "PILESEL", "PILESTIF",
114
+ "PLVAROPT", "PRVAROPT", "TOCOMP", "TODEF", "TOFREQ",
115
+ "TOTYPE", "TOVAR", "TOEXE", "TOLOOP", "TOGRAPH",
116
+ "TOLIST", "TOPLOT", "TOPRINT", "TOSTAT", "TZAMESH",
117
+ "TZDELE", "TZEGEN", "XVAROPT", "PGSAVE", "SOLCONTROL",
118
+ "TOTAL", "VTGEOM", "VTREAL", "VTSTAT")
119
+
120
+ elafune = ("/ANUM", "AOFFST", "AOVLAP", "APLOT", "APPEND", "APTN",
121
+ "ARCLEN", "ARCTRM", "AREAS", "AREFINE", "AREMESH",
122
+ "AREVERSE", "AROTAT", "ARSCALE", "ARSYM", "ASBA",
123
+ "ASBL", "ASBV", "ASBW", "ASCRES", "ASEL", "ASIFILE",
124
+ "*ASK", "ASKIN", "ASLL", "ASLV", "ASOL", "/ASSIGN",
125
+ "ASUB", "ASUM", "ATAN", "ATRAN", "ATYPE", "/AUTO",
126
+ "AUTOTS", "/AUX2", "/AUX3", "/AUX12", "/AUX15",
127
+ "AVPRIN", "AVRES", "AWAVE", "/AXLAB", "*AXPY",
128
+ "/BATCH", "BCSOPTION", "BETAD", "BF", "BFA", "BFADELE",
129
+ "BFALIST", "BFCUM", "BFDELE", "BFE", "BFECUM",
130
+ "BFEDELE", "BFELIST", "BFESCAL", "BFINT", "BFK",
131
+ "BFKDELE", "BFKLIST", "BFL", "BFLDELE", "BFLIST",
132
+ "BFLLIST", "BFSCALE", "BFTRAN", "BFUNIF", "BFV",
133
+ "BFVDELE", "BFVLIST", "BIOOPT", "BIOT", "BLC4", "BLC5",
134
+ "BLOCK", "BOOL", "BOPTN", "BSAX", "BSMD", "BSM1",
135
+ "BSM2", "BSPLIN", "BSS1", "BSS2", "BSTE", "BSTQ",
136
+ "BTOL", "BUCOPT", "C", "CALC", "CAMPBELL", "CBDOF",
137
+ "CBMD", "CBMX", "CBTE", "CBTMP", "CDOPT", "CDREAD",
138
+ "CDWRITE", "CE", "CECHECK", "CECMOD", "CECYC",
139
+ "CEDELE", "CEINTF", "CELIST", "CENTER", "CEQN",
140
+ "CERIG", "CESGEN", "CFACT", "*CFCLOS", "*CFOPEN",
141
+ "*CFWRITE", "/CFORMAT", "CGLOC", "CGOMGA", "CGROW",
142
+ "CHECK", "CHKMSH", "CINT", "CIRCLE", "CISOL",
143
+ "/CLABEL", "/CLEAR", "CLOCAL", "CLOG", "/CLOG",
144
+ "CLRMSHLN", "CM", "CMACEL", "/CMAP", "CMATRIX",
145
+ "CMDELE", "CMDOMEGA", "CMEDIT", "CMGRP", "CMLIST",
146
+ "CMMOD", "CMOMEGA", "CMPLOT", "CMROTATE", "CMSEL",
147
+ "CMSFILE", "CMSOPT", "CMWRITE", "CNCHECK", "CNKMOD",
148
+ "CNTR", "CNVTOL", "/COLOR", "*COMP", "COMBINE",
149
+ "COMPRESS", "CON4", "CONE", "/CONFIG", "CONJUG",
150
+ "/CONTOUR", "/COPY", "CORIOLIS", "COUPLE", "COVAL",
151
+ "CP", "CPCYC", "CPDELE", "CPINTF", "/CPLANE", "CPLGEN",
152
+ "CPLIST", "CPMERGE", "CPNGEN", "CPSGEN", "CQC",
153
+ "*CREATE", "CRPLIM", "CS", "CSCIR", "CSDELE", "CSKP",
154
+ "CSLIST", "CSWPLA", "CSYS", "/CTYPE", "CURR2D",
155
+ "CUTCONTROL", "/CVAL", "CVAR", "/CWD", "CYCCALC",
156
+ "/CYCEXPAND", "CYCFILES", "CYCFREQ", "*CYCLE",
157
+ "CYCLIC", "CYCOPT", "CYCPHASE", "CYCSPEC", "CYL4",
158
+ "CYL5", "CYLIND", "CZDEL", "CZMESH", "D", "DA",
159
+ "DADELE", "DALIST", "DAMORPH", "DATA", "DATADEF",
160
+ "DCGOMG", "DCUM", "DCVSWP", "DDASPEC", "DDELE",
161
+ "DDOPTION", "DEACT", "DEFINE", "*DEL", "DELETE",
162
+ "/DELETE", "DELTIM", "DELTIME", "DEMORPH", "DERIV", "DESIZE",
163
+ "DESOL", "DETAB", "/DEVDISP", "/DEVICE", "/DFLAB",
164
+ "DFLX", "DFSWAVE", "DIG", "DIGIT", "*DIM",
165
+ "/DIRECTORY", "DISPLAY", "/DIST", "DJ", "DJDELE",
166
+ "DJLIST", "DK", "DKDELE", "DKLIST", "DL", "DLDELE",
167
+ "DLIST", "DLLIST", "*DMAT", "DMOVE", "DMPEXT",
168
+ "DMPOPTION", "DMPRAT", "DMPSTR", "DNSOL", "*DO", "DOF",
169
+ "DOFSEL", "DOMEGA", "*DOT", "*DOWHILE", "DSCALE",
170
+ "/DSCALE", "DSET", "DSPOPTION", "DSUM", "DSURF",
171
+ "DSYM", "DSYS", "DTRAN", "DUMP", "/DV3D", "DVAL",
172
+ "DVMORPH", "DYNOPT", "E", "EALIVE", "EDADAPT", "EDALE",
173
+ "EDASMP", "EDBOUND", "EDBX", "EDBVIS", "EDCADAPT",
174
+ "EDCGEN", "EDCLIST", "EDCMORE", "EDCNSTR", "EDCONTACT",
175
+ "EDCPU", "EDCRB", "EDCSC", "EDCTS", "EDCURVE",
176
+ "EDDAMP", "EDDBL", "EDDC", "EDDRELAX", "EDDUMP",
177
+ "EDELE", "EDENERGY", "EDFPLOT", "EDGCALE", "/EDGE",
178
+ "EDHGLS", "EDHIST", "EDHTIME", "EDINT", "EDIPART",
179
+ "EDIS", "EDLCS", "EDLOAD", "EDMP", "EDNB", "EDNDTSD",
180
+ "EDNROT", "EDOPT", "EDOUT", "EDPART", "EDPC", "EDPL",
181
+ "EDPVEL", "EDRC", "EDRD", "EDREAD", "EDRI", "EDRST",
182
+ "EDRUN", "EDSHELL", "EDSOLV", "EDSP", "EDSTART",
183
+ "EDTERM", "EDTP", "EDVEL", "EDWELD", "EDWRITE",
184
+ "EEXTRUDE", "/EFACET", "EGEN", "*EIGEN", "EINFIN",
185
+ "EINTF", "EKILL", "ELBOW", "ELEM", "ELIST", "*ELSE",
186
+ "*ELSEIF", "EMAGERR", "EMATWRITE", "EMF", "EMFT",
187
+ "EMID", "EMIS", "EMODIF", "EMORE", "EMSYM", "EMTGEN",
188
+ "EMUNIT", "EN", "*END", "*ENDDO", "*ENDIF",
189
+ "ENDRELEASE", "ENERSOL", "ENGEN", "ENORM", "ENSYM",
190
+ "EORIENT", "EPLOT", "EQSLV", "ERASE", "/ERASE",
191
+ "EREAD", "EREFINE", "EREINF", "ERESX", "ERNORM",
192
+ "ERRANG", "ESCHECK", "ESEL", "/ESHAPE", "ESIZE",
193
+ "ESLA", "ESLL", "ESLN", "ESLV", "ESOL", "ESORT",
194
+ "ESSOLV", "ESTIF", "ESURF", "ESYM", "ESYS", "ET",
195
+ "ETABLE", "ETCHG", "ETCONTROL", "ETDELE", "ETLIST",
196
+ "ETYPE", "EUSORT", "EWRITE", "*EXIT", "/EXIT", "EXP",
197
+ "EXPAND", "/EXPAND", "EXPASS", "*EXPORT", "EXPROFILE",
198
+ "EXPSOL", "EXTOPT", "EXTREM", "EXUNIT", "F", "/FACET",
199
+ "FATIGUE", "FC", "FCCHECK", "FCDELE", "FCLIST", "FCUM",
200
+ "FCTYP", "FDELE", "/FDELE", "FE", "FEBODY", "FECONS",
201
+ "FEFOR", "FELIST", "FESURF", "*FFT", "FILE",
202
+ "FILEAUX2", "FILEAUX3", "FILEDISP", "FILL", "FILLDATA",
203
+ "/FILNAME", "FINISH", "FITEM", "FJ", "FJDELE",
204
+ "FJLIST", "FK", "FKDELE", "FKLIST", "FL", "FLIST",
205
+ "FLLIST", "FLST", "FLUXV", "FLUREAD", "FMAGBC",
206
+ "FMAGSUM", "/FOCUS", "FOR2D", "FORCE", "FORM",
207
+ "/FORMAT", "FP", "FPLIST", "*FREE", "FREQ", "FRQSCL",
208
+ "FS", "FSCALE", "FSDELE", "FSLIST", "FSNODE", "FSPLOT",
209
+ "FSSECT", "FSSPARM", "FSUM", "FTCALC", "FTRAN",
210
+ "FTSIZE", "FTWRITE", "FTYPE", "FVMESH", "GAP", "GAPF",
211
+ "GAUGE", "GCDEF", "GCGEN", "/GCMD", "/GCOLUMN",
212
+ "GENOPT", "GEOM", "GEOMETRY", "*GET", "/GFILE",
213
+ "/GFORMAT", "/GLINE", "/GMARKER", "GMATRIX", "GMFACE",
214
+ "*GO", "/GO", "/GOLIST", "/GOPR", "GP", "GPDELE",
215
+ "GPLIST", "GPLOT", "/GRAPHICS", "/GRESUME", "/GRID",
216
+ "/GROPT", "GRP", "/GRTYP", "/GSAVE", "GSBDATA",
217
+ "GSGDATA", "GSLIST", "GSSOL", "/GST", "GSUM", "/GTHK",
218
+ "/GTYPE", "HARFRQ", "/HBC", "HBMAT", "/HEADER", "HELP",
219
+ "HELPDISP", "HEMIOPT", "HFANG", "HFSYM", "HMAGSOLV",
220
+ "HPGL", "HPTCREATE", "HPTDELETE", "HRCPLX", "HREXP",
221
+ "HROPT", "HROCEAN", "HROUT", "IC", "ICDELE", "ICLIST",
222
+ "/ICLWID", "/ICSCALE", "*IF", "IGESIN", "IGESOUT",
223
+ "/IMAGE", "IMAGIN", "IMESH", "IMMED", "IMPD",
224
+ "INISTATE", "*INIT", "/INPUT", "/INQUIRE", "INRES",
225
+ "INRTIA", "INT1", "INTSRF", "IOPTN", "IRLF", "IRLIST",
226
+ "*ITENGINE", "JPEG", "JSOL", "K", "KATT", "KBC",
227
+ "KBETW", "KCALC", "KCENTER", "KCLEAR", "KDELE",
228
+ "KDIST", "KEEP", "KESIZE", "KEYOPT", "KEYPTS", "KEYW",
229
+ "KFILL", "KGEN", "KL", "KLIST", "KMESH", "KMODIF",
230
+ "KMOVE", "KNODE", "KPLOT", "KPSCALE", "KREFINE",
231
+ "KSCALE", "KSCON", "KSEL", "KSLL", "KSLN", "KSUM",
232
+ "KSYMM", "KTRAN", "KUSE", "KWPAVE", "KWPLAN", "L",
233
+ "L2ANG", "L2TAN", "LANG", "LARC", "/LARC", "LAREA",
234
+ "LARGE", "LATT", "LAYER", "LAYERP26", "LAYLIST",
235
+ "LAYPLOT", "LCABS", "LCASE", "LCCALC", "LCCAT",
236
+ "LCDEF", "LCFACT", "LCFILE", "LCLEAR", "LCOMB",
237
+ "LCOPER", "LCSEL", "LCSL", "LCSUM", "LCWRITE",
238
+ "LCZERO", "LDELE", "LDIV", "LDRAG", "LDREAD", "LESIZE",
239
+ "LEXTND", "LFILLT", "LFSURF", "LGEN", "LGLUE",
240
+ "LGWRITE", "/LIGHT", "LINA", "LINE", "/LINE", "LINES",
241
+ "LINL", "LINP", "LINV", "LIST", "*LIST", "LLIST",
242
+ "LMATRIX", "LMESH", "LNSRCH", "LOCAL", "LOVLAP",
243
+ "LPLOT", "LPTN", "LREFINE", "LREVERSE", "LROTAT",
244
+ "LSBA", "*LSBAC", "LSBL", "LSBV", "LSBW", "LSCLEAR",
245
+ "LSDELE", "*LSDUMP", "LSEL", "*LSENGINE", "*LSFACTOR",
246
+ "LSLA", "LSLK", "LSOPER", "/LSPEC", "LSREAD",
247
+ "*LSRESTORE", "LSSCALE", "LSSOLVE", "LSTR", "LSUM",
248
+ "LSWRITE", "/LSYMBOL", "LSYMM", "LTAN", "LTRAN",
249
+ "LUMPM", "LVSCALE", "LWPLAN", "M", "MADAPT", "MAGOPT",
250
+ "MAGSOLV", "/MAIL", "MAP", "/MAP", "MAP2DTO3D",
251
+ "MAPSOLVE", "MAPVAR", "MASTER", "MAT", "MATER",
252
+ "MCHECK", "MDAMP", "MDELE", "MDPLOT", "MEMM", "/MENU",
253
+ "MESHING", "MFANALYSIS", "MFBUCKET", "MFCALC", "MFCI",
254
+ "MFCLEAR", "MFCMMAND", "MFCONV", "MFDTIME", "MFELEM",
255
+ "MFEM", "MFEXTER", "MFFNAME", "MFFR", "MFIMPORT",
256
+ "MFINTER", "MFITER", "MFLCOMM", "MFLIST", "MFMAP",
257
+ "MFORDER", "MFOUTPUT", "*MFOURI", "MFPSIMUL", "MFRC",
258
+ "MFRELAX", "MFRSTART", "MFSORDER", "MFSURFACE",
259
+ "MFTIME", "MFTOL", "*MFUN", "MFVOLUME", "MFWRITE",
260
+ "MGEN", "MIDTOL", "/MKDIR", "MLIST", "MMASS", "MMF",
261
+ "MODCONT", "MODE", "MODIFY", "MODMSH", "MODSELOPTION",
262
+ "MODOPT", "MONITOR", "*MOPER", "MOPT", "MORPH", "MOVE",
263
+ "MP", "MPAMOD", "MPCHG", "MPCOPY", "MPDATA", "MPDELE",
264
+ "MPDRES", "/MPLIB", "MPLIST", "MPPLOT", "MPREAD",
265
+ "MPRINT", "MPTEMP", "MPTGEN", "MPTRES", "MPWRITE",
266
+ "/MREP", "MSAVE", "*MSG", "MSHAPE", "MSHCOPY",
267
+ "MSHKEY", "MSHMID", "MSHPATTERN", "MSOLVE", "/MSTART",
268
+ "MSTOLE", "*MULT", "*MWRITE", "MXPAND", "N", "NANG",
269
+ "NAXIS", "NCNV", "NDELE", "NDIST", "NDSURF", "NEQIT",
270
+ "/NERR", "NFORCE", "NGEN", "NKPT", "NLADAPTIVE",
271
+ "NLDIAG", "NLDPOST", "NLGEOM", "NLHIST", "NLIST",
272
+ "NLMESH", "NLOG", "NLOPT", "NMODIF", "NOCOLOR",
273
+ "NODES", "/NOERASE", "/NOLIST", "NOOFFSET", "NOORDER",
274
+ "/NOPR", "NORA", "NORL", "/NORMAL", "NPLOT", "NPRINT",
275
+ "NREAD", "NREFINE", "NRLSUM", "*NRM", "NROPT",
276
+ "NROTAT", "NRRANG", "NSCALE", "NSEL", "NSLA", "NSLE",
277
+ "NSLK", "NSLL", "NSLV", "NSMOOTH", "NSOL", "NSORT",
278
+ "NSTORE", "NSUBST", "NSVR", "NSYM", "/NUMBER",
279
+ "NUMCMP", "NUMEXP", "NUMMRG", "NUMOFF", "NUMSTR",
280
+ "NUMVAR", "NUSORT", "NWPAVE", "NWPLAN", "NWRITE",
281
+ "OCDATA", "OCDELETE", "OCLIST", "OCREAD", "OCTABLE",
282
+ "OCTYPE", "OCZONE", "OMEGA", "OPERATE", "OPNCONTROL",
283
+ "OUTAERO", "OUTOPT", "OUTPR", "/OUTPUT", "OUTRES",
284
+ "OVCHECK", "PADELE", "/PAGE", "PAGET", "PAPUT",
285
+ "PARESU", "PARTSEL", "PARRES", "PARSAV", "PASAVE",
286
+ "PATH", "PAUSE", "/PBC", "/PBF", "PCALC", "PCGOPT",
287
+ "PCIRC", "/PCIRCLE", "/PCOPY", "PCROSS", "PDANL",
288
+ "PDCDF", "PDCFLD", "PDCLR", "PDCMAT", "PDCORR",
289
+ "PDDMCS", "PDDOEL", "PDEF", "PDEXE", "PDHIST",
290
+ "PDINQR", "PDLHS", "PDMETH", "PDOT", "PDPINV",
291
+ "PDPLOT", "PDPROB", "PDRESU", "PDROPT", "/PDS",
292
+ "PDSAVE", "PDSCAT", "PDSENS", "PDSHIS", "PDUSER",
293
+ "PDVAR", "PDWRITE", "PERBC2D", "PERTURB", "PFACT",
294
+ "PHYSICS", "PIVCHECK", "PLCAMP", "PLCFREQ", "PLCHIST",
295
+ "PLCINT", "PLCPLX", "PLCRACK", "PLDISP", "PLESOL",
296
+ "PLETAB", "PLFAR", "PLF2D", "PLGEOM", "PLLS", "PLMAP",
297
+ "PLMC", "PLNEAR", "PLNSOL", "/PLOPTS", "PLORB", "PLOT",
298
+ "PLOTTING", "PLPAGM", "PLPATH", "PLSECT", "PLST",
299
+ "PLTIME", "PLTRAC", "PLVAR", "PLVECT", "PLZZ",
300
+ "/PMACRO", "PMAP", "PMGTRAN", "PMLOPT", "PMLSIZE",
301
+ "/PMORE", "PNGR", "/PNUM", "POINT", "POLY", "/POLYGON",
302
+ "/POST1", "/POST26", "POWERH", "PPATH", "PRANGE",
303
+ "PRAS", "PRCAMP", "PRCINT", "PRCPLX", "PRED",
304
+ "PRENERGY", "/PREP7", "PRERR", "PRESOL", "PRETAB",
305
+ "PRFAR", "PRI2", "PRIM", "PRINT", "*PRINT", "PRISM",
306
+ "PRITER", "PRJSOL", "PRNEAR", "PRNLD", "PRNSOL",
307
+ "PROD", "PRORB", "PRPATH", "PRRFOR", "PRRSOL",
308
+ "PRSCONTROL", "PRSECT", "PRTIME", "PRVAR", "PRVECT",
309
+ "PSCONTROL", "PSCR", "PSDCOM", "PSDFRQ", "PSDGRAPH",
310
+ "PSDRES", "PSDSPL", "PSDUNIT", "PSDVAL", "PSDWAV",
311
+ "/PSEARCH", "PSEL", "/PSF", "PSMAT", "PSMESH",
312
+ "/PSPEC", "/PSTATUS", "PSTRES", "/PSYMB", "PTR",
313
+ "PTXY", "PVECT", "/PWEDGE", "QDVAL", "QRDOPT", "QSOPT",
314
+ "QUAD", "/QUIT", "QUOT", "R", "RACE", "RADOPT",
315
+ "RAPPND", "RATE", "/RATIO", "RBE3", "RCON", "RCYC",
316
+ "RDEC", "RDELE", "READ", "REAL", "REALVAR", "RECTNG",
317
+ "REMESH", "/RENAME", "REORDER", "*REPEAT", "/REPLOT",
318
+ "RESCOMBINE", "RESCONTROL", "RESET", "/RESET", "RESP",
319
+ "RESUME", "RESVEC", "RESWRITE", "*RETURN", "REXPORT",
320
+ "REZONE", "RFORCE", "/RGB", "RIGID", "RIGRESP",
321
+ "RIMPORT", "RLIST", "RMALIST", "RMANL", "RMASTER",
322
+ "RMCAP", "RMCLIST", "/RMDIR", "RMFLVEC", "RMLVSCALE",
323
+ "RMMLIST", "RMMRANGE", "RMMSELECT", "RMNDISP",
324
+ "RMNEVEC", "RMODIF", "RMORE", "RMPORDER", "RMRESUME",
325
+ "RMRGENERATE", "RMROPTIONS", "RMRPLOT", "RMRSTATUS",
326
+ "RMSAVE", "RMSMPLE", "RMUSE", "RMXPORT", "ROCK",
327
+ "ROSE", "RPOLY", "RPR4", "RPRISM", "RPSD", "RSFIT",
328
+ "RSOPT", "RSPLIT", "RSPLOT", "RSPRNT", "RSSIMS",
329
+ "RSTMAC", "RSTOFF", "RSURF", "RSYMM", "RSYS", "RTHICK",
330
+ "SABS", "SADD", "SALLOW", "SAVE", "SBCLIST", "SBCTRAN",
331
+ "SDELETE", "SE", "SECCONTROL", "SECDATA",
332
+ "SECFUNCTION", "SECJOINT", "/SECLIB", "SECLOCK",
333
+ "SECMODIF", "SECNUM", "SECOFFSET", "SECPLOT",
334
+ "SECREAD", "SECSTOP", "SECTYPE", "SECWRITE", "SED",
335
+ "SEDLIST", "SEEXP", "/SEG", "SEGEN", "SELIST", "SELM",
336
+ "SELTOL", "SENERGY", "SEOPT", "SESYMM", "*SET", "SET",
337
+ "SETFGAP", "SETRAN", "SEXP", "SF", "SFA", "SFACT",
338
+ "SFADELE", "SFALIST", "SFBEAM", "SFCALC", "SFCUM",
339
+ "SFDELE", "SFE", "SFEDELE", "SFELIST", "SFFUN",
340
+ "SFGRAD", "SFL", "SFLDELE", "SFLEX", "SFLIST",
341
+ "SFLLIST", "SFSCALE", "SFTRAN", "/SHADE", "SHELL",
342
+ "/SHOW", "/SHOWDISP", "SHPP", "/SHRINK", "SLIST",
343
+ "SLOAD", "SMALL", "*SMAT", "SMAX", "/SMBC", "SMBODY",
344
+ "SMCONS", "SMFOR", "SMIN", "SMOOTH", "SMRTSIZE",
345
+ "SMSURF", "SMULT", "SNOPTION", "SOLU", "/SOLU",
346
+ "SOLUOPT", "SOLVE", "SORT", "SOURCE", "SPACE",
347
+ "SPCNOD", "SPCTEMP", "SPDAMP", "SPEC", "SPFREQ",
348
+ "SPGRAPH", "SPH4", "SPH5", "SPHERE", "SPLINE", "SPLOT",
349
+ "SPMWRITE", "SPOINT", "SPOPT", "SPREAD", "SPTOPT",
350
+ "SPOWER", "SPUNIT", "SPVAL", "SQRT", "*SREAD", "SRSS",
351
+ "SSBT", "/SSCALE", "SSLN", "SSMT", "SSPA", "SSPB",
352
+ "SSPD", "SSPE", "SSPM", "SSUM", "SSTATE", "STABILIZE",
353
+ "STAOPT", "STAT", "*STATUS", "/STATUS", "STEF",
354
+ "STORE", "SUBOPT", "SUBSET", "SUCALC",
355
+ "SUCR", "SUDEL", "SUEVAL", "SUGET", "SUMAP", "SUMTYPE",
356
+ "SUPL", "SUPR", "SURESU", "SUSAVE", "SUSEL", "SUVECT",
357
+ "SV", "SVPLOT", "SVTYP", "SWADD", "SWDEL", "SWGEN",
358
+ "SWLIST", "SYNCHRO", "/SYP", "/SYS", "TALLOW",
359
+ "TARGET", "*TAXIS", "TB", "TBCOPY", "TBDATA", "TBDELE",
360
+ "TBEO", "TBIN", "TBFIELD", "TBFT", "TBLE", "TBLIST",
361
+ "TBMODIF", "TBPLOT", "TBPT", "TBTEMP", "TCHG", "/TEE",
362
+ "TERM", "THEXPAND", "THOPT", "TIFF", "TIME",
363
+ "TIMERANGE", "TIMINT", "TIMP", "TINTP",
364
+ "/TLABEL", "TOFFST", "*TOPER", "TORQ2D", "TORQC2D",
365
+ "TORQSUM", "TORUS", "TRANS", "TRANSFER", "*TREAD",
366
+ "TREF", "/TRIAD", "/TRLCY", "TRNOPT", "TRPDEL",
367
+ "TRPLIS", "TRPOIN", "TRTIME", "TSHAP", "/TSPEC",
368
+ "TSRES", "TUNIF", "TVAR", "/TXTRE", "/TYPE", "TYPE",
369
+ "/UCMD", "/UDOC", "/UI", "UIMP", "/UIS", "*ULIB", "/UPF",
370
+ "UNDELETE", "UNDO", "/UNITS", "UNPAUSE", "UPCOORD",
371
+ "UPGEOM", "*USE", "/USER", "USRCAL", "USRDOF",
372
+ "USRELEM", "V", "V2DOPT", "VA", "*VABS", "VADD",
373
+ "VARDEL", "VARNAM", "VATT", "VCLEAR", "*VCOL",
374
+ "/VCONE", "VCROSS", "*VCUM", "VDDAM", "VDELE", "VDGL",
375
+ "VDOT", "VDRAG", "*VEC", "*VEDIT", "VEORIENT", "VEXT",
376
+ "*VFACT", "*VFILL", "VFOPT", "VFQUERY", "VFSM",
377
+ "*VFUN", "VGEN", "*VGET", "VGET", "VGLUE", "/VIEW",
378
+ "VIMP", "VINP", "VINV", "*VITRP", "*VLEN", "VLIST",
379
+ "VLSCALE", "*VMASK", "VMESH", "VOFFST", "VOLUMES")
380
+
381
+ # list of in-built () functions
382
+ elafunf = ("NX()", "NY()", "NZ()", "KX()", "KY()", "KZ()", "LX()",
383
+ "LY()", "LZ()", "LSX()", "LSY()", "LSZ()", "NODE()",
384
+ "KP()", "DISTND()", "DISTKP()", "DISTEN()", "ANGLEN()",
385
+ "ANGLEK()", "NNEAR()", "KNEAR()", "ENEARN()",
386
+ "AREAND()", "AREAKP()", "ARNODE()", "NORMNX()",
387
+ "NORMNY()", "NORMNZ()", "NORMKX()", "NORMKY()",
388
+ "NORMKZ()", "ENEXTN()", "NELEM()", "NODEDOF()",
389
+ "ELADJ()", "NDFACE()", "NMFACE()", "ARFACE()", "UX()",
390
+ "UY()", "UZ()", "ROTX()", "ROTY()", "ROTZ()", "TEMP()",
391
+ "PRES()", "VX()", "VY()", "VZ()", "ENKE()", "ENDS()",
392
+ "VOLT()", "MAG()", "AX()", "AY()", "AZ()",
393
+ "VIRTINQR()", "KWGET()", "VALCHR()", "VALHEX()",
394
+ "CHRHEX()", "STRFILL()", "STRCOMP()", "STRPOS()",
395
+ "STRLENG()", "UPCASE()", "LWCASE()", "JOIN()",
396
+ "SPLIT()", "ABS()", "SIGN()", "CXABS()", "EXP()",
397
+ "LOG()", "LOG10()", "SQRT()", "NINT()", "MOD()",
398
+ "RAND()", "GDIS()", "SIN()", "COS()", "TAN()",
399
+ "SINH()", "COSH()", "TANH()", "ASIN()", "ACOS()",
400
+ "ATAN()", "ATAN2()")
401
+
402
+ elafung = ("NSEL()", "ESEL()", "KSEL()", "LSEL()", "ASEL()",
403
+ "VSEL()", "NDNEXT()", "ELNEXT()", "KPNEXT()",
404
+ "LSNEXT()", "ARNEXT()", "VLNEXT()", "CENTRX()",
405
+ "CENTRY()", "CENTRZ()")
406
+
407
+ elafunh = ("~CAT5IN", "~CATIAIN", "~PARAIN", "~PROEIN", "~SATIN",
408
+ "~UGIN", "A", "AADD", "AATT", "ABEXTRACT", "*ABBR",
409
+ "ABBRES", "ABBSAV", "ABS", "ACCAT", "ACCOPTION",
410
+ "ACEL", "ACLEAR", "ADAMS", "ADAPT", "ADD", "ADDAM",
411
+ "ADELE", "ADGL", "ADRAG", "AESIZE", "AFILLT", "AFLIST",
412
+ "AFSURF", "*AFUN", "AGEN", "AGLUE", "AINA", "AINP",
413
+ "AINV", "AL", "ALIST", "ALLSEL", "ALPHAD", "AMAP",
414
+ "AMESH", "/AN3D", "ANCNTR", "ANCUT", "ANCYC", "ANDATA",
415
+ "ANDSCL", "ANDYNA", "/ANFILE", "ANFLOW", "/ANGLE",
416
+ "ANHARM", "ANIM", "ANISOS", "ANMODE", "ANMRES",
417
+ "/ANNOT", "ANORM", "ANPRES", "ANSOL", "ANSTOAQWA",
418
+ "ANSTOASAS", "ANTIME", "ANTYPE")
419
+
420
+ special = ("/COM", "/TITLE", "STITLE")
421
+
422
+ elements = ("SOLID5",
423
+ "LINK11",
424
+ "PLANE13",
425
+ "COMBIN14",
426
+ "MASS2",
427
+ "PLANE25",
428
+ "MATRIX27",
429
+ "FLUID29",
430
+ "FLUID30",
431
+ "LINK31",
432
+ "LINK33",
433
+ "LINK34",
434
+ "PLANE35",
435
+ "SOURC36",
436
+ "COMBIN37",
437
+ "FLUID38",
438
+ "COMBIN39",
439
+ "COMBIN40",
440
+ "INFIN47",
441
+ "MATRIX50",
442
+ "PLANE55",
443
+ "SHELL61",
444
+ "LINK68",
445
+ "SOLID70",
446
+ "MASS71",
447
+ "PLANE75",
448
+ "PLANE77",
449
+ "PLANE78",
450
+ "PLANE83",
451
+ "SOLID87",
452
+ "SOLID90",
453
+ "CIRCU94",
454
+ "SOLID96",
455
+ "SOLID98",
456
+ "INFIN110",
457
+ "INFIN111",
458
+ "FLUID116",
459
+ "PLANE121",
460
+ "SOLID122",
461
+ "SOLID123",
462
+ "CIRCU124",
463
+ "CIRCU125",
464
+ "TRANS126",
465
+ "FLUID129",
466
+ "FLUID130",
467
+ "SHELL131",
468
+ "SHELL132",
469
+ "FLUID136",
470
+ "FLUID138",
471
+ "FLUID139",
472
+ "SURF151",
473
+ "SURF152",
474
+ "SURF153",
475
+ "SURF154",
476
+ "SURF155",
477
+ "SURF156",
478
+ "SHELL157",
479
+ "SURF159",
480
+ "TARGE169",
481
+ "TARGE170",
482
+ "CONTA172",
483
+ "CONTA174",
484
+ "CONTA175",
485
+ "CONTA177",
486
+ "CONTA178",
487
+ "PRETS179",
488
+ "LINK180",
489
+ "SHELL181",
490
+ "PLANE182",
491
+ "PLANE183",
492
+ "MPC184",
493
+ "SOLID185",
494
+ "SOLID186",
495
+ "SOLID187",
496
+ "BEAM188",
497
+ "BEAM189",
498
+ "SOLSH190",
499
+ "INTER192",
500
+ "INTER193",
501
+ "INTER194",
502
+ "INTER195",
503
+ "MESH200",
504
+ "FOLLW201",
505
+ "INTER202",
506
+ "INTER203",
507
+ "INTER204",
508
+ "INTER205",
509
+ "SHELL208",
510
+ "SHELL209",
511
+ "CPT212",
512
+ "CPT213",
513
+ "COMBI214",
514
+ "CPT215",
515
+ "CPT216",
516
+ "CPT217",
517
+ "FLUID218",
518
+ "FLUID220",
519
+ "FLUID221",
520
+ "PLANE222",
521
+ "PLANE223",
522
+ "SOLID225",
523
+ "SOLID226",
524
+ "SOLID227",
525
+ "PLANE230",
526
+ "SOLID231",
527
+ "SOLID232",
528
+ "PLANE233",
529
+ "SOLID236",
530
+ "SOLID237",
531
+ "PLANE238",
532
+ "SOLID239",
533
+ "SOLID240",
534
+ "HSFLD241",
535
+ "HSFLD242",
536
+ "COMBI250",
537
+ "SURF251",
538
+ "SURF252",
539
+ "INFIN257",
540
+ "REINF263",
541
+ "REINF264",
542
+ "REINF265",
543
+ "SOLID272",
544
+ "SOLID273",
545
+ "SOLID278",
546
+ "SOLID279",
547
+ "CABLE280",
548
+ "SHELL281",
549
+ "SOLID285",
550
+ "PIPE288",
551
+ "PIPE289",
552
+ "ELBOW290",
553
+ "SOLID291",
554
+ "PLANE292",
555
+ "PLANE293",
556
+ "USER300")
557
+
558
+ tokens = {
559
+ 'root': [
560
+ (r'[^\S\n]+', Whitespace),
561
+ (words((elafunb+elafunc+elafund+elafune+elafunh+special), suffix=r'\b'), Keyword, 'non-keyword'),
562
+ default('non-keyword'),
563
+ ],
564
+ 'non-keyword': [
565
+ (r'!.*\n', Comment, '#pop'),
566
+ (r'%.*?%', Escape),
567
+ include('strings'),
568
+ include('nums'),
569
+ (words((elafunf+elafung), suffix=r'\b'), Name.Builtin),
570
+ (words((elements), suffix=r'\b'), Name.Property),
571
+ include('core'),
572
+ (r'AR[0-9]+', Name.Variable.Instance),
573
+ (r'[a-z_][a-z0-9_]*', Name.Variable),
574
+ (r'\n+', Whitespace, '#pop'),
575
+ (r'[^\S\n]+', Whitespace),
576
+ ],
577
+ 'core': [
578
+ # Operators
579
+ (r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=|\(|\))', Operator),
580
+ (r'/EOF', Generic.Emph),
581
+ (r'[\.(),:&;]', Punctuation),
582
+ ],
583
+ 'strings': [
584
+ (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
585
+ (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
586
+ (r'[$%]', String.Symbol),
587
+ ],
588
+ 'nums': [
589
+ (r'[+-]?\d*\.\d+([efEF][-+]?\d+)?', Number.Float), # with dot
590
+ (r'([+-]?\d+([efEF][-+]?\d+))', Number.Float), # With scientific notation
591
+ (r'\b\d+(?![.ef])', Number.Integer), # integer simple
592
+ ]
593
+ }
mgm/lib/python3.10/site-packages/pygments/lexers/blueprint.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers.blueprint
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Lexer for the Blueprint UI markup language.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ import re
12
+
13
+ from pygments.lexer import RegexLexer, include, bygroups, words
14
+ from pygments.token import (
15
+ Comment,
16
+ Operator,
17
+ Keyword,
18
+ Name,
19
+ String,
20
+ Number,
21
+ Punctuation,
22
+ Whitespace,
23
+ )
24
+
25
+ __all__ = ["BlueprintLexer"]
26
+
27
+
28
class BlueprintLexer(RegexLexer):
    """
    For Blueprint UI markup.

    Blueprint is a declarative UI markup language (see ``url``).  The
    grammar here is expressed purely as Pygments state tables; rule
    order inside each state is significant, so more specific patterns
    are deliberately listed before more general ones.
    """

    name = "Blueprint"
    aliases = ["blueprint"]
    filenames = ["*.blp"]
    mimetypes = ["text/x-blueprint"]
    url = "https://gitlab.gnome.org/jwestman/blueprint-compiler"
    version_added = '2.16'

    # All patterns below are matched case-insensitively.
    flags = re.IGNORECASE
    tokens = {
        # The top level of a file is ordinary block content.
        "root": [
            include("block-content"),
        ],
        # A type reference: either "$name" (user-defined type) or an
        # optionally namespace-qualified "Namespace.Class" name.
        "type": [
            (r"\$\s*[a-z_][a-z0-9_\-]*", Name.Class),
            (r"(?:([a-z_][a-z0-9_\-]*)(\s*)(\.)(\s*))?([a-z_][a-z0-9_\-]*)",
             bygroups(Name.Namespace, Whitespace, Punctuation, Whitespace, Name.Class)),
        ],
        # Whitespace, "//" line comments, and "/* ... */" block comments.
        "whitespace": [
            (r"\s+", Whitespace),
            (r"//.*?\n", Comment.Single),
            (r"/\*", Comment.Multiline, "comment-multiline"),
        ],
        "comment-multiline": [
            (r"\*/", Comment.Multiline, "#pop"),
            (r"[^*]+", Comment.Multiline),
            # A lone '*' that does not terminate the comment.
            (r"\*", Comment.Multiline),
        ],
        # Literal values: typeof<...> expressions, keyword constants,
        # bare identifiers, flag combinations ('|'), strings, numbers.
        "value": [
            (r"(typeof)(\s*)(<)", bygroups(Keyword, Whitespace, Punctuation), "typeof"),
            (words(("true", "false", "null")), Keyword.Constant),
            (r"[a-z_][a-z0-9_\-]*", Name.Variable),
            (r"\|", Operator),
            (r'".*?"', String.Double),
            (r"\'.*?\'", String.Single),
            (r"0x[\d_]*", Number.Hex),
            (r"[0-9_]+", Number.Integer),
            # Catch-all for other numeric forms (e.g. decimals with suffixes).
            (r"\d[\d\.a-z_]*", Number),
        ],
        # Inside the angle brackets of typeof<...> or a cast as<...>.
        "typeof": [
            include("whitespace"),
            include("type"),
            (r">", Punctuation, "#pop"),
        ],
        # Expression-level content (values, keywords, closures, objects).
        "content": [
            include("whitespace"),
            # Keywords
            (words(("after", "bidirectional", "bind-property", "bind", "default",
                    "destructive", "disabled", "inverted", "no-sync-create",
                    "suggested", "swapped", "sync-create", "template")),
             Keyword),
            # Translated strings: _("...") and C_("context", "...")
            (r"(C?_)(\s*)(\()",
             bygroups(Name.Function.Builtin, Whitespace, Punctuation),
             "paren-content"),
            # Cast expressions
            (r"(as)(\s*)(<)", bygroups(Keyword, Whitespace, Punctuation), "typeof"),
            # Closures
            (r"(\$?[a-z_][a-z0-9_\-]*)(\s*)(\()",
             bygroups(Name.Function, Whitespace, Punctuation),
             "paren-content"),
            # Objects: "$Type { ... }" or "Namespace.Class [id] { ... }"
            (r"(?:(\$\s*[a-z_][a-z0-9_\-]+)|(?:([a-z_][a-z0-9_\-]*)(\s*)(\.)(\s*))?([a-z_][a-z0-9_\-]*))(?:(\s+)([a-z_][a-z0-9_\-]*))?(\s*)(\{)",
             bygroups(Name.Class, Name.Namespace, Whitespace, Punctuation, Whitespace,
                      Name.Class, Whitespace, Name.Variable, Whitespace, Punctuation),
             "brace-block"),
            # Misc
            include("value"),
            (r",|\.", Punctuation),
        ],
        # Statement-level content: things that appear inside a { } block.
        "block-content": [
            # Import statements
            (r"(using)(\s+)([a-z_][a-z0-9_\-]*)(\s+)(\d[\d\.]*)(;)",
             bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
                      Name.Namespace, Punctuation)),
            # Menus
            (r"(menu|section|submenu)(?:(\s+)([a-z_][a-z0-9_\-]*))?(\s*)(\{)",
             bygroups(Keyword, Whitespace, Name.Variable, Whitespace, Punctuation),
             "brace-block"),
            (r"(item)(\s*)(\{)",
             bygroups(Keyword, Whitespace, Punctuation),
             "brace-block"),
            (r"(item)(\s*)(\()",
             bygroups(Keyword, Whitespace, Punctuation),
             "paren-block"),
            # Templates
            (r"template", Keyword.Declaration, "template"),
            # Nested blocks. When extensions are added, this is where they go.
            (r"(responses|items|mime-types|patterns|suffixes|marks|widgets|strings|styles)(\s*)(\[)",
             bygroups(Keyword, Whitespace, Punctuation),
             "bracket-block"),
            (r"(accessibility|setters|layout|item)(\s*)(\{)",
             bygroups(Keyword, Whitespace, Punctuation),
             "brace-block"),
            (r"(condition|mark|item)(\s*)(\()",
             bygroups(Keyword, Whitespace, Punctuation),
             "paren-content"),
            # "[child-type]" annotation preceding an object.
            (r"\[", Punctuation, "child-type"),
            # Properties ("name:") and signals ("name::detail =>").
            (r"([a-z_][a-z0-9_\-]*(?:::[a-z0-9_]+)?)(\s*)(:|=>)",
             bygroups(Name.Property, Whitespace, Punctuation),
             "statement"),
            include("content"),
        ],
        # The four delimiter-scoped wrappers below differ only in what
        # they contain and which closing delimiter pops the state.
        "paren-block": [
            include("block-content"),
            (r"\)", Punctuation, "#pop"),
        ],
        "paren-content": [
            include("content"),
            (r"\)", Punctuation, "#pop"),
        ],
        "bracket-block": [
            include("block-content"),
            (r"\]", Punctuation, "#pop"),
        ],
        "brace-block": [
            include("block-content"),
            (r"\}", Punctuation, "#pop"),
        ],
        # A property/signal value, terminated by ';'.
        "statement": [
            include("content"),
            (r";", Punctuation, "#pop"),
        ],
        # The inside of a "[...]" child-type annotation.
        "child-type": [
            include("whitespace"),
            (r"(action)(\s+)(response)(\s*)(=)(\s*)",
             bygroups(Keyword, Whitespace, Name.Attribute, Whitespace,
                      Punctuation, Whitespace)),
            (words(("default", "internal-child", "response")), Keyword),
            (r"[a-z_][a-z0-9_\-]*", Name.Decorator),
            include("value"),
            (r"=", Punctuation),
            (r"\]", Punctuation, "#pop"),
        ],
        # "template TypeName : ParentType { ... }"; the '{' replaces this
        # state with brace-block on the stack.
        "template": [
            include("whitespace"),
            include("type"),
            (r":", Punctuation),
            (r"\{", Punctuation, ("#pop", "brace-block")),
        ],
    }
mgm/lib/python3.10/site-packages/pygments/lexers/devicetree.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers.devicetree
3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Lexers for Devicetree language.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.lexer import RegexLexer, bygroups, include, default, words
12
+ from pygments.token import Comment, Keyword, Name, Number, Operator, \
13
+ Punctuation, String, Text, Whitespace
14
+
15
+ __all__ = ['DevicetreeLexer']
16
+
17
+
18
class DevicetreeLexer(RegexLexer):
    """
    Lexer for Devicetree files.

    Handles C-style preprocessor lines (devicetree sources are commonly
    run through cpp), devicetree ``/directive/`` lines, nested node
    definitions, and property statements.  Rule order inside each state
    is significant.
    """

    name = 'Devicetree'
    url = 'https://www.devicetree.org/'
    aliases = ['devicetree', 'dts']
    filenames = ['*.dts', '*.dtsi']
    mimetypes = ['text/x-c']
    version_added = '2.7'

    #: optional Whitespace or /*...*/ style comment
    _ws = r'\s*(?:/[*][^*/]*?[*]/\s*)*'

    tokens = {
        # Preprocessor-like lines, both C-style (#include/#define) and
        # devicetree-style (/dts-v1/, /include/ "file", etc.).
        'macro': [
            # Include preprocessor directives (C style):
            (r'(#include)(' + _ws + r')([^\n]+)',
             bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)),
            # Define preprocessor directives (C style):
            (r'(#define)(' + _ws + r')([^\n]+)',
             bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc)),
            # devicetree style with file:
            (r'(/[^*/{]+/)(' + _ws + r')("[^\n{]+")',
             bygroups(Comment.Preproc, Comment.Multiline, Comment.PreprocFile)),
            # devicetree style with property:
            (r'(/[^*/{]+/)(' + _ws + r')([^\n;{]*)([;]?)',
             bygroups(Comment.Preproc, Comment.Multiline, Comment.Preproc, Punctuation)),
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
            # Open until EOF, so no ending delimiter
            (r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
        ],
        # The pieces of a property statement (shared by 'statement' and
        # 'node' below).
        'statements': [
            (r'(L?)(")', bygroups(String.Affix, String), 'string'),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            # A label definition ("name:"); pops back to the parent state.
            (r'([^\s{}/*]*)(\s*)(:)', bygroups(Name.Label, Text, Punctuation), '#pop'),
            # Standard property names defined by the devicetree spec.
            (words(('compatible', 'model', 'phandle', 'status', '#address-cells',
                    '#size-cells', 'reg', 'virtual-reg', 'ranges', 'dma-ranges',
                    'device_type', 'name'), suffix=r'\b'), Keyword.Reserved),
            (r'([~!%^&*+=|?:<>/#-])', Operator),
            (r'[()\[\]{},.]', Punctuation),
            # An identifier followed (possibly via a comma list or comment)
            # by '=' or ';' — i.e. a property being assigned or declared.
            (r'[a-zA-Z_][\w-]*(?=(?:\s*,\s*[a-zA-Z_][\w-]*|(?:' + _ws + r'))*\s*[=;])',
             Name),
            (r'[a-zA-Z_]\w*', Name.Attribute),
        ],
        'root': [
            include('whitespace'),
            include('macro'),

            # Nodes: "name@unit-address { ... }" (or "/" for the root node).
            (r'([^/*@\s&]+|/)(@?)((?:0x)?[0-9a-fA-F,]*)(' + _ws + r')(\{)',
             bygroups(Name.Function, Operator, Number.Integer,
                      Comment.Multiline, Punctuation), 'node'),

            # Anything else is treated as a statement.
            default('statement'),
        ],
        # One ';'-terminated statement outside a node body.
        'statement': [
            include('whitespace'),
            include('statements'),
            (';', Punctuation, '#pop'),
        ],
        # The inside of a node body; nests via '#push' for child nodes.
        'node': [
            include('whitespace'),
            include('macro'),

            (r'([^/*@\s&]+|/)(@?)((?:0x)?[0-9a-fA-F,]*)(' + _ws + r')(\{)',
             bygroups(Name.Function, Operator, Number.Integer,
                      Comment.Multiline, Punctuation), '#push'),

            include('statements'),

            (r'\};', Punctuation, '#pop'),
            (';', Punctuation),
        ],
        # Double-quoted string contents with C-style escapes.
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
    }
mgm/lib/python3.10/site-packages/pygments/lexers/factor.py ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers.factor
3
+ ~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Lexers for the Factor language.
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.lexer import RegexLexer, bygroups, default, words
12
+ from pygments.token import Text, Comment, Keyword, Name, String, Number, \
13
+ Whitespace, Punctuation
14
+
15
+ __all__ = ['FactorLexer']
16
+
17
+
18
class FactorLexer(RegexLexer):
    """
    Lexer for the Factor language.

    Factor words are whitespace-delimited, so nearly every builtin word
    list below is compiled with ``suffix=r'(\\s+)'`` and matched together
    with its trailing whitespace via a two-group ``bygroups``.
    """
    name = 'Factor'
    url = 'http://factorcode.org'
    aliases = ['factor']
    filenames = ['*.factor']
    mimetypes = ['text/x-factor']
    version_added = '1.4'

    # Core stack-shuffling and combinator words (kernel vocabulary).
    builtin_kernel = words((
        '-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
        '2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
        '3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
        '?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
        'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
        'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
        'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
        'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
        'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
        'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
        'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
        'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
        'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
        'wrapper', 'wrapper?', 'xor'), suffix=r'(\s+)')

    # Associative-mapping (assoc) vocabulary words.
    builtin_assocs = words((
        '2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
        'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
        'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
        'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
        'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
        'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
        'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
        'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
        'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
        'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
        'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
        'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'(\s+)')

    # Higher-order combinator words (cleave/spread/case families).
    builtin_combinators = words((
        '2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
        '4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
        'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
        'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
        'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
        'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'(\s+)')

    # Arithmetic and numeric-type words (math vocabulary).
    builtin_math = words((
        '-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
        '>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
        '(each-integer)', '(find-integer)', '*', '+', '?1+',
        'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
        'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
        'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
        'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
        'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
        'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
        'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
        'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
        'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
        'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
        'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
        'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
        'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
        'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
        'zero?'), suffix=r'(\s+)')

    # Sequence-manipulation words (sequences vocabulary).
    builtin_sequences = words((
        '1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
        '2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
        '3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
        '?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
        'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
        'assert-sequence', 'assert-sequence=', 'assert-sequence?',
        'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
        'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
        'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
        'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
        'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
        'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
        'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
        'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
        'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
        'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
        'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
        'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
        'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
        'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
        'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
        'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
        'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
        'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
        'non-negative-integer-expected', 'non-negative-integer-expected?',
        'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
        'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
        'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
        'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
        'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
        'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
        'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
        'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
        'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
        'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
        'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
        'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
        'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
        'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
        'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
        'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
        'when-empty'), suffix=r'(\s+)')

    # Dynamic-variable words (namespaces vocabulary).
    builtin_namespaces = words((
        '+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
        'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
        'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
        'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
        suffix=r'(\s+)')

    # Array-construction words (arrays vocabulary).
    builtin_arrays = words((
        '1array', '2array', '3array', '4array', '<array>', '>array', 'array',
        'array?', 'pair', 'pair?', 'resize-array'), suffix=r'(\s+)')

    # Stream and I/O words (io vocabulary).
    builtin_io = words((
        '(each-stream-block-slice)', '(each-stream-block)',
        '(stream-contents-by-block)', '(stream-contents-by-element)',
        '(stream-contents-by-length-or-block)',
        '(stream-contents-by-length)', '+byte+', '+character+',
        'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
        'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
        'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
        'error-stream', 'flush', 'input-stream', 'input-stream?',
        'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
        'output-stream', 'output-stream?', 'print', 'read', 'read-into',
        'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
        'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
        'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
        'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
        'stream-copy*', 'stream-element-type', 'stream-flush',
        'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
        'stream-read', 'stream-read-into', 'stream-read-partial',
        'stream-read-partial-into', 'stream-read-partial-unsafe',
        'stream-read-unsafe', 'stream-read-until', 'stream-read1',
        'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
        'stream-write', 'stream-write1', 'tell-input', 'tell-output',
        'with-error-stream', 'with-error-stream*', 'with-error>output',
        'with-input-output+error-streams',
        'with-input-output+error-streams*', 'with-input-stream',
        'with-input-stream*', 'with-output-stream', 'with-output-stream*',
        'with-output>error', 'with-output+error-stream',
        'with-output+error-stream*', 'with-streams', 'with-streams*',
        'write', 'write1'), suffix=r'(\s+)')

    # String words (strings vocabulary).
    builtin_strings = words((
        '1string', '<string>', '>string', 'resize-string', 'string',
        'string?'), suffix=r'(\s+)')

    # Vector words (vectors vocabulary).
    builtin_vectors = words((
        '1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
        suffix=r'(\s+)')

    # Error-handling and continuation words (continuations vocabulary).
    builtin_continuations = words((
        '<condition>', '<continuation>', '<restart>', 'attempt-all',
        'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
        'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
        'condition?', 'continuation', 'continuation?', 'continue',
        'continue-restart', 'continue-with', 'current-continuation',
        'error', 'error-continuation', 'error-in-thread', 'error-thread',
        'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
        'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
        'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
        'throw-restarts', 'with-datastack', 'with-return'), suffix=r'(\s+)')

    tokens = {
        'root': [
            # factor allows a file to start with a shebang
            (r'#!.*$', Comment.Preproc),
            default('base'),
        ],
        # The main state; handles defining words, parsing words,
        # literals, and the builtin word lists above.
        'base': [
            (r'\s+', Whitespace),

            # defining words
            (r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace,
                      Name.Function)),
            (r'(C:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace,
                      Name.Class)),
            (r'(GENERIC:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace,
                      Name.Function)),
            # Stack-effect declarations: "( inputs -- outputs )".
            (r'(\()(\s)', bygroups(Name.Function, Whitespace), 'stackeffect'),
            (r'(;)(\s)', bygroups(Keyword, Whitespace)),

            # imports and namespaces
            (r'(USING:)(\s+)',
             bygroups(Keyword.Namespace, Whitespace), 'vocabs'),
            (r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            (r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
                      Whitespace, Name.Namespace)),
            (r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
                      Whitespace), 'words'),
            (r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+)(=>)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
                      Name.Namespace, Whitespace, Punctuation, Whitespace,
                      Name.Function)),
            (r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
                      Name.Function)),
            (r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Function)),

            # tuples and classes
            (r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Punctuation,
                      Whitespace, Name.Class), 'slots'),
            (r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class), 'slots'),
            (r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class)),
            (r'(PREDICATE:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace,
                      Punctuation, Whitespace, Name.Class)),
            (r'(C:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function, Whitespace, Name.Class)),
            (r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Class, Whitespace, Name.Class)),
            (r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Function)),
            (r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
            (r'SINGLETONS:', Keyword, 'classes'),

            # other syntax
            (r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
             bygroups(Keyword, Whitespace, Name.Function)),
            (r'(SYMBOLS:)(\s+)', bygroups(Keyword, Whitespace), 'words'),
            (r'(SYNTAX:)(\s+)', bygroups(Keyword, Whitespace)),
            (r'(ALIEN:)(\s+)', bygroups(Keyword, Whitespace)),
            (r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
            # FFI function declarations: "FUNCTION: ret name ( args )".
            (r'(FUNCTION:)(\s+)'
             r'(\S+)(\s+)(\S+)(\s+)'
             r'(\()(\s+)([^)]+)(\))(\s)',
             bygroups(Keyword.Namespace, Whitespace,
                      Text, Whitespace, Name.Function, Whitespace,
                      Punctuation, Whitespace, Text, Punctuation, Whitespace)),
            (r'(FUNCTION-ALIAS:)(\s+)'
             r'(\S+)(\s+)(\S+)(\s+)'
             r'(\S+)(\s+)'
             r'(\()(\s+)([^)]+)(\))(\s)',
             bygroups(Keyword.Namespace, Whitespace,
                      Text, Whitespace, Name.Function, Whitespace,
                      Name.Function, Whitespace,
                      Punctuation, Whitespace, Text, Punctuation, Whitespace)),

            # vocab.private
            (r'(<PRIVATE|PRIVATE>)(\s)', bygroups(Keyword.Namespace, Whitespace)),

            # strings
            (r'"""\s(?:.|\n)*?\s"""', String),
            (r'"(?:\\\\|\\"|[^"])*"', String),
            (r'(\S+")(\s+)((?:\\\\|\\"|[^"])*")',
             bygroups(String, Whitespace, String)),
            (r'(CHAR:)(\s+)(\\[\\abfnrstv]|[^\\]\S*)(\s)',
             bygroups(String.Char, Whitespace, String.Char, Whitespace)),

            # comments
            (r'!\s+.*$', Comment),
            (r'#!\s+.*$', Comment),
            (r'/\*\s+(?:.|\n)*?\s\*/', Comment),

            # boolean constants
            (r'[tf]\b', Name.Constant),

            # symbols and literals
            (r'[\\$]\s+\S+', Name.Constant),
            (r'M\\\s+\S+\s+\S+', Name.Constant),

            # numbers
            (r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
            (r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
            (r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
            (r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
            (r'0b[01]+\s', Number.Bin),
            (r'0o[0-7]+\s', Number.Oct),
            (r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
            (r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),

            # keywords
            (r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
             Keyword),

            # builtins
            (builtin_kernel, bygroups(Name.Builtin, Whitespace)),
            (builtin_assocs, bygroups(Name.Builtin, Whitespace)),
            (builtin_combinators, bygroups(Name.Builtin, Whitespace)),
            (builtin_math, bygroups(Name.Builtin, Whitespace)),
            (builtin_sequences, bygroups(Name.Builtin, Whitespace)),
            (builtin_namespaces, bygroups(Name.Builtin, Whitespace)),
            (builtin_arrays, bygroups(Name.Builtin, Whitespace)),
            (builtin_io, bygroups(Name.Builtin, Whitespace)),
            (builtin_strings, bygroups(Name.Builtin, Whitespace)),
            (builtin_vectors, bygroups(Name.Builtin, Whitespace)),
            (builtin_continuations, bygroups(Name.Builtin, Whitespace)),

            # everything else is text
            (r'\S+', Text),
        ],
        # Inside a "( inputs -- outputs )" stack-effect; nests via
        # recursive '(' for quotation effects.
        'stackeffect': [
            (r'\s+', Whitespace),
            (r'(\()(\s+)', bygroups(Name.Function, Whitespace), 'stackeffect'),
            (r'(\))(\s+)', bygroups(Name.Function, Whitespace), '#pop'),
            (r'(--)(\s+)', bygroups(Name.Function, Whitespace)),
            (r'\S+', Name.Variable),
        ],
        # Tuple slot declarations, terminated by ';'.
        'slots': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            # Bracketed slot specifier: "{ name attributes... }".
            (r'(\{)(\s+)(\S+)(\s+)([^}]+)(\s+)(\})(\s+)',
             bygroups(Text, Whitespace, Name.Variable, Whitespace,
                      Text, Whitespace, Text, Whitespace)),
            (r'\S+', Name.Variable),
        ],
        # Vocabulary list after USING:, terminated by ';'.
        'vocabs': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Namespace),
        ],
        # Class-name list after SINGLETONS:, terminated by ';'.
        'classes': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Class),
        ],
        # Word-name list after FROM:/EXCLUDE:/SYMBOLS:, terminated by ';'.
        'words': [
            (r'\s+', Whitespace),
            (r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
            (r'\S+', Name.Function),
        ],
    }
mgm/lib/python3.10/site-packages/pygments/lexers/futhark.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pygments.lexers.futhark
3
+ ~~~~~~~~~~~~~~~~~~~~~~~
4
+
5
+ Lexer for the Futhark language
6
+
7
+ :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8
+ :license: BSD, see LICENSE for details.
9
+ """
10
+
11
+ from pygments.lexer import RegexLexer, bygroups
12
+ from pygments.token import Comment, Operator, Keyword, Name, String, \
13
+ Number, Punctuation, Whitespace
14
+ from pygments import unistring as uni
15
+
16
+ __all__ = ['FutharkLexer']
17
+
18
+
19
+ class FutharkLexer(RegexLexer):
20
+ """
21
+ A Futhark lexer
22
+ """
23
+ name = 'Futhark'
24
+ url = 'https://futhark-lang.org/'
25
+ aliases = ['futhark']
26
+ filenames = ['*.fut']
27
+ mimetypes = ['text/x-futhark']
28
+ version_added = '2.8'
29
+
30
+ num_types = ('i8', 'i16', 'i32', 'i64', 'u8', 'u16', 'u32', 'u64', 'f32', 'f64')
31
+
32
+ other_types = ('bool', )
33
+
34
+ reserved = ('if', 'then', 'else', 'def', 'let', 'loop', 'in', 'with',
35
+ 'type', 'type~', 'type^',
36
+ 'val', 'entry', 'for', 'while', 'do', 'case', 'match',
37
+ 'include', 'import', 'module', 'open', 'local', 'assert', '_')
38
+
39
+ ascii = ('NUL', 'SOH', '[SE]TX', 'EOT', 'ENQ', 'ACK',
40
+ 'BEL', 'BS', 'HT', 'LF', 'VT', 'FF', 'CR', 'S[OI]', 'DLE',
41
+ 'DC[1-4]', 'NAK', 'SYN', 'ETB', 'CAN',
42
+ 'EM', 'SUB', 'ESC', '[FGRU]S', 'SP', 'DEL')
43
+
44
+ num_postfix = r'({})?'.format('|'.join(num_types))
45
+
46
+ identifier_re = '[a-zA-Z_][a-zA-Z_0-9\']*'
47
+
48
+ # opstart_re = '+\-\*/%=\!><\|&\^'
49
+
50
+ tokens = {
51
+ 'root': [
52
+ (r'--(.*?)$', Comment.Single),
53
+ (r'\s+', Whitespace),
54
+ (r'\(\)', Punctuation),
55
+ (r'\b({})(?!\')\b'.format('|'.join(reserved)), Keyword.Reserved),
56
+ (r'\b({})(?!\')\b'.format('|'.join(num_types + other_types)), Keyword.Type),
57
+
58
+ # Identifiers
59
+ (r'#\[([a-zA-Z_\(\) ]*)\]', Comment.Preproc),
60
+ (rf'[#!]?({identifier_re}\.)*{identifier_re}', Name),
61
+
62
+ (r'\\', Operator),
63
+ (r'[-+/%=!><|&*^][-+/%=!><|&*^.]*', Operator),
64
+ (r'[][(),:;`{}?.\'~^]', Punctuation),
65
+
66
+ # Numbers
67
+ (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*_*[pP][+-]?\d(_*\d)*' + num_postfix,
68
+ Number.Float),
69
+ (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*\.[\da-fA-F](_*[\da-fA-F])*'
70
+ r'(_*[pP][+-]?\d(_*\d)*)?' + num_postfix, Number.Float),
71
+ (r'\d(_*\d)*_*[eE][+-]?\d(_*\d)*' + num_postfix, Number.Float),
72
+ (r'\d(_*\d)*\.\d(_*\d)*(_*[eE][+-]?\d(_*\d)*)?' + num_postfix, Number.Float),
73
+ (r'0[bB]_*[01](_*[01])*' + num_postfix, Number.Bin),
74
+ (r'0[xX]_*[\da-fA-F](_*[\da-fA-F])*' + num_postfix, Number.Hex),
75
+ (r'\d(_*\d)*' + num_postfix, Number.Integer),
76
+
77
+ # Character/String Literals
78
+ (r"'", String.Char, 'character'),
79
+ (r'"', String, 'string'),
80
+ # Special
81
+ (r'\[[a-zA-Z_\d]*\]', Keyword.Type),
82
+ (r'\(\)', Name.Builtin),
83
+ ],
84
+ 'character': [
85
+ # Allows multi-chars, incorrectly.
86
+ (r"[^\\']'", String.Char, '#pop'),
87
+ (r"\\", String.Escape, 'escape'),
88
+ ("'", String.Char, '#pop'),
89
+ ],
90
+ 'string': [
91
+ (r'[^\\"]+', String),
92
+ (r"\\", String.Escape, 'escape'),
93
+ ('"', String, '#pop'),
94
+ ],
95
+
96
+ 'escape': [
97
+ (r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
98
+ (r'\^[][' + uni.Lu + r'@^_]', String.Escape, '#pop'),
99
+ ('|'.join(ascii), String.Escape, '#pop'),
100
+ (r'o[0-7]+', String.Escape, '#pop'),
101
+ (r'x[\da-fA-F]+', String.Escape, '#pop'),
102
+ (r'\d+', String.Escape, '#pop'),
103
+ (r'(\s+)(\\)', bygroups(Whitespace, String.Escape), '#pop'),
104
+ ],
105
+ }