Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/parser_core.py +45 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/presets/__init__.py +28 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/presets/__pycache__/commonmark.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/presets/commonmark.py +74 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/presets/zero.py +43 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/py.typed +1 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/renderer.py +336 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__init__.py +27 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__pycache__/blockquote.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__pycache__/hr.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__pycache__/lheading.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__pycache__/paragraph.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/blockquote.py +299 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/heading.py +68 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/html_block.py +90 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/lheading.py +86 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/paragraph.py +65 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/state_block.py +261 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/table.py +236 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/block.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/inline.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/block.py +13 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/smartquotes.py +202 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/state_core.py +25 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__init__.py +31 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/image.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/link.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/text.cpython-310.pyc +0 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/autolink.py +77 -0
- evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/backticks.py +72 -0
evalkit_tf446/lib/python3.10/site-packages/markdown_it/parser_core.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
* class Core
|
| 3 |
+
*
|
| 4 |
+
* Top-level rules executor. Glues block/inline parsers and does intermediate
|
| 5 |
+
* transformations.
|
| 6 |
+
"""
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from typing import Callable
|
| 10 |
+
|
| 11 |
+
from .ruler import Ruler
|
| 12 |
+
from .rules_core import (
|
| 13 |
+
block,
|
| 14 |
+
inline,
|
| 15 |
+
linkify,
|
| 16 |
+
normalize,
|
| 17 |
+
replace,
|
| 18 |
+
smartquotes,
|
| 19 |
+
text_join,
|
| 20 |
+
)
|
| 21 |
+
from .rules_core.state_core import StateCore
|
| 22 |
+
|
| 23 |
+
RuleFuncCoreType = Callable[[StateCore], None]
|
| 24 |
+
|
| 25 |
+
_rules: list[tuple[str, RuleFuncCoreType]] = [
|
| 26 |
+
("normalize", normalize),
|
| 27 |
+
("block", block),
|
| 28 |
+
("inline", inline),
|
| 29 |
+
("linkify", linkify),
|
| 30 |
+
("replacements", replace),
|
| 31 |
+
("smartquotes", smartquotes),
|
| 32 |
+
("text_join", text_join),
|
| 33 |
+
]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class ParserCore:
|
| 37 |
+
def __init__(self) -> None:
|
| 38 |
+
self.ruler = Ruler[RuleFuncCoreType]()
|
| 39 |
+
for name, rule in _rules:
|
| 40 |
+
self.ruler.push(name, rule)
|
| 41 |
+
|
| 42 |
+
def process(self, state: StateCore) -> None:
|
| 43 |
+
"""Executes core chain rules."""
|
| 44 |
+
for rule in self.ruler.getRules(""):
|
| 45 |
+
rule(state)
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/presets/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = ("commonmark", "default", "zero", "js_default", "gfm_like")
|
| 2 |
+
|
| 3 |
+
from . import commonmark, default, zero
|
| 4 |
+
from ..utils import PresetType
|
| 5 |
+
|
| 6 |
+
js_default = default
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class gfm_like: # noqa: N801
|
| 10 |
+
"""GitHub Flavoured Markdown (GFM) like.
|
| 11 |
+
|
| 12 |
+
This adds the linkify, table and strikethrough components to CommmonMark.
|
| 13 |
+
|
| 14 |
+
Note, it lacks task-list items and raw HTML filtering,
|
| 15 |
+
to meet the the full GFM specification
|
| 16 |
+
(see https://github.github.com/gfm/#autolinks-extension-).
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
@staticmethod
|
| 20 |
+
def make() -> PresetType:
|
| 21 |
+
config = commonmark.make()
|
| 22 |
+
config["components"]["core"]["rules"].append("linkify")
|
| 23 |
+
config["components"]["block"]["rules"].append("table")
|
| 24 |
+
config["components"]["inline"]["rules"].extend(["strikethrough", "linkify"])
|
| 25 |
+
config["components"]["inline"]["rules2"].append("strikethrough")
|
| 26 |
+
config["options"]["linkify"] = True
|
| 27 |
+
config["options"]["html"] = True
|
| 28 |
+
return config
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/presets/__pycache__/commonmark.cpython-310.pyc
ADDED
|
Binary file (1.06 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/presets/commonmark.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Commonmark default options.
|
| 2 |
+
|
| 3 |
+
This differs to presets.default,
|
| 4 |
+
primarily in that it allows HTML and does not enable components:
|
| 5 |
+
|
| 6 |
+
- block: table
|
| 7 |
+
- inline: strikethrough
|
| 8 |
+
"""
|
| 9 |
+
from ..utils import PresetType
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def make() -> PresetType:
|
| 13 |
+
return {
|
| 14 |
+
"options": {
|
| 15 |
+
"maxNesting": 20, # Internal protection, recursion limit
|
| 16 |
+
"html": True, # Enable HTML tags in source,
|
| 17 |
+
# this is just a shorthand for .enable(["html_inline", "html_block"])
|
| 18 |
+
# used by the linkify rule:
|
| 19 |
+
"linkify": False, # autoconvert URL-like texts to links
|
| 20 |
+
# used by the replacements and smartquotes rules
|
| 21 |
+
# Enable some language-neutral replacements + quotes beautification
|
| 22 |
+
"typographer": False,
|
| 23 |
+
# used by the smartquotes rule:
|
| 24 |
+
# Double + single quotes replacement pairs, when typographer enabled,
|
| 25 |
+
# and smartquotes on. Could be either a String or an Array.
|
| 26 |
+
#
|
| 27 |
+
# For example, you can use '«»„“' for Russian, '„“‚‘' for German,
|
| 28 |
+
# and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
|
| 29 |
+
"quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */
|
| 30 |
+
# Renderer specific; these options are used directly in the HTML renderer
|
| 31 |
+
"xhtmlOut": True, # Use '/' to close single tags (<br />)
|
| 32 |
+
"breaks": False, # Convert '\n' in paragraphs into <br>
|
| 33 |
+
"langPrefix": "language-", # CSS language prefix for fenced blocks
|
| 34 |
+
# Highlighter function. Should return escaped HTML,
|
| 35 |
+
# or '' if the source string is not changed and should be escaped externally.
|
| 36 |
+
# If result starts with <pre... internal wrapper is skipped.
|
| 37 |
+
#
|
| 38 |
+
# function (/*str, lang, attrs*/) { return ''; }
|
| 39 |
+
#
|
| 40 |
+
"highlight": None,
|
| 41 |
+
},
|
| 42 |
+
"components": {
|
| 43 |
+
"core": {"rules": ["normalize", "block", "inline", "text_join"]},
|
| 44 |
+
"block": {
|
| 45 |
+
"rules": [
|
| 46 |
+
"blockquote",
|
| 47 |
+
"code",
|
| 48 |
+
"fence",
|
| 49 |
+
"heading",
|
| 50 |
+
"hr",
|
| 51 |
+
"html_block",
|
| 52 |
+
"lheading",
|
| 53 |
+
"list",
|
| 54 |
+
"reference",
|
| 55 |
+
"paragraph",
|
| 56 |
+
]
|
| 57 |
+
},
|
| 58 |
+
"inline": {
|
| 59 |
+
"rules": [
|
| 60 |
+
"autolink",
|
| 61 |
+
"backticks",
|
| 62 |
+
"emphasis",
|
| 63 |
+
"entity",
|
| 64 |
+
"escape",
|
| 65 |
+
"html_inline",
|
| 66 |
+
"image",
|
| 67 |
+
"link",
|
| 68 |
+
"newline",
|
| 69 |
+
"text",
|
| 70 |
+
],
|
| 71 |
+
"rules2": ["balance_pairs", "emphasis", "fragments_join"],
|
| 72 |
+
},
|
| 73 |
+
},
|
| 74 |
+
}
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/presets/zero.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
"Zero" preset, with nothing enabled. Useful for manual configuring of simple
|
| 3 |
+
modes. For example, to parse bold/italic only.
|
| 4 |
+
"""
|
| 5 |
+
from ..utils import PresetType
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def make() -> PresetType:
|
| 9 |
+
return {
|
| 10 |
+
"options": {
|
| 11 |
+
"maxNesting": 20, # Internal protection, recursion limit
|
| 12 |
+
"html": False, # Enable HTML tags in source
|
| 13 |
+
# this is just a shorthand for .disable(["html_inline", "html_block"])
|
| 14 |
+
# used by the linkify rule:
|
| 15 |
+
"linkify": False, # autoconvert URL-like texts to links
|
| 16 |
+
# used by the replacements and smartquotes rules:
|
| 17 |
+
# Enable some language-neutral replacements + quotes beautification
|
| 18 |
+
"typographer": False,
|
| 19 |
+
# used by the smartquotes rule:
|
| 20 |
+
# Double + single quotes replacement pairs, when typographer enabled,
|
| 21 |
+
# and smartquotes on. Could be either a String or an Array.
|
| 22 |
+
# For example, you can use '«»„“' for Russian, '„“‚‘' for German,
|
| 23 |
+
# and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
|
| 24 |
+
"quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */
|
| 25 |
+
# Renderer specific; these options are used directly in the HTML renderer
|
| 26 |
+
"xhtmlOut": False, # Use '/' to close single tags (<br />)
|
| 27 |
+
"breaks": False, # Convert '\n' in paragraphs into <br>
|
| 28 |
+
"langPrefix": "language-", # CSS language prefix for fenced blocks
|
| 29 |
+
# Highlighter function. Should return escaped HTML,
|
| 30 |
+
# or '' if the source string is not changed and should be escaped externally.
|
| 31 |
+
# If result starts with <pre... internal wrapper is skipped.
|
| 32 |
+
# function (/*str, lang, attrs*/) { return ''; }
|
| 33 |
+
"highlight": None,
|
| 34 |
+
},
|
| 35 |
+
"components": {
|
| 36 |
+
"core": {"rules": ["normalize", "block", "inline", "text_join"]},
|
| 37 |
+
"block": {"rules": ["paragraph"]},
|
| 38 |
+
"inline": {
|
| 39 |
+
"rules": ["text"],
|
| 40 |
+
"rules2": ["balance_pairs", "fragments_join"],
|
| 41 |
+
},
|
| 42 |
+
},
|
| 43 |
+
}
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/py.typed
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Marker file for PEP 561
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/renderer.py
ADDED
|
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
class Renderer
|
| 3 |
+
|
| 4 |
+
Generates HTML from parsed token stream. Each instance has independent
|
| 5 |
+
copy of rules. Those can be rewritten with ease. Also, you can add new
|
| 6 |
+
rules if you create plugin and adds new token types.
|
| 7 |
+
"""
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from collections.abc import Sequence
|
| 11 |
+
import inspect
|
| 12 |
+
from typing import Any, ClassVar, Protocol
|
| 13 |
+
|
| 14 |
+
from .common.utils import escapeHtml, unescapeAll
|
| 15 |
+
from .token import Token
|
| 16 |
+
from .utils import EnvType, OptionsDict
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class RendererProtocol(Protocol):
|
| 20 |
+
__output__: ClassVar[str]
|
| 21 |
+
|
| 22 |
+
def render(
|
| 23 |
+
self, tokens: Sequence[Token], options: OptionsDict, env: EnvType
|
| 24 |
+
) -> Any:
|
| 25 |
+
...
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class RendererHTML(RendererProtocol):
|
| 29 |
+
"""Contains render rules for tokens. Can be updated and extended.
|
| 30 |
+
|
| 31 |
+
Example:
|
| 32 |
+
|
| 33 |
+
Each rule is called as independent static function with fixed signature:
|
| 34 |
+
|
| 35 |
+
::
|
| 36 |
+
|
| 37 |
+
class Renderer:
|
| 38 |
+
def token_type_name(self, tokens, idx, options, env) {
|
| 39 |
+
# ...
|
| 40 |
+
return renderedHTML
|
| 41 |
+
|
| 42 |
+
::
|
| 43 |
+
|
| 44 |
+
class CustomRenderer(RendererHTML):
|
| 45 |
+
def strong_open(self, tokens, idx, options, env):
|
| 46 |
+
return '<b>'
|
| 47 |
+
def strong_close(self, tokens, idx, options, env):
|
| 48 |
+
return '</b>'
|
| 49 |
+
|
| 50 |
+
md = MarkdownIt(renderer_cls=CustomRenderer)
|
| 51 |
+
|
| 52 |
+
result = md.render(...)
|
| 53 |
+
|
| 54 |
+
See https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js
|
| 55 |
+
for more details and examples.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
__output__ = "html"
|
| 59 |
+
|
| 60 |
+
def __init__(self, parser: Any = None):
|
| 61 |
+
self.rules = {
|
| 62 |
+
k: v
|
| 63 |
+
for k, v in inspect.getmembers(self, predicate=inspect.ismethod)
|
| 64 |
+
if not (k.startswith("render") or k.startswith("_"))
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
def render(
|
| 68 |
+
self, tokens: Sequence[Token], options: OptionsDict, env: EnvType
|
| 69 |
+
) -> str:
|
| 70 |
+
"""Takes token stream and generates HTML.
|
| 71 |
+
|
| 72 |
+
:param tokens: list on block tokens to render
|
| 73 |
+
:param options: params of parser instance
|
| 74 |
+
:param env: additional data from parsed input
|
| 75 |
+
|
| 76 |
+
"""
|
| 77 |
+
result = ""
|
| 78 |
+
|
| 79 |
+
for i, token in enumerate(tokens):
|
| 80 |
+
if token.type == "inline":
|
| 81 |
+
if token.children:
|
| 82 |
+
result += self.renderInline(token.children, options, env)
|
| 83 |
+
elif token.type in self.rules:
|
| 84 |
+
result += self.rules[token.type](tokens, i, options, env)
|
| 85 |
+
else:
|
| 86 |
+
result += self.renderToken(tokens, i, options, env)
|
| 87 |
+
|
| 88 |
+
return result
|
| 89 |
+
|
| 90 |
+
def renderInline(
|
| 91 |
+
self, tokens: Sequence[Token], options: OptionsDict, env: EnvType
|
| 92 |
+
) -> str:
|
| 93 |
+
"""The same as ``render``, but for single token of `inline` type.
|
| 94 |
+
|
| 95 |
+
:param tokens: list on block tokens to render
|
| 96 |
+
:param options: params of parser instance
|
| 97 |
+
:param env: additional data from parsed input (references, for example)
|
| 98 |
+
"""
|
| 99 |
+
result = ""
|
| 100 |
+
|
| 101 |
+
for i, token in enumerate(tokens):
|
| 102 |
+
if token.type in self.rules:
|
| 103 |
+
result += self.rules[token.type](tokens, i, options, env)
|
| 104 |
+
else:
|
| 105 |
+
result += self.renderToken(tokens, i, options, env)
|
| 106 |
+
|
| 107 |
+
return result
|
| 108 |
+
|
| 109 |
+
def renderToken(
|
| 110 |
+
self,
|
| 111 |
+
tokens: Sequence[Token],
|
| 112 |
+
idx: int,
|
| 113 |
+
options: OptionsDict,
|
| 114 |
+
env: EnvType,
|
| 115 |
+
) -> str:
|
| 116 |
+
"""Default token renderer.
|
| 117 |
+
|
| 118 |
+
Can be overridden by custom function
|
| 119 |
+
|
| 120 |
+
:param idx: token index to render
|
| 121 |
+
:param options: params of parser instance
|
| 122 |
+
"""
|
| 123 |
+
result = ""
|
| 124 |
+
needLf = False
|
| 125 |
+
token = tokens[idx]
|
| 126 |
+
|
| 127 |
+
# Tight list paragraphs
|
| 128 |
+
if token.hidden:
|
| 129 |
+
return ""
|
| 130 |
+
|
| 131 |
+
# Insert a newline between hidden paragraph and subsequent opening
|
| 132 |
+
# block-level tag.
|
| 133 |
+
#
|
| 134 |
+
# For example, here we should insert a newline before blockquote:
|
| 135 |
+
# - a
|
| 136 |
+
# >
|
| 137 |
+
#
|
| 138 |
+
if token.block and token.nesting != -1 and idx and tokens[idx - 1].hidden:
|
| 139 |
+
result += "\n"
|
| 140 |
+
|
| 141 |
+
# Add token name, e.g. `<img`
|
| 142 |
+
result += ("</" if token.nesting == -1 else "<") + token.tag
|
| 143 |
+
|
| 144 |
+
# Encode attributes, e.g. `<img src="foo"`
|
| 145 |
+
result += self.renderAttrs(token)
|
| 146 |
+
|
| 147 |
+
# Add a slash for self-closing tags, e.g. `<img src="foo" /`
|
| 148 |
+
if token.nesting == 0 and options["xhtmlOut"]:
|
| 149 |
+
result += " /"
|
| 150 |
+
|
| 151 |
+
# Check if we need to add a newline after this tag
|
| 152 |
+
if token.block:
|
| 153 |
+
needLf = True
|
| 154 |
+
|
| 155 |
+
if token.nesting == 1 and (idx + 1 < len(tokens)):
|
| 156 |
+
nextToken = tokens[idx + 1]
|
| 157 |
+
|
| 158 |
+
if nextToken.type == "inline" or nextToken.hidden: # noqa: SIM114
|
| 159 |
+
# Block-level tag containing an inline tag.
|
| 160 |
+
#
|
| 161 |
+
needLf = False
|
| 162 |
+
|
| 163 |
+
elif nextToken.nesting == -1 and nextToken.tag == token.tag:
|
| 164 |
+
# Opening tag + closing tag of the same type. E.g. `<li></li>`.
|
| 165 |
+
#
|
| 166 |
+
needLf = False
|
| 167 |
+
|
| 168 |
+
result += ">\n" if needLf else ">"
|
| 169 |
+
|
| 170 |
+
return result
|
| 171 |
+
|
| 172 |
+
@staticmethod
|
| 173 |
+
def renderAttrs(token: Token) -> str:
|
| 174 |
+
"""Render token attributes to string."""
|
| 175 |
+
result = ""
|
| 176 |
+
|
| 177 |
+
for key, value in token.attrItems():
|
| 178 |
+
result += " " + escapeHtml(key) + '="' + escapeHtml(str(value)) + '"'
|
| 179 |
+
|
| 180 |
+
return result
|
| 181 |
+
|
| 182 |
+
def renderInlineAsText(
|
| 183 |
+
self,
|
| 184 |
+
tokens: Sequence[Token] | None,
|
| 185 |
+
options: OptionsDict,
|
| 186 |
+
env: EnvType,
|
| 187 |
+
) -> str:
|
| 188 |
+
"""Special kludge for image `alt` attributes to conform CommonMark spec.
|
| 189 |
+
|
| 190 |
+
Don't try to use it! Spec requires to show `alt` content with stripped markup,
|
| 191 |
+
instead of simple escaping.
|
| 192 |
+
|
| 193 |
+
:param tokens: list on block tokens to render
|
| 194 |
+
:param options: params of parser instance
|
| 195 |
+
:param env: additional data from parsed input
|
| 196 |
+
"""
|
| 197 |
+
result = ""
|
| 198 |
+
|
| 199 |
+
for token in tokens or []:
|
| 200 |
+
if token.type == "text":
|
| 201 |
+
result += token.content
|
| 202 |
+
elif token.type == "image":
|
| 203 |
+
if token.children:
|
| 204 |
+
result += self.renderInlineAsText(token.children, options, env)
|
| 205 |
+
elif token.type == "softbreak":
|
| 206 |
+
result += "\n"
|
| 207 |
+
|
| 208 |
+
return result
|
| 209 |
+
|
| 210 |
+
###################################################
|
| 211 |
+
|
| 212 |
+
def code_inline(
|
| 213 |
+
self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
|
| 214 |
+
) -> str:
|
| 215 |
+
token = tokens[idx]
|
| 216 |
+
return (
|
| 217 |
+
"<code"
|
| 218 |
+
+ self.renderAttrs(token)
|
| 219 |
+
+ ">"
|
| 220 |
+
+ escapeHtml(tokens[idx].content)
|
| 221 |
+
+ "</code>"
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
def code_block(
|
| 225 |
+
self,
|
| 226 |
+
tokens: Sequence[Token],
|
| 227 |
+
idx: int,
|
| 228 |
+
options: OptionsDict,
|
| 229 |
+
env: EnvType,
|
| 230 |
+
) -> str:
|
| 231 |
+
token = tokens[idx]
|
| 232 |
+
|
| 233 |
+
return (
|
| 234 |
+
"<pre"
|
| 235 |
+
+ self.renderAttrs(token)
|
| 236 |
+
+ "><code>"
|
| 237 |
+
+ escapeHtml(tokens[idx].content)
|
| 238 |
+
+ "</code></pre>\n"
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
def fence(
|
| 242 |
+
self,
|
| 243 |
+
tokens: Sequence[Token],
|
| 244 |
+
idx: int,
|
| 245 |
+
options: OptionsDict,
|
| 246 |
+
env: EnvType,
|
| 247 |
+
) -> str:
|
| 248 |
+
token = tokens[idx]
|
| 249 |
+
info = unescapeAll(token.info).strip() if token.info else ""
|
| 250 |
+
langName = ""
|
| 251 |
+
langAttrs = ""
|
| 252 |
+
|
| 253 |
+
if info:
|
| 254 |
+
arr = info.split(maxsplit=1)
|
| 255 |
+
langName = arr[0]
|
| 256 |
+
if len(arr) == 2:
|
| 257 |
+
langAttrs = arr[1]
|
| 258 |
+
|
| 259 |
+
if options.highlight:
|
| 260 |
+
highlighted = options.highlight(
|
| 261 |
+
token.content, langName, langAttrs
|
| 262 |
+
) or escapeHtml(token.content)
|
| 263 |
+
else:
|
| 264 |
+
highlighted = escapeHtml(token.content)
|
| 265 |
+
|
| 266 |
+
if highlighted.startswith("<pre"):
|
| 267 |
+
return highlighted + "\n"
|
| 268 |
+
|
| 269 |
+
# If language exists, inject class gently, without modifying original token.
|
| 270 |
+
# May be, one day we will add .deepClone() for token and simplify this part, but
|
| 271 |
+
# now we prefer to keep things local.
|
| 272 |
+
if info:
|
| 273 |
+
# Fake token just to render attributes
|
| 274 |
+
tmpToken = Token(type="", tag="", nesting=0, attrs=token.attrs.copy())
|
| 275 |
+
tmpToken.attrJoin("class", options.langPrefix + langName)
|
| 276 |
+
|
| 277 |
+
return (
|
| 278 |
+
"<pre><code"
|
| 279 |
+
+ self.renderAttrs(tmpToken)
|
| 280 |
+
+ ">"
|
| 281 |
+
+ highlighted
|
| 282 |
+
+ "</code></pre>\n"
|
| 283 |
+
)
|
| 284 |
+
|
| 285 |
+
return (
|
| 286 |
+
"<pre><code"
|
| 287 |
+
+ self.renderAttrs(token)
|
| 288 |
+
+ ">"
|
| 289 |
+
+ highlighted
|
| 290 |
+
+ "</code></pre>\n"
|
| 291 |
+
)
|
| 292 |
+
|
| 293 |
+
def image(
|
| 294 |
+
self,
|
| 295 |
+
tokens: Sequence[Token],
|
| 296 |
+
idx: int,
|
| 297 |
+
options: OptionsDict,
|
| 298 |
+
env: EnvType,
|
| 299 |
+
) -> str:
|
| 300 |
+
token = tokens[idx]
|
| 301 |
+
|
| 302 |
+
# "alt" attr MUST be set, even if empty. Because it's mandatory and
|
| 303 |
+
# should be placed on proper position for tests.
|
| 304 |
+
if token.children:
|
| 305 |
+
token.attrSet("alt", self.renderInlineAsText(token.children, options, env))
|
| 306 |
+
else:
|
| 307 |
+
token.attrSet("alt", "")
|
| 308 |
+
|
| 309 |
+
return self.renderToken(tokens, idx, options, env)
|
| 310 |
+
|
| 311 |
+
def hardbreak(
|
| 312 |
+
self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
|
| 313 |
+
) -> str:
|
| 314 |
+
return "<br />\n" if options.xhtmlOut else "<br>\n"
|
| 315 |
+
|
| 316 |
+
def softbreak(
|
| 317 |
+
self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
|
| 318 |
+
) -> str:
|
| 319 |
+
return (
|
| 320 |
+
("<br />\n" if options.xhtmlOut else "<br>\n") if options.breaks else "\n"
|
| 321 |
+
)
|
| 322 |
+
|
| 323 |
+
def text(
|
| 324 |
+
self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
|
| 325 |
+
) -> str:
|
| 326 |
+
return escapeHtml(tokens[idx].content)
|
| 327 |
+
|
| 328 |
+
def html_block(
|
| 329 |
+
self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
|
| 330 |
+
) -> str:
|
| 331 |
+
return tokens[idx].content
|
| 332 |
+
|
| 333 |
+
def html_inline(
|
| 334 |
+
self, tokens: Sequence[Token], idx: int, options: OptionsDict, env: EnvType
|
| 335 |
+
) -> str:
|
| 336 |
+
return tokens[idx].content
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__init__.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = (
|
| 2 |
+
"StateBlock",
|
| 3 |
+
"paragraph",
|
| 4 |
+
"heading",
|
| 5 |
+
"lheading",
|
| 6 |
+
"code",
|
| 7 |
+
"fence",
|
| 8 |
+
"hr",
|
| 9 |
+
"list_block",
|
| 10 |
+
"reference",
|
| 11 |
+
"blockquote",
|
| 12 |
+
"html_block",
|
| 13 |
+
"table",
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
from .blockquote import blockquote
|
| 17 |
+
from .code import code
|
| 18 |
+
from .fence import fence
|
| 19 |
+
from .heading import heading
|
| 20 |
+
from .hr import hr
|
| 21 |
+
from .html_block import html_block
|
| 22 |
+
from .lheading import lheading
|
| 23 |
+
from .list import list_block
|
| 24 |
+
from .paragraph import paragraph
|
| 25 |
+
from .reference import reference
|
| 26 |
+
from .state_block import StateBlock
|
| 27 |
+
from .table import table
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__pycache__/blockquote.cpython-310.pyc
ADDED
|
Binary file (3.15 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__pycache__/hr.cpython-310.pyc
ADDED
|
Binary file (1.13 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__pycache__/lheading.cpython-310.pyc
ADDED
|
Binary file (1.54 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/__pycache__/paragraph.cpython-310.pyc
ADDED
|
Binary file (1.24 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/blockquote.py
ADDED
|
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Block quotes
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import logging
|
| 5 |
+
|
| 6 |
+
from ..common.utils import isStrSpace
|
| 7 |
+
from .state_block import StateBlock
|
| 8 |
+
|
| 9 |
+
LOGGER = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def blockquote(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Block rule for blockquotes (lines prefixed with ``>``).

    Returns True if a blockquote starts at ``startLine``.  In non-silent mode
    the quote's interior is re-tokenized with the ``>`` markers stripped
    (by temporarily rewriting the per-line offset caches), and the original
    caches are restored afterwards.
    """
    LOGGER.debug(
        "entering blockquote: %s, %s, %s, %s", state, startLine, endLine, silent
    )

    oldLineMax = state.lineMax
    pos = state.bMarks[startLine] + state.tShift[startLine]
    max = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    # check the block quote marker
    try:
        if state.src[pos] != ">":
            return False
    except IndexError:
        return False
    pos += 1

    # we know that it's going to be a valid blockquote,
    # so no point trying to find the end of it in silent mode
    if silent:
        return True

    # set offset past spaces and ">"
    initial = offset = state.sCount[startLine] + 1

    try:
        second_char: str | None = state.src[pos]
    except IndexError:
        second_char = None

    # skip one optional space after '>'
    if second_char == " ":
        # ' > test '
        #     ^ -- position start of line here:
        pos += 1
        initial += 1
        offset += 1
        adjustTab = False
        spaceAfterMarker = True
    elif second_char == "\t":
        spaceAfterMarker = True

        if (state.bsCount[startLine] + offset) % 4 == 3:
            # '  >\t  test '
            #       ^ -- position start of line here (tab has width==1)
            pos += 1
            initial += 1
            offset += 1
            adjustTab = False
        else:
            # ' >\t  test '
            #    ^ -- position start of line here + shift bsCount slightly
            #         to make extra space appear
            adjustTab = True

    else:
        spaceAfterMarker = False

    # rewrite this line's begin-offset to point past the marker; the old
    # value is kept so it can be restored after tokenizing the interior
    oldBMarks = [state.bMarks[startLine]]
    state.bMarks[startLine] = pos

    # measure the indent of the quoted content (tabs expanded)
    while pos < max:
        ch = state.src[pos]

        if isStrSpace(ch):
            if ch == "\t":
                offset += (
                    4
                    - (offset + state.bsCount[startLine] + (1 if adjustTab else 0)) % 4
                )
            else:
                offset += 1

        else:
            break

        pos += 1

    oldBSCount = [state.bsCount[startLine]]
    state.bsCount[startLine] = (
        state.sCount[startLine] + 1 + (1 if spaceAfterMarker else 0)
    )

    lastLineEmpty = pos >= max

    oldSCount = [state.sCount[startLine]]
    state.sCount[startLine] = offset - initial

    oldTShift = [state.tShift[startLine]]
    state.tShift[startLine] = pos - state.bMarks[startLine]

    terminatorRules = state.md.block.ruler.getRules("blockquote")

    oldParentType = state.parentType
    state.parentType = "blockquote"

    # Search the end of the block
    #
    # Block ends with either:
    #  1. an empty line outside:
    #     ```
    #     > test
    #
    #     ```
    #  2. an empty line inside:
    #     ```
    #     >
    #     test
    #     ```
    #  3. another tag:
    #     ```
    #     > test
    #      - - -
    #     ```

    # for (nextLine = startLine + 1; nextLine < endLine; nextLine++) {
    nextLine = startLine + 1
    while nextLine < endLine:
        # check if it's outdented, i.e. it's inside list item and indented
        # less than said list item:
        #
        # ```
        # 1. anything
        #    > current blockquote
        # 2. checking this line
        # ```
        isOutdented = state.sCount[nextLine] < state.blkIndent

        pos = state.bMarks[nextLine] + state.tShift[nextLine]
        max = state.eMarks[nextLine]

        if pos >= max:
            # Case 1: line is not inside the blockquote, and this line is empty.
            break

        evaluatesTrue = state.src[pos] == ">" and not isOutdented
        pos += 1
        if evaluatesTrue:
            # This line is inside the blockquote.

            # set offset past spaces and ">"
            initial = offset = state.sCount[nextLine] + 1

            try:
                next_char: str | None = state.src[pos]
            except IndexError:
                next_char = None

            # skip one optional space after '>'
            if next_char == " ":
                # ' > test '
                #     ^ -- position start of line here:
                pos += 1
                initial += 1
                offset += 1
                adjustTab = False
                spaceAfterMarker = True
            elif next_char == "\t":
                spaceAfterMarker = True

                if (state.bsCount[nextLine] + offset) % 4 == 3:
                    # '  >\t  test '
                    #       ^ -- position start of line here (tab has width==1)
                    pos += 1
                    initial += 1
                    offset += 1
                    adjustTab = False
                else:
                    # ' >\t  test '
                    #    ^ -- position start of line here + shift bsCount slightly
                    #         to make extra space appear
                    adjustTab = True

            else:
                spaceAfterMarker = False

            oldBMarks.append(state.bMarks[nextLine])
            state.bMarks[nextLine] = pos

            while pos < max:
                ch = state.src[pos]

                if isStrSpace(ch):
                    if ch == "\t":
                        offset += (
                            4
                            - (
                                offset
                                + state.bsCount[nextLine]
                                + (1 if adjustTab else 0)
                            )
                            % 4
                        )
                    else:
                        offset += 1
                else:
                    break

                pos += 1

            lastLineEmpty = pos >= max

            oldBSCount.append(state.bsCount[nextLine])
            state.bsCount[nextLine] = (
                state.sCount[nextLine] + 1 + (1 if spaceAfterMarker else 0)
            )

            oldSCount.append(state.sCount[nextLine])
            state.sCount[nextLine] = offset - initial

            oldTShift.append(state.tShift[nextLine])
            state.tShift[nextLine] = pos - state.bMarks[nextLine]

            nextLine += 1
            continue

        # Case 2: line is not inside the blockquote, and the last line was empty.
        if lastLineEmpty:
            break

        # Case 3: another tag found.
        terminate = False

        for terminatorRule in terminatorRules:
            if terminatorRule(state, nextLine, endLine, True):
                terminate = True
                break

        if terminate:
            # Quirk to enforce "hard termination mode" for paragraphs;
            # normally if you call `tokenize(state, startLine, nextLine)`,
            # paragraphs will look below nextLine for paragraph continuation,
            # but if blockquote is terminated by another tag, they shouldn't
            state.lineMax = nextLine

            if state.blkIndent != 0:
                # state.blkIndent was non-zero, we now set it to zero,
                # so we need to re-calculate all offsets to appear as
                # if indent wasn't changed
                oldBMarks.append(state.bMarks[nextLine])
                oldBSCount.append(state.bsCount[nextLine])
                oldTShift.append(state.tShift[nextLine])
                oldSCount.append(state.sCount[nextLine])
                state.sCount[nextLine] -= state.blkIndent

            break

        oldBMarks.append(state.bMarks[nextLine])
        oldBSCount.append(state.bsCount[nextLine])
        oldTShift.append(state.tShift[nextLine])
        oldSCount.append(state.sCount[nextLine])

        # A negative indentation means that this is a paragraph continuation
        #
        state.sCount[nextLine] = -1

        nextLine += 1

    oldIndent = state.blkIndent
    state.blkIndent = 0

    token = state.push("blockquote_open", "blockquote", 1)
    token.markup = ">"
    # the end line is filled in after tokenizing, via the `lines` alias
    token.map = lines = [startLine, 0]

    state.md.block.tokenize(state, startLine, nextLine)

    token = state.push("blockquote_close", "blockquote", -1)
    token.markup = ">"

    state.lineMax = oldLineMax
    state.parentType = oldParentType
    lines[1] = state.line

    # Restore original tShift; this might not be necessary since the parser
    # has already been here, but just to make sure we can do that.
    for i, item in enumerate(oldTShift):
        state.bMarks[i + startLine] = oldBMarks[i]
        state.tShift[i + startLine] = item
        state.sCount[i + startLine] = oldSCount[i]
        state.bsCount[i + startLine] = oldBSCount[i]

    state.blkIndent = oldIndent

    return True
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/heading.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Atex heading (#, ##, ...) """
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import logging
|
| 5 |
+
|
| 6 |
+
from ..common.utils import isStrSpace
|
| 7 |
+
from .state_block import StateBlock
|
| 8 |
+
|
| 9 |
+
LOGGER = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def heading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """ATX heading rule: ``#`` .. ``######`` followed by the heading text."""
    LOGGER.debug("entering heading: %s, %s, %s, %s", state, startLine, endLine, silent)

    cursor = state.bMarks[startLine] + state.tShift[startLine]
    line_end = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    def char_at(idx: int) -> str | None:
        """Character at ``idx``, or None past the end of the source."""
        try:
            return state.src[idx]
        except IndexError:
            return None

    char: str | None = state.src[cursor]

    if char != "#" or cursor >= line_end:
        return False

    # measure the run of '#' markers to get the heading depth
    depth = 1
    cursor += 1
    char = char_at(cursor)
    while char == "#" and cursor < line_end and depth <= 6:
        depth += 1
        cursor += 1
        char = char_at(cursor)

    # more than six '#', or marker not followed by whitespace -> not a heading
    if depth > 6 or (cursor < line_end and not isStrSpace(char)):
        return False

    if silent:
        return True

    # trim an optional trailing marker run, e.g. ' ### ' at end of line
    line_end = state.skipSpacesBack(line_end, cursor)
    trail = state.skipCharsStrBack(line_end, "#", cursor)
    if trail > cursor and isStrSpace(state.src[trail - 1]):
        line_end = trail

    state.line = startLine + 1

    markup = "#" * depth
    tag = "h" + str(depth)

    open_token = state.push("heading_open", tag, 1)
    open_token.markup = markup
    open_token.map = [startLine, state.line]

    inline_token = state.push("inline", "", 0)
    inline_token.content = state.src[cursor:line_end].strip()
    inline_token.map = [startLine, state.line]
    inline_token.children = []

    close_token = state.push("heading_close", tag, -1)
    close_token.markup = markup

    return True
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/html_block.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# HTML block
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import logging
|
| 5 |
+
import re
|
| 6 |
+
|
| 7 |
+
from ..common.html_blocks import block_names
|
| 8 |
+
from ..common.html_re import HTML_OPEN_CLOSE_TAG_STR
|
| 9 |
+
from .state_block import StateBlock
|
| 10 |
+
|
| 11 |
+
LOGGER = logging.getLogger(__name__)
|
| 12 |
+
|
| 13 |
+
# An array of opening and corresponding closing sequences for html tags,
# last argument defines whether it can terminate a paragraph or not
# Each entry is (open_pattern, close_pattern, can_interrupt_paragraph).
HTML_SEQUENCES: list[tuple[re.Pattern[str], re.Pattern[str], bool]] = [
    (
        re.compile(r"^<(script|pre|style|textarea)(?=(\s|>|$))", re.IGNORECASE),
        re.compile(r"<\/(script|pre|style|textarea)>", re.IGNORECASE),
        True,
    ),
    # HTML comment
    (re.compile(r"^<!--"), re.compile(r"-->"), True),
    # processing instruction
    (re.compile(r"^<\?"), re.compile(r"\?>"), True),
    # declaration (e.g. <!DOCTYPE ...>)
    (re.compile(r"^<![A-Z]"), re.compile(r">"), True),
    # CDATA section
    (re.compile(r"^<!\[CDATA\["), re.compile(r"\]\]>"), True),
    # known block-level tag names; block ends at a blank line
    (
        re.compile("^</?(" + "|".join(block_names) + ")(?=(\\s|/?>|$))", re.IGNORECASE),
        re.compile(r"^$"),
        True,
    ),
    # any complete open/close tag alone on a line; cannot interrupt a paragraph
    (re.compile(HTML_OPEN_CLOSE_TAG_STR + "\\s*$"), re.compile(r"^$"), False),
]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def html_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """HTML block rule: recognise raw HTML blocks via ``HTML_SEQUENCES``."""
    LOGGER.debug(
        "entering html_block: %s, %s, %s, %s", state, startLine, endLine, silent
    )
    start = state.bMarks[startLine] + state.tShift[startLine]
    end = state.eMarks[startLine]

    if state.is_code_block(startLine):
        return False

    # raw HTML must be enabled in the parser options
    if not state.md.options.get("html", None):
        return False

    if state.src[start] != "<":
        return False

    line_text = state.src[start:end]

    # find the first sequence whose opening pattern matches this line
    matched = next(
        (seq for seq in HTML_SEQUENCES if seq[0].search(line_text)), None
    )
    if matched is None:
        return False

    _open_re, close_re, can_terminate = matched

    if silent:
        # true if this sequence can be a terminator, false otherwise
        return can_terminate

    nextLine = startLine + 1

    # If we are here - we detected HTML block.
    # Let's roll down till block end.
    if not close_re.search(line_text):
        while nextLine < endLine:
            if state.sCount[nextLine] < state.blkIndent:
                break

            start = state.bMarks[nextLine] + state.tShift[nextLine]
            end = state.eMarks[nextLine]
            line_text = state.src[start:end]

            if close_re.search(line_text):
                # include the closing line itself unless it is empty
                if line_text:
                    nextLine += 1
                break
            nextLine += 1

    state.line = nextLine

    token = state.push("html_block", "", 0)
    token.map = [startLine, nextLine]
    token.content = state.getLines(startLine, nextLine, state.blkIndent, True)

    return True
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/lheading.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# lheading (---, ==)
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
from .state_block import StateBlock
|
| 5 |
+
|
| 6 |
+
LOGGER = logging.getLogger(__name__)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def lheading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Setext (underlined) heading rule: ``===`` underline -> h1, ``---`` -> h2."""
    LOGGER.debug("entering lheading: %s, %s, %s, %s", state, startLine, endLine, silent)

    level = None
    nextLine = startLine + 1
    ruler = state.md.block.ruler
    terminatorRules = ruler.getRules("paragraph")

    if state.is_code_block(startLine):
        return False

    oldParentType = state.parentType
    state.parentType = "paragraph"  # use paragraph to match terminatorRules

    # jump line-by-line until empty one or EOF
    while nextLine < endLine and not state.isEmpty(nextLine):
        # this would be a code block normally, but after paragraph
        # it's considered a lazy continuation regardless of what's there
        if state.sCount[nextLine] - state.blkIndent > 3:
            nextLine += 1
            continue

        # Check for underline in setext header
        if state.sCount[nextLine] >= state.blkIndent:
            pos = state.bMarks[nextLine] + state.tShift[nextLine]
            maximum = state.eMarks[nextLine]

            if pos < maximum:
                marker = state.src[pos]

                if marker in ("-", "="):
                    pos = state.skipCharsStr(pos, marker)
                    pos = state.skipSpaces(pos)

                    # underline must reach the end of the line (ignoring spaces)
                    if pos >= maximum:
                        level = 1 if marker == "=" else 2
                        break

        # quirk for blockquotes, this line should already be checked by that rule
        if state.sCount[nextLine] < 0:
            nextLine += 1
            continue

        # Some tags can terminate paragraph without empty line.
        terminate = False
        for terminatorRule in terminatorRules:
            if terminatorRule(state, nextLine, endLine, True):
                terminate = True
                break
        if terminate:
            break

        nextLine += 1

    if not level:
        # Didn't find valid underline
        return False

    content = state.getLines(startLine, nextLine, state.blkIndent, False).strip()

    state.line = nextLine + 1

    token = state.push("heading_open", "h" + str(level), 1)
    token.markup = marker
    token.map = [startLine, state.line]

    token = state.push("inline", "", 0)
    token.content = content
    token.map = [startLine, state.line - 1]
    token.children = []

    token = state.push("heading_close", "h" + str(level), -1)
    token.markup = marker

    state.parentType = oldParentType

    return True
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/paragraph.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Paragraph."""
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
from .state_block import StateBlock
|
| 5 |
+
|
| 6 |
+
LOGGER = logging.getLogger(__name__)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def paragraph(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Paragraph rule: consume consecutive non-empty lines as one paragraph."""
    LOGGER.debug(
        "entering paragraph: %s, %s, %s, %s", state, startLine, endLine, silent
    )

    scan = startLine + 1
    terminatorRules = state.md.block.ruler.getRules("paragraph")
    endLine = state.lineMax

    oldParentType = state.parentType
    state.parentType = "paragraph"

    # advance line-by-line until an empty line, EOF, or a terminating rule
    while scan < endLine and not state.isEmpty(scan):
        # an over-indented line after a paragraph is a lazy continuation,
        # not a code block
        if state.sCount[scan] - state.blkIndent > 3:
            scan += 1
            continue

        # negative indent: blockquote quirk, already handled by that rule
        if state.sCount[scan] < 0:
            scan += 1
            continue

        # some block rules may terminate a paragraph without a blank line
        if any(rule(state, scan, endLine, True) for rule in terminatorRules):
            break

        scan += 1

    content = state.getLines(startLine, scan, state.blkIndent, False).strip()

    state.line = scan

    open_token = state.push("paragraph_open", "p", 1)
    open_token.map = [startLine, state.line]

    inline_token = state.push("inline", "", 0)
    inline_token.content = content
    inline_token.map = [startLine, state.line]
    inline_token.children = []

    state.push("paragraph_close", "p", -1)

    state.parentType = oldParentType

    return True
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/state_block.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING, Literal
|
| 4 |
+
|
| 5 |
+
from ..common.utils import isStrSpace
|
| 6 |
+
from ..ruler import StateBase
|
| 7 |
+
from ..token import Token
|
| 8 |
+
from ..utils import EnvType
|
| 9 |
+
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from markdown_it.main import MarkdownIt
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class StateBlock(StateBase):
|
| 15 |
+
def __init__(
|
| 16 |
+
self, src: str, md: MarkdownIt, env: EnvType, tokens: list[Token]
|
| 17 |
+
) -> None:
|
| 18 |
+
self.src = src
|
| 19 |
+
|
| 20 |
+
# link to parser instance
|
| 21 |
+
self.md = md
|
| 22 |
+
|
| 23 |
+
self.env = env
|
| 24 |
+
|
| 25 |
+
#
|
| 26 |
+
# Internal state variables
|
| 27 |
+
#
|
| 28 |
+
|
| 29 |
+
self.tokens = tokens
|
| 30 |
+
|
| 31 |
+
self.bMarks: list[int] = [] # line begin offsets for fast jumps
|
| 32 |
+
self.eMarks: list[int] = [] # line end offsets for fast jumps
|
| 33 |
+
# offsets of the first non-space characters (tabs not expanded)
|
| 34 |
+
self.tShift: list[int] = []
|
| 35 |
+
self.sCount: list[int] = [] # indents for each line (tabs expanded)
|
| 36 |
+
|
| 37 |
+
# An amount of virtual spaces (tabs expanded) between beginning
|
| 38 |
+
# of each line (bMarks) and real beginning of that line.
|
| 39 |
+
#
|
| 40 |
+
# It exists only as a hack because blockquotes override bMarks
|
| 41 |
+
# losing information in the process.
|
| 42 |
+
#
|
| 43 |
+
# It's used only when expanding tabs, you can think about it as
|
| 44 |
+
# an initial tab length, e.g. bsCount=21 applied to string `\t123`
|
| 45 |
+
# means first tab should be expanded to 4-21%4 === 3 spaces.
|
| 46 |
+
#
|
| 47 |
+
self.bsCount: list[int] = []
|
| 48 |
+
|
| 49 |
+
# block parser variables
|
| 50 |
+
self.blkIndent = 0 # required block content indent (for example, if we are
|
| 51 |
+
# inside a list, it would be positioned after list marker)
|
| 52 |
+
self.line = 0 # line index in src
|
| 53 |
+
self.lineMax = 0 # lines count
|
| 54 |
+
self.tight = False # loose/tight mode for lists
|
| 55 |
+
self.ddIndent = -1 # indent of the current dd block (-1 if there isn't any)
|
| 56 |
+
self.listIndent = -1 # indent of the current list block (-1 if there isn't any)
|
| 57 |
+
|
| 58 |
+
# can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'
|
| 59 |
+
# used in lists to determine if they interrupt a paragraph
|
| 60 |
+
self.parentType = "root"
|
| 61 |
+
|
| 62 |
+
self.level = 0
|
| 63 |
+
|
| 64 |
+
# renderer
|
| 65 |
+
self.result = ""
|
| 66 |
+
|
| 67 |
+
# Create caches
|
| 68 |
+
# Generate markers.
|
| 69 |
+
indent_found = False
|
| 70 |
+
|
| 71 |
+
start = pos = indent = offset = 0
|
| 72 |
+
length = len(self.src)
|
| 73 |
+
|
| 74 |
+
for pos, character in enumerate(self.src):
|
| 75 |
+
if not indent_found:
|
| 76 |
+
if isStrSpace(character):
|
| 77 |
+
indent += 1
|
| 78 |
+
|
| 79 |
+
if character == "\t":
|
| 80 |
+
offset += 4 - offset % 4
|
| 81 |
+
else:
|
| 82 |
+
offset += 1
|
| 83 |
+
continue
|
| 84 |
+
else:
|
| 85 |
+
indent_found = True
|
| 86 |
+
|
| 87 |
+
if character == "\n" or pos == length - 1:
|
| 88 |
+
if character != "\n":
|
| 89 |
+
pos += 1
|
| 90 |
+
self.bMarks.append(start)
|
| 91 |
+
self.eMarks.append(pos)
|
| 92 |
+
self.tShift.append(indent)
|
| 93 |
+
self.sCount.append(offset)
|
| 94 |
+
self.bsCount.append(0)
|
| 95 |
+
|
| 96 |
+
indent_found = False
|
| 97 |
+
indent = 0
|
| 98 |
+
offset = 0
|
| 99 |
+
start = pos + 1
|
| 100 |
+
|
| 101 |
+
# Push fake entry to simplify cache bounds checks
|
| 102 |
+
self.bMarks.append(length)
|
| 103 |
+
self.eMarks.append(length)
|
| 104 |
+
self.tShift.append(0)
|
| 105 |
+
self.sCount.append(0)
|
| 106 |
+
self.bsCount.append(0)
|
| 107 |
+
|
| 108 |
+
self.lineMax = len(self.bMarks) - 1 # don't count last fake line
|
| 109 |
+
|
| 110 |
+
# pre-check if code blocks are enabled, to speed up is_code_block method
|
| 111 |
+
self._code_enabled = "code" in self.md["block"].ruler.get_active_rules()
|
| 112 |
+
|
| 113 |
+
def __repr__(self) -> str:
|
| 114 |
+
return (
|
| 115 |
+
f"{self.__class__.__name__}"
|
| 116 |
+
f"(line={self.line},level={self.level},tokens={len(self.tokens)})"
|
| 117 |
+
)
|
| 118 |
+
|
| 119 |
+
def push(self, ttype: str, tag: str, nesting: Literal[-1, 0, 1]) -> Token:
|
| 120 |
+
"""Push new token to "stream"."""
|
| 121 |
+
token = Token(ttype, tag, nesting)
|
| 122 |
+
token.block = True
|
| 123 |
+
if nesting < 0:
|
| 124 |
+
self.level -= 1 # closing tag
|
| 125 |
+
token.level = self.level
|
| 126 |
+
if nesting > 0:
|
| 127 |
+
self.level += 1 # opening tag
|
| 128 |
+
self.tokens.append(token)
|
| 129 |
+
return token
|
| 130 |
+
|
| 131 |
+
def isEmpty(self, line: int) -> bool:
|
| 132 |
+
"""."""
|
| 133 |
+
return (self.bMarks[line] + self.tShift[line]) >= self.eMarks[line]
|
| 134 |
+
|
| 135 |
+
def skipEmptyLines(self, from_pos: int) -> int:
|
| 136 |
+
"""."""
|
| 137 |
+
while from_pos < self.lineMax:
|
| 138 |
+
try:
|
| 139 |
+
if (self.bMarks[from_pos] + self.tShift[from_pos]) < self.eMarks[
|
| 140 |
+
from_pos
|
| 141 |
+
]:
|
| 142 |
+
break
|
| 143 |
+
except IndexError:
|
| 144 |
+
pass
|
| 145 |
+
from_pos += 1
|
| 146 |
+
return from_pos
|
| 147 |
+
|
| 148 |
+
def skipSpaces(self, pos: int) -> int:
|
| 149 |
+
"""Skip spaces from given position."""
|
| 150 |
+
while True:
|
| 151 |
+
try:
|
| 152 |
+
current = self.src[pos]
|
| 153 |
+
except IndexError:
|
| 154 |
+
break
|
| 155 |
+
if not isStrSpace(current):
|
| 156 |
+
break
|
| 157 |
+
pos += 1
|
| 158 |
+
return pos
|
| 159 |
+
|
| 160 |
+
def skipSpacesBack(self, pos: int, minimum: int) -> int:
|
| 161 |
+
"""Skip spaces from given position in reverse."""
|
| 162 |
+
if pos <= minimum:
|
| 163 |
+
return pos
|
| 164 |
+
while pos > minimum:
|
| 165 |
+
pos -= 1
|
| 166 |
+
if not isStrSpace(self.src[pos]):
|
| 167 |
+
return pos + 1
|
| 168 |
+
return pos
|
| 169 |
+
|
| 170 |
+
def skipChars(self, pos: int, code: int) -> int:
|
| 171 |
+
"""Skip character code from given position."""
|
| 172 |
+
while True:
|
| 173 |
+
try:
|
| 174 |
+
current = self.srcCharCode[pos]
|
| 175 |
+
except IndexError:
|
| 176 |
+
break
|
| 177 |
+
if current != code:
|
| 178 |
+
break
|
| 179 |
+
pos += 1
|
| 180 |
+
return pos
|
| 181 |
+
|
| 182 |
+
def skipCharsStr(self, pos: int, ch: str) -> int:
|
| 183 |
+
"""Skip character string from given position."""
|
| 184 |
+
while True:
|
| 185 |
+
try:
|
| 186 |
+
current = self.src[pos]
|
| 187 |
+
except IndexError:
|
| 188 |
+
break
|
| 189 |
+
if current != ch:
|
| 190 |
+
break
|
| 191 |
+
pos += 1
|
| 192 |
+
return pos
|
| 193 |
+
|
| 194 |
+
def skipCharsBack(self, pos: int, code: int, minimum: int) -> int:
|
| 195 |
+
"""Skip character code reverse from given position - 1."""
|
| 196 |
+
if pos <= minimum:
|
| 197 |
+
return pos
|
| 198 |
+
while pos > minimum:
|
| 199 |
+
pos -= 1
|
| 200 |
+
if code != self.srcCharCode[pos]:
|
| 201 |
+
return pos + 1
|
| 202 |
+
return pos
|
| 203 |
+
|
| 204 |
+
def skipCharsStrBack(self, pos: int, ch: str, minimum: int) -> int:
|
| 205 |
+
"""Skip character string reverse from given position - 1."""
|
| 206 |
+
if pos <= minimum:
|
| 207 |
+
return pos
|
| 208 |
+
while pos > minimum:
|
| 209 |
+
pos -= 1
|
| 210 |
+
if ch != self.src[pos]:
|
| 211 |
+
return pos + 1
|
| 212 |
+
return pos
|
| 213 |
+
|
| 214 |
+
def getLines(self, begin: int, end: int, indent: int, keepLastLF: bool) -> str:
    """Cut lines range from source.

    Joins lines ``begin`` (inclusive) to ``end`` (exclusive) into a single
    string, removing up to ``indent`` columns of leading indentation from
    each line.  When ``keepLastLF`` is True the trailing newline of the
    last line is kept as well.
    """
    line = begin
    if begin >= end:
        return ""

    # one output slot per source line; filled in order below
    queue = [""] * (end - begin)

    i = 1
    while line < end:
        lineIndent = 0
        lineStart = first = self.bMarks[line]
        # include the line's newline except for the last line when
        # keepLastLF is False
        last = (
            self.eMarks[line] + 1
            if line + 1 < end or keepLastLF
            else self.eMarks[line]
        )

        # consume leading whitespace, counting columns (tabs expand to the
        # next multiple of 4, offset by bsCount for blockquote markers)
        while (first < last) and (lineIndent < indent):
            ch = self.src[first]
            if isStrSpace(ch):
                if ch == "\t":
                    lineIndent += 4 - (lineIndent + self.bsCount[line]) % 4
                else:
                    lineIndent += 1
            elif first - lineStart < self.tShift[line]:
                # virtual indentation from an expanded tab marker region
                lineIndent += 1
            else:
                break
            first += 1

        if lineIndent > indent:
            # partially expanding tabs in code blocks, e.g '\t\tfoobar'
            # with indent=2 becomes '  \tfoobar' — pad the overshoot
            # with spaces
            queue[i - 1] = (" " * (lineIndent - indent)) + self.src[first:last]
        else:
            queue[i - 1] = self.src[first:last]

        line += 1
        i += 1

    return "".join(queue)
|
| 256 |
+
|
| 257 |
+
def is_code_block(self, line: int) -> bool:
    """Return True when *line* would be parsed as an indented code block.

    That requires the ``code`` block rule to be enabled and the line to be
    indented at least 4 columns beyond the current block indent.
    """
    if not self._code_enabled:
        return False
    return self.sCount[line] - self.blkIndent >= 4
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_block/table.py
ADDED
|
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# GFM table, https://github.github.com/gfm/#tables-extension-
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import re
|
| 5 |
+
|
| 6 |
+
from ..common.utils import charStrAt, isStrSpace
|
| 7 |
+
from .state_block import StateBlock
|
| 8 |
+
|
| 9 |
+
headerLineRe = re.compile(r"^:?-+:?$")
|
| 10 |
+
enclosingPipesRe = re.compile(r"^\||\|$")
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def getLine(state: StateBlock, line: int) -> str:
    """Return the text of *line* with its leading indentation stripped."""
    start = state.bMarks[line] + state.tShift[line]
    end = state.eMarks[line]
    return state.src[start:end]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def escapedSplit(string: str) -> list[str]:
    """Split *string* into table cells on unescaped ``|`` characters.

    A pipe preceded by a backslash (``\\|``) does not split; the escaping
    backslash is dropped from the cell content while the pipe is kept.
    """
    cells: list[str] = []
    cell_start = 0
    pending = ""
    escaped = False

    for idx in range(len(string)):
        ch = string[idx]
        if ch == "|":
            if escaped:
                # escaped pipe, '\|': keep text up to (but excluding) the
                # backslash; the pipe stays part of the current cell
                pending += string[cell_start : idx - 1]
                cell_start = idx
            else:
                # pipe separating cells, '|'
                cells.append(pending + string[cell_start:idx])
                pending = ""
                cell_start = idx + 1
        escaped = ch == "\\"

    cells.append(pending + string[cell_start:])
    return cells
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def table(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool:
    """Parse a GFM table starting at *startLine*.

    A table is a header row, a delimiter row (e.g. ``|---|:--:|``) and any
    number of body rows.  Returns True when a table is recognized; unless
    *silent*, also pushes the complete token stream (table/thead/tbody/
    tr/th/td/inline) and advances ``state.line`` past the table.
    """
    tbodyLines = None

    # should have at least two lines
    if startLine + 2 > endLine:
        return False

    nextLine = startLine + 1

    if state.sCount[nextLine] < state.blkIndent:
        return False

    if state.is_code_block(nextLine):
        return False

    # first character of the second line should be '|', '-', ':',
    # and no other characters are allowed but spaces;
    # basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp

    pos = state.bMarks[nextLine] + state.tShift[nextLine]
    if pos >= state.eMarks[nextLine]:
        return False
    first_ch = state.src[pos]
    pos += 1
    if first_ch not in ("|", "-", ":"):
        return False

    if pos >= state.eMarks[nextLine]:
        return False
    second_ch = state.src[pos]
    pos += 1
    if second_ch not in ("|", "-", ":") and not isStrSpace(second_ch):
        return False

    # if first character is '-', then second character must not be a space
    # (due to parsing ambiguity with list)
    if first_ch == "-" and isStrSpace(second_ch):
        return False

    # the rest of the delimiter row may only contain '|', '-', ':' or spaces
    while pos < state.eMarks[nextLine]:
        ch = state.src[pos]

        if ch not in ("|", "-", ":") and not isStrSpace(ch):
            return False

        pos += 1

    lineText = getLine(state, startLine + 1)

    # derive per-column alignment from the delimiter row
    columns = lineText.split("|")
    aligns: list[str] = []
    for i in range(len(columns)):
        t = columns[i].strip()
        if not t:
            # allow empty columns before and after table, but not in between columns;
            # e.g. allow ` |---| `, disallow ` ---||--- `
            if i == 0 or i == len(columns) - 1:
                continue
            else:
                return False

        if not headerLineRe.search(t):
            return False
        if charStrAt(t, len(t) - 1) == ":":
            # ':---:' -> center, '---:' -> right
            aligns.append("center" if charStrAt(t, 0) == ":" else "right")
        elif charStrAt(t, 0) == ":":
            aligns.append("left")
        else:
            aligns.append("")

    # validate the header row
    lineText = getLine(state, startLine).strip()
    if "|" not in lineText:
        return False
    if state.is_code_block(startLine):
        return False
    columns = escapedSplit(lineText)
    # drop empty leading/trailing cells produced by enclosing pipes
    if columns and columns[0] == "":
        columns.pop(0)
    if columns and columns[-1] == "":
        columns.pop()

    # header row will define an amount of columns in the entire table,
    # and align row should be exactly the same (the rest of the rows can differ)
    columnCount = len(columns)
    if columnCount == 0 or columnCount != len(aligns):
        return False

    if silent:
        return True

    oldParentType = state.parentType
    state.parentType = "table"

    # use 'blockquote' lists for termination because it's
    # the most similar to tables
    terminatorRules = state.md.block.ruler.getRules("blockquote")

    token = state.push("table_open", "table", 1)
    # tableLines is aliased so its end line can be patched after the loop
    token.map = tableLines = [startLine, 0]

    token = state.push("thead_open", "thead", 1)
    token.map = [startLine, startLine + 1]

    token = state.push("tr_open", "tr", 1)
    token.map = [startLine, startLine + 1]

    # emit the header cells
    for i in range(len(columns)):
        token = state.push("th_open", "th", 1)
        if aligns[i]:
            token.attrs = {"style": "text-align:" + aligns[i]}

        token = state.push("inline", "", 0)
        # note in markdown-it this map was removed in v12.0.0 however, we keep it,
        # since it is helpful to propagate to children tokens
        token.map = [startLine, startLine + 1]
        token.content = columns[i].strip()
        token.children = []

        token = state.push("th_close", "th", -1)

    token = state.push("tr_close", "tr", -1)
    token = state.push("thead_close", "thead", -1)

    # consume body rows until a terminator, blank line or outdent
    nextLine = startLine + 2
    while nextLine < endLine:
        if state.sCount[nextLine] < state.blkIndent:
            break

        terminate = False
        for i in range(len(terminatorRules)):
            if terminatorRules[i](state, nextLine, endLine, True):
                terminate = True
                break

        if terminate:
            break
        lineText = getLine(state, nextLine).strip()
        if not lineText:
            break
        if state.is_code_block(nextLine):
            break
        columns = escapedSplit(lineText)
        if columns and columns[0] == "":
            columns.pop(0)
        if columns and columns[-1] == "":
            columns.pop()

        # open tbody lazily, only when the first body row exists
        if nextLine == startLine + 2:
            token = state.push("tbody_open", "tbody", 1)
            token.map = tbodyLines = [startLine + 2, 0]

        token = state.push("tr_open", "tr", 1)
        token.map = [nextLine, nextLine + 1]

        # always emit exactly columnCount cells; short rows are padded
        # with empty cells, extra cells are ignored
        for i in range(columnCount):
            token = state.push("td_open", "td", 1)
            if aligns[i]:
                token.attrs = {"style": "text-align:" + aligns[i]}

            token = state.push("inline", "", 0)
            # note in markdown-it this map was removed in v12.0.0 however, we keep it,
            # since it is helpful to propagate to children tokens
            token.map = [nextLine, nextLine + 1]
            try:
                token.content = columns[i].strip() if columns[i] else ""
            except IndexError:
                token.content = ""
            token.children = []

            token = state.push("td_close", "td", -1)

        token = state.push("tr_close", "tr", -1)

        nextLine += 1

    if tbodyLines:
        token = state.push("tbody_close", "tbody", -1)
        tbodyLines[1] = nextLine

    token = state.push("table_close", "table", -1)

    tableLines[1] = nextLine
    state.parentType = oldParentType
    state.line = nextLine
    return True
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (511 Bytes). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/block.cpython-310.pyc
ADDED
|
Binary file (592 Bytes). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/inline.cpython-310.pyc
ADDED
|
Binary file (507 Bytes). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc
ADDED
|
Binary file (2.65 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc
ADDED
|
Binary file (557 Bytes). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc
ADDED
|
Binary file (2.73 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc
ADDED
|
Binary file (3.27 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc
ADDED
|
Binary file (946 Bytes). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc
ADDED
|
Binary file (1.09 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/block.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..token import Token
|
| 2 |
+
from .state_core import StateCore
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def block(state: StateCore) -> None:
    """Tokenize the source: a full block parse, or one inline token.

    In inline mode the whole source becomes a single "inline" token;
    otherwise the block parser populates ``state.tokens``.
    """
    if not state.inlineMode:
        state.md.block.parse(state.src, state.md, state.env, state.tokens)
        return

    inline_token = Token("inline", "", 0)
    inline_token.content = state.src
    inline_token.map = [0, 1]
    inline_token.children = []
    state.tokens.append(inline_token)
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/smartquotes.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Convert straight quotation marks to typographic ones
|
| 2 |
+
"""
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import re
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
from ..common.utils import charCodeAt, isMdAsciiPunct, isPunctChar, isWhiteSpace
|
| 9 |
+
from ..token import Token
|
| 10 |
+
from .state_core import StateCore
|
| 11 |
+
|
| 12 |
+
QUOTE_TEST_RE = re.compile(r"['\"]")
|
| 13 |
+
QUOTE_RE = re.compile(r"['\"]")
|
| 14 |
+
APOSTROPHE = "\u2019" # ’
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def replaceAt(string: str, index: int, ch: str) -> str:
    """Return *string* with the character at *index* replaced by *ch*.

    Unlike the JS original, a negative *index* is rejected outright,
    because negative indices would wrap around in Python.  In practice
    the index is never negative.
    """
    assert index >= 0
    return f"{string[:index]}{ch}{string[index + 1:]}"
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def process_inlines(tokens: list[Token], state: StateCore) -> None:
    """Replace straight quotes with typographic quotes in one inline-token list.

    Scans every ``text`` token, tracking candidate opening quotes in a
    stack so an opener in one token can be matched with a closer in a
    later one.  ``goto_outer`` emulates the labelled ``continue`` used by
    the upstream JS implementation.
    """
    stack: list[dict[str, Any]] = []

    for i, token in enumerate(tokens):
        thisLevel = token.level

        # drop stack entries opened at a deeper nesting level than this token
        j = 0
        for j in range(len(stack))[::-1]:
            if stack[j]["level"] <= thisLevel:
                break
        else:
            # When the loop is terminated without a "break".
            # Subtract 1 to get the same index as the js version.
            j -= 1

        stack = stack[: j + 1]

        if token.type != "text":
            continue

        text = token.content
        pos = 0
        maximum = len(text)

        while pos < maximum:
            goto_outer = False
            lastIndex = pos
            t = QUOTE_RE.search(text[lastIndex:])
            if not t:
                break

            canOpen = canClose = True
            pos = t.start(0) + lastIndex + 1
            isSingle = t.group(0) == "'"

            # Find previous character,
            # default to space if it's the beginning of the line
            lastChar: None | int = 0x20

            if t.start(0) + lastIndex - 1 >= 0:
                lastChar = charCodeAt(text, t.start(0) + lastIndex - 1)
            else:
                # quote is at the start of this token: look back through
                # earlier tokens for the effective previous character
                for j in range(i)[::-1]:
                    if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak":
                        break
                    # should skip all tokens except 'text', 'html_inline' or 'code_inline'
                    if not tokens[j].content:
                        continue

                    lastChar = charCodeAt(tokens[j].content, len(tokens[j].content) - 1)
                    break

            # Find next character,
            # default to space if it's the end of the line
            nextChar: None | int = 0x20

            if pos < maximum:
                nextChar = charCodeAt(text, pos)
            else:
                # quote is at the end of this token: look forward through
                # later tokens for the effective next character
                for j in range(i + 1, len(tokens)):
                    # nextChar defaults to 0x20
                    if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak":
                        break
                    # should skip all tokens except 'text', 'html_inline' or 'code_inline'
                    if not tokens[j].content:
                        continue

                    nextChar = charCodeAt(tokens[j].content, 0)
                    break

            isLastPunctChar = lastChar is not None and (
                isMdAsciiPunct(lastChar) or isPunctChar(chr(lastChar))
            )
            isNextPunctChar = nextChar is not None and (
                isMdAsciiPunct(nextChar) or isPunctChar(chr(nextChar))
            )

            isLastWhiteSpace = lastChar is not None and isWhiteSpace(lastChar)
            isNextWhiteSpace = nextChar is not None and isWhiteSpace(nextChar)

            # a quote followed by whitespace (or punct in a word) can't open
            if isNextWhiteSpace:  # noqa: SIM114
                canOpen = False
            elif isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar):
                canOpen = False

            # a quote preceded by whitespace (or punct in a word) can't close
            if isLastWhiteSpace:  # noqa: SIM114
                canClose = False
            elif isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar):
                canClose = False

            if nextChar == 0x22 and t.group(0) == '"':  # 0x22: "  # noqa: SIM102
                if (
                    lastChar is not None and lastChar >= 0x30 and lastChar <= 0x39
                ):  # 0x30: 0, 0x39: 9
                    # special case: 1"" - count first quote as an inch
                    canClose = canOpen = False

            if canOpen and canClose:
                # Replace quotes in the middle of punctuation sequence, but not
                # in the middle of the words, i.e.:
                #
                # 1. foo " bar " baz - not replaced
                # 2. foo-"-bar-"-baz - replaced
                # 3. foo"bar"baz     - not replaced
                canOpen = isLastPunctChar
                canClose = isNextPunctChar

            if not canOpen and not canClose:
                # middle of word
                if isSingle:
                    token.content = replaceAt(
                        token.content, t.start(0) + lastIndex, APOSTROPHE
                    )
                continue

            if canClose:
                # this could be a closing quote, rewind the stack to get a match
                for j in range(len(stack))[::-1]:
                    item = stack[j]
                    if stack[j]["level"] < thisLevel:
                        break
                    if item["single"] == isSingle and stack[j]["level"] == thisLevel:
                        item = stack[j]

                        if isSingle:
                            openQuote = state.md.options.quotes[2]
                            closeQuote = state.md.options.quotes[3]
                        else:
                            openQuote = state.md.options.quotes[0]
                            closeQuote = state.md.options.quotes[1]

                        # replace token.content *before* tokens[item.token].content,
                        # because, if they are pointing at the same token, replaceAt
                        # could mess up indices when quote length != 1
                        token.content = replaceAt(
                            token.content, t.start(0) + lastIndex, closeQuote
                        )
                        tokens[item["token"]].content = replaceAt(
                            tokens[item["token"]].content, item["pos"], openQuote
                        )

                        # account for multi-character replacement quotes
                        pos += len(closeQuote) - 1
                        if item["token"] == i:
                            pos += len(openQuote) - 1

                        text = token.content
                        maximum = len(text)

                        stack = stack[:j]
                        goto_outer = True
                        break
                if goto_outer:
                    goto_outer = False
                    continue

            if canOpen:
                stack.append(
                    {
                        "token": i,
                        "pos": t.start(0) + lastIndex,
                        "single": isSingle,
                        "level": thisLevel,
                    }
                )
            elif canClose and isSingle:
                # unmatched single closer: treat it as an apostrophe
                token.content = replaceAt(
                    token.content, t.start(0) + lastIndex, APOSTROPHE
                )
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def smartquotes(state: StateCore) -> None:
    """Replace straight quotes with typographic ones in all inline tokens.

    No-op unless the ``typographer`` option is enabled.
    """
    if not state.md.options.typographer:
        return

    for tok in state.tokens:
        if tok.type != "inline":
            continue
        if not QUOTE_RE.search(tok.content):
            continue
        if tok.children is not None:
            process_inlines(tok.children, state)
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_core/state_core.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING
|
| 4 |
+
|
| 5 |
+
from ..ruler import StateBase
|
| 6 |
+
from ..token import Token
|
| 7 |
+
from ..utils import EnvType
|
| 8 |
+
|
| 9 |
+
if TYPE_CHECKING:
|
| 10 |
+
from markdown_it import MarkdownIt
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class StateCore(StateBase):
    """Mutable state threaded through the core rule chain.

    Holds the source text, a reference to the parser, the environment
    sandbox, and the top-level token stream being built.
    """

    def __init__(
        self,
        src: str,
        md: MarkdownIt,
        env: EnvType,
        tokens: list[Token] | None = None,
    ) -> None:
        self.src = src
        self.md = md  # link to parser instance
        self.env = env
        # NOTE: any falsy `tokens` argument (including an existing empty
        # list) is replaced by a fresh list rather than shared
        self.tokens: list[Token] = tokens or []
        # when True, the `block` core rule wraps src in one inline token
        self.inlineMode = False
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__init__.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = (
|
| 2 |
+
"StateInline",
|
| 3 |
+
"text",
|
| 4 |
+
"fragments_join",
|
| 5 |
+
"link_pairs",
|
| 6 |
+
"linkify",
|
| 7 |
+
"escape",
|
| 8 |
+
"newline",
|
| 9 |
+
"backtick",
|
| 10 |
+
"emphasis",
|
| 11 |
+
"image",
|
| 12 |
+
"link",
|
| 13 |
+
"autolink",
|
| 14 |
+
"entity",
|
| 15 |
+
"html_inline",
|
| 16 |
+
"strikethrough",
|
| 17 |
+
)
|
| 18 |
+
from . import emphasis, strikethrough
|
| 19 |
+
from .autolink import autolink
|
| 20 |
+
from .backticks import backtick
|
| 21 |
+
from .balance_pairs import link_pairs
|
| 22 |
+
from .entity import entity
|
| 23 |
+
from .escape import escape
|
| 24 |
+
from .fragments_join import fragments_join
|
| 25 |
+
from .html_inline import html_inline
|
| 26 |
+
from .image import image
|
| 27 |
+
from .link import link
|
| 28 |
+
from .linkify import linkify
|
| 29 |
+
from .newline import newline
|
| 30 |
+
from .state_inline import StateInline
|
| 31 |
+
from .text import text
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (772 Bytes). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc
ADDED
|
Binary file (1.45 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc
ADDED
|
Binary file (1.28 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc
ADDED
|
Binary file (1.87 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc
ADDED
|
Binary file (2.12 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc
ADDED
|
Binary file (1.28 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc
ADDED
|
Binary file (1.24 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc
ADDED
|
Binary file (1.2 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc
ADDED
|
Binary file (1.15 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/image.cpython-310.pyc
ADDED
|
Binary file (2.01 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/link.cpython-310.pyc
ADDED
|
Binary file (1.89 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc
ADDED
|
Binary file (1.42 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc
ADDED
|
Binary file (962 Bytes). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc
ADDED
|
Binary file (3.97 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc
ADDED
|
Binary file (2.3 kB). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/text.cpython-310.pyc
ADDED
|
Binary file (658 Bytes). View file
|
|
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/autolink.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Process autolinks '<protocol:...>'
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
from .state_inline import StateInline
|
| 5 |
+
|
| 6 |
+
EMAIL_RE = re.compile(
|
| 7 |
+
r"^([a-zA-Z0-9.!#$%&\'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$" # noqa: E501
|
| 8 |
+
)
|
| 9 |
+
AUTOLINK_RE = re.compile(r"^([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)$")
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def autolink(state: StateInline, silent: bool) -> bool:
    """Parse an autolink: ``<protocol:...>`` or ``<email@example.com>``.

    Returns True and advances ``state.pos`` past the closing ``>`` when the
    bracketed text is a valid scheme autolink or email address; unless
    *silent*, also pushes link_open / text / link_close tokens.
    """
    pos = state.pos

    if state.src[pos] != "<":
        return False

    start = state.pos
    maximum = state.posMax

    # scan for the closing '>'; a second '<' or end of input aborts
    while True:
        pos += 1
        if pos >= maximum:
            return False

        ch = state.src[pos]

        if ch == "<":
            return False
        if ch == ">":
            break

    url = state.src[start + 1 : pos]

    # scheme autolinks take priority over email autolinks, matching the
    # original branch order (a scheme match that fails validation does
    # NOT fall through to the email form)
    if AUTOLINK_RE.search(url) is not None:
        fullUrl = state.md.normalizeLink(url)
    elif EMAIL_RE.search(url) is not None:
        fullUrl = state.md.normalizeLink("mailto:" + url)
    else:
        return False

    if not state.md.validateLink(fullUrl):
        return False

    if not silent:
        _push_autolink_tokens(state, url, fullUrl)

    state.pos += len(url) + 2
    return True


def _push_autolink_tokens(state: StateInline, url: str, fullUrl: str) -> None:
    """Emit the link_open / text / link_close token triple for an autolink."""
    token = state.push("link_open", "a", 1)
    token.attrs = {"href": fullUrl}
    token.markup = "autolink"
    token.info = "auto"

    token = state.push("text", "", 0)
    token.content = state.md.normalizeLinkText(url)

    token = state.push("link_close", "a", -1)
    token.markup = "autolink"
    token.info = "auto"
|
evalkit_tf446/lib/python3.10/site-packages/markdown_it/rules_inline/backticks.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Parse backticks
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
from .state_inline import StateInline
|
| 5 |
+
|
| 6 |
+
regex = re.compile("^ (.+) $")
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def backtick(state: StateInline, silent: bool) -> bool:
    """Parse a run of backticks at ``state.pos`` as an inline code span.

    Measures the opener run, then searches for a later run of backticks of
    exactly the same length. Runs of other lengths have their positions
    recorded in ``state.backticks`` so subsequent calls can skip rescanning;
    ``state.backticksScanned`` marks that the whole source was scanned once.

    Returns True when the marker was consumed (either as a ``code_inline``
    token or as literal pending text), False when the character at
    ``state.pos`` is not a backtick.
    """
    src = state.src
    scan = state.pos

    if src[scan] != "`":
        return False

    opener_start = scan
    scan += 1
    src_end = state.posMax

    # Measure the opener: a maximal run of consecutive backticks.
    while scan < src_end and src[scan] == "`":
        scan += 1

    opener = src[opener_start:scan]
    opener_len = len(opener)

    # The cache proves no closer of this length exists at or after this
    # position — emit the run as plain text without rescanning the source.
    if state.backticksScanned and state.backticks.get(opener_len, 0) <= opener_start:
        if not silent:
            state.pending += opener
        state.pos += opener_len
        return True

    content_start = scan
    search_from = scan

    # Nothing usable in the cache: walk every later backtick run until one
    # matches the opener length or the source is exhausted.
    while True:
        run_start = src.find("`", search_from)
        if run_start == -1:
            break

        run_end = run_start + 1
        while run_end < src_end and src[run_end] == "`":
            run_end += 1

        run_len = run_end - run_start

        if run_len == opener_len:
            # Found the matching closer — emit the code span.
            if not silent:
                token = state.push("code_inline", "code", 0)
                token.markup = opener
                token.content = src[content_start:run_start].replace("\n", " ")
                # Trim one leading and one trailing space, but only when both
                # are present and the content is not made of spaces alone.
                inner = token.content
                if (
                    inner.startswith(" ")
                    and inner.endswith(" ")
                    and len(inner.strip()) > 0
                ):
                    token.content = inner[1:-1]
            state.pos = run_end
            return True

        # Different length: cache where a run of this length was seen, as an
        # upper bound on where a closer of that length could start.
        state.backticks[run_len] = run_start
        search_from = run_end

    # Scanned through to the end without finding a closer.
    state.backticksScanned = True

    if not silent:
        state.pending += opener
    state.pos += opener_len
    return True
|