file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
mdast_util_to_markdown/src/util/contains_control_or_whitespace.rs | Rust | pub fn contains_control_or_whitespace(value: &str) -> bool {
value.chars().any(|c| c.is_whitespace() || c.is_control())
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/src/util/format_code_as_indented.rs | Rust | //! JS equivalent https://github.com/syntax-tree/mdast-util-to-markdown/blob/main/lib/util/format-code-as-indented.js
use crate::state::State;
use markdown::mdast::Code;
use regex::Regex;
/// Whether `code` should be serialized as indented code instead of fenced code.
///
/// JS equivalent: <https://github.com/syntax-tree/mdast-util-to-markdown/blob/main/lib/util/format-code-as-indented.js>.
pub fn format_code_as_indented(code: &Code, state: &State) -> bool {
    // Cheap checks first: fences are forced by options, and indented code
    // cannot be empty or carry an info string (language).
    if state.options.fences || code.value.is_empty() || code.lang.is_some() {
        return false;
    }
    // Indented code cannot represent a value consisting only of whitespace.
    if !code.value.chars().any(|c| !c.is_whitespace()) {
        return false;
    }
    // Nor a value with a blank initial or final line: the indentation would
    // swallow it on reparse.  Build the regex only when it is actually needed
    // instead of unconditionally on every call.
    let blank = Regex::new(r"^[\t ]*(?:[\r\n]|$)|(?:^|[\r\n])[\t ]*$").unwrap();
    !blank.is_match(&code.value)
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/src/util/format_heading_as_setext.rs | Rust | //! JS equivalent https://github.com/syntax-tree/mdast-util-to-markdown/blob/main/lib/util/format-heading-as-setext.js
use alloc::string::{String, ToString};
use markdown::mdast::{Heading, Node};
use regex::Regex;
use crate::state::State;
/// Whether `heading` should be serialized as a setext (underlined) heading
/// instead of an ATX (`#`) heading.
///
/// JS equivalent: <https://github.com/syntax-tree/mdast-util-to-markdown/blob/main/lib/util/format-heading-as-setext.js>.
pub fn format_heading_as_setext(heading: &Heading, state: &State) -> bool {
    // Setext headings only exist for ranks 1 and 2 and cannot be empty.
    if heading.depth > 2 || to_string(&heading.children).is_empty() {
        return false;
    }
    // The `setext` option forces setext for all eligible headings.
    if state.options.setext {
        return true;
    }
    // ATX headings cannot contain line endings, so fall back to setext when
    // some literal descendant contains one.  The regex and the child scan are
    // now only performed when the earlier checks did not already decide.
    let line_break = Regex::new(r"\r?\n|\r").unwrap();
    heading
        .children
        .iter()
        .any(|child| include_literal_with_line_break(child, &line_break))
}
/// Whether `node` is — or contains — a literal whose value includes a line
/// ending (or a hard break, which always produces one).
///
/// See: <https://github.com/syntax-tree/mdast-util-to-markdown/blob/main/lib/util/format-heading-as-setext.js>.
fn include_literal_with_line_break(node: &Node, regex: &Regex) -> bool {
    // A hard break always maps to a line ending.
    if matches!(node, Node::Break(_)) {
        return true;
    }
    // Literal nodes: pull out the raw value and test it directly.
    let literal_value = match node {
        Node::Code(x) => Some(&x.value),
        Node::Html(x) => Some(&x.value),
        Node::InlineCode(x) => Some(&x.value),
        Node::InlineMath(x) => Some(&x.value),
        Node::Math(x) => Some(&x.value),
        Node::MdxFlowExpression(x) => Some(&x.value),
        Node::MdxTextExpression(x) => Some(&x.value),
        Node::MdxjsEsm(x) => Some(&x.value),
        Node::Text(x) => Some(&x.value),
        Node::Toml(x) => Some(&x.value),
        Node::Yaml(x) => Some(&x.value),
        _ => None,
    };
    if let Some(value) = literal_value {
        return regex.is_match(value);
    }
    // Anything else: search the descendants, if any.
    node.children().map_or(false, |children| {
        children
            .iter()
            .any(|child| include_literal_with_line_break(child, regex))
    })
}
/// Tiny version of `mdast-util-to-string`: concatenate the serialized text of
/// all `children` into one `String`.
fn to_string(children: &[Node]) -> String {
    let mut result = String::new();
    for child in children {
        result.push_str(&child.to_string());
    }
    result
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/src/util/format_link_as_auto_link.rs | Rust | //! JS equivalent https://github.com/syntax-tree/mdast-util-to-markdown/blob/main/lib/util/format-link-as-autolink.js
use crate::state::State;
use alloc::{format, string::ToString};
use markdown::mdast::{Link, Node};
use regex::RegexBuilder;
/// Whether `link` can be serialized as an autolink (`<https://example.com>`)
/// instead of a resource link (`[text](url)`).
///
/// JS equivalent: <https://github.com/syntax-tree/mdast-util-to-markdown/blob/main/lib/util/format-link-as-autolink.js>.
pub fn format_link_as_auto_link(link: &Link, node: &Node, state: &State) -> bool {
    // An autolink has exactly one child, and that child is plain text.
    let children = match node.children() {
        Some(children) => children,
        None => return false,
    };
    if children.len() != 1 || !matches!(children[0], Node::Text(_)) {
        return false;
    }
    // Resource links are forced by options; autolinks cannot carry a title or
    // an empty URL.  These cheap checks now run before any allocation.
    if state.options.resource_link || link.url.is_empty() || link.title.is_some() {
        return false;
    }
    // The rendered text must equal the URL, optionally via a `mailto:` prefix.
    // `to_string`/`format!` are only paid for once the checks above pass.
    let raw = node.to_string();
    if raw != link.url && format!("mailto:{}", raw) != link.url {
        return false;
    }
    // Finally the URL must start with a protocol and contain nothing that
    // would break out of the `<…>` delimiters.
    let start_with_protocol = RegexBuilder::new("^[a-z][a-z+.-]+:")
        .case_insensitive(true)
        .build()
        .unwrap();
    start_with_protocol.is_match(&link.url) && is_valid_url(&link.url)
}
/// Check that `url` contains no character that is illegal inside an autolink:
/// control characters, whitespace, `<`, or `>`.
fn is_valid_url(url: &str) -> bool {
    url.chars()
        .all(|c| !c.is_control() && !c.is_whitespace() && c != '<' && c != '>')
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/src/util/longest_char_streak.rs | Rust | //! JS equivalent https://github.com/wooorm/longest-streak/blob/main/index.js
/// Count the longest uninterrupted run of `needle` in `haystack`.
///
/// JS equivalent: <https://github.com/wooorm/longest-streak/blob/main/index.js>.
pub fn longest_char_streak(haystack: &str, needle: char) -> usize {
    let mut longest = 0;
    let mut current = 0;
    // Single pass: grow the current run on a match, reset it otherwise, and
    // keep the maximum seen so far.
    for ch in haystack.chars() {
        if ch == needle {
            current += 1;
            if current > longest {
                longest = current;
            }
        } else {
            current = 0;
        }
    }
    longest
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Ported from the JS `longest-streak` test suite.
    #[test]
    fn longest_streak_tests() {
        // No occurrences.
        assert_eq!(longest_char_streak("", 'f'), 0);
        // Single streaks.
        assert_eq!(longest_char_streak("foo", 'o'), 2);
        assert_eq!(longest_char_streak("ooo", 'o'), 3);
        // The longest of several streaks wins, regardless of position.
        assert_eq!(longest_char_streak("fo foo fo", 'o'), 2);
        assert_eq!(longest_char_streak("fo foo foo", 'o'), 2);
        assert_eq!(longest_char_streak("fo fooo fo", 'o'), 3);
        assert_eq!(longest_char_streak("fo fooo foo", 'o'), 3);
        assert_eq!(longest_char_streak("fo fooo fooooo", 'o'), 5);
        assert_eq!(longest_char_streak("fo fooooo fooo", 'o'), 5);
        assert_eq!(longest_char_streak("fo fooooo fooooo", 'o'), 5);
        // Non-alphanumeric needle.  (The original repeated this assertion
        // twice verbatim; the copy-paste duplicate is removed.)
        assert_eq!(longest_char_streak("'`'", '`'), 1);
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/src/util/mod.rs | Rust | pub mod check_bullet;
pub mod check_bullet_ordered;
pub mod check_bullet_other;
pub mod check_emphasis;
pub mod check_fence;
pub mod check_quote;
pub mod check_rule;
pub mod check_rule_repetition;
pub mod check_strong;
pub mod contains_control_or_whitespace;
pub mod format_code_as_indented;
pub mod format_heading_as_setext;
pub mod format_link_as_auto_link;
pub mod longest_char_streak;
pub mod pattern_in_scope;
pub mod safe;
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/src/util/pattern_in_scope.rs | Rust | //! JS equivalent https://github.com/syntax-tree/mdast-util-to-markdown/blob/main/lib/util/pattern-in-scope.js
use crate::{construct_name::ConstructName, r#unsafe::Unsafe};
/// Whether any construct in `list` is on the `stack`; an empty `list` yields
/// `none`.
///
/// JS: <https://github.com/syntax-tree/mdast-util-to-markdown/blob/fd6a508/lib/util/pattern-in-scope.js#L24>.
fn list_in_scope(stack: &[ConstructName], list: &[ConstructName], none: bool) -> bool {
    if list.is_empty() {
        return none;
    }
    list.iter()
        .any(|construct_name| stack.contains(construct_name))
}
/// Whether an unsafe `pattern` applies for the current construct `stack`: it
/// must be inside every `in_construct` scope and outside every
/// `not_in_construct` scope.
///
/// JS: <https://github.com/syntax-tree/mdast-util-to-markdown/blob/fd6a508/lib/util/pattern-in-scope.js#L11>.
pub fn pattern_in_scope(stack: &[ConstructName], pattern: &Unsafe) -> bool {
    let inside_required = list_in_scope(stack, &pattern.in_construct, true);
    let inside_forbidden = list_in_scope(stack, &pattern.not_in_construct, false);
    inside_required && !inside_forbidden
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/src/util/safe.rs | Rust | //! JS equivalent https://github.com/syntax-tree/mdast-util-to-markdown/blob/fd6a508/lib/util/safe.js
use alloc::{format, string::String, vec::Vec};
use regex::Regex;
/// Escape flags for a position in the output.
/// NOTE(review): field semantics inferred from names only — presumably whether
/// escaping is needed when the character appears after/before the current
/// position; confirm against the `safe` routine that populates this.
pub struct EscapeInfos {
    pub after: bool,
    pub before: bool,
}
/// Configuration for making a value safe to embed in the generated markdown.
pub struct SafeConfig<'a> {
    // Text that will follow the value in the output.
    pub after: &'a str,
    // Text that precedes the value in the output.
    pub before: &'a str,
    // Extra character to always encode, if any.
    pub encode: Option<char>,
}
impl<'a> SafeConfig<'a> {
    /// Build a config from the surrounding text and an optional extra
    /// character to encode.
    pub(crate) fn new(before: &'a str, after: &'a str, encode: Option<char>) -> Self {
        SafeConfig {
            after,
            before,
            encode,
        }
    }
}
/// Escape each backslash in `value` that is followed by an ASCII punctuation
/// character (and would therefore start an escape sequence on reparse).
///
/// `after` is the text that will follow `value` in the generated document, so
/// a trailing backslash in `value` is also escaped when the next character in
/// the output would activate it.
///
/// JS: <https://github.com/syntax-tree/mdast-util-to-markdown/blob/fd6a508/lib/util/safe.js#L148>.
pub fn escape_backslashes(value: &str, after: &str) -> String {
    let whole = format!("{}{}", value, after);
    let bytes = whole.as_bytes();
    let mut results = String::new();
    let mut start = 0;
    // Manual scan for `\` followed by ASCII punctuation (`[!-/:-@[-`{-~]`).
    // This mirrors the JS lookahead `\\(?=[punct])`: in runs such as `\-\-`
    // or `\\-` every qualifying backslash is escaped, which the previous
    // consuming regex match missed.  Only positions inside `value` itself are
    // considered (`after` is context only), which also removes the possible
    // out-of-bounds slice when a match started past `value.len()`.
    // Byte comparison is safe: `\` and the punctuation are ASCII, and UTF-8
    // continuation bytes are all >= 0x80.
    for position in 0..value.len() {
        let needs_escape = bytes[position] == b'\\'
            && position + 1 < bytes.len()
            && matches!(
                bytes[position + 1],
                b'!'..=b'/' | b':'..=b'@' | b'['..=b'`' | b'{'..=b'~'
            );
        if needs_escape {
            if start != position {
                results.push_str(&value[start..position]);
            }
            results.push('\\');
            start = position;
        }
    }
    results.push_str(&value[start..]);
    results
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/blockquote.rs | Rust | use markdown::mdast::{
Blockquote, Break, Code, Definition, Emphasis, Heading, Html, Image, ImageReference,
InlineCode, Link, LinkReference, List, ListItem, Node, Paragraph, ReferenceKind, Strong, Text,
ThematicBreak,
};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
#[test]
fn block_quote() {
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![],
position: None,
}))
.unwrap(),
">\n",
"should support a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None,
}))
.unwrap(),
"> a\n",
"should support a block quote w/ a child"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::ThematicBreak(ThematicBreak { position: None }),
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("b"),
position: None
})],
position: None
}),
],
position: None,
}))
.unwrap(),
"> a\n>\n> ***\n>\n> b\n",
"should support a block quote w/ children"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a\nb"),
position: None
})],
position: None
}),],
position: None,
}))
.unwrap(),
"> a\n> b\n",
"should support text w/ a line ending in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a"),
position: None
}),
Node::Text(Text {
value: String::from("b"),
position: None
})
],
position: None
}),],
position: None,
}))
.unwrap(),
"> ab\n",
"should support adjacent texts in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a\nb"),
position: None
})],
position: None
}),
Node::Blockquote(Blockquote {
children: vec![
Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a\n"),
position: None
}),
Node::InlineCode(InlineCode {
value: String::from("b\nc"),
position: None
}),
Node::Text(Text {
value: String::from("\nd"),
position: None
}),
],
position: None
}),
Node::Heading(Heading {
children: vec![Node::Text(Text {
value: String::from("a b"),
position: None
})],
position: None,
depth: 1
})
],
position: None
}),
],
position: None,
}))
.unwrap(),
"> a\n> b\n>\n> > a\n> > `b\n> > c`\n> > d\n> >\n> > # a b\n",
"should support a block quote in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a"),
position: None
}),
Node::Break(Break { position: None }),
Node::Text(Text {
value: String::from("b"),
position: None
})
],
position: None
}),],
position: None,
}))
.unwrap(),
"> a\\\n> b\n",
"should support a break in a block quote"
);
assert_eq!(
to_md_with_opts(
&Node::Blockquote(Blockquote {
children: vec![Node::Code(Code {
value: String::from("a\nb\n\nc"),
position: None,
lang: None,
meta: None
})],
position: None,
}),
&Options {
fences: false,
..Default::default()
}
)
.unwrap(),
"> a\n> b\n>\n> c\n",
"should support code (flow, indented) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Code(Code {
value: String::from("c\nd\n\ne"),
position: None,
lang: String::from("a\nb").into(),
meta: None
})],
position: None,
}))
.unwrap(),
"> ```a
b\n> c\n> d\n>\n> e\n> ```\n",
"should support code (flow, fenced) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a\n"),
position: None
}),
Node::InlineCode(InlineCode {
value: String::from("b\nc"),
position: None
}),
Node::Text(Text {
value: String::from("\nd"),
position: None
})
],
position: None
})],
position: None,
}))
.unwrap(),
"> a\n> `b\n> c`\n> d\n",
"should support code (text) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![
Node::Definition(Definition {
position: None,
title: Some("e\nf".into()),
url: "c\nd".into(),
identifier: "a\nb".into(),
label: None
}),
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a\nb"),
position: None
})],
position: None
})
],
position: None,
}))
.unwrap(),
"> [a\n> b]: <c
d> \"e\n> f\"\n>\n> a\n> b\n",
"should support a definition in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a\n"),
position: None
}),
Node::Emphasis(Emphasis {
children: vec![Node::Text(Text {
value: String::from("c\nd"),
position: None
}),],
position: None
}),
Node::Text(Text {
value: String::from("\nd"),
position: None
}),
],
position: None
})],
position: None,
}))
.unwrap(),
"> a\n> *c\n> d*\n> d\n",
"should support an emphasis in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Heading(Heading {
children: vec![Node::Text(Text {
value: String::from("a\nb"),
position: None
}),],
position: None,
depth: 3
})],
position: None,
}))
.unwrap(),
"> ### a
b\n",
"should support a heading (atx) in a block quote"
);
assert_eq!(
to_md_with_opts(
&Node::Blockquote(Blockquote {
children: vec![Node::Heading(Heading {
children: vec![Node::Text(Text {
value: String::from("a\nb"),
position: None
}),],
position: None,
depth: 1
})],
position: None,
}),
&Options {
setext: true,
..Default::default()
}
)
.unwrap(),
"> a\n> b\n> =\n",
"should support a heading (setext) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Html(Html {
value: String::from("<div\nhidden>"),
position: None
})],
position: None,
}))
.unwrap(),
"> <div\n> hidden>\n",
"should support html (flow) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a"),
position: None
}),
Node::Html(Html {
value: String::from("<span\nhidden>"),
position: None
}),
Node::Text(Text {
value: String::from("\nb"),
position: None
}),
],
position: None
})],
position: None,
}))
.unwrap(),
"> a <span\n> hidden>\n> b\n",
"should support html (text) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a\n"),
position: None
}),
Node::Image(Image {
position: None,
alt: String::from("d\ne"),
url: String::from("b\nc"),
title: Some(String::from("f\ng"))
}),
Node::Text(Text {
value: String::from("\nh"),
position: None
}),
],
position: None
})],
position: None,
}))
.unwrap(),
"> a\n> \n> h\n",
"should support an image (resource) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a\n"),
position: None
}),
Node::ImageReference(ImageReference {
position: None,
alt: String::from("b\nc"),
label: Some(String::from("d\ne")),
reference_kind: ReferenceKind::Collapsed,
identifier: String::from("f"),
}),
Node::Text(Text {
value: String::from("\ng"),
position: None
}),
],
position: None
})],
position: None,
}))
.unwrap(),
"> a\n> ![b\n> c][d\n> e]\n> g\n",
"should support an image (reference) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a\n"),
position: None
}),
Node::Link(Link {
children: vec![Node::Text(Text {
value: String::from("d\ne"),
position: None
})],
position: None,
url: String::from("b\nc"),
title: Some(String::from("f\ng"))
}),
Node::Text(Text {
value: String::from("\nh"),
position: None
}),
],
position: None
})],
position: None,
}))
.unwrap(),
"> a\n> [d\n> e](<b
c> \"f\n> g\")\n> h\n",
"should support a link (resource) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a\n"),
position: None
}),
Node::LinkReference(LinkReference {
children: vec![Node::Text(Text {
value: String::from("b\nc"),
position: None
}),],
position: None,
reference_kind: ReferenceKind::Collapsed,
identifier: String::from("f"),
label: Some(String::from("d\ne"))
}),
Node::Text(Text {
value: String::from("\ng"),
position: None
}),
],
position: None
})],
position: None,
}))
.unwrap(),
"> a\n> [b\n> c][d\n> e]\n> g\n",
"should support a link (reference) in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a\nb"),
position: None
})],
position: None
}),
Node::List(List {
children: vec![
Node::ListItem(ListItem {
children: vec![Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("c\nd"),
position: None
})],
position: None
})],
position: None,
spread: false,
checked: None
}),
Node::ListItem(ListItem {
children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
position: None,
spread: false,
checked: None
}),
Node::ListItem(ListItem {
children: vec![Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("e\nf"),
position: None
})],
position: None
})],
position: None,
spread: false,
checked: None
}),
],
position: None,
ordered: false,
start: None,
spread: false
})
],
position: None,
}))
.unwrap(),
"> a\n> b\n>\n> - c\n> d\n> - ***\n> - e\n> f\n",
"should support a list in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![Node::Paragraph(Paragraph {
children: vec![
Node::Text(Text {
value: String::from("a\n"),
position: None
}),
Node::Strong(Strong {
children: vec![Node::Text(Text {
value: String::from("c\nd"),
position: None
})],
position: None
}),
Node::Text(Text {
value: String::from("\nd"),
position: None
}),
],
position: None
})],
position: None,
}))
.unwrap(),
"> a\n> **c\n> d**\n> d\n",
"should support a strong in a block quote"
);
assert_eq!(
to(&Node::Blockquote(Blockquote {
children: vec![
Node::ThematicBreak(ThematicBreak { position: None }),
Node::ThematicBreak(ThematicBreak { position: None })
],
position: None,
}))
.unwrap(),
"> ***\n>\n> ***\n",
"should support a thematic break in a block quote"
);
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/break.rs | Rust | use markdown::{
mdast::{Break, Heading, Node, Text},
to_mdast as from,
};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
#[test]
fn r#break() {
assert_eq!(
to(&Node::Break(Break { position: None })).unwrap(),
"\\\n",
"should support a break"
);
assert_eq!(
to(&Node::Heading(Heading {
children: vec![
Node::Text(Text {
value: String::from("a"),
position: None
}),
Node::Break(Break { position: None }),
Node::Text(Text {
value: String::from("b"),
position: None
}),
],
position: None,
depth: 3
}))
.unwrap(),
"### a b\n",
"should serialize breaks in heading (atx) as a space"
);
assert_eq!(
to(&Node::Heading(Heading {
children: vec![
Node::Text(Text {
value: String::from("a "),
position: None
}),
Node::Break(Break { position: None }),
Node::Text(Text {
value: String::from("b"),
position: None
}),
],
position: None,
depth: 3
}))
.unwrap(),
"### a b\n",
"should serialize breaks in heading (atx) as a space"
);
assert_eq!(
to_md_with_opts(
&from("a \nb\n=\n", &Default::default()).unwrap(),
&Options {
setext: true,
..Default::default()
}
)
.unwrap(),
"a\\\nb\n=\n",
"should serialize breaks in heading (setext)"
);
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/code.rs | Rust | use markdown::mdast::{Code, Node};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
#[test]
fn text() {
assert_eq!(
to_md_with_opts(
&Node::Code(Code {
value: String::from("a"),
position: None,
lang: None,
meta: None
}),
&Options {
fences: false,
..Default::default()
}
)
.unwrap(),
" a\n",
"should support code w/ a value (indent)"
);
assert_eq!(
to(&Node::Code(Code {
value: String::from("a"),
position: None,
lang: None,
meta: None
}))
.unwrap(),
"```\na\n```\n",
"should support code w/ a value (fences)"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("a".to_string()),
meta: None
}))
.unwrap(),
"```a\n```\n",
"should support code w/ a lang"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: None,
meta: Some("a".to_string())
}))
.unwrap(),
"```\n```\n",
"should support (ignore) code w/ only a meta"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("a".to_string()),
meta: Some("b".to_string())
}))
.unwrap(),
"```a b\n```\n",
"should support code w/ lang and meta"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("a b".to_string()),
meta: None
}))
.unwrap(),
"```a b\n```\n",
"should encode a space in `lang`"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("a\nb".to_string()),
meta: None
}))
.unwrap(),
"```a
b\n```\n",
"should encode a line ending in `lang`"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("a`b".to_string()),
meta: None
}))
.unwrap(),
"```a`b\n```\n",
"should encode a grave accent in `lang`"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("a\\-b".to_string()),
meta: None
}))
.unwrap(),
"```a\\\\-b\n```\n",
"should escape a backslash in `lang`"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("x".to_string()),
meta: Some("a b".to_string())
}))
.unwrap(),
"```x a b\n```\n",
"should not encode a space in `meta`"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("x".to_string()),
meta: Some("a\nb".to_string())
}))
.unwrap(),
"```x a
b\n```\n",
"should encode a line ending in `meta`"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("x".to_string()),
meta: Some("a`b".to_string())
}))
.unwrap(),
"```x a`b\n```\n",
"should encode a grave accent in `meta`"
);
assert_eq!(
to(&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("x".to_string()),
meta: Some("a\\-b".to_string())
}))
.unwrap(),
"```x a\\\\-b\n```\n",
"should escape a backslash in `meta`"
);
assert_eq!(
to_md_with_opts(
&Node::Code(Code {
value: String::new(),
position: None,
lang: None,
meta: None
}),
&Options {
fence: '~',
..Default::default()
}
)
.unwrap(),
"~~~\n~~~\n",
"should support fenced code w/ tildes when `fence: \"~\"`"
);
assert_eq!(
to_md_with_opts(
&Node::Code(Code {
value: String::new(),
position: None,
lang: Some("a`b".to_string()),
meta: None
}),
&Options {
fence: '~',
..Default::default()
}
)
.unwrap(),
"~~~a`b\n~~~\n",
"should not encode a grave accent when using tildes for fences"
);
assert_eq!(
to(&Node::Code(Code {
value: String::from("```\nasd\n```"),
position: None,
lang: None,
meta: None
}))
.unwrap(),
"````\n```\nasd\n```\n````\n",
"should use more grave accents for fences if there are streaks of grave accents in the value (fences)"
);
assert_eq!(
to_md_with_opts(
&Node::Code(Code {
value: String::from("~~~\nasd\n~~~"),
position: None,
lang: None,
meta: None
}),
&Options {
fence: '~',
..Default::default()
}
)
.unwrap(),
"~~~~\n~~~\nasd\n~~~\n~~~~\n",
"should use more tildes for fences if there are streaks of tildes in the value (fences)"
);
assert_eq!(
to(&Node::Code(Code {
value: String::from("b"),
position: None,
lang: Some("a".to_string()),
meta: None
}))
.unwrap(),
"```a\nb\n```\n",
"should use a fence if there is an info"
);
assert_eq!(
to(&Node::Code(Code {
value: String::from(" "),
position: None,
lang: None,
meta: None
}))
.unwrap(),
"```\n \n```\n",
"should use a fence if there is only whitespace"
);
assert_eq!(
to(&Node::Code(Code {
value: String::from("\na"),
position: None,
lang: None,
meta: None
}))
.unwrap(),
"```\n\na\n```\n",
"should use a fence if there first line is blank (void)"
);
assert_eq!(
to(&Node::Code(Code {
value: String::from(" \na"),
position: None,
lang: None,
meta: None
}))
.unwrap(),
"```\n \na\n```\n",
"should use a fence if there first line is blank (filled)"
);
assert_eq!(
to(&Node::Code(Code {
value: String::from("a\n"),
position: None,
lang: None,
meta: None
}))
.unwrap(),
"```\na\n\n```\n",
"should use a fence if there last line is blank (void)"
);
assert_eq!(
to(&Node::Code(Code {
value: String::from("a\n "),
position: None,
lang: None,
meta: None
}))
.unwrap(),
"```\na\n \n```\n",
"should use a fence if there last line is blank (filled)"
);
assert_eq!(
to_md_with_opts(
&Node::Code(Code {
value: String::from(" a\n\n b"),
position: None,
lang: None,
meta: None
}),
&Options {
fences: false,
..Default::default()
}
)
.unwrap(),
" a\n\n b\n",
"should use an indent if the value is indented"
);
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/core.rs | Rust | use markdown::mdast::{
Break, Code, Definition, Heading, List, ListItem, Node, Paragraph, Root, Text, ThematicBreak,
};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
#[test]
fn core() {
assert_eq!(
to(&Node::Root(Root {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::ThematicBreak(ThematicBreak { position: None }),
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("b"),
position: None
})],
position: None
}),
],
position: None
}))
.unwrap(),
"a\n\n***\n\nb\n",
"should support root"
);
assert_eq!(
to(&Node::Root(Root {
children: vec![
Node::Text(Text {
value: String::from("a"),
position: None
}),
Node::Break(Break { position: None }),
Node::Text(Text {
value: String::from("b"),
position: None
}),
],
position: None
}))
.unwrap(),
"a\\\nb\n",
"should not use blank lines between nodes when given phrasing"
);
assert_eq!(
to(&Node::Root(Root {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::Definition(Definition {
position: None,
url: String::new(),
title: None,
identifier: String::from("b"),
label: None
}),
Node::Definition(Definition {
position: None,
url: String::new(),
title: None,
identifier: String::from("c"),
label: None
}),
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("d"),
position: None
})],
position: None
}),
],
position: None
}))
.unwrap(),
"a\n\n[b]: <>\n\n[c]: <>\n\nd\n",
"should support adjacent definitions"
);
assert_eq!(
to_md_with_opts(
&Node::Root(Root {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::Definition(Definition {
position: None,
url: String::new(),
title: None,
identifier: String::from("b"),
label: None
}),
Node::Definition(Definition {
position: None,
url: String::new(),
title: None,
identifier: String::from("c"),
label: None
}),
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("d"),
position: None
})],
position: None
}),
],
position: None
}),
&Options {
tight_definitions: true,
..Default::default()
}
)
.unwrap(),
"a\n\n[b]: <>\n[c]: <>\n\nd\n",
"should support tight adjacent definitions when `tight_definitions: true`"
);
assert_eq!(
to(&Node::Root(Root {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::List(List {
children: vec![Node::ListItem(ListItem {
children: vec![],
position: None,
spread: false,
checked: None
})],
position: None,
ordered: false,
start: None,
spread: false
}),
Node::List(List {
children: vec![Node::ListItem(ListItem {
children: vec![],
position: None,
spread: false,
checked: None
})],
position: None,
ordered: false,
start: None,
spread: false
}),
Node::List(List {
children: vec![Node::ListItem(ListItem {
children: vec![],
position: None,
spread: false,
checked: None
})],
position: None,
ordered: true,
start: None,
spread: false
}),
Node::List(List {
children: vec![Node::ListItem(ListItem {
children: vec![],
position: None,
spread: false,
checked: None
})],
position: None,
ordered: true,
start: None,
spread: false
}),
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("d"),
position: None
})],
position: None
}),
],
position: None
}))
.unwrap(),
"a\n\n*\n\n-\n\n1.\n\n1)\n\nd\n",
"should use a different marker for adjacent lists"
);
assert_eq!(
to_md_with_opts(
&Node::Root(Root {
children: vec![
Node::Code(Code {
value: String::from("a"),
position: None,
lang: None,
meta: None
}),
Node::List(List {
children: vec![Node::ListItem(ListItem {
children: vec![],
position: None,
spread: false,
checked: None
})],
position: None,
ordered: false,
start: None,
spread: false
}),
Node::Code(Code {
value: String::from("b"),
position: None,
lang: None,
meta: None
}),
],
position: None
}),
&Options {
fences: false,
..Default::default()
}
)
.unwrap(),
" a\n\n*\n\n<!---->\n\n b\n",
"should inject HTML comments between lists and an indented code"
);
assert_eq!(
to_md_with_opts(
&Node::Root(Root {
children: vec![
Node::Code(Code {
value: String::from("a"),
position: None,
lang: None,
meta: None
}),
Node::Code(Code {
value: String::from("b"),
position: None,
lang: None,
meta: None
}),
],
position: None
}),
&Options {
fences: false,
..Default::default()
}
)
.unwrap(),
" a\n\n<!---->\n\n b\n",
"should inject HTML comments between adjacent indented code"
);
assert_eq!(
to(&Node::ListItem(ListItem {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("b"),
position: None
})],
position: None
}),
],
position: None,
spread: false,
checked: None
}))
.unwrap(),
"* a\n\n b\n",
"should not honour `spread: false` for two paragraphs"
);
assert_eq!(
to(&Node::ListItem(ListItem {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::Definition(Definition {
position: None,
url: String::from("d"),
title: None,
identifier: String::from("b"),
label: Some(String::from("c"))
}),
],
position: None,
spread: false,
checked: None
}))
.unwrap(),
"* a\n\n [c]: d\n",
"should not honour `spread: false` for a paragraph and a definition"
);
assert_eq!(
to(&Node::ListItem(ListItem {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::Heading(Heading {
children: vec![Node::Text(Text {
value: String::from("b"),
position: None
})],
position: None,
depth: 1
})
],
position: None,
spread: false,
checked: None
}))
.unwrap(),
"* a\n # b\n",
"should honour `spread: false` for a paragraph and a heading"
);
assert_eq!(
to_md_with_opts(
&Node::ListItem(ListItem {
children: vec![
Node::Paragraph(Paragraph {
children: vec![Node::Text(Text {
value: String::from("a"),
position: None
})],
position: None
}),
Node::Heading(Heading {
children: vec![Node::Text(Text {
value: String::from("b"),
position: None
})],
position: None,
depth: 1
})
],
position: None,
spread: false,
checked: None
}),
&Options {
setext: true,
..Default::default()
}
)
.unwrap(),
"* a\n\n b\n =\n",
"should not honour `spread: false` for a paragraph and a setext heading"
);
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/definition.rs | Rust | use markdown::mdast::{Definition, Node};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
// Serialization of `Definition` nodes: `[label]: <url> "title"`.
// Covers label/identifier escaping, url enclosing (`<>`), url encoding, and titles.
#[test]
fn definition() {
    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::new(),
            position: None,
            label: None
        }))
        .unwrap(),
        "[]: <>\n",
        "should support a definition w/o label"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::new(),
            position: None,
            label: Some(String::from("a"))
        }))
        .unwrap(),
        "[a]: <>\n",
        "should support a definition w/ label"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::new(),
            position: None,
            label: Some(String::from("\\"))
        }))
        .unwrap(),
        "[\\\\]: <>\n",
        "should escape a backslash in `label`"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::new(),
            position: None,
            label: Some(String::from("["))
        }))
        .unwrap(),
        "[\\[]: <>\n",
        "should escape an opening bracket in `label`"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::new(),
            position: None,
            label: Some(String::from("]"))
        }))
        .unwrap(),
        "[\\]]: <>\n",
        "should escape a closing bracket in `label`"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <>\n",
        "should support a definition w/ identifier"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::from(r"\\"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[\\\\]: <>\n",
        "should escape a backslash in `identifier`"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::from("["),
            position: None,
            label: None
        }))
        .unwrap(),
        "[\\[]: <>\n",
        "should escape an opening bracket in `identifier`"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: None,
            identifier: String::from("]"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[\\]]: <>\n",
        "should escape a closing bracket in `identifier`"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: b\n",
        "should support a definition w/ url"
    );

    // Whitespace in the url forces the enclosed (`<…>`) form.
    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b c"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <b c>\n",
        "should support a definition w/ enclosed url w/ whitespace in url"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b <c"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <b \\<c>\n",
        "should escape an opening angle bracket in `url` in an enclosed url"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b >c"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <b \\>c>\n",
        "should escape a closing angle bracket in `url` in an enclosed url"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b \\.c"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <b \\\\.c>\n",
        "should escape a backslash in `url` in an enclosed url"
    );

    // Line endings cannot appear literally inside `<…>`; they are encoded
    // as character references.
    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b\nc"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <b&#xA;c>\n",
        "should encode a line ending in `url` in an enclosed url"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("\x0C"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <\x0C>\n",
        "should support control characters in definitions"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b(c"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: b\\(c\n",
        "should escape an opening paren in `url` in a raw url"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b)c"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: b\\)c\n",
        "should escape a closing paren in `url` in a raw url"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b\\?c"),
            title: None,
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: b\\\\?c\n",
        "should escape a backslash in `url` in a raw url"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: String::from("b").into(),
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <> \"b\"\n",
        "should support a definition w/ title"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::from("b"),
            title: String::from("c").into(),
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: b \"c\"\n",
        "should support a definition w/ url & title"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: String::from("\"").into(),
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <> \"\\\"\"\n",
        "should escape a quote in `title` in a title"
    );

    assert_eq!(
        to(&Node::Definition(Definition {
            url: String::new(),
            title: String::from("\\").into(),
            identifier: String::from("a"),
            position: None,
            label: None
        }))
        .unwrap(),
        "[a]: <> \"\\\\\"\n",
        "should escape a backslash in `title` in a title"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Definition(Definition {
                url: String::new(),
                title: String::from("b").into(),
                identifier: String::from("a"),
                position: None,
                label: None
            }),
            &Options {
                quote: '\'',
                ..Default::default()
            }
        )
        .unwrap(),
        "[a]: <> 'b'\n",
        "should support a definition w/ title when `quote: \"\'\"`"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Definition(Definition {
                url: String::new(),
                title: String::from("'").into(),
                identifier: String::from("a"),
                position: None,
                label: None
            }),
            &Options {
                quote: '\'',
                ..Default::default()
            }
        )
        .unwrap(),
        "[a]: <> '\\''\n",
        "should escape a quote in `title` in a title when `quote: \"\'\"`"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/emphasis.rs | Rust | use markdown::mdast::{Emphasis, Node, Paragraph, Text};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
// Serialization of `Emphasis` nodes (`*a*` / `_a_`).
#[test]
fn emphasis() {
    // An emphasis with no children still yields a pair of markers.
    assert_eq!(
        to(&Node::Emphasis(Emphasis {
            position: None,
            children: vec![]
        }))
        .unwrap(),
        "**\n",
        "should support an empty emphasis"
    );

    // A single text child is wrapped in `*`.
    assert_eq!(
        to(&Node::Emphasis(Emphasis {
            position: None,
            children: vec![Node::Text(Text {
                position: None,
                value: "a".to_string()
            })]
        }))
        .unwrap(),
        "*a*\n",
        "should support an emphasis w/ children"
    );

    // The marker is configurable through `Options::emphasis`.
    assert_eq!(
        to_md_with_opts(
            &Node::Emphasis(Emphasis {
                position: None,
                children: vec![Node::Text(Text {
                    position: None,
                    value: "a".to_string()
                })]
            }),
            &Options {
                emphasis: '_',
                ..Default::default()
            }
        )
        .unwrap(),
        "_a_\n",
        "should support an emphasis w/ underscores when `emphasis: \"_\"`"
    );

    // Non-ASCII characters directly before the opening marker.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            position: None,
            children: vec![
                Node::Text(Text {
                    position: None,
                    value: "𝄞".to_string()
                }),
                Node::Emphasis(Emphasis {
                    position: None,
                    children: vec![Node::Text(Text {
                        position: None,
                        value: "a".to_string()
                    })]
                })
            ]
        }))
        .unwrap(),
        "𝄞*a*\n",
        "should support non-ascii before emphasis"
    );

    // Non-ASCII characters directly after the closing marker.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            position: None,
            children: vec![
                Node::Emphasis(Emphasis {
                    position: None,
                    children: vec![Node::Text(Text {
                        position: None,
                        value: "a".to_string()
                    })]
                }),
                Node::Text(Text {
                    position: None,
                    value: "𝄞".to_string()
                })
            ]
        }))
        .unwrap(),
        "*a*𝄞\n",
        "should support non-ascii after emphasis"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/heading.rs | Rust | use markdown::mdast::{Break, Heading, Html, Node, Text};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
// Serialization of `Heading` nodes: atx (`# a`) vs setext (`a\n=`) forms,
// rank clamping, `setext`/`close_atx` options, and whitespace encoding.
#[test]
fn heading() {
    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "#\n",
        "should serialize a heading w/ rank 1"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![],
            position: None,
            depth: 6
        }))
        .unwrap(),
        "######\n",
        "should serialize a heading w/ rank 6"
    );

    // Depth is clamped into the valid 1..=6 range.
    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![],
            position: None,
            depth: 7
        }))
        .unwrap(),
        "######\n",
        "should serialize a heading w/ rank 7 as 6"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![],
            position: None,
            depth: 0
        }))
        .unwrap(),
        "#\n",
        "should serialize a heading w/ rank 0 as 1"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "# a\n",
        "should serialize a heading w/ content"
    );

    // `setext: true` only applies to ranks 1 and 2 with non-empty content.
    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![Node::Text(Text {
                    value: String::from("a"),
                    position: None
                })],
                position: None,
                depth: 1
            }),
            &Options {
                setext: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "a\n=\n",
        "should serialize a heading w/ rank 1 as setext when `setext: true`"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![Node::Text(Text {
                    value: String::from("a"),
                    position: None
                })],
                position: None,
                depth: 2
            }),
            &Options {
                setext: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "a\n-\n",
        "should serialize a heading w/ rank 2 as setext when `setext: true`"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![Node::Text(Text {
                    value: String::from("a"),
                    position: None
                })],
                position: None,
                depth: 3
            }),
            &Options {
                setext: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "### a\n",
        "should serialize a heading w/ rank 3 as atx when `setext: true`"
    );

    // The setext underline is sized to match the heading's last line.
    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![Node::Text(Text {
                    value: String::from("aa\rb"),
                    position: None
                })],
                position: None,
                depth: 2
            }),
            &Options {
                setext: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "aa\rb\n-\n",
        "should serialize a setext underline as long as the last line (1)"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![Node::Text(Text {
                    value: String::from("a\r\nbbb"),
                    position: None
                })],
                position: None,
                depth: 1
            }),
            &Options {
                setext: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "a\r\nbbb\n===\n",
        "should serialize a setext underline as long as the last line (2)"
    );

    // Empty headings cannot be setext (there is nothing to underline).
    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![],
                position: None,
                depth: 1
            }),
            &Options {
                setext: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "#\n",
        "should serialize an empty heading w/ rank 1 as atx when `setext: true`"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![],
                position: None,
                depth: 2
            }),
            &Options {
                setext: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "##\n",
        "should serialize an empty heading w/ rank 2 as atx when `setext: true`"
    );

    //assert_eq!(
    //    to(&Node::Heading(Heading {
    //        children: vec![],
    //        position: None,
    //        depth: 1
    //    }),)
    //    .unwrap(),
    //    "`\n`\n=\n",
    //    "should serialize an heading w/ rank 1 and code w/ a line ending as setext"
    //);

    // Content containing a line ending forces the setext form even w/o options.
    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Html(Html {
                value: "<a\n/>".to_string(),
                position: None
            })],
            position: None,
            depth: 1
        }),)
        .unwrap(),
        "<a\n/>\n==\n",
        "should serialize an heading w/ rank 1 and html w/ a line ending as setext"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a\nb"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "a\nb\n=\n",
        "should serialize an heading w/ rank 1 and text w/ a line ending as setext"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![
                Node::Text(Text {
                    value: String::from("a"),
                    position: None
                }),
                Node::Break(Break { position: None }),
                Node::Text(Text {
                    value: String::from("b"),
                    position: None
                }),
            ],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "a\\\nb\n=\n",
        "should serialize an heading w/ rank 1 and a break as setext"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![],
                position: None,
                depth: 1
            }),
            &Options {
                close_atx: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "# #\n",
        "should serialize a heading with a closing sequence when `closeAtx` (empty)"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Heading(Heading {
                children: vec![Node::Text(Text {
                    value: String::from("a"),
                    position: None
                })],
                position: None,
                depth: 3
            }),
            &Options {
                close_atx: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "### a ###\n",
        "should serialize a with a closing sequence when `closeAtx` (content)"
    );

    // Characters that would be meaningful at the start of a line do not need
    // escaping inside a heading's content.
    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("# a"),
                position: None
            })],
            position: None,
            depth: 2
        }))
        .unwrap(),
        "## # a\n",
        "should not escape a `#` at the start of phrasing in a heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("1) a"),
                position: None
            })],
            position: None,
            depth: 2
        }))
        .unwrap(),
        "## 1) a\n",
        "should not escape a `1)` at the start of phrasing in a heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("+ a"),
                position: None
            })],
            position: None,
            depth: 2
        }))
        .unwrap(),
        "## + a\n",
        "should not escape a `+` at the start of phrasing in a heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("- a"),
                position: None
            })],
            position: None,
            depth: 2
        }))
        .unwrap(),
        "## - a\n",
        "should not escape a `-` at the start of phrasing in a heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("= a"),
                position: None
            })],
            position: None,
            depth: 2
        }))
        .unwrap(),
        "## = a\n",
        "should not escape a `=` at the start of phrasing in a heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("> a"),
                position: None
            })],
            position: None,
            depth: 2
        }))
        .unwrap(),
        "## > a\n",
        "should not escape a `>` at the start of phrasing in a heading"
    );

    // A trailing `#` would read as a closing sequence, so it is escaped.
    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a #"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "# a \\#\n",
        "should escape a `#` at the end of a heading (1)"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a ##"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "# a #\\#\n",
        "should escape a `#` at the end of a heading (2)"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a # b"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "# a # b\n",
        "should not escape a `#` in a heading (2)"
    );

    // Whitespace at the edges of atx content is encoded as character
    // references so it survives a reparse.
    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("  a"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "# &#x20; a\n",
        "should encode a space at the start of an atx heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("\t\ta"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "# &#x9;\ta\n",
        "should encode a tab at the start of an atx heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a  "),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "# a &#x20;\n",
        "should encode a space at the end of an atx heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a\t\t"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "# a\t&#x9;\n",
        "should encode a tab at the end of an atx heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a \n b"),
                position: None
            })],
            position: None,
            depth: 1
        }))
        .unwrap(),
        "a&#x20;\n&#x20;b\n=======\n",
        "should encode spaces around a line ending in a setext heading"
    );

    assert_eq!(
        to(&Node::Heading(Heading {
            children: vec![Node::Text(Text {
                value: String::from("a \n b"),
                position: None
            })],
            position: None,
            depth: 3
        }))
        .unwrap(),
        "### a &#xA; b\n",
        "should not need to encode spaces around a line ending in an atx heading (because the line ending is encoded)"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/html.rs | Rust | use markdown::mdast::{Html, Node, Paragraph, Text};
use mdast_util_to_markdown::to_markdown as to;
use pretty_assertions::assert_eq;
// Serialization of `Html` nodes (flow and text) and how html (text) is kept
// from turning into html (flow) after a line ending.
#[test]
fn html() {
    // An html node with no value serializes to nothing.
    assert_eq!(
        to(&Node::Html(Html {
            position: None,
            value: "".to_string()
        }))
        .unwrap(),
        "",
        "should support an empty html"
    );

    // Html values pass through verbatim, with a trailing newline appended.
    assert_eq!(
        to(&Node::Html(Html {
            position: None,
            value: "a\nb".to_string()
        }))
        .unwrap(),
        "a\nb\n",
        "should support html"
    );

    // A line ending before html (text) would promote it to html (flow) on
    // reparse, so the line ending is serialized as a space instead.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            position: None,
            children: vec![
                Node::Text(Text {
                    position: None,
                    value: String::from("a\n")
                }),
                Node::Html(Html {
                    position: None,
                    value: String::from("<div>")
                })
            ]
        }))
        .unwrap(),
        "a <div>\n",
        "should prevent html (text) from becoming html (flow) (1)"
    );

    // Same, with a carriage return.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            position: None,
            children: vec![
                Node::Text(Text {
                    position: None,
                    value: String::from("a\r")
                }),
                Node::Html(Html {
                    position: None,
                    value: String::from("<div>")
                })
            ]
        }))
        .unwrap(),
        "a <div>\n",
        "should prevent html (text) from becoming html (flow) (2)"
    );

    // Same, with a carriage return + line feed.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            position: None,
            children: vec![
                Node::Text(Text {
                    position: None,
                    value: String::from("a\r\n")
                }),
                Node::Html(Html {
                    position: None,
                    value: String::from("<div>")
                })
            ]
        }))
        .unwrap(),
        "a <div>\n",
        "should prevent html (text) from becoming html (flow) (3)"
    );

    // Html (text) inside a paragraph serializes inline.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            position: None,
            children: vec![
                Node::Html(Html {
                    position: None,
                    value: String::from("<x>")
                }),
                Node::Text(Text {
                    position: None,
                    value: String::from("a")
                })
            ]
        }))
        .unwrap(),
        "<x>a\n",
        "should serialize html (text)"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/image.rs | Rust | use markdown::mdast::{Image, Node};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
// Serialization of `Image` nodes: ``.
// NOTE(review): the expected literals here had their `` syntax
// stripped by a rendering step; they are restored below to mirror the intact
// link tests (`[](…)`) and the upstream JS suite — verify against the repo.
#[test]
fn image() {
    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::new(),
            title: None
        }))
        .unwrap(),
        "![]()\n",
        "should support an image"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::from("a"),
            url: String::new(),
            title: None
        }))
        .unwrap(),
        "![a]()\n",
        "should support `alt`"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("a"),
            title: None
        }))
        .unwrap(),
        "![](a)\n",
        "should support a url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::new(),
            title: Some(String::from("a"))
        }))
        .unwrap(),
        "![](<> \"a\")\n",
        "should support a title"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("a"),
            title: Some(String::from("b"))
        }))
        .unwrap(),
        "![](a \"b\")\n",
        "should support a url and title"
    );

    // Whitespace in the url forces the enclosed (`<…>`) form.
    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("b c"),
            title: None
        }))
        .unwrap(),
        "![](<b c>)\n",
        "should support an image w/ enclosed url w/ whitespace in url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("b <c"),
            title: None
        }))
        .unwrap(),
        "![](<b \\<c>)\n",
        "should escape an opening angle bracket in `url` in an enclosed url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("b >c"),
            title: None
        }))
        .unwrap(),
        "![](<b \\>c>)\n",
        "should escape a closing angle bracket in `url` in an enclosed url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("b \\+c"),
            title: None
        }))
        .unwrap(),
        "![](<b \\\\+c>)\n",
        "should escape a backslash in `url` in an enclosed url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("b\nc"),
            title: None
        }))
        .unwrap(),
        "![](<b&#xA;c>)\n",
        "should encode a line ending in `url` in an enclosed url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("b(c"),
            title: None
        }))
        .unwrap(),
        "![](b\\(c)\n",
        "should escape an opening paren in `url` in a raw url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("b)c"),
            title: None
        }))
        .unwrap(),
        "![](b\\)c)\n",
        "should escape a closing paren in `url` in a raw url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("b\\+c"),
            title: None
        }))
        .unwrap(),
        "![](b\\\\+c)\n",
        "should escape a backslash in `url` in a raw url"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::from("\x0C"),
            title: None
        }))
        .unwrap(),
        "![](<\x0C>)\n",
        "should support control characters in images"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::new(),
            title: Some(String::from("b\"c"))
        }))
        .unwrap(),
        "![](<> \"b\\\"c\")\n",
        "should escape a double quote in `title`"
    );

    assert_eq!(
        to(&Node::Image(Image {
            position: None,
            alt: String::new(),
            url: String::new(),
            title: Some(String::from("b\\.c"))
        }))
        .unwrap(),
        "![](<> \"b\\\\.c\")\n",
        "should escape a backslash in `title`"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Image(Image {
                position: None,
                alt: String::new(),
                url: String::new(),
                title: Some(String::from("b"))
            }),
            &Options {
                quote: '\'',
                ..Default::default()
            }
        )
        .unwrap(),
        "![](<> 'b')\n",
        "should support an image w/ title when `quote: \"\'\"`"
    );

    assert_eq!(
        to_md_with_opts(
            &Node::Image(Image {
                position: None,
                alt: String::new(),
                url: String::new(),
                title: Some(String::from("'"))
            }),
            &Options {
                quote: '\'',
                ..Default::default()
            }
        )
        .unwrap(),
        "![](<> '\\'')\n",
        "should escape a quote in `title` in a title when `quote: \"\'\"`"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/image_reference.rs | Rust | use markdown::mdast::{ImageReference, Node, Paragraph, ReferenceKind};
use mdast_util_to_markdown::to_markdown as to;
use pretty_assertions::assert_eq;
// Serialization of `ImageReference` nodes: `![alt][label]`, in full,
// collapsed, and shortcut forms.
#[test]
fn image_reference() {
    // A full reference with nothing in it is still serialized.
    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "".to_string(),
            label: None,
            alt: "".to_string(),
            reference_kind: ReferenceKind::Full,
            position: None
        }))
        .unwrap(),
        "![][]\n",
        "should support a link reference (nonsensical)"
    );

    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "".to_string(),
            label: None,
            alt: "a".to_string(),
            reference_kind: ReferenceKind::Full,
            position: None
        }))
        .unwrap(),
        "![a][]\n",
        "should support `alt`"
    );

    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "a".to_string(),
            label: None,
            alt: "".to_string(),
            reference_kind: ReferenceKind::Full,
            position: None
        }))
        .unwrap(),
        "![][a]\n",
        "should support an `identifier` (nonsensical)"
    );

    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "".to_string(),
            label: Some("a".to_string()),
            alt: "".to_string(),
            reference_kind: ReferenceKind::Full,
            position: None
        }))
        .unwrap(),
        "![][a]\n",
        "should support a `label` (nonsensical)"
    );

    // The three reference kinds.
    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "A".to_string(),
            label: None,
            alt: "A".to_string(),
            reference_kind: ReferenceKind::Shortcut,
            position: None
        }))
        .unwrap(),
        "![A]\n",
        "should support `reference_kind: \"ReferenceKind::Shortcut\"`"
    );

    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "A".to_string(),
            label: None,
            alt: "A".to_string(),
            reference_kind: ReferenceKind::Collapsed,
            position: None
        }))
        .unwrap(),
        "![A][]\n",
        "should support `reference_kind: \"ReferenceKind::Collapsed\"`"
    );

    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "A".to_string(),
            label: None,
            alt: "A".to_string(),
            reference_kind: ReferenceKind::Full,
            position: None
        }))
        .unwrap(),
        "![A][A]\n",
        "should support `reference_kind: \"ReferenceKind::Full\"`"
    );

    // When both are present the label wins over the identifier.
    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "&".to_string(),
            label: Some("&".to_string()),
            alt: "&".to_string(),
            reference_kind: ReferenceKind::Full,
            position: None
        }))
        .unwrap(),
        "![&][&]\n",
        "should prefer label over identifier"
    );

    // Without a label, the identifier is decoded before serializing.
    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "&".to_string(),
            label: None,
            alt: "&".to_string(),
            reference_kind: ReferenceKind::Full,
            position: None
        }))
        .unwrap(),
        "![&][&]\n",
        "should decode `identifier` if w/o `label`"
    );

    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            position: None,
            children: vec![Node::ImageReference(ImageReference {
                identifier: "&b;".to_string(),
                label: None,
                alt: "&a;".to_string(),
                reference_kind: ReferenceKind::Full,
                position: None
            })]
        }))
        .unwrap(),
        "![\\&a;][&b;]\n",
        "should support incorrect character references"
    );

    // Without a label, escapes in the identifier are unescaped.
    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "\\+".to_string(),
            label: None,
            alt: "+".to_string(),
            reference_kind: ReferenceKind::Full,
            position: None
        }))
        .unwrap(),
        "![+][+]\n",
        "should unescape `identifier` if w/o `label`"
    );

    // A collapsed reference whose alt does not match its identifier would
    // change meaning, so the full form is used instead.
    assert_eq!(
        to(&Node::ImageReference(ImageReference {
            identifier: "b".to_string(),
            label: None,
            alt: "a".to_string(),
            reference_kind: ReferenceKind::Collapsed,
            position: None
        }))
        .unwrap(),
        "![a][b]\n",
        "should use a full reference if w/o `ReferenceKind` and the label does not match the reference"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/inline_code.rs | Rust | use markdown::mdast::{InlineCode, Node};
use mdast_util_to_markdown::to_markdown as to;
use pretty_assertions::assert_eq;
// Serialization of `InlineCode` nodes: fence sizing, padding, and preventing
// the value from breaking out of the code span.
// Renamed from `text` (a copy-paste from the text tests) so harness output
// names the right suite.
#[test]
fn inline_code() {
    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::new(),
            position: None
        }))
        .unwrap(),
        "``\n",
        "should support an empty code text"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a"),
            position: None
        }))
        .unwrap(),
        "`a`\n",
        "should support a code text"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from(" "),
            position: None
        }))
        .unwrap(),
        "` `\n",
        "should support a space"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("\n"),
            position: None
        }))
        .unwrap(),
        "`\n`\n",
        "should support an eol"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("  "),
            position: None
        }))
        .unwrap(),
        "`  `\n",
        "should support several spaces"
    );

    // The fence must be longer than any run of grave accents in the value.
    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a`b"),
            position: None
        }))
        .unwrap(),
        "``a`b``\n",
        "should use a fence of two grave accents if the value contains one"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a``b"),
            position: None
        }))
        .unwrap(),
        "`a``b`\n",
        "should use a fence of one grave accent if the value contains two"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a``b`c"),
            position: None
        }))
        .unwrap(),
        "```a``b`c```\n",
        "should use a fence of three grave accents if the value contains two and one"
    );

    // Padding spaces keep edge grave accents from merging with the fence.
    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("`a"),
            position: None
        }))
        .unwrap(),
        "`` `a ``\n",
        "should pad w/ a space if the value starts w/ a grave accent"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a`"),
            position: None
        }))
        .unwrap(),
        "`` a` ``\n",
        "should pad w/ a space if the value ends w/ a grave accent"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from(" a "),
            position: None
        }))
        .unwrap(),
        "`  a  `\n",
        "should pad w/ a space if the value starts and ends w/ a space"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from(" a"),
            position: None
        }))
        .unwrap(),
        "` a`\n",
        "should not pad w/ spaces if the value ends w/ a non-space"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a "),
            position: None
        }))
        .unwrap(),
        "`a `\n",
        "should not pad w/ spaces if the value starts w/ a non-space"
    );

    // Line endings in the value become spaces so the code span cannot break
    // out into block constructs on reparse.
    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a\n- b"),
            position: None
        }))
        .unwrap(),
        "`a - b`\n",
        "should prevent breaking out of code (-)"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a\n#"),
            position: None
        }))
        .unwrap(),
        "`a #`\n",
        "should prevent breaking out of code (#)"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a\n1. "),
            position: None
        }))
        .unwrap(),
        "`a 1. `\n",
        "should prevent breaking out of code (\\d\\.)"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a\r- b"),
            position: None
        }))
        .unwrap(),
        "`a - b`\n",
        "should prevent breaking out of code (cr)"
    );

    assert_eq!(
        to(&Node::InlineCode(InlineCode {
            value: String::from("a\r\n- b"),
            position: None
        }))
        .unwrap(),
        "`a - b`\n",
        "should prevent breaking out of code (crlf)"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/link.rs | Rust | use markdown::mdast::{Link, Node, Text};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
// Serialization of `Link` nodes: children, raw vs. enclosed destinations,
// titles (double/single quote), escaping rules, and autolink detection for
// values that match the url and carry a protocol.
#[test]
fn text() {
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::new(),
            title: None
        }))
        .unwrap(),
        "[]()\n",
        "should support a link"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: vec![Node::Text(Text {
                value: String::from("a"),
                position: None
            })],
            position: None,
            url: String::new(),
            title: None
        }))
        .unwrap(),
        "[a]()\n",
        "should support children"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("a"),
            title: None
        }))
        .unwrap(),
        "[](a)\n",
        "should support a url"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::new(),
            title: Some(String::from("a"))
        }))
        .unwrap(),
        "[](<> \"a\")\n",
        "should support a title"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("a"),
            title: Some(String::from("b"))
        }))
        .unwrap(),
        "[](a \"b\")\n",
        "should support a url and title"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("b c"),
            title: None
        }))
        .unwrap(),
        "[](<b c>)\n",
        "should support a link w/ enclosed url w/ whitespace in url"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("b <c"),
            title: None
        }))
        .unwrap(),
        "[](<b \\<c>)\n",
        "should escape an opening angle bracket in `url` in an enclosed url"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("b >c"),
            title: None
        }))
        .unwrap(),
        "[](<b \\>c>)\n",
        "should escape a closing angle bracket in `url` in an enclosed url"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("b \\+c"),
            title: None
        }))
        .unwrap(),
        "[](<b \\\\+c>)\n",
        "should escape a backslash in `url` in an enclosed url"
    );
    // A line ending inside an enclosed destination is written as the `&#xA;`
    // character reference so the `<...>` literal is not broken across lines.
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("b\nc"),
            title: None
        }))
        .unwrap(),
        "[](<b&#xA;c>)\n",
        "should encode a line ending in `url` in an enclosed url"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("b(c"),
            title: None
        }))
        .unwrap(),
        "[](b\\(c)\n",
        "should escape an opening paren in `url` in a raw url"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("b)c"),
            title: None
        }))
        .unwrap(),
        "[](b\\)c)\n",
        "should escape a closing paren in `url` in a raw url"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("b\\.c"),
            title: None
        }))
        .unwrap(),
        "[](b\\\\.c)\n",
        "should escape a backslash in `url` in a raw url"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("\x0C"),
            title: None
        }))
        .unwrap(),
        "[](<\x0C>)\n",
        "should support control characters in links"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::new(),
            title: Some(String::from("b\\-c"))
        }))
        .unwrap(),
        "[](<> \"b\\\\-c\")\n",
        "should escape a backslash in `title`"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: vec![Node::Text(Text {
                value: String::from("tel:123"),
                position: None
            })],
            position: None,
            url: String::from("tel:123"),
            title: None
        }))
        .unwrap(),
        "<tel:123>\n",
        "should use an autolink for nodes w/ a value similar to the url and a protocol"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::Link(Link {
                children: vec![Node::Text(Text {
                    value: String::from("tel:123"),
                    position: None
                })],
                position: None,
                url: String::from("tel:123"),
                title: None
            }),
            &Options {
                resource_link: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "[tel:123](tel:123)\n",
        "should use a resource link (`resourceLink: true`)"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: vec![Node::Text(Text {
                value: String::from("a"),
                position: None
            })],
            position: None,
            url: String::from("a"),
            title: None
        }))
        .unwrap(),
        "[a](a)\n",
        "should use a normal link for nodes w/ a value similar to the url w/o a protocol"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: vec![Node::Text(Text {
                value: String::from("tel:123"),
                position: None
            })],
            position: None,
            url: String::from("tel:123"),
            title: None
        }))
        .unwrap(),
        "<tel:123>\n",
        "should use an autolink for nodes w/ a value similar to the url and a protocol"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: vec![Node::Text(Text {
                value: String::from("tel:123"),
                position: None
            })],
            position: None,
            url: String::from("tel:123"),
            title: Some(String::from("a"))
        }))
        .unwrap(),
        "[tel:123](tel:123 \"a\")\n",
        "should use a normal link for nodes w/ a value similar to the url w/ a title"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: vec![Node::Text(Text {
                value: String::from("a@b.c"),
                position: None
            })],
            position: None,
            url: String::from("mailto:a@b.c"),
            title: None
        }))
        .unwrap(),
        "<a@b.c>\n",
        "should use an autolink for nodes w/ a value similar to the url and a protocol (email)"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: vec![Node::Text(Text {
                value: String::from("a.b-c_d@a.b"),
                position: None
            })],
            position: None,
            url: String::from("mailto:a.b-c_d@a.b"),
            title: None
        }))
        .unwrap(),
        "<a.b-c_d@a.b>\n",
        "should not escape in autolinks"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::Link(Link {
                children: Vec::new(),
                position: None,
                url: String::new(),
                title: Some("b".to_string())
            }),
            &Options {
                quote: '\'',
                ..Default::default()
            }
        )
        .unwrap(),
        "[](<> 'b')\n",
        "should support a link w/ title when `quote: \"\'\"`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::Link(Link {
                children: Vec::new(),
                position: None,
                url: String::new(),
                title: Some("'".to_string())
            }),
            &Options {
                quote: '\'',
                ..Default::default()
            }
        )
        .unwrap(),
        "[](<> '\\'')\n",
        "should escape a quote in `title` in a title when `quote: \"\'\"`"
    );
    // NOTE(review): the four literals below were truncated by an extraction
    // artifact; reconstructed from the surviving expected values (raw
    // destination and reference use the same family string) and the escaping
    // rules exercised above — confirm against the upstream test suite.
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: "a b![c](*d_e[f_g`h".into(),
            title: None
        }))
        .unwrap(),
        "[](<a b![c](*d_e[f_g`h>)\n",
        "should not escape unneeded characters in a `DestinationLiteral`"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: "a![b](c*d_e[f_g`h<i</j".into(),
            title: None
        }))
        .unwrap(),
        "[](a![b]\\(c*d_e[f_g`h<i</j)\n",
        "should not escape unneeded characters in a `DestinationRaw`"
    );
    assert_eq!(
        to(&Node::Link(Link {
            children: Vec::new(),
            position: None,
            url: String::from("#"),
            title: Some("a![b](c*d_e[f_g`h<i</j".into())
        }))
        .unwrap(),
        "[](# \"a![b](c*d_e[f_g`h<i</j\")\n",
        "should not escape unneeded characters in a `title` (double quotes)"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::Link(Link {
                children: Vec::new(),
                position: None,
                url: String::from("#"),
                title: Some("a![b](c*d_e[f_g`h<i</j".into())
            }),
            &Options {
                quote: '\'',
                ..Default::default()
            }
        )
        .unwrap(),
        "[](# 'a![b](c*d_e[f_g`h<i</j')\n",
        "should not escape unneeded characters in a `title` (single quotes)"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/link_reference.rs | Rust | use markdown::mdast::{LinkReference, Node, Paragraph, ReferenceKind, Text};
use mdast_util_to_markdown::to_markdown as to;
use pretty_assertions::assert_eq;
// Serialization of `LinkReference` nodes: the three reference kinds, label
// vs. identifier precedence, and character-reference (de)coding in
// identifiers.
#[test]
fn link_reference() {
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: Vec::new(),
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::new(),
            label: None
        }))
        .unwrap(),
        "[][]\n",
        "should support a link reference (nonsensical)"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![Node::Text(Text {
                value: String::from("a"),
                position: None
            })],
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::new(),
            label: None
        }))
        .unwrap(),
        "[a][]\n",
        "should support `children`"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: Vec::new(),
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::from("a"),
            label: None
        }))
        .unwrap(),
        "[][a]\n",
        "should support an `identifier` (nonsensical)"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: Vec::new(),
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::new(),
            label: Some(String::from("a")),
        }))
        .unwrap(),
        "[][a]\n",
        "should support a `label` (nonsensical)"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![Node::Text(Text {
                value: String::from("A"),
                position: None
            })],
            position: None,
            reference_kind: ReferenceKind::Shortcut,
            identifier: String::from("A"),
            label: None
        }))
        .unwrap(),
        "[A]\n",
        "should support `reference_type: ReferenceKind::Shortcut`"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![Node::Text(Text {
                value: String::from("A"),
                position: None
            })],
            position: None,
            reference_kind: ReferenceKind::Collapsed,
            identifier: String::from("A"),
            label: Some("A".into())
        }))
        .unwrap(),
        "[A][]\n",
        "should support `reference_type: ReferenceKind::Collapsed`"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![Node::Text(Text {
                value: String::from("A"),
                position: None
            })],
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::from("A"),
            label: Some("A".into())
        }))
        .unwrap(),
        "[A][A]\n",
        "should support `reference_type: ReferenceKind::Full`"
    );
    // The raw `label` is used verbatim; the encoded `identifier` (`&amp;`)
    // would need decoding first, so label must win here.
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![Node::Text(Text {
                value: String::from("&"),
                position: None
            })],
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::from("&amp;"),
            label: Some("&".into())
        }))
        .unwrap(),
        "[&][&]\n",
        "should prefer label over identifier"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![Node::Text(Text {
                value: String::from("&"),
                position: None
            })],
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::from("&amp;"),
            label: None
        }))
        .unwrap(),
        "[&][&]\n",
        "should decode `identifier` if w/o `label`"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::LinkReference(LinkReference {
                position: None,
                label: None,
                children: vec![Node::Text(Text {
                    value: String::from("&a;"),
                    position: None
                })],
                reference_kind: ReferenceKind::Full,
                identifier: String::from("&b;"),
            })],
            position: None
        }))
        .unwrap(),
        "[\\&a;][&b;]\n",
        "should support incorrect character references"
    );
    // NOTE(review): identifier reconstructed from the surviving expected
    // value on the next line (extraction artifact truncated the literal).
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![],
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::from("a![b](c*d_e[f_g`h<i</j"),
            label: None
        }))
        .unwrap(),
        "[][a!\\[b\\](c*d_e\\[f_g`h<i</j]\n",
        "should not escape unneeded characters in a `reference`"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![Node::Text(Text {
                value: String::from("+"),
                position: None
            })],
            position: None,
            reference_kind: ReferenceKind::Full,
            identifier: String::from("\\+"),
            label: None
        }))
        .unwrap(),
        "[+][+]\n",
        "should unescape `identifier` if w/o `label`"
    );
    assert_eq!(
        to(&Node::LinkReference(LinkReference {
            children: vec![Node::Text(Text {
                value: String::from("a"),
                position: None
            })],
            position: None,
            reference_kind: ReferenceKind::Collapsed,
            identifier: String::from("a"),
            label: Some("b".to_string())
        }))
        .unwrap(),
        "[a][b]\n",
        "should use `reference_type: ReferenceKind::Full` if the label doesn't match the reference"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/list.rs | Rust | use markdown::mdast::{List, ListItem, Node, Paragraph, Text, ThematicBreak};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, IndentOptions, Options,
};
use pretty_assertions::assert_eq;
// Serialization of `List` nodes: tight vs. spread lists, ordered lists with
// `start`, `increment_list_marker`, and marker/indent alignment for
// multi-digit ordered markers under each `list_item_indent` option.
#[test]
fn list() {
    assert_eq!(
        to(&Node::List(List {
            children: vec![],
            position: None,
            ordered: false,
            start: None,
            spread: false
        }))
        .unwrap(),
        "",
        "should support an empty list"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![Node::ListItem(ListItem {
                children: Vec::new(),
                position: None,
                spread: false,
                checked: None
            })],
            position: None,
            ordered: false,
            start: None,
            spread: false
        }))
        .unwrap(),
        "*\n",
        "should support a list w/ an item"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![
                Node::ListItem(ListItem {
                    children: vec![Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("a"),
                            position: None
                        })],
                        position: None
                    })],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("b"),
                            position: None
                        })],
                        position: None
                    })],
                    position: None,
                    spread: false,
                    checked: None
                })
            ],
            position: None,
            ordered: false,
            start: None,
            spread: false
        }))
        .unwrap(),
        "- a\n- ***\n- b\n",
        "should support a list w/ items"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![
                Node::ListItem(ListItem {
                    children: vec![Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("a"),
                            position: None
                        })],
                        position: None
                    })],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
                    position: None,
                    spread: false,
                    checked: None
                }),
            ],
            position: None,
            ordered: false,
            start: None,
            spread: false
        }))
        .unwrap(),
        "- a\n- ***\n",
        "should not use blank lines between items for lists w/ `spread: false`"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![
                Node::ListItem(ListItem {
                    children: vec![
                        Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a"),
                                position: None
                            })],
                            position: None
                        }),
                        Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("b"),
                                position: None
                            })],
                            position: None
                        })
                    ],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
                    position: None,
                    spread: false,
                    checked: None
                }),
            ],
            position: None,
            ordered: false,
            start: None,
            spread: false
        }))
        .unwrap(),
        "- a\n\n  b\n- ***\n",
        "should support a list w/ `spread: false`, w/ a spread item"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![Node::ListItem(ListItem {
                children: Vec::new(),
                position: None,
                spread: false,
                checked: None
            })],
            position: None,
            ordered: true,
            start: None,
            spread: false
        }))
        .unwrap(),
        "1.\n",
        "should support a list w/ `ordered` and an empty item"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![
                Node::ListItem(ListItem {
                    children: vec![Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("a"),
                            position: None
                        })],
                        position: None
                    })],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("b"),
                            position: None
                        })],
                        position: None
                    })],
                    position: None,
                    spread: false,
                    checked: None
                })
            ],
            position: None,
            ordered: true,
            start: None,
            spread: false
        }))
        .unwrap(),
        "1. a\n2. ***\n3. b\n",
        "should support a list w/ `ordered`"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![
                Node::ListItem(ListItem {
                    children: vec![Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("a"),
                            position: None
                        })],
                        position: None
                    })],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("b"),
                            position: None
                        })],
                        position: None
                    })],
                    position: None,
                    spread: false,
                    checked: None
                })
            ],
            position: None,
            ordered: true,
            start: None,
            spread: false
        }))
        .unwrap(),
        "1. a\n2. ***\n3. b\n",
        "should support a list w/ `ordered` and `spread: false`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("b"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    })
                ],
                position: None,
                ordered: true,
                start: None,
                spread: false
            }),
            &Options {
                increment_list_marker: false,
                ..Default::default()
            }
        )
        .unwrap(),
        "1. a\n1. ***\n1. b\n",
        "should support a list w/ `ordered` when `increment_list_marker: false`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
                        position: None,
                        spread: false,
                        checked: None
                    })
                ],
                position: None,
                ordered: true,
                start: Some(0),
                spread: false
            }),
            &Options {
                list_item_indent: IndentOptions::One,
                ..Default::default()
            }
        )
        .unwrap(),
        "0. a\n1. ***\n",
        "should support a list w/ `ordered` and `start`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a\nb"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("c\nd"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                ],
                position: None,
                ordered: false,
                start: None,
                spread: false
            }),
            &Options {
                list_item_indent: IndentOptions::Mixed,
                ..Default::default()
            }
        )
        .unwrap(),
        "* a\n  b\n* c\n  d\n",
        "should support a correct prefix and indent `list_item_indent: IndentOptions::Mixed` and a tight list"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a\nb"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("c\nd"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                ],
                position: None,
                ordered: false,
                start: None,
                spread: true
            }),
            &Options {
                list_item_indent: IndentOptions::Mixed,
                ..Default::default()
            }
        )
        .unwrap(),
        "* a\n  b\n\n* c\n  d\n",
        "should support a correct prefix and indent `list_item_indent: IndentOptions::Mixed` and a spread list"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a\nb"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("c\nd"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                ],
                position: None,
                ordered: true,
                start: Some(9),
                spread: false
            }),
            &Options {
                list_item_indent: IndentOptions::One,
                ..Default::default()
            }
        )
        .unwrap(),
        "9. a\n   b\n10. c\n    d\n",
        "should support a correct prefix and indent for items 9 and 10 when `list_item_indent: IndentOptions::One`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a\nb"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("c\nd"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                ],
                position: None,
                ordered: true,
                start: Some(99),
                spread: false
            }),
            &Options {
                list_item_indent: IndentOptions::One,
                ..Default::default()
            }
        )
        .unwrap(),
        "99. a\n    b\n100. c\n     d\n",
        "should support a correct prefix and indent for items 99 and 100 when `list_item_indent: IndentOptions::One`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a\nb"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("c\nd"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                ],
                position: None,
                ordered: true,
                start: Some(999),
                spread: false
            }),
            &Options {
                list_item_indent: IndentOptions::One,
                ..Default::default()
            }
        )
        .unwrap(),
        "999. a\n     b\n1000. c\n      d\n",
        "should support a correct prefix and indent for items 999 and 1000 when `list_item_indent: IndentOptions::One`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a\nb"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("c\nd"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                ],
                position: None,
                ordered: true,
                start: Some(9),
                spread: false
            }),
            &Options {
                list_item_indent: IndentOptions::Tab,
                ..Default::default()
            }
        )
        .unwrap(),
        "9.  a\n    b\n10. c\n    d\n",
        "should support a correct prefix and indent for items 9 and 10 when `list_item_indent: IndentOptions::Tab`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a\nb"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("c\nd"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                ],
                position: None,
                ordered: true,
                start: Some(99),
                spread: false
            }),
            &Options {
                list_item_indent: IndentOptions::Tab,
                ..Default::default()
            }
        )
        .unwrap(),
        "99. a\n    b\n100. c\n     d\n",
        "should support a correct prefix and indent for items 99 and 100 when `list_item_indent: IndentOptions::Tab`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("a\nb"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                    Node::ListItem(ListItem {
                        children: vec![Node::Paragraph(Paragraph {
                            children: vec![Node::Text(Text {
                                value: String::from("c\nd"),
                                position: None
                            })],
                            position: None
                        })],
                        position: None,
                        spread: false,
                        checked: None
                    }),
                ],
                position: None,
                ordered: true,
                start: Some(999),
                spread: false
            }),
            &Options {
                list_item_indent: IndentOptions::Tab,
                ..Default::default()
            }
        )
        .unwrap(),
        "999. a\n     b\n1000. c\n      d\n",
        "should support a correct prefix and indent for items 999 and 1000 when `list_item_indent: IndentOptions::Tab`"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/list_item.rs | Rust | use markdown::mdast::{List, ListItem, Node, Paragraph, Root, Text, ThematicBreak};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, IndentOptions, Options,
};
use pretty_assertions::assert_eq;
// Serialization of `ListItem` nodes: bullet selection (`bullet`,
// `bullet_other`, `bullet_ordered`), the `list_item_indent` option, and the
// rules that switch to an alternative bullet for nested or adjacent lists so
// the output reparses unambiguously.
#[test]
fn list_item() {
    // Basic items and the configured unordered bullet.
    assert_eq!(
        to(&Node::ListItem(ListItem {
            children: vec![],
            position: None,
            spread: false,
            checked: None
        }))
        .unwrap(),
        "*\n",
        "should support a list item"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::ListItem(ListItem {
                children: Vec::new(),
                position: None,
                spread: false,
                checked: None
            }),
            &Options {
                bullet: '+',
                ..Default::default()
            }
        )
        .unwrap(),
        "+\n",
        "should serialize an item w/ a plus as bullet when `bullet: \" + \"`"
    );
    assert_eq!(
        to(&Node::ListItem(ListItem {
            children: vec![Node::Paragraph(Paragraph {
                children: vec![Node::Text(Text {
                    value: String::from("a"),
                    position: None
                })],
                position: None
            })],
            position: None,
            spread: false,
            checked: None
        }))
        .unwrap(),
        "* a\n",
        "should support a list item w/ a child"
    );
    assert_eq!(
        to(&Node::ListItem(ListItem {
            children: vec![
                Node::Paragraph(Paragraph {
                    children: vec![Node::Text(Text {
                        value: String::from("a"),
                        position: None
                    })],
                    position: None
                }),
                Node::ThematicBreak(ThematicBreak { position: None }),
                Node::Paragraph(Paragraph {
                    children: vec![Node::Text(Text {
                        value: String::from("b"),
                        position: None
                    })],
                    position: None
                }),
            ],
            position: None,
            spread: false,
            checked: None
        }))
        .unwrap(),
        "* a\n  ***\n  b\n",
        "should support a list item w/ children"
    );
    // Continuation-line indentation under each `list_item_indent` setting.
    assert_eq!(
        to_md_with_opts(
            &Node::ListItem(ListItem {
                children: vec![
                    Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("a"),
                            position: None
                        })],
                        position: None
                    }),
                    Node::ThematicBreak(ThematicBreak { position: None })
                ],
                position: None,
                spread: false,
                checked: None
            }),
            &Options {
                list_item_indent: IndentOptions::One,
                ..Default::default()
            }
        )
        .unwrap(),
        "* a\n  ***\n",
        "should use one space after the bullet for `list_item_indent: \"IndentOptions::One\"`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::ListItem(ListItem {
                children: vec![Node::Paragraph(Paragraph {
                    children: vec![Node::Text(Text {
                        value: String::from("a"),
                        position: None
                    })],
                    position: None
                }),],
                position: None,
                spread: false,
                checked: None
            }),
            &Options {
                list_item_indent: IndentOptions::Mixed,
                ..Default::default()
            }
        )
        .unwrap(),
        "* a\n",
        "should use one space after the bullet for `list_item_indent: \"IndentOptions::Mixed\"`, when the item is not spread"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::ListItem(ListItem {
                children: vec![Node::Paragraph(Paragraph {
                    children: vec![Node::Text(Text {
                        value: String::from("a"),
                        position: None
                    })],
                    position: None
                }),
                Node::ThematicBreak(ThematicBreak { position: None })],
                position: None,
                spread: true,
                checked: None
            }),
            &Options {
                list_item_indent: IndentOptions::Mixed,
                ..Default::default()
            }
        )
        .unwrap(),
        "* a\n\n  ***\n",
        "should use a tab stop of spaces after the bullet for `list_item_indent: \"IndentOptions::Mixed\"`, when the item is spread"
    );
    assert_eq!(
        to(&Node::ListItem(ListItem {
            children: vec![
                Node::Paragraph(Paragraph {
                    children: vec![Node::Text(Text {
                        value: String::from("a"),
                        position: None
                    })],
                    position: None
                }),
                Node::ThematicBreak(ThematicBreak { position: None }),
            ],
            position: None,
            spread: false,
            checked: None
        }))
        .unwrap(),
        "* a\n  ***\n",
        "should not use blank lines between child blocks for items w/ `spread: false`"
    );
    // Bullet alternation for nested lists (`create_list` builds one-item
    // lists nested to the given depth).
    assert_eq!(
        to_md_with_opts(
            &create_list(create_list(create_list::<Option<Node>>(None))),
            &Options {
                bullet_other: '+',
                ..Default::default()
            }
        )
        .unwrap(),
        "* * +\n",
        "should support `bullet_other`"
    );
    assert_eq!(
        to_md_with_opts(
            &create_list(create_list(create_list::<Option<Node>>(None))),
            &Options {
                bullet: '-',
                ..Default::default()
            }
        )
        .unwrap(),
        "- - *\n",
        "should default to an `bullet_other` different from `bullet` (1)"
    );
    assert_eq!(
        to_md_with_opts(
            &create_list(create_list(create_list::<Option<Node>>(None))),
            &Options {
                bullet: '*',
                ..Default::default()
            }
        )
        .unwrap(),
        "* * -\n",
        "should default to an `bullet_other` different from `bullet` (2)"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![
                Node::ListItem(ListItem {
                    children: vec![Node::Paragraph(Paragraph {
                        children: vec![Node::Text(Text {
                            value: String::from("a"),
                            position: None
                        })],
                        position: None
                    }),],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![Node::ThematicBreak(ThematicBreak { position: None })],
                    position: None,
                    spread: false,
                    checked: None
                })
            ],
            position: None,
            ordered: false,
            start: None,
            spread: false
        }))
        .unwrap(),
        "- a\n- ***\n",
        "should use a different bullet than a thematic rule marker, if the first child of a list item is a thematic break (2)"
    );
    assert_eq!(
        to(&create_list(create_list::<Option<Node>>(None))).unwrap(),
        "* *\n",
        "should *not* use a different bullet for an empty list item in two lists"
    );
    assert_eq!(
        to(&create_list(create_list(create_list::<Option<Node>>(None)))).unwrap(),
        "* * -\n",
        "should use a different bullet for an empty list item in three lists (1)"
    );
    assert_eq!(
        to(&Node::List(List {
            children: vec![
                Node::ListItem(ListItem {
                    children: vec![],
                    position: None,
                    spread: false,
                    checked: None
                }),
                Node::ListItem(ListItem {
                    children: vec![create_list(create_list::<Option<Node>>(None))],
                    position: None,
                    spread: false,
                    checked: None
                })
            ],
            position: None,
            ordered: false,
            start: None,
            spread: false
        }))
        .unwrap(),
        "*\n* * -\n",
        "should use a different bullet for an empty list item in three lists (2)"
    );
    assert_eq!(
        to_md_with_opts(
            &create_list(create_list(create_list::<Option<Node>>(None))),
            &Options {
                bullet: '+',
                ..Default::default()
            }
        )
        .unwrap(),
        "+ + +\n",
        "should not use a different bullet for an empty list item in three lists if `bullet` isn’t a thematic rule marker"
    );
    assert_eq!(
        to(&create_list(create_list(create_list(create_list::<
            Option<Node>,
        >(None)))))
        .unwrap(),
        "* * * -\n",
        "should use a different bullet for an empty list item in four lists"
    );
    assert_eq!(
        to(&create_list(create_list(create_list(create_list(
            create_list::<Option<Node>>(None)
        )))))
        .unwrap(),
        "* * * * -\n",
        "should use a different bullet for an empty list item in five lists"
    );
    assert_eq!(
        to(&create_list(create_list(vec![
            create_list(Node::Paragraph(Paragraph {
                children: vec![Node::Text(Text {
                    value: String::from("a"),
                    position: None
                })],
                position: None
            })),
            create_list::<Option<Node>>(None)
        ])))
        .unwrap(),
        "* * * a\n    -\n",
        "should not use a different bullet for an empty list item at non-head in two lists"
    );
    // Ordered-list bullets (`.` vs `)`) and alternation for adjacent lists.
    assert_eq!(
        to_md_with_opts(
            &Node::List(List {
                children: vec![Node::ListItem(ListItem {
                    children: vec![],
                    position: None,
                    spread: false,
                    checked: None
                })],
                position: None,
                ordered: true,
                start: None,
                spread: false
            }),
            &Options {
                bullet_ordered: ')',
                ..Default::default()
            }
        )
        .unwrap(),
        "1)\n",
        "should support `bullet_ordered`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::Root(Root {
                children: vec![
                    Node::List(List {
                        children: vec![Node::ListItem(ListItem {
                            children: vec![],
                            position: None,
                            spread: false,
                            checked: None
                        })],
                        position: None,
                        ordered: true,
                        start: None,
                        spread: false
                    }),
                    Node::List(List {
                        children: vec![Node::ListItem(ListItem {
                            children: vec![],
                            position: None,
                            spread: false,
                            checked: None
                        })],
                        position: None,
                        ordered: true,
                        start: None,
                        spread: false
                    }),
                ],
                position: None
            }),
            &Options {
                bullet_ordered: ')',
                ..Default::default()
            }
        )
        .unwrap(),
        "1)\n\n1.\n",
        "should use a different bullet for adjacent ordered lists"
    );
}
/// Test helper: converts a value into a `Vec<Node>` so `create_list` can
/// accept a single node, an optional node, or an already-built vector.
trait IntoVecNode {
    fn into_vec(self) -> Vec<Node>;
}
impl IntoVecNode for Node {
    /// A single node becomes a one-element vector.
    fn into_vec(self) -> Vec<Node> {
        let mut children = Vec::with_capacity(1);
        children.push(self);
        children
    }
}
impl IntoVecNode for Option<Node> {
    /// `Some(node)` becomes a one-element vector; `None` becomes an empty one.
    fn into_vec(self) -> Vec<Node> {
        match self {
            Some(node) => vec![node],
            None => Vec::new(),
        }
    }
}
impl IntoVecNode for Vec<Node> {
    // Already a vector of nodes: pass through unchanged.
    fn into_vec(self) -> Vec<Node> {
        self
    }
}
/// Test helper: wraps `d` (a node, an optional node, or a vector of nodes)
/// in a tight, unordered, one-item list.
fn create_list<T>(d: T) -> Node
where
    T: IntoVecNode,
{
    let item = ListItem {
        children: d.into_vec(),
        position: None,
        spread: false,
        checked: None,
    };
    Node::List(List {
        children: vec![Node::ListItem(item)],
        position: None,
        ordered: false,
        start: None,
        spread: false,
    })
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/math.rs | Rust | use markdown::mdast::{Definition, InlineMath, Math, Node, Paragraph, Text};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
#[test]
fn math() {
    // Inline math (`InlineMath`) serialization: marker count, padding, and
    // escaping of `$`.
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("a"),
            position: None
        }))
        .unwrap(),
        "$a$\n",
        "should serialize math (text)"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::InlineMath(InlineMath {
                value: String::from("a"),
                position: None
            }),
            &Options {
                single_dollar_text_math: false,
                ..Default::default()
            }
        )
        .unwrap(),
        "$$a$$\n",
        "should serialize math (text) with at least 2 dollars w/ `single_dollar_text_math: false`"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::new(),
            position: None
        }))
        .unwrap(),
        "$$\n",
        "should serialize math (text) w/o `value`"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("a \\$ b"),
            position: None
        }))
        .unwrap(),
        "$$a \\$ b$$\n",
        "should serialize math (text) w/ two dollar signs when including a dollar"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("a \\$"),
            position: None
        }))
        .unwrap(),
        "$$ a \\$ $$\n",
        "should serialize math (text) w/ padding when ending in a dollar sign"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("$ a"),
            position: None
        }))
        .unwrap(),
        "$$ $ a $$\n",
        "should serialize math (text) w/ padding when starting in a dollar sign"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from(" a "),
            position: None
        }))
        .unwrap(),
        "$ a $\n",
        "should pad w/ a space if the value starts and ends w/ a space"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from(" a"),
            position: None
        }))
        .unwrap(),
        "$ a$\n",
        "should not pad w/ spaces if the value ends w/ a non-space"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("a "),
            position: None
        }))
        .unwrap(),
        "$a $\n",
        "should not pad w/ spaces if the value starts w/ a non-space"
    );
    // Flow math (`Math`) serialization: fences, `meta`, and fence sizing.
    assert_eq!(
        to(&Node::Math(Math {
            value: String::from("a"),
            position: None,
            meta: None
        }))
        .unwrap(),
        "$$\na\n$$\n",
        "should serialize math (flow)"
    );
    assert_eq!(
        to(&Node::Math(Math {
            value: String::new(),
            position: None,
            meta: None
        }))
        .unwrap(),
        "$$\n$$\n",
        "should serialize math (flow) w/o `value`"
    );
    assert_eq!(
        to(&Node::Math(Math {
            value: String::new(),
            position: None,
            meta: String::from("a").into()
        }))
        .unwrap(),
        "$$a\n$$\n",
        "should serialize math (flow) w/ `meta`"
    );
    assert_eq!(
        to(&Node::Math(Math {
            value: String::from("$$"),
            position: None,
            meta: None
        }))
        .unwrap(),
        "$$$\n$$\n$$$\n",
        "should serialize math (flow) w/ more dollars than occur together in `value`"
    );
    // Escaping of `$` in regular phrasing content.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("a $ b"),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        "a \\$ b\n",
        "should escape `$` in phrasing"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::Paragraph(Paragraph {
                children: vec![Node::Text(Text {
                    value: String::from("a $ b"),
                    position: None
                })],
                position: None
            }),
            &Options {
                single_dollar_text_math: false,
                ..Default::default()
            }
        )
        .unwrap(),
        "a $ b\n",
        "should not escape a single dollar in phrasing w/ `single_dollar_text_math: false`'"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::Paragraph(Paragraph {
                children: vec![Node::Text(Text {
                    value: String::from("a $$ b"),
                    position: None
                })],
                position: None
            }),
            &Options {
                single_dollar_text_math: false,
                ..Default::default()
            }
        )
        .unwrap(),
        "a \\$$ b\n",
        "should escape two dollars in phrasing w/ `single_dollar_text_math: false`"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![
                Node::Text(Text {
                    value: String::from("a $"),
                    position: None
                }),
                Node::InlineMath(InlineMath {
                    value: String::from("b"),
                    position: None
                }),
                Node::Text(Text {
                    value: String::from("$ c"),
                    position: None
                }),
            ],
            position: None
        }))
        .unwrap(),
        "a \\$$b$\\$ c\n",
        "should escape `$` around math (text)"
    );
    assert_eq!(
        to(&Node::Definition(Definition {
            position: None,
            url: String::from("b"),
            title: String::from("a\n$\nb").into(),
            identifier: String::from("a"),
            label: String::from("a").into(),
        }))
        .unwrap(),
        "[a]: b \"a\n$\nb\"\n",
        "should not escape `$` at the start of a line"
    );
    assert_eq!(
        to(&Node::Math(Math {
            value: String::new(),
            position: None,
            meta: String::from("a\rb\nc").into()
        }))
        .unwrap(),
        // NOTE(review): this expectation contains raw line endings although
        // the description says they are escaped; it likely originally used
        // character references (e.g. `&#xD;`, `&#xA;`) that were decoded by
        // the export pipeline — confirm against upstream.
        "$$a
b
c\n$$\n",
        "should escape `\\r`, `\\n` when in `meta` of math (flow)"
    );
    assert_eq!(
        to(&Node::Math(Math {
            value: String::new(),
            position: None,
            meta: String::from("a$b").into()
        }))
        .unwrap(),
        // NOTE(review): the description says `$` is escaped but the expected
        // string contains a raw `$`; likely originally `&#x24;` — confirm
        // against upstream.
        "$$a$b\n$$\n",
        "should escape `$` when in `meta` of math (flow)"
    );
    // Line endings inside math (text) are flattened to spaces so the result
    // cannot break out of the `$...$` construct when re-parsed.
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("a\n- b"),
            position: None
        }))
        .unwrap(),
        "$a - b$\n",
        "should prevent breaking out of code (-)"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("a\n#"),
            position: None
        }))
        .unwrap(),
        "$a #$\n",
        "should prevent breaking out of code (#)"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("a\n1. "),
            position: None
        }))
        .unwrap(),
        "$a 1. $\n",
        "should prevent breaking out of code (\\d\\.)"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            value: String::from("a\r- b"),
            position: None
        }))
        .unwrap(),
        "$a - b$\n",
        "should prevent breaking out of code (cr)"
    );
    assert_eq!(
        to(&Node::InlineMath(InlineMath {
            // Fix: this case is labelled "crlf" but previously used "a\n- b",
            // duplicating the "-" case above; use an actual CRLF line ending.
            value: String::from("a\r\n- b"),
            position: None
        }))
        .unwrap(),
        "$a - b$\n",
        "should prevent breaking out of code (crlf)"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/paragraph.rs | Rust | use markdown::mdast::{Node, Paragraph, Text};
use mdast_util_to_markdown::to_markdown as to;
use pretty_assertions::assert_eq;
#[test]
fn paragraph() {
    // Serialization of `Paragraph` nodes, in particular how whitespace at
    // the start/end of paragraphs and around line endings is handled so the
    // output re-parses to the same tree.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![],
            position: None
        }))
        .unwrap(),
        "",
        "should support an empty paragraph"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("a\nb"),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        "a\nb\n",
        "should support a paragraph"
    );
    // NOTE(review): the following expectations are described as *encoding*
    // spaces/tabs but contain literal spaces/tabs; they likely originally
    // used character references (e.g. `&#x20;`, `&#x9;`) that were decoded
    // by the export pipeline — confirm against upstream before relying on
    // the exact expected bytes.
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("  a"),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        "  a\n",
        "should encode spaces at the start of paragraphs"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("a  "),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        "a  \n",
        "should encode spaces at the end of paragraphs"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("\t\ta"),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        "	\ta\n",
        "should encode tabs at the start of paragraphs"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("a\t\t"),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        "a\t	\n",
        "should encode tabs at the end of paragraphs"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("a \n b"),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        "a  \n  b\n",
        "should encode spaces around line endings in paragraphs"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("a\t\t\n\t\tb"),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        "a\t	\n	\tb\n",
        "should encode spaces around line endings in paragraphs"
    );
    assert_eq!(
        to(&Node::Paragraph(Paragraph {
            children: vec![Node::Text(Text {
                value: String::from("я_я"),
                position: None
            })],
            position: None
        }))
        .unwrap(),
        // NOTE(review): "яяя" looks garbled for input "я_я"; for an escaping
        // test the expected output is plausibly "я\\_я\n" — confirm against
        // upstream before trusting this assertion.
        "яяя\n",
        "should support escaping around non-ascii"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/roundtrip.rs | Rust | use markdown::{mdast::Node, to_mdast as from};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
#[test]
fn roundtrip() {
    // Round-trip tests: parse markdown with `markdown::to_mdast`, serialize
    // it back with `to_markdown`, and check that either the text or the
    // (position-stripped) tree survives the trip unchanged.

    // Nested lists in block quotes, spread handling.
    let doc: String = document(vec![
        "> * Lorem ipsum dolor sit amet",
        ">",
        "> * consectetur adipisicing elit",
        "",
    ]);
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);
    let doc: String = document(vec![
        "* Lorem ipsum dolor sit amet",
        "",
        " 1. consectetur adipisicing elit",
        "",
        " 2. sed do eiusmod tempor incididunt",
        "",
    ]);
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);
    let doc: String = document(vec![
        "* 1. Lorem ipsum dolor sit amet",
        "",
        " 2. consectetur adipisicing elit",
        "",
    ]);
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);
    let doc: String = document(vec![
        "* hello",
        " * world",
        " how",
        "",
        " are",
        " you",
        "",
        " * today",
        "* hi",
        "",
    ]);
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);

    // Autolinks keep their URL (incl. `&`) intact.
    let doc: String = "An autolink: <http://example.com/?foo=1&bar=2>.\n".to_string();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);

    // Definitions with identifiers that collide with `Object.prototype`
    // member names (ported from the JS test suite).
    let doc: String = document(vec![
        "A [primary][toString], [secondary][constructor], and [tertiary][__proto__] link.",
        "",
        "[toString]: http://primary.com",
        "",
        "[__proto__]: http://tertiary.com",
        "",
        "[constructor]: http://secondary.com",
        "",
    ]);
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);

    // Empty list items between non-empty ones.
    let doc: String = document(vec![
        "* foo",
        "",
        "*",
        "",
        "* bar",
        "",
        "* baz",
        "",
        "*",
        "",
        "* qux quux",
        "",
    ]);
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);

    // Adjacent lists kept apart by an HTML comment.
    let doc: String = "* a\n\n<!---->\n\n* b\n".to_string();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);

    // Indented code stays indented when `fences` is off.
    let doc: String = document(vec![
        " <h3>Header 3</h3>",
        "",
        " <blockquote>",
        " <p>This is a blockquote.</p>",
        " ",
        " <p>This is the second paragraph in the blockquote.</p>",
        " ",
        " <h2>This is an H2 in a blockquote</h2>",
        " </blockquote>",
        "",
    ]);
    assert_eq!(
        to_md_with_opts(
            &from(&doc, &Default::default()).unwrap(),
            &Options {
                fences: false,
                ..Default::default()
            }
        )
        .unwrap(),
        doc
    );

    // Adjacent block quotes.
    let doc: String = "> a\n\n> b\n".to_string();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);

    // Strong emphasis inside a link.
    let doc: String = "[**https://unifiedjs.com/**](https://unifiedjs.com/)\n".to_string();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);

    // Backslash escapes: serializing normalizes once (step1 → step2), then
    // is stable (step2 → step2).
    let step1 = "\\ \\\\ \\\\\\ \\\\\\\\";
    let step2 = "\\ \\ \\\\\\ \\\\\\\\\n";
    assert_eq!(
        to(&from(step1, &Default::default()).unwrap()).unwrap(),
        step2
    );
    assert_eq!(
        to(&from(step2, &Default::default()).unwrap()).unwrap(),
        step2
    );
    let doc = "\\\\\\*a\n";
    assert_eq!(to(&from(doc, &Default::default()).unwrap()).unwrap(), doc);
    let doc = "\\\\*a\\\\\\*";
    assert_eq!(
        remove_pos(&mut from(doc, &Default::default()).unwrap()),
        remove_pos(
            &mut from(
                &to(&from(doc, &Default::default()).unwrap()).unwrap(),
                &Default::default()
            )
            .unwrap()
        )
    );

    // Fenced code containing only whitespace.
    let doc = "```\n \n```\n";
    assert_eq!(to(&from(doc, &Default::default()).unwrap()).unwrap(), doc);

    // Bullets vs. thematic breaks.
    let doc = "* * -\n";
    assert_eq!(to(&from(doc, &Default::default()).unwrap()).unwrap(), doc);
    let doc = "- ***\n";
    assert_eq!(to(&from(doc, &Default::default()).unwrap()).unwrap(), doc);

    // Unordered-list bullet alternation with `bullet`/`bullet_other`; trees
    // are compared with positions stripped.
    let mut tree = from("* a\n- b", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(
            &mut from(
                &to_md_with_opts(
                    &tree,
                    &Options {
                        bullet: '*',
                        bullet_other: '-',
                        ..Default::default()
                    }
                )
                .unwrap(),
                &Default::default()
            )
            .unwrap()
        )
    );
    let mut tree = from("* ---\n- - +\n+ b", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(
            &mut from(
                &to_md_with_opts(
                    &tree,
                    &Options {
                        bullet: '*',
                        bullet_other: '-',
                        ..Default::default()
                    }
                )
                .unwrap(),
                &Default::default()
            )
            .unwrap()
        )
    );
    let mut tree = from("- - +\n* ---\n+ b", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(
            &mut from(
                &to_md_with_opts(
                    &tree,
                    &Options {
                        bullet: '*',
                        bullet_other: '-',
                        ..Default::default()
                    }
                )
                .unwrap(),
                &Default::default()
            )
            .unwrap()
        )
    );
    let mut tree = from("- - +\n- -", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(
            &mut from(
                &to_md_with_opts(
                    &tree,
                    &Options {
                        bullet: '*',
                        bullet_other: '-',
                        ..Default::default()
                    }
                )
                .unwrap(),
                &Default::default()
            )
            .unwrap()
        )
    );
    let mut tree = from("* - +\n *\n -\n +", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(
            &mut from(
                &to_md_with_opts(
                    &tree,
                    &Options {
                        bullet: '*',
                        bullet_other: '-',
                        ..Default::default()
                    }
                )
                .unwrap(),
                &Default::default()
            )
            .unwrap()
        )
    );
    let mut tree = from("- +\n- *\n -\n +", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(
            &mut from(
                &to_md_with_opts(
                    &tree,
                    &Options {
                        bullet: '*',
                        bullet_other: '-',
                        ..Default::default()
                    }
                )
                .unwrap(),
                &Default::default()
            )
            .unwrap()
        )
    );

    // Ordered-list marker alternation (`.` vs `)`).
    let mut tree = from("1. a\n1) b", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap())
    );
    let mut tree = from("1. ---\n1) 1. 1)\n1. b", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap())
    );
    let mut tree = from("1. 1. 1)\n1) ---\n1. b", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap())
    );
    let mut tree = from("1. 1. 1)\n1. 1.", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap())
    );
    let mut tree = from("1. 1) 1.\n 1.\n 1)\n 1.", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap())
    );
    let mut tree = from("1. 1) 1.\n 1) 1.\n 1)\n 1.", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap())
    );
    let mut tree = from("1. 1)\n1. 1.\n 1)\n 1.", &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut tree),
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap())
    );

    // Whitespace-only documents.
    // NOTE(review): these literals may originally have contained character
    // references (e.g. `&#x20;`, `&#x9;`) that were decoded by the export
    // pipeline — confirm against upstream.
    let doc: String = "  \n".to_string();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);
    let doc: String = "	\n".to_string();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);
    let doc: String = "  a  \n	\tb\t	\n".to_string();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);

    // Attention edge cases: ambiguous `*`/`_` runs, separated vs joined.
    // NOTE(review): blank lines inside these literals (needed to actually
    // separate the "Separate paragraphs" cases) may have been lost in this
    // copy — confirm against upstream.
    let doc: String = "Separate paragraphs:
a * is this emphasis? *
a ** is this emphasis? **
a *** is this emphasis? ***
a *\\* is this emphasis? *\\*
a \\** is this emphasis? \\**
a **\\* is this emphasis? **\\*
a *\\** is this emphasis? *\\**
One paragraph:
a * is this emphasis? *
a ** is this emphasis? **
a *** is this emphasis? ***
a *\\* is this emphasis? *\\*
a \\** is this emphasis? \\**
a **\\* is this emphasis? **\\*
a *\\** is this emphasis? *\\**"
        .to_string();
    let mut tree = from(&doc, &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap()),
        remove_pos(&mut tree),
    );
    let doc: String = "Separate paragraphs:
a _ is this emphasis? _
a __ is this emphasis? __
a ___ is this emphasis? ___
a _\\_ is this emphasis? _\\_
a \\__ is this emphasis? \\__
a __\\_ is this emphasis? __\\_
a _\\__ is this emphasis? _\\__
One paragraph:
a _ is this emphasis? _
a __ is this emphasis? __
a ___ is this emphasis? ___
a _\\_ is this emphasis? _\\_
a \\__ is this emphasis? \\__
a __\\_ is this emphasis? __\\_
a _\\__ is this emphasis? _\\__"
        .to_string();
    let mut tree = from(&doc, &Default::default()).unwrap();
    assert_eq!(
        remove_pos(&mut from(&to(&tree).unwrap(), &Default::default()).unwrap()),
        remove_pos(&mut tree),
    );
    let doc: String = to(&from("(____", &Default::default()).unwrap()).unwrap();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);
    let doc: String = to(&from(
        "Once activated, a service worker ______, then transitions to idle…",
        &Default::default(),
    )
    .unwrap())
    .unwrap();
    assert_eq!(to(&from(&doc, &Default::default()).unwrap()).unwrap(), doc);
}
fn remove_pos(node: &mut Node) {
node.position_set(None);
if let Some(children) = node.children_mut() {
for child in children {
remove_pos(child);
}
}
}
/// Join the given lines with line feeds (`"\n"`), mirroring how the markdown
/// documents in this suite are written line by line.
fn document(doc: Vec<&str>) -> String {
    let mut out = String::new();
    for (index, line) in doc.iter().enumerate() {
        if index > 0 {
            out.push('\n');
        }
        out.push_str(line);
    }
    out
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/strong.rs | Rust | use markdown::mdast::{Node, Strong, Text};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
#[test]
fn strong() {
    // Serialization of `Strong` nodes, including the `strong` marker option
    // (`*` by default, `_` optionally).
    assert_eq!(
        to(&Node::Strong(Strong {
            children: Vec::new(),
            position: None
        }))
        .unwrap(),
        "****\n",
        "should support an empty strong"
    );
    assert_eq!(
        to(&Node::Strong(Strong {
            children: vec![Node::Text(Text {
                value: String::from("a"),
                position: None,
            })],
            position: None
        }))
        .unwrap(),
        "**a**\n",
        "should support a strong w/ children"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::Strong(Strong {
                children: vec![Node::Text(Text {
                    value: String::from("a"),
                    position: None,
                })],
                position: None
            }),
            &Options {
                strong: '_',
                ..Default::default()
            }
        )
        .unwrap(),
        "__a__\n",
        // Fix: the description previously said `emphasis: "_"`, but the
        // option exercised here is `strong`.
        "should support a strong w/ underscores when `strong: \"_\"`"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/text.rs | Rust | use markdown::mdast::{Node, Text};
use mdast_util_to_markdown::to_markdown as to;
use pretty_assertions::assert_eq;
#[test]
fn text() {
    // Serialize a lone `Text` node and return the resulting markdown.
    let render = |value: &str| {
        to(&Node::Text(Text {
            value: String::from(value),
            position: None,
        }))
        .unwrap()
    };

    assert_eq!(render(""), "", "should support an empty text");
    assert_eq!(render("a\nb"), "a\nb\n", "should support text");
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
mdast_util_to_markdown/tests/thematic_break.rs | Rust | use markdown::mdast::{Node, ThematicBreak};
use mdast_util_to_markdown::{
to_markdown as to, to_markdown_with_options as to_md_with_opts, Options,
};
use pretty_assertions::assert_eq;
#[test]
fn thematic_break() {
    // Default: `*` repeated three times.
    assert_eq!(
        to(&Node::ThematicBreak(ThematicBreak { position: None })).unwrap(),
        "***\n",
        "should support a thematic break"
    );
    // `rule` selects the marker character.
    assert_eq!(
        to_md_with_opts(
            &Node::ThematicBreak(ThematicBreak { position: None }),
            &Options {
                rule: '-',
                ..Default::default()
            }
        )
        .unwrap(),
        "---\n",
        "should support a thematic break w/ dashes when `rule: \"-\"`"
    );
    assert_eq!(
        to_md_with_opts(
            &Node::ThematicBreak(ThematicBreak { position: None }),
            &Options {
                rule: '_',
                ..Default::default()
            }
        )
        .unwrap(),
        "___\n",
        "should support a thematic break w/ underscores when `rule: \"_\"`"
    );
    // `rule_repetition` controls how often the marker is repeated.
    assert_eq!(
        to_md_with_opts(
            &Node::ThematicBreak(ThematicBreak { position: None }),
            &Options {
                rule_repetition: 5,
                ..Default::default()
            }
        )
        .unwrap(),
        "*****\n",
        "should support a thematic break w/ more repetitions w/ `rule_repetition`"
    );
    // `rule_spaces` inserts spaces between the markers.
    assert_eq!(
        to_md_with_opts(
            &Node::ThematicBreak(ThematicBreak { position: None }),
            &Options {
                rule_spaces: true,
                ..Default::default()
            }
        )
        .unwrap(),
        "* * *\n",
        "should support a thematic break w/ spaces w/ `rule_spaces`"
    );
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/configuration.rs | Rust | use crate::util::{
line_ending::LineEnding,
mdx::{EsmParse as MdxEsmParse, ExpressionParse as MdxExpressionParse},
};
use alloc::{boxed::Box, fmt, string::String};
/// Control which constructs are enabled.
///
/// Not all constructs can be configured.
/// Notably, blank lines and paragraphs cannot be turned off.
///
/// ## Examples
///
/// ```
/// use markdown::Constructs;
/// # fn main() {
///
/// // Use the default trait to get `CommonMark` constructs:
/// let commonmark = Constructs::default();
///
/// // To turn on all of GFM, use the `gfm` method:
/// let gfm = Constructs::gfm();
///
/// // Or, mix and match:
/// let custom = Constructs {
/// math_flow: true,
/// math_text: true,
/// ..Constructs::gfm()
/// };
/// # }
/// ```
#[allow(clippy::struct_excessive_bools)]
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Serialize, serde::Deserialize),
serde(rename_all = "camelCase")
)]
pub struct Constructs {
    /// Attention.
    ///
    /// ```markdown
    /// > | a *b* c **d**.
    /// ^^^ ^^^^^
    /// ```
    pub attention: bool,
    /// Autolink.
    ///
    /// ```markdown
    /// > | a <https://example.com> b <user@example.org>.
    /// ^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^
    /// ```
    pub autolink: bool,
    /// Block quote.
    ///
    /// ```markdown
    /// > | > a
    /// ^^^
    /// ```
    pub block_quote: bool,
    /// Character escape.
    ///
    /// ```markdown
    /// > | a \* b
    /// ^^
    /// ```
    pub character_escape: bool,
    /// Character reference.
    ///
    /// ```markdown
    /// > | a &amp; b
    /// ^^^^^
    /// ```
    pub character_reference: bool,
    /// Code (indented).
    ///
    /// ```markdown
    /// > | a
    /// ^^^^^
    /// ```
    pub code_indented: bool,
    /// Code (fenced).
    ///
    /// ```markdown
    /// > | ~~~js
    /// ^^^^^
    /// > | console.log(1)
    /// ^^^^^^^^^^^^^^
    /// > | ~~~
    /// ^^^
    /// ```
    pub code_fenced: bool,
    /// Code (text).
    ///
    /// ```markdown
    /// > | a `b` c
    /// ^^^
    /// ```
    pub code_text: bool,
    /// Definition.
    ///
    /// ```markdown
    /// > | [a]: b "c"
    /// ^^^^^^^^^^
    /// ```
    pub definition: bool,
    /// Frontmatter.
    ///
    /// ````markdown
    /// > | ---
    /// ^^^
    /// > | title: Neptune
    /// ^^^^^^^^^^^^^^
    /// > | ---
    /// ^^^
    /// ````
    pub frontmatter: bool,
    /// GFM: autolink literal.
    ///
    /// ```markdown
    /// > | https://example.com
    /// ^^^^^^^^^^^^^^^^^^^
    /// ```
    pub gfm_autolink_literal: bool,
    /// GFM: footnote definition.
    ///
    /// ```markdown
    /// > | [^a]: b
    /// ^^^^^^^
    /// ```
    pub gfm_footnote_definition: bool,
    /// GFM: footnote label start.
    ///
    /// ```markdown
    /// > | a[^b]
    /// ^^
    /// ```
    pub gfm_label_start_footnote: bool,
    /// GFM: strikethrough.
    ///
    /// ```markdown
    /// > | a ~b~ c.
    /// ^^^
    /// ```
    pub gfm_strikethrough: bool,
    /// GFM: table.
    ///
    /// ```markdown
    /// > | | a |
    /// ^^^^^
    /// > | | - |
    /// ^^^^^
    /// > | | b |
    /// ^^^^^
    /// ```
    pub gfm_table: bool,
    /// GFM: task list item.
    ///
    /// ```markdown
    /// > | * [x] y.
    /// ^^^
    /// ```
    pub gfm_task_list_item: bool,
    /// Hard break (escape).
    ///
    /// ```markdown
    /// > | a\
    /// ^
    /// | b
    /// ```
    pub hard_break_escape: bool,
    /// Hard break (trailing).
    ///
    /// ```markdown
    /// > | a␠␠
    /// ^^
    /// | b
    /// ```
    pub hard_break_trailing: bool,
    /// Heading (atx).
    ///
    /// ```markdown
    /// > | # a
    /// ^^^
    /// ```
    pub heading_atx: bool,
    /// Heading (setext).
    ///
    /// ```markdown
    /// > | a
    /// ^^
    /// > | ==
    /// ^^
    /// ```
    pub heading_setext: bool,
    /// HTML (flow).
    ///
    /// ```markdown
    /// > | <div>
    /// ^^^^^
    /// ```
    pub html_flow: bool,
    /// HTML (text).
    ///
    /// ```markdown
    /// > | a <b> c
    /// ^^^
    /// ```
    pub html_text: bool,
    /// Label start (image).
    ///
    /// ```markdown
    /// > | a ![b](c) d
    /// ^^
    /// ```
    pub label_start_image: bool,
    /// Label start (link).
    ///
    /// ```markdown
    /// > | a [b](c) d
    /// ^
    /// ```
    pub label_start_link: bool,
    /// Label end.
    ///
    /// ```markdown
    /// > | a [b](c) d
    /// ^^^^
    /// ```
    pub label_end: bool,
    /// List items.
    ///
    /// ```markdown
    /// > | * a
    /// ^^^
    /// ```
    pub list_item: bool,
    /// Math (flow).
    ///
    /// ```markdown
    /// > | $$
    /// ^^
    /// > | \frac{1}{2}
    /// ^^^^^^^^^^^
    /// > | $$
    /// ^^
    /// ```
    pub math_flow: bool,
    /// Math (text).
    ///
    /// ```markdown
    /// > | a $b$ c
    /// ^^^
    /// ```
    pub math_text: bool,
    /// MDX: ESM.
    ///
    /// ```markdown
    /// > | import a from 'b'
    /// ^^^^^^^^^^^^^^^^^
    /// ```
    ///
    /// > 👉 **Note**: to support ESM, you *must* pass
    /// > [`mdx_esm_parse`][MdxEsmParse] in [`ParseOptions`][] too.
    /// > Otherwise, ESM is treated as normal markdown.
    pub mdx_esm: bool,
    /// MDX: expression (flow).
    ///
    /// ```markdown
    /// > | {Math.PI}
    /// ^^^^^^^^^
    /// ```
    ///
    /// > 👉 **Note**: You *can* pass
    /// > [`mdx_expression_parse`][MdxExpressionParse] in [`ParseOptions`][]
    /// > too, to parse expressions according to a certain grammar (typically,
    /// > a programming language).
    /// > Otherwise, expressions are parsed with a basic algorithm that only
    /// > cares about braces.
    pub mdx_expression_flow: bool,
    /// MDX: expression (text).
    ///
    /// ```markdown
    /// > | a {Math.PI} c
    /// ^^^^^^^^^
    /// ```
    ///
    /// > 👉 **Note**: You *can* pass
    /// > [`mdx_expression_parse`][MdxExpressionParse] in [`ParseOptions`][]
    /// > too, to parse expressions according to a certain grammar (typically,
    /// > a programming language).
    /// > Otherwise, expressions are parsed with a basic algorithm that only
    /// > cares about braces.
    pub mdx_expression_text: bool,
    /// MDX: JSX (flow).
    ///
    /// ```markdown
    /// > | <Component />
    /// ^^^^^^^^^^^^^
    /// ```
    ///
    /// > 👉 **Note**: You *must* pass `html_flow: false` to use this,
    /// > as it’s preferred when on over `mdx_jsx_flow`.
    ///
    /// > 👉 **Note**: You *can* pass
    /// > [`mdx_expression_parse`][MdxExpressionParse] in [`ParseOptions`][]
    /// > too, to parse expressions in JSX according to a certain grammar
    /// > (typically, a programming language).
    /// > Otherwise, expressions are parsed with a basic algorithm that only
    /// > cares about braces.
    pub mdx_jsx_flow: bool,
    /// MDX: JSX (text).
    ///
    /// ```markdown
    /// > | a <Component /> c
    /// ^^^^^^^^^^^^^
    /// ```
    ///
    /// > 👉 **Note**: You *must* pass `html_text: false` to use this,
    /// > as it’s preferred when on over `mdx_jsx_text`.
    ///
    /// > 👉 **Note**: You *can* pass
    /// > [`mdx_expression_parse`][MdxExpressionParse] in [`ParseOptions`][]
    /// > too, to parse expressions in JSX according to a certain grammar
    /// > (typically, a programming language).
    /// > Otherwise, expressions are parsed with a basic algorithm that only
    /// > cares about braces.
    pub mdx_jsx_text: bool,
    /// Thematic break.
    ///
    /// ```markdown
    /// > | ***
    /// ^^^
    /// ```
    pub thematic_break: bool,
}
impl Default for Constructs {
    /// `CommonMark`.
    ///
    /// `CommonMark` is a relatively strong specification of how markdown
    /// works.
    /// Most markdown parsers try to follow it.
    ///
    /// For more information, see the `CommonMark` specification:
    /// <https://spec.commonmark.org>.
    fn default() -> Self {
        // Fields are listed in the same order as the struct declaration
        // (previously `gfm_label_start_footnote` and
        // `gfm_footnote_definition` were swapped).
        Self {
            // Core `CommonMark` constructs: all on.
            attention: true,
            autolink: true,
            block_quote: true,
            character_escape: true,
            character_reference: true,
            code_indented: true,
            code_fenced: true,
            code_text: true,
            definition: true,
            // Extensions (frontmatter, GFM, math, MDX): all off here; see
            // `Constructs::gfm()` and `Constructs::mdx()`.
            frontmatter: false,
            gfm_autolink_literal: false,
            gfm_footnote_definition: false,
            gfm_label_start_footnote: false,
            gfm_strikethrough: false,
            gfm_table: false,
            gfm_task_list_item: false,
            hard_break_escape: true,
            hard_break_trailing: true,
            heading_atx: true,
            heading_setext: true,
            html_flow: true,
            html_text: true,
            label_start_image: true,
            label_start_link: true,
            label_end: true,
            list_item: true,
            math_flow: false,
            math_text: false,
            mdx_esm: false,
            mdx_expression_flow: false,
            mdx_expression_text: false,
            mdx_jsx_flow: false,
            mdx_jsx_text: false,
            thematic_break: true,
        }
    }
}
impl Constructs {
    /// GFM.
    ///
    /// GFM stands for **GitHub flavored markdown**.
    /// GFM extends `CommonMark` and adds support for autolink literals,
    /// footnotes, strikethrough, tables, and tasklists.
    ///
    /// For more information, see the GFM specification:
    /// <https://github.github.com/gfm/>.
    pub fn gfm() -> Self {
        Self {
            // Turn on every GFM extension…
            gfm_autolink_literal: true,
            gfm_footnote_definition: true,
            gfm_label_start_footnote: true,
            gfm_strikethrough: true,
            gfm_table: true,
            gfm_task_list_item: true,
            // …on top of the `CommonMark` defaults.
            ..Self::default()
        }
    }
    /// MDX.
    ///
    /// This turns on `CommonMark`, turns off some conflicting constructs
    /// (autolinks, code (indented), and HTML), and turns on MDX (ESM,
    /// expressions, and JSX).
    ///
    /// For more information, see the MDX website:
    /// <https://mdxjs.com>.
    ///
    /// > 👉 **Note**: to support ESM, you *must* pass
    /// > [`mdx_esm_parse`][MdxEsmParse] in [`ParseOptions`][] too.
    /// > Otherwise, ESM is treated as normal markdown.
    /// >
    /// > You *can* pass
    /// > [`mdx_expression_parse`][MdxExpressionParse]
    /// > to parse expressions according to a certain grammar (typically, a
    /// > programming language).
    /// > Otherwise, expressions are parsed with a basic algorithm that only
    /// > cares about braces.
    pub fn mdx() -> Self {
        Self {
            // Turn off constructs whose syntax conflicts with MDX…
            autolink: false,
            code_indented: false,
            html_flow: false,
            html_text: false,
            // …and turn on the MDX-specific ones.
            mdx_esm: true,
            mdx_expression_flow: true,
            mdx_expression_text: true,
            mdx_jsx_flow: true,
            mdx_jsx_text: true,
            ..Self::default()
        }
    }
}
/// Configuration that describes how to compile to HTML.
///
/// You likely either want to turn on the dangerous options
/// (`allow_dangerous_html`, `allow_dangerous_protocol`) when dealing with
/// input you trust, or want to customize how GFM footnotes are compiled
/// (typically because the input markdown is not in English).
///
/// ## Examples
///
/// ```
/// use markdown::CompileOptions;
/// # fn main() {
///
/// // Use the default trait to get safe defaults:
/// let safe = CompileOptions::default();
///
/// // Live dangerously / trust the author:
/// let danger = CompileOptions {
/// allow_dangerous_html: true,
/// allow_dangerous_protocol: true,
/// ..CompileOptions::default()
/// };
///
/// // In French:
/// let enFrançais = CompileOptions {
/// gfm_footnote_back_label: Some("Arrière".into()),
/// gfm_footnote_label: Some("Notes de bas de page".into()),
/// ..CompileOptions::default()
/// };
/// # }
/// ```
#[allow(clippy::struct_excessive_bools)]
#[derive(Clone, Debug, Default)]
#[cfg_attr(
feature = "serde",
derive(serde::Serialize, serde::Deserialize),
serde(default, rename_all = "camelCase")
)]
pub struct CompileOptions {
/// Whether to allow all values in images.
///
/// The default is `false`,
/// which lets `allow_dangerous_protocol` control protocol safety for
/// both links and images.
///
/// Pass `true` to allow all values as `src` on images,
/// regardless of `allow_dangerous_protocol`.
/// This is safe because the
/// [HTML specification][whatwg-html-image-processing]
/// does not allow executable code in images.
///
/// [whatwg-html-image-processing]: https://html.spec.whatwg.org/multipage/images.html#images-processing-model
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // By default, some protocols in image sources are dropped:
/// assert_eq!(
/// to_html_with_options(
/// "",
/// &Options::default()
/// )?,
/// "<p><img src=\"\" alt=\"\" /></p>"
/// );
///
/// // Turn `allow_any_img_src` on to allow all values as `src` on images.
/// // This is safe because browsers do not execute code in images.
/// assert_eq!(
/// to_html_with_options(
/// ")",
/// &Options {
/// compile: CompileOptions {
/// allow_any_img_src: true,
/// ..CompileOptions::default()
/// },
/// ..Options::default()
/// }
/// )?,
/// "<p><img src=\"javascript:alert(1)\" alt=\"\" /></p>"
/// );
/// # Ok(())
/// # }
/// ```
pub allow_any_img_src: bool,
/// Whether to allow (dangerous) HTML.
///
/// The default is `false`, which still parses the HTML according to
/// `CommonMark` but shows the HTML as text instead of as elements.
///
/// Pass `true` for trusted content to get actual HTML elements.
///
/// When using GFM, make sure to also turn off `gfm_tagfilter`.
/// Otherwise, some dangerous HTML is still ignored.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html, to_html_with_options, CompileOptions, Options};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // `markdown-rs` is safe by default:
/// assert_eq!(
/// to_html("Hi, <i>venus</i>!"),
/// "<p>Hi, <i>venus</i>!</p>"
/// );
///
/// // Turn `allow_dangerous_html` on to allow potentially dangerous HTML:
/// assert_eq!(
/// to_html_with_options(
/// "Hi, <i>venus</i>!",
/// &Options {
/// compile: CompileOptions {
/// allow_dangerous_html: true,
/// ..CompileOptions::default()
/// },
/// ..Options::default()
/// }
/// )?,
/// "<p>Hi, <i>venus</i>!</p>"
/// );
/// # Ok(())
/// # }
/// ```
pub allow_dangerous_html: bool,
/// Whether to allow dangerous protocols in links and images.
///
/// The default is `false`, which drops URLs in links and images that use
/// dangerous protocols.
///
/// Pass `true` for trusted content to support all protocols.
///
/// URLs that have no protocol (which means it’s relative to the current
/// page, such as `./some/page.html`) and URLs that have a safe protocol
/// (for images: `http`, `https`; for links: `http`, `https`, `irc`,
/// `ircs`, `mailto`, `xmpp`), are safe.
/// All other URLs are dangerous and dropped.
///
/// When the option `allow_all_protocols_in_img` is enabled,
/// `allow_dangerous_protocol` only applies to links.
///
/// This is safe because the
/// [HTML specification][whatwg-html-image-processing]
/// does not allow executable code in images.
/// All modern browsers respect this.
///
/// [whatwg-html-image-processing]: https://html.spec.whatwg.org/multipage/images.html#images-processing-model
///
/// ## Examples
///
/// ```
/// use markdown::{to_html, to_html_with_options, CompileOptions, Options};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // `markdown-rs` is safe by default:
/// assert_eq!(
/// to_html("<javascript:alert(1)>"),
/// "<p><a href=\"\">javascript:alert(1)</a></p>"
/// );
///
/// // Turn `allow_dangerous_protocol` on to allow potentially dangerous protocols:
/// assert_eq!(
/// to_html_with_options(
/// "<javascript:alert(1)>",
/// &Options {
/// compile: CompileOptions {
/// allow_dangerous_protocol: true,
/// ..CompileOptions::default()
/// },
/// ..Options::default()
/// }
/// )?,
/// "<p><a href=\"javascript:alert(1)\">javascript:alert(1)</a></p>"
/// );
/// # Ok(())
/// # }
/// ```
pub allow_dangerous_protocol: bool,
// To do: `doc_markdown` is broken.
#[allow(clippy::doc_markdown)]
/// Default line ending to use when compiling to HTML, for line endings not
/// in `value`.
///
/// Generally, `markdown-rs` copies line endings (`\r`, `\n`, `\r\n`) in
/// the markdown document over to the compiled HTML.
/// In some cases, such as `> a`, CommonMark requires that extra line
/// endings are added: `<blockquote>\n<p>a</p>\n</blockquote>`.
///
/// To create that line ending, the document is checked for the first line
/// ending that is used.
/// If there is no line ending, `default_line_ending` is used.
/// If that isn’t configured, `\n` is used.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html, to_html_with_options, CompileOptions, LineEnding, Options};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // `markdown-rs` uses `\n` by default:
/// assert_eq!(
/// to_html("> a"),
/// "<blockquote>\n<p>a</p>\n</blockquote>"
/// );
///
/// // Define `default_line_ending` to configure the default:
/// assert_eq!(
/// to_html_with_options(
/// "> a",
/// &Options {
/// compile: CompileOptions {
/// default_line_ending: LineEnding::CarriageReturnLineFeed,
/// ..CompileOptions::default()
/// },
/// ..Options::default()
/// }
/// )?,
/// "<blockquote>\r\n<p>a</p>\r\n</blockquote>"
/// );
/// # Ok(())
/// # }
/// ```
pub default_line_ending: LineEnding,
/// Textual label to describe the backreference back to footnote calls.
///
/// The default value is `"Back to content"`.
/// Change it when the markdown is not in English.
///
/// This label is used in the `aria-label` attribute on each backreference
/// (the `↩` links).
/// It affects users of assistive technology.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options, ParseOptions};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // `"Back to content"` is used by default:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options::gfm()
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"sr-only\">Footnotes</h2>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
///
/// // Pass `gfm_footnote_back_label` to use something else:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options {
/// parse: ParseOptions::gfm(),
/// compile: CompileOptions {
/// gfm_footnote_back_label: Some("Arrière".into()),
/// ..CompileOptions::gfm()
/// }
/// }
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"sr-only\">Footnotes</h2>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Arrière\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
/// # Ok(())
/// # }
/// ```
pub gfm_footnote_back_label: Option<String>,
/// Prefix to use before the `id` attribute on footnotes to prevent them
/// from *clobbering*.
///
/// The default is `"user-content-"`.
/// Pass `Some("".into())` for trusted markdown and when you are careful
/// with polyfilling.
/// You could pass a different prefix.
///
/// DOM clobbering is this:
///
/// ```html
/// <p id="x"></p>
/// <script>alert(x) // `x` now refers to the `p#x` DOM element</script>
/// ```
///
/// The above example shows that elements are made available by browsers,
/// by their ID, on the `window` object.
/// This is a security risk because you might be expecting some other
/// variable at that place.
/// It can also break polyfills.
/// Using a prefix solves these problems.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options, ParseOptions};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // `"user-content-"` is used by default:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options::gfm()
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"sr-only\">Footnotes</h2>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
///
/// // Pass `gfm_footnote_clobber_prefix` to use something else:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options {
/// parse: ParseOptions::gfm(),
/// compile: CompileOptions {
/// gfm_footnote_clobber_prefix: Some("".into()),
/// ..CompileOptions::gfm()
/// }
/// }
/// )?,
/// "<p><sup><a href=\"#fn-a\" id=\"fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"sr-only\">Footnotes</h2>\n<ol>\n<li id=\"fn-a\">\n<p>b <a href=\"#fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
/// # Ok(())
/// # }
/// ```
pub gfm_footnote_clobber_prefix: Option<String>,
/// Attributes to use on the footnote label.
///
/// The default value is `"class=\"sr-only\""`.
/// Change it to show the label and add other attributes.
///
/// This label is typically hidden visually (assuming a `sr-only` CSS class
/// is defined that does that), and thus affects screen readers only.
/// If you do have such a class, but want to show this section to everyone,
/// pass an empty string.
/// You can also add different attributes.
///
/// > 👉 **Note**: `id="footnote-label"` is always added, because footnote
/// > calls use it with `aria-describedby` to provide an accessible label.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options, ParseOptions};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // `"class=\"sr-only\""` is used by default:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options::gfm()
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"sr-only\">Footnotes</h2>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
///
/// // Pass `gfm_footnote_label_attributes` to use something else:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options {
/// parse: ParseOptions::gfm(),
/// compile: CompileOptions {
/// gfm_footnote_label_attributes: Some("class=\"footnote-heading\"".into()),
/// ..CompileOptions::gfm()
/// }
/// }
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"footnote-heading\">Footnotes</h2>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
/// # Ok(())
/// # }
/// ```
pub gfm_footnote_label_attributes: Option<String>,
/// HTML tag name to use for the footnote label element.
///
/// The default value is `"h2"`.
/// Change it to match your document structure.
///
/// This label is typically hidden visually (assuming a `sr-only` CSS class
/// is defined that does that), and thus affects screen readers only.
/// If you do have such a class, but want to show this section to everyone,
/// pass different attributes with the `gfm_footnote_label_attributes`
/// option.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options, ParseOptions};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // `"h2"` is used by default:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options::gfm()
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"sr-only\">Footnotes</h2>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
///
/// // Pass `gfm_footnote_label_tag_name` to use something else:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options {
/// parse: ParseOptions::gfm(),
/// compile: CompileOptions {
/// gfm_footnote_label_tag_name: Some("h1".into()),
/// ..CompileOptions::gfm()
/// }
/// }
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h1 id=\"footnote-label\" class=\"sr-only\">Footnotes</h1>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
/// # Ok(())
/// # }
/// ```
pub gfm_footnote_label_tag_name: Option<String>,
/// Textual label to use for the footnotes section.
///
/// The default value is `"Footnotes"`.
/// Change it when the markdown is not in English.
///
/// This label is typically hidden visually (assuming a `sr-only` CSS class
/// is defined that does that), and thus affects screen readers only.
/// If you do have such a class, but want to show this section to everyone,
/// pass different attributes with the `gfm_footnote_label_attributes`
/// option.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options, ParseOptions};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // `"Footnotes"` is used by default:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options::gfm()
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"sr-only\">Footnotes</h2>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
///
/// // Pass `gfm_footnote_label` to use something else:
/// assert_eq!(
/// to_html_with_options(
/// "[^a]\n\n[^a]: b",
/// &Options {
/// parse: ParseOptions::gfm(),
/// compile: CompileOptions {
/// gfm_footnote_label: Some("Notes de bas de page".into()),
/// ..CompileOptions::gfm()
/// }
/// }
/// )?,
/// "<p><sup><a href=\"#user-content-fn-a\" id=\"user-content-fnref-a\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">1</a></sup></p>\n<section data-footnotes=\"\" class=\"footnotes\"><h2 id=\"footnote-label\" class=\"sr-only\">Notes de bas de page</h2>\n<ol>\n<li id=\"user-content-fn-a\">\n<p>b <a href=\"#user-content-fnref-a\" data-footnote-backref=\"\" aria-label=\"Back to content\" class=\"data-footnote-backref\">↩</a></p>\n</li>\n</ol>\n</section>\n"
/// );
/// # Ok(())
/// # }
/// ```
pub gfm_footnote_label: Option<String>,
/// Whether or not GFM task list html `<input>` items are enabled.
///
/// This determines whether or not the user of the browser is able
/// to click and toggle generated checkbox items. The default is false.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options, ParseOptions};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // With `gfm_task_list_item_checkable`, generated `<input type="checkbox" />`
/// // tags do not contain the attribute `disabled=""` and are thus toggleable by
/// // browser users.
/// assert_eq!(
/// to_html_with_options(
/// "* [x] y.",
/// &Options {
/// parse: ParseOptions::gfm(),
/// compile: CompileOptions {
/// gfm_task_list_item_checkable: true,
/// ..CompileOptions::gfm()
/// }
/// }
/// )?,
/// "<ul>\n<li><input type=\"checkbox\" checked=\"\" /> y.</li>\n</ul>"
/// );
/// # Ok(())
/// # }
/// ```
pub gfm_task_list_item_checkable: bool,
/// Whether to support the GFM tagfilter.
///
/// This option does nothing if `allow_dangerous_html` is not turned on.
/// The default is `false`, which does not apply the GFM tagfilter to HTML.
/// Pass `true` for output that is a bit closer to GitHub’s actual output.
///
/// The tagfilter is kinda weird and kinda useless.
/// The tag filter is a naïve attempt at XSS protection.
/// You should use a proper HTML sanitizing algorithm instead.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options, ParseOptions};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // With `allow_dangerous_html`, `markdown-rs` passes HTML through untouched:
/// assert_eq!(
/// to_html_with_options(
/// "<iframe>",
/// &Options {
/// parse: ParseOptions::gfm(),
/// compile: CompileOptions {
/// allow_dangerous_html: true,
/// ..CompileOptions::default()
/// }
/// }
/// )?,
/// "<iframe>"
/// );
///
/// // Pass `gfm_tagfilter: true` to make some of that safe:
/// assert_eq!(
/// to_html_with_options(
/// "<iframe>",
/// &Options {
/// parse: ParseOptions::gfm(),
/// compile: CompileOptions {
/// allow_dangerous_html: true,
/// gfm_tagfilter: true,
/// ..CompileOptions::default()
/// }
/// }
/// )?,
/// "<iframe>"
/// );
/// # Ok(())
/// # }
/// ```
///
/// ## References
///
/// * [*§ 6.1 Disallowed Raw HTML (extension)* in GFM](https://github.github.com/gfm/#disallowed-raw-html-extension-)
/// * [`cmark-gfm#extensions/tagfilter.c`](https://github.com/github/cmark-gfm/blob/master/extensions/tagfilter.c)
pub gfm_tagfilter: bool,
}
impl CompileOptions {
    /// GFM.
    ///
    /// GFM stands for **GitHub flavored markdown**.
    /// On the compilation side, GFM only turns on the GFM tag filter.
    /// The tagfilter is useless, but it’s included here for consistency, and
    /// this method exists for parity to parse options.
    ///
    /// For more information, see the GFM specification:
    /// <https://github.github.com/gfm/>.
    pub fn gfm() -> Self {
        // Start from the safe `CommonMark` defaults and flip on the tagfilter.
        let mut options = Self::default();
        options.gfm_tagfilter = true;
        options
    }
}
/// Configuration that describes how to parse from markdown.
///
/// You can use this:
///
/// * To control what markdown constructs are turned on and off
/// * To control some of those constructs
/// * To add support for certain programming languages when parsing MDX
///
/// In most cases, you will want to use the default trait or `gfm` method.
///
/// ## Examples
///
/// ```
/// use markdown::ParseOptions;
/// # fn main() {
///
/// // Use the default trait to parse markdown according to `CommonMark`:
/// let commonmark = ParseOptions::default();
///
/// // Use the `gfm` method to parse markdown according to GFM:
/// let gfm = ParseOptions::gfm();
/// # }
/// ```
#[allow(clippy::struct_excessive_bools)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(default, rename_all = "camelCase")
)]
pub struct ParseOptions {
    // Note: when adding fields, don’t forget to add them to `fmt::Debug` below.
    /// Which constructs to enable and disable.
    ///
    /// The default is to follow `CommonMark`.
    ///
    /// ## Examples
    ///
    /// ```
    /// use markdown::{to_html, to_html_with_options, Constructs, Options, ParseOptions};
    /// # fn main() -> Result<(), markdown::message::Message> {
    ///
    /// // `markdown-rs` follows CommonMark by default:
    /// assert_eq!(
    ///     to_html("    indented code?"),
    ///     "<pre><code>indented code?\n</code></pre>"
    /// );
    ///
    /// // Pass `constructs` to choose what to enable and disable:
    /// assert_eq!(
    ///     to_html_with_options(
    ///         "    indented code?",
    ///         &Options {
    ///             parse: ParseOptions {
    ///                 constructs: Constructs {
    ///                     code_indented: false,
    ///                     ..Constructs::default()
    ///                 },
    ///                 ..ParseOptions::default()
    ///             },
    ///             ..Options::default()
    ///         }
    ///     )?,
    ///     "<p>indented code?</p>"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    #[cfg_attr(feature = "serde", serde(default))]
    pub constructs: Constructs,
    /// Whether to support GFM strikethrough with a single tilde
    ///
    /// This option does nothing if `gfm_strikethrough` is not turned on in
    /// `constructs`.
    /// This option does not affect strikethrough with double tildes.
    ///
    /// The default is `true`, which follows how markdown on `github.com`
    /// works, as strikethrough with single tildes is supported.
    /// Pass `false`, to follow the GFM spec more strictly, by not allowing
    /// strikethrough with single tildes.
    ///
    /// ## Examples
    ///
    /// ```
    /// use markdown::{to_html_with_options, Constructs, Options, ParseOptions};
    /// # fn main() -> Result<(), markdown::message::Message> {
    ///
    /// // `markdown-rs` supports single tildes by default:
    /// assert_eq!(
    ///     to_html_with_options(
    ///         "~a~",
    ///         &Options {
    ///             parse: ParseOptions {
    ///                 constructs: Constructs::gfm(),
    ///                 ..ParseOptions::default()
    ///             },
    ///             ..Options::default()
    ///         }
    ///     )?,
    ///     "<p><del>a</del></p>"
    /// );
    ///
    /// // Pass `gfm_strikethrough_single_tilde: false` to turn that off:
    /// assert_eq!(
    ///     to_html_with_options(
    ///         "~a~",
    ///         &Options {
    ///             parse: ParseOptions {
    ///                 constructs: Constructs::gfm(),
    ///                 gfm_strikethrough_single_tilde: false,
    ///                 ..ParseOptions::default()
    ///             },
    ///             ..Options::default()
    ///         }
    ///     )?,
    ///     "<p>~a~</p>"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    // NOTE(review): the field-level `serde(default)` takes precedence over the
    // struct-level `serde(default)` and uses the *type's* default (`false` for
    // `bool`), not the `true` value from the manual `Default` impl below —
    // confirm that a missing field is intended to deserialize as `false`.
    #[cfg_attr(feature = "serde", serde(default))]
    pub gfm_strikethrough_single_tilde: bool,
    /// Whether to support math (text) with a single dollar
    ///
    /// This option does nothing if `math_text` is not turned on in
    /// `constructs`.
    /// This option does not affect math (text) with two or more dollars.
    ///
    /// The default is `true`, which is more close to how code (text) and
    /// Pandoc work, as it allows math with a single dollar to form.
    /// However, single dollars can interfere with “normal” dollars in text.
    /// Pass `false`, to only allow math (text) to form when two or more
    /// dollars are used.
    /// If you pass `false`, you can still use two or more dollars for text
    /// math.
    ///
    /// ## Examples
    ///
    /// ```
    /// use markdown::{to_html_with_options, Constructs, Options, ParseOptions};
    /// # fn main() -> Result<(), markdown::message::Message> {
    ///
    /// // `markdown-rs` supports single dollars by default:
    /// assert_eq!(
    ///     to_html_with_options(
    ///         "$a$",
    ///         &Options {
    ///             parse: ParseOptions {
    ///                 constructs: Constructs {
    ///                     math_text: true,
    ///                     ..Constructs::default()
    ///                 },
    ///                 ..ParseOptions::default()
    ///             },
    ///             ..Options::default()
    ///         }
    ///     )?,
    ///     "<p><code class=\"language-math math-inline\">a</code></p>"
    /// );
    ///
    /// // Pass `math_text_single_dollar: false` to turn that off:
    /// assert_eq!(
    ///     to_html_with_options(
    ///         "$a$",
    ///         &Options {
    ///             parse: ParseOptions {
    ///                 constructs: Constructs {
    ///                     math_text: true,
    ///                     ..Constructs::default()
    ///                 },
    ///                 math_text_single_dollar: false,
    ///                 ..ParseOptions::default()
    ///             },
    ///             ..Options::default()
    ///         }
    ///     )?,
    ///     "<p>$a$</p>"
    /// );
    /// # Ok(())
    /// # }
    /// ```
    // NOTE(review): same `serde(default)` precedence caveat as on
    // `gfm_strikethrough_single_tilde` above.
    #[cfg_attr(feature = "serde", serde(default))]
    pub math_text_single_dollar: bool,
    /// Function to parse expressions with.
    ///
    /// This function can be used to add support for arbitrary programming
    /// languages within expressions.
    ///
    /// It only makes sense to pass this when compiling to a syntax tree
    /// with [`to_mdast()`][crate::to_mdast()].
    ///
    /// For an example that adds support for JavaScript with SWC, see
    /// `tests/test_utils/mod.rs`.
    // Skipped by serde: boxed functions cannot be (de)serialized.
    #[cfg_attr(feature = "serde", serde(skip))]
    pub mdx_expression_parse: Option<Box<MdxExpressionParse>>,
    /// Function to parse ESM with.
    ///
    /// This function can be used to add support for arbitrary programming
    /// languages within ESM blocks, however, the keywords (`export`,
    /// `import`) are currently hardcoded JavaScript-specific.
    ///
    /// > 👉 **Note**: please raise an issue if you’re interested in working on
    /// > MDX that is aware of, say, Rust, or other programming languages.
    ///
    /// It only makes sense to pass this when compiling to a syntax tree
    /// with [`to_mdast()`][crate::to_mdast()].
    ///
    /// For an example that adds support for JavaScript with SWC, see
    /// `tests/test_utils/mod.rs`.
    // Skipped by serde: boxed functions cannot be (de)serialized.
    #[cfg_attr(feature = "serde", serde(skip))]
    pub mdx_esm_parse: Option<Box<MdxEsmParse>>,
    // Note: when adding fields, don’t forget to add them to `fmt::Debug` below.
}
impl fmt::Debug for ParseOptions {
    /// Format the options, showing `"[Function]"` placeholders for the parse
    /// callbacks (closures have no useful `Debug` representation).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let expression_parse = self.mdx_expression_parse.as_ref().map(|_| "[Function]");
        let esm_parse = self.mdx_esm_parse.as_ref().map(|_| "[Function]");
        let mut debug = f.debug_struct("ParseOptions");
        debug.field("constructs", &self.constructs);
        debug.field(
            "gfm_strikethrough_single_tilde",
            &self.gfm_strikethrough_single_tilde,
        );
        debug.field("math_text_single_dollar", &self.math_text_single_dollar);
        debug.field("mdx_expression_parse", &expression_parse);
        debug.field("mdx_esm_parse", &esm_parse);
        debug.finish()
    }
}
impl Default for ParseOptions {
/// `CommonMark` defaults.
fn default() -> Self {
Self {
constructs: Constructs::default(),
gfm_strikethrough_single_tilde: true,
math_text_single_dollar: true,
mdx_expression_parse: None,
mdx_esm_parse: None,
}
}
}
impl ParseOptions {
    /// GFM.
    ///
    /// GFM stands for GitHub flavored markdown.
    /// It extends `CommonMark` with autolink literals, footnotes,
    /// strikethrough, tables, and tasklists.
    ///
    /// For more information, see the GFM specification:
    /// <https://github.github.com/gfm/>
    pub fn gfm() -> Self {
        Self {
            constructs: Constructs::gfm(),
            ..Default::default()
        }
    }
    /// MDX.
    ///
    /// This turns on `CommonMark`, turns off some conflicting constructs
    /// (autolinks, code (indented), and HTML), and turns on MDX (ESM,
    /// expressions, and JSX).
    ///
    /// For more information, see the MDX website:
    /// <https://mdxjs.com>.
    ///
    /// > 👉 **Note**: to support ESM, you *must* also pass
    /// > [`mdx_esm_parse`][MdxEsmParse] in [`ParseOptions`][];
    /// > otherwise, ESM is treated as normal markdown.
    /// >
    /// > You *can* pass
    /// > [`mdx_expression_parse`][MdxExpressionParse]
    /// > to parse expressions according to a certain grammar (typically, a
    /// > programming language); otherwise, expressions are parsed with a
    /// > basic algorithm that only cares about braces.
    pub fn mdx() -> Self {
        Self {
            constructs: Constructs::mdx(),
            ..Default::default()
        }
    }
}
/// Configuration that describes how to parse from markdown and compile to
/// HTML.
///
/// In most cases, you will want to use the default trait or `gfm` method.
///
/// ## Examples
///
/// ```
/// use markdown::Options;
/// # fn main() {
///
/// // Use the default trait to compile markdown to HTML according to `CommonMark`:
/// let commonmark = Options::default();
///
/// // Use the `gfm` method to compile markdown to HTML according to GFM:
/// let gfm = Options::gfm();
/// # }
/// ```
#[allow(clippy::struct_excessive_bools)]
// The derived `Default` yields safe `CommonMark` behavior: it delegates to
// `ParseOptions::default()` and `CompileOptions::default()`.
#[derive(Debug, Default)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(default)
)]
pub struct Options {
    /// Configuration that describes how to parse from markdown.
    pub parse: ParseOptions,
    /// Configuration that describes how to compile to HTML.
    pub compile: CompileOptions,
}
impl Options {
/// GFM.
///
/// GFM stands for GitHub flavored markdown.
/// GFM extends `CommonMark` and adds support for autolink literals,
/// footnotes, strikethrough, tables, and tasklists.
/// On the compilation side, GFM turns on the GFM tag filter.
/// The tagfilter is useless, but it’s included here for consistency.
///
/// For more information, see the GFM specification:
/// <https://github.github.com/gfm/>
pub fn gfm() -> Self {
Self {
parse: ParseOptions::gfm(),
compile: CompileOptions::gfm(),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::util::mdx::Signal;
    use alloc::format;

    /// `Constructs`: defaults follow `CommonMark`; `gfm`/`mdx` shortcuts
    /// enable their respective extension sets.
    #[test]
    fn test_constructs() {
        Constructs::default();
        Constructs::gfm();
        Constructs::mdx();

        let constructs = Constructs::default();
        assert!(constructs.attention, "should default to `CommonMark` (1)");
        assert!(
            !constructs.gfm_autolink_literal,
            "should default to `CommonMark` (2)"
        );
        assert!(
            !constructs.mdx_jsx_flow,
            "should default to `CommonMark` (3)"
        );
        assert!(
            !constructs.frontmatter,
            "should default to `CommonMark` (4)"
        );

        let constructs = Constructs::gfm();
        assert!(constructs.attention, "should support `gfm` shortcut (1)");
        assert!(
            constructs.gfm_autolink_literal,
            "should support `gfm` shortcut (2)"
        );
        assert!(
            !constructs.mdx_jsx_flow,
            "should support `gfm` shortcut (3)"
        );
        assert!(!constructs.frontmatter, "should support `gfm` shortcut (4)");

        let constructs = Constructs::mdx();
        // Fixed assertion message: this checks the `mdx` shortcut, not `gfm`.
        assert!(constructs.attention, "should support `mdx` shortcut (1)");
        assert!(
            !constructs.gfm_autolink_literal,
            "should support `mdx` shortcut (2)"
        );
        assert!(constructs.mdx_jsx_flow, "should support `mdx` shortcut (3)");
        assert!(!constructs.frontmatter, "should support `mdx` shortcut (4)");
    }

    /// `ParseOptions`: defaults, shortcuts, and the manual `Debug` impl
    /// (including the `"[Function]"` placeholders for parse callbacks).
    #[test]
    fn test_parse_options() {
        ParseOptions::default();
        ParseOptions::gfm();
        ParseOptions::mdx();

        let options = ParseOptions::default();
        assert!(
            options.constructs.attention,
            "should default to `CommonMark` (1)"
        );
        assert!(
            !options.constructs.gfm_autolink_literal,
            "should default to `CommonMark` (2)"
        );
        assert!(
            !options.constructs.mdx_jsx_flow,
            "should default to `CommonMark` (3)"
        );

        let options = ParseOptions::gfm();
        assert!(
            options.constructs.attention,
            "should support `gfm` shortcut (1)"
        );
        assert!(
            options.constructs.gfm_autolink_literal,
            "should support `gfm` shortcut (2)"
        );
        assert!(
            !options.constructs.mdx_jsx_flow,
            "should support `gfm` shortcut (3)"
        );

        let options = ParseOptions::mdx();
        assert!(
            options.constructs.attention,
            "should support `mdx` shortcut (1)"
        );
        assert!(
            !options.constructs.gfm_autolink_literal,
            "should support `mdx` shortcut (2)"
        );
        assert!(
            options.constructs.mdx_jsx_flow,
            "should support `mdx` shortcut (3)"
        );

        assert_eq!(
            format!("{:?}", ParseOptions::default()),
            "ParseOptions { constructs: Constructs { attention: true, autolink: true, block_quote: true, character_escape: true, character_reference: true, code_indented: true, code_fenced: true, code_text: true, definition: true, frontmatter: false, gfm_autolink_literal: false, gfm_footnote_definition: false, gfm_label_start_footnote: false, gfm_strikethrough: false, gfm_table: false, gfm_task_list_item: false, hard_break_escape: true, hard_break_trailing: true, heading_atx: true, heading_setext: true, html_flow: true, html_text: true, label_start_image: true, label_start_link: true, label_end: true, list_item: true, math_flow: false, math_text: false, mdx_esm: false, mdx_expression_flow: false, mdx_expression_text: false, mdx_jsx_flow: false, mdx_jsx_text: false, thematic_break: true }, gfm_strikethrough_single_tilde: true, math_text_single_dollar: true, mdx_expression_parse: None, mdx_esm_parse: None }",
            "should support `Debug` trait"
        );
        assert_eq!(
            format!("{:?}", ParseOptions {
                mdx_esm_parse: Some(Box::new(|_value| {
                    Signal::Ok
                })),
                mdx_expression_parse: Some(Box::new(|_value, _kind| {
                    Signal::Ok
                })),
                ..Default::default()
            }),
            "ParseOptions { constructs: Constructs { attention: true, autolink: true, block_quote: true, character_escape: true, character_reference: true, code_indented: true, code_fenced: true, code_text: true, definition: true, frontmatter: false, gfm_autolink_literal: false, gfm_footnote_definition: false, gfm_label_start_footnote: false, gfm_strikethrough: false, gfm_table: false, gfm_task_list_item: false, hard_break_escape: true, hard_break_trailing: true, heading_atx: true, heading_setext: true, html_flow: true, html_text: true, label_start_image: true, label_start_link: true, label_end: true, list_item: true, math_flow: false, math_text: false, mdx_esm: false, mdx_expression_flow: false, mdx_expression_text: false, mdx_jsx_flow: false, mdx_jsx_text: false, thematic_break: true }, gfm_strikethrough_single_tilde: true, math_text_single_dollar: true, mdx_expression_parse: Some(\"[Function]\"), mdx_esm_parse: Some(\"[Function]\") }",
            "should support `Debug` trait on mdx functions"
        );
    }

    /// `CompileOptions`: both defaults and the `gfm` shortcut stay safe
    /// (`allow_dangerous_html` off); `gfm` additionally enables the tagfilter.
    #[test]
    fn test_compile_options() {
        CompileOptions::default();
        CompileOptions::gfm();

        let options = CompileOptions::default();
        assert!(
            !options.allow_dangerous_html,
            "should default to safe `CommonMark` (1)"
        );
        assert!(
            !options.gfm_tagfilter,
            "should default to safe `CommonMark` (2)"
        );

        let options = CompileOptions::gfm();
        assert!(
            !options.allow_dangerous_html,
            "should support safe `gfm` shortcut (1)"
        );
        // Fixed assertion message numbering: this is the second check.
        assert!(
            options.gfm_tagfilter,
            "should support safe `gfm` shortcut (2)"
        );
    }

    /// `Options`: the combined defaults and the `gfm` shortcut compose the
    /// parse and compile halves and stay safe.
    #[test]
    fn test_options() {
        Options::default();

        let options = Options::default();
        assert!(
            options.parse.constructs.attention,
            "should default to safe `CommonMark` (1)"
        );
        assert!(
            !options.parse.constructs.gfm_autolink_literal,
            "should default to safe `CommonMark` (2)"
        );
        assert!(
            !options.parse.constructs.mdx_jsx_flow,
            "should default to safe `CommonMark` (3)"
        );
        assert!(
            !options.compile.allow_dangerous_html,
            "should default to safe `CommonMark` (4)"
        );

        let options = Options::gfm();
        assert!(
            options.parse.constructs.attention,
            "should support safe `gfm` shortcut (1)"
        );
        assert!(
            options.parse.constructs.gfm_autolink_literal,
            "should support safe `gfm` shortcut (2)"
        );
        assert!(
            !options.parse.constructs.mdx_jsx_flow,
            "should support safe `gfm` shortcut (3)"
        );
        assert!(
            !options.compile.allow_dangerous_html,
            "should support safe `gfm` shortcut (4)"
        );
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/attention.rs | Rust | //! Attention (emphasis, strong, optionally GFM strikethrough) occurs in the
//! [text][] content type.
//!
//! ## Grammar
//!
//! Attention sequences form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! attention_sequence ::= 1*'*' | 1*'_'
//! gfm_attention_sequence ::= 1*'~'
//! ```
//!
//! Sequences are matched together to form attention based on which character
//! they contain, how long they are, and what character occurs before and after
//! each sequence.
//! Otherwise they are turned into data.
//!
//! ## HTML
//!
//! When asterisk/underscore sequences match, and two markers can be “taken”
//! from them, they together relate to the `<strong>` element in HTML.
//! When one marker can be taken, they relate to the `<em>` element.
//! See [*§ 4.5.2 The `em` element*][html-em] and
//! [*§ 4.5.3 The `strong` element*][html-strong] in the HTML spec for more
//! info.
//!
//! When tilde sequences match, they together relate to the `<del>` element in
//! HTML.
//! See [*§ 4.7.2 The `del` element*][html-del] in the HTML spec for more info.
//!
//! ## Recommendation
//!
//! It is recommended to use asterisks for emphasis/strong attention when
//! writing markdown.
//!
//! There are some small differences in whether sequences can open and/or close
//! based on whether they are formed with asterisks or underscores.
//! Because underscores also frequently occur in natural language inside words,
//! while asterisks typically never do, `CommonMark` prohibits underscore
//! sequences from opening or closing when *inside* a word.
//!
//! Because asterisks can be used to form the most markdown constructs, using
//! them has the added benefit of making it easier to gloss over markdown: you
//! can look for asterisks to find syntax while not worrying about other
//! characters.
//!
//! For strikethrough attention, it is recommended to use two markers.
//! While `github.com` allows single tildes too, it technically prohibits it in
//! their spec.
//!
//! ## Tokens
//!
//! * [`Emphasis`][Name::Emphasis]
//! * [`EmphasisSequence`][Name::EmphasisSequence]
//! * [`EmphasisText`][Name::EmphasisText]
//! * [`GfmStrikethrough`][Name::GfmStrikethrough]
//! * [`GfmStrikethroughSequence`][Name::GfmStrikethroughSequence]
//! * [`GfmStrikethroughText`][Name::GfmStrikethroughText]
//! * [`Strong`][Name::Strong]
//! * [`StrongSequence`][Name::StrongSequence]
//! * [`StrongText`][Name::StrongText]
//!
//! > 👉 **Note**: while parsing, [`AttentionSequence`][Name::AttentionSequence]
//! > is used, which is later compiled away.
//!
//! ## References
//!
//! * [`attention.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/attention.js)
//! * [`micromark-extension-gfm-strikethrough`](https://github.com/micromark/micromark-extension-gfm-strikethrough)
//! * [*§ 6.2 Emphasis and strong emphasis* in `CommonMark`](https://spec.commonmark.org/0.31/#emphasis-and-strong-emphasis)
//! * [*§ 6.5 Strikethrough (extension)* in `GFM`](https://github.github.com/gfm/#strikethrough-extension-)
//!
//! [text]: crate::construct::text
//! [html-em]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-em-element
//! [html-strong]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-strong-element
//! [html-del]: https://html.spec.whatwg.org/multipage/edits.html#the-del-element
use crate::event::{Event, Kind, Name, Point};
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
use crate::util::char::{
after_index as char_after_index, before_index as char_before_index, classify_opt,
Kind as CharacterKind,
};
use alloc::{vec, vec::Vec};
/// Attention sequence that we can take markers from.
#[derive(Debug)]
struct Sequence {
    /// Marker as a byte (`u8`) used in this sequence.
    marker: u8,
    /// We track whether sequences are in balanced events, and where those
    /// events start, so that one attention doesn’t start in say, one link, and
    /// end in another.
    stack: Vec<usize>,
    /// The index into events where this sequence’s `Enter` currently resides.
    index: usize,
    /// The (shifted) point where this sequence starts.
    start_point: Point,
    /// The (shifted) point where this sequence ends.
    end_point: Point,
    /// The number of markers we can still use.
    size: usize,
    /// Whether this sequence can open attention.
    open: bool,
    /// Whether this sequence can close attention.
    close: bool,
}
/// At start of attention.
///
/// ```markdown
/// > | **
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    let constructs = &tokenizer.parse_state.options.constructs;

    // `*`/`_` need the attention construct; `~` needs GFM strikethrough.
    let relevant = match tokenizer.current {
        Some(b'*' | b'_') => constructs.attention,
        Some(b'~') => constructs.gfm_strikethrough,
        _ => false,
    };

    if relevant {
        tokenizer.tokenize_state.marker = tokenizer.current.unwrap();
        tokenizer.enter(Name::AttentionSequence);
        State::Retry(StateName::AttentionInside)
    } else {
        State::Nok
    }
}
/// In sequence.
///
/// ```markdown
/// > | **
///     ^^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Same marker: keep eating the run.
        Some(byte) if byte == tokenizer.tokenize_state.marker => {
            tokenizer.consume();
            State::Next(StateName::AttentionInside)
        }
        // Run ended: close the sequence, schedule the resolver, reset state.
        _ => {
            tokenizer.exit(Name::AttentionSequence);
            tokenizer.register_resolver(ResolveName::Attention);
            tokenizer.tokenize_state.marker = 0;
            State::Ok
        }
    }
}
/// Resolve sequences.
///
/// Walks all attention sequences in the events, pairing each closer with the
/// nearest compatible opener (respecting `CommonMark`’s “multiple of three”
/// rule and the GFM strikethrough size restrictions), then downgrades any
/// sequences that remain unmatched to plain data.
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    // Find all sequences, gather info about them.
    let mut sequences = get_sequences(tokenizer);

    // Now walk through them and match them.
    let mut close = 0;

    while close < sequences.len() {
        let sequence_close = &sequences[close];
        let mut next_index = close + 1;

        // Find a sequence that can close.
        if sequence_close.close {
            let mut open = close;

            // Now walk back to find an opener.
            while open > 0 {
                open -= 1;
                let sequence_open = &sequences[open];

                // An opener matching our closer: it can open, uses the same
                // marker, and lives in the same balanced-event stack (so we
                // never match across, say, two different links).
                if sequence_open.open
                    && sequence_close.marker == sequence_open.marker
                    && sequence_close.stack == sequence_open.stack
                {
                    // If the opening can close or the closing can open,
                    // and the close size *is not* a multiple of three,
                    // but the sum of the opening and closing size *is*
                    // multiple of three, then **don’t** match.
                    if (sequence_open.close || sequence_close.open)
                        && sequence_close.size % 3 != 0
                        && (sequence_open.size + sequence_close.size) % 3 == 0
                    {
                        continue;
                    }

                    // For GFM strikethrough:
                    // * both sequences must have the same size
                    // * more than 2 markers don’t work
                    // * one marker is prohibited by the spec, but supported by GH
                    if sequence_close.marker == b'~'
                        && (sequence_close.size != sequence_open.size
                            || sequence_close.size > 2
                            || sequence_close.size == 1
                                && !tokenizer.parse_state.options.gfm_strikethrough_single_tilde)
                    {
                        continue;
                    }

                    // We found a match!
                    next_index = match_sequences(tokenizer, &mut sequences, open, close);

                    break;
                }
            }
        }

        close = next_index;
    }

    // Mark remaining sequences as data.
    let mut index = 0;
    while index < sequences.len() {
        let sequence = &sequences[index];
        // Rewrite both the `Enter` and the `Exit` of the unused sequence.
        tokenizer.events[sequence.index].name = Name::Data;
        tokenizer.events[sequence.index + 1].name = Name::Data;
        index += 1;
    }

    tokenizer.map.consume(&mut tokenizer.events);

    None
}
/// Get sequences.
///
/// Collects every `AttentionSequence` event into a [`Sequence`], classifying
/// the characters directly before and after it to decide whether it can open
/// and/or close attention, while tracking the stack of enclosing balanced
/// events so matches never cross those boundaries.
fn get_sequences(tokenizer: &mut Tokenizer) -> Vec<Sequence> {
    let mut index = 0;
    // Indices of currently open (unbalanced) surrounding events.
    let mut stack = vec![];
    let mut sequences = vec![];

    while index < tokenizer.events.len() {
        let enter = &tokenizer.events[index];

        if enter.name == Name::AttentionSequence {
            if enter.kind == Kind::Enter {
                // The matching `Exit` directly follows the `Enter`.
                let end = index + 1;
                let exit = &tokenizer.events[end];

                let marker = tokenizer.parse_state.bytes[enter.point.index];
                let before_char = char_before_index(tokenizer.parse_state.bytes, enter.point.index);
                let before = classify_opt(before_char);
                let after_char = char_after_index(tokenizer.parse_state.bytes, exit.point.index);
                let after = classify_opt(after_char);
                // Can open when followed by a word character, or by
                // punctuation that is not preceded by a word character.
                let open = after == CharacterKind::Other
                    || (after == CharacterKind::Punctuation && before != CharacterKind::Other)
                    // For regular attention markers (not strikethrough), the
                    // other attention markers can be used around them
                    || (marker != b'~' && matches!(after_char, Some('*' | '_')))
                    || (marker != b'~' && tokenizer.parse_state.options.constructs.gfm_strikethrough && matches!(after_char, Some('~')));
                // Mirror image of `open` for the preceding character.
                let close = before == CharacterKind::Other
                    || (before == CharacterKind::Punctuation && after != CharacterKind::Other)
                    || (marker != b'~' && matches!(before_char, Some('*' | '_')))
                    || (marker != b'~'
                        && tokenizer.parse_state.options.constructs.gfm_strikethrough
                        && matches!(before_char, Some('~')));

                sequences.push(Sequence {
                    index,
                    stack: stack.clone(),
                    start_point: enter.point.clone(),
                    end_point: exit.point.clone(),
                    size: exit.point.index - enter.point.index,
                    // Underscores are extra restricted: they may not open or
                    // close when directly attached to a word (intraword).
                    open: if marker == b'_' {
                        open && (before != CharacterKind::Other || !close)
                    } else {
                        open
                    },
                    close: if marker == b'_' {
                        close && (after != CharacterKind::Other || !open)
                    } else {
                        close
                    },
                    marker,
                });
            }
        } else if enter.kind == Kind::Enter {
            stack.push(index);
        } else {
            stack.pop();
        }

        index += 1;
    }

    sequences
}
/// Match two sequences.
///
/// Takes one or two markers from the opener at `open` and the closer at
/// `close`, injects the corresponding enter/exit events (emphasis, strong,
/// or GFM strikethrough), shrinks both sequences, and removes the ones that
/// are fully used up.
/// Returns the index the caller’s matching loop should continue from.
#[allow(clippy::too_many_lines)]
fn match_sequences(
    tokenizer: &mut Tokenizer,
    sequences: &mut Vec<Sequence>,
    open: usize,
    close: usize,
) -> usize {
    // Where to move to next.
    // Stay on this closing sequence for the next iteration: it
    // might close more things.
    // It’s changed if sequences are removed.
    let mut next = close;

    // Number of markers to use from the sequence.
    // Two markers form strong, one forms emphasis.
    let take = if sequences[open].size > 1 && sequences[close].size > 1 {
        2
    } else {
        1
    };

    // We’re *on* a closing sequence, with a matching opening
    // sequence.
    // Now we make sure that we can’t have misnested attention:
    //
    // ```html
    // <em>a <strong>b</em> c</strong>
    // ```
    //
    // Do that by marking everything between it as no longer
    // possible to open anything.
    // Theoretically we should mark as `close: false` too, but
    // we don’t look for closers backwards, so it’s not needed.
    let mut between = open + 1;

    while between < close {
        sequences[between].open = false;
        between += 1;
    }

    // Pick the event names for the construct we’re producing.
    let (group_name, seq_name, text_name) = if sequences[open].marker == b'~' {
        (
            Name::GfmStrikethrough,
            Name::GfmStrikethroughSequence,
            Name::GfmStrikethroughText,
        )
    } else if take == 1 {
        (Name::Emphasis, Name::EmphasisSequence, Name::EmphasisText)
    } else {
        (Name::Strong, Name::StrongSequence, Name::StrongText)
    };

    let open_index = sequences[open].index;
    let close_index = sequences[close].index;
    let open_exit = sequences[open].end_point.clone();
    let close_enter = sequences[close].start_point.clone();

    // No need to worry about `VS`, because sequences are only actual characters.
    // Markers are taken from the inner edges: the end of the opener and the
    // start of the closer.
    sequences[open].size -= take;
    sequences[close].size -= take;
    sequences[open].end_point.column -= take;
    sequences[open].end_point.index -= take;
    sequences[close].start_point.column += take;
    sequences[close].start_point.index += take;

    // Opening.
    tokenizer.map.add_before(
        // Add after the current sequence (it might remain).
        open_index + 2,
        0,
        vec![
            Event {
                kind: Kind::Enter,
                name: group_name.clone(),
                point: sequences[open].end_point.clone(),
                link: None,
            },
            Event {
                kind: Kind::Enter,
                name: seq_name.clone(),
                point: sequences[open].end_point.clone(),
                link: None,
            },
            Event {
                kind: Kind::Exit,
                name: seq_name.clone(),
                point: open_exit.clone(),
                link: None,
            },
            Event {
                kind: Kind::Enter,
                name: text_name.clone(),
                point: open_exit,
                link: None,
            },
        ],
    );
    // Closing.
    tokenizer.map.add(
        close_index,
        0,
        vec![
            Event {
                kind: Kind::Exit,
                name: text_name,
                point: close_enter.clone(),
                link: None,
            },
            Event {
                kind: Kind::Enter,
                name: seq_name.clone(),
                point: close_enter,
                link: None,
            },
            Event {
                kind: Kind::Exit,
                name: seq_name,
                point: sequences[close].start_point.clone(),
                link: None,
            },
            Event {
                kind: Kind::Exit,
                name: group_name,
                point: sequences[close].start_point.clone(),
                link: None,
            },
        ],
    );

    // Remove closing sequence if fully used.
    if sequences[close].size == 0 {
        sequences.remove(close);
        tokenizer.map.add(close_index, 2, vec![]);
    } else {
        // Shift remaining closing sequence forward.
        // Do it here because a sequence can open and close different
        // other sequences, and the remainder can be on any side or
        // somewhere in the middle.
        tokenizer.events[close_index].point = sequences[close].start_point.clone();
    }

    if sequences[open].size == 0 {
        sequences.remove(open);
        tokenizer.map.add(open_index, 2, vec![]);
        // Everything shifts one to the left, account for it in next iteration.
        next -= 1;
    } else {
        tokenizer.events[open_index + 1].point = sequences[open].end_point.clone();
    }

    next
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/autolink.rs | Rust | //! Autolink occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! Autolink forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! autolink ::= '<' (url | email) '>'
//!
//! url ::= protocol *url_byte
//! protocol ::= ascii_alphabetic 0*31(protocol_byte) ':'
//! protocol_byte ::= '+' '-' '.' ascii_alphanumeric
//! url_byte ::= byte - ascii_control - ' '
//!
//! email ::= 1*ascii_atext '@' email_domain *('.' email_domain)
//! ; Restriction: up to (including) 63 character are allowed in each domain.
//! email_domain ::= ascii_alphanumeric *(ascii_alphanumeric | '-' ascii_alphanumeric)
//!
//! ascii_atext ::= ascii_alphanumeric | '!' | '"' | '#' | '$' | '%' | '&' | '\'' | '*' | '+' | '-' | '/' | '=' | '?' | '^' | '_' | '`' | '{' | '|' | '}' | '~'
//! ```
//!
//! The maximum allowed size of a scheme is `31` (inclusive), which is defined
//! in [`AUTOLINK_SCHEME_SIZE_MAX`][].
//! The maximum allowed size of a domain is `63` (inclusive), which is defined
//! in [`AUTOLINK_DOMAIN_SIZE_MAX`][].
//!
//! The grammar for autolinks is quite strict and prohibits the use of ASCII control
//! characters or spaces.
//! To use non-ascii characters and otherwise impossible characters in URLs,
//! you can use percent encoding:
//!
//! ```markdown
//! <https://example.com/alpha%20bravo>
//! ```
//!
//! Yields:
//!
//! ```html
//! <p><a href="https://example.com/alpha%20bravo">https://example.com/alpha%20bravo</a></p>
//! ```
//!
//! There are several cases where incorrect encoding of URLs would, in other
//! languages, result in a parse error.
//! In markdown, there are no errors, and URLs are normalized.
//! In addition, many characters are percent encoded
//! ([`sanitize_uri`][sanitize_uri]).
//! For example:
//!
//! ```markdown
//! <https://a👍b%>
//! ```
//!
//! Yields:
//!
//! ```html
//! <p><a href="https://a%F0%9F%91%8Db%25">https://a👍b%</a></p>
//! ```
//!
//! Interestingly, there are a couple of things that are valid autolinks in
//! markdown but in HTML would be valid tags, such as `<svg:rect>` and
//! `<xml:lang/>`.
//! However, because `CommonMark` employs a naïve HTML parsing algorithm, those
//! are not considered HTML.
//!
//! While `CommonMark` restricts links from occurring in other links in the
//! case of labels (see [label end][label_end]), this restriction is not in
//! place for autolinks inside labels:
//!
//! ```markdown
//! [<https://example.com>](#)
//! ```
//!
//! Yields:
//!
//! ```html
//! <p><a href="#"><a href="https://example.com">https://example.com</a></a></p>
//! ```
//!
//! The generated output, in this case, is invalid according to HTML.
//! When a browser sees that markup, it will instead parse it as:
//!
//! ```html
//! <p><a href="#"></a><a href="https://example.com">https://example.com</a></p>
//! ```
//!
//! ## HTML
//!
//! Autolinks relate to the `<a>` element in HTML.
//! See [*§ 4.5.1 The `a` element*][html_a] in the HTML spec for more info.
//! When an email autolink is used (so, without a protocol), the string
//! `mailto:` is prepended before the email, when generating the `href`
//! attribute of the hyperlink.
//!
//! ## Recommendation
//!
//! It is recommended to use labels ([label start link][label_start_link],
//! [label end][label_end]), either with a resource or a definition
//! ([definition][]), instead of autolinks, as those allow more characters in
//! URLs, and allow relative URLs and `www.` URLs.
//! They also allow for descriptive text to explain the URL in prose.
//!
//! ## Tokens
//!
//! * [`Autolink`][Name::Autolink]
//! * [`AutolinkEmail`][Name::AutolinkEmail]
//! * [`AutolinkMarker`][Name::AutolinkMarker]
//! * [`AutolinkProtocol`][Name::AutolinkProtocol]
//!
//! ## References
//!
//! * [`autolink.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/autolink.js)
//! * [*§ 6.4 Autolinks* in `CommonMark`](https://spec.commonmark.org/0.31/#autolinks)
//!
//! [text]: crate::construct::text
//! [definition]: crate::construct::definition
//! [label_start_link]: crate::construct::label_start_link
//! [label_end]: crate::construct::label_end
//! [autolink_scheme_size_max]: crate::util::constant::AUTOLINK_SCHEME_SIZE_MAX
//! [autolink_domain_size_max]: crate::util::constant::AUTOLINK_DOMAIN_SIZE_MAX
//! [sanitize_uri]: crate::util::sanitize_uri
//! [html_a]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-a-element
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::constant::{AUTOLINK_DOMAIN_SIZE_MAX, AUTOLINK_SCHEME_SIZE_MAX};
/// Start of an autolink.
///
/// ```markdown
/// > | a<https://example.com>b
///      ^
/// > | a<user@example.com>b
///      ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Needs the construct enabled and a literal `<`.
    if tokenizer.current != Some(b'<') || !tokenizer.parse_state.options.constructs.autolink {
        return State::Nok;
    }

    tokenizer.enter(Name::Autolink);
    tokenizer.enter(Name::AutolinkMarker);
    tokenizer.consume();
    tokenizer.exit(Name::AutolinkMarker);
    // Optimistically assume a protocol; renamed later if it’s an email.
    tokenizer.enter(Name::AutolinkProtocol);
    State::Next(StateName::AutolinkOpen)
}
/// After `<`, at protocol or atext.
///
/// ```markdown
/// > | a<https://example.com>b
///       ^
/// > | a<user@example.com>b
///       ^
/// ```
pub fn open(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'A'..=b'Z' | b'a'..=b'z')) {
        // ASCII alphabetic: still ambiguous between a scheme and email atext.
        tokenizer.consume();
        State::Next(StateName::AutolinkSchemeOrEmailAtext)
    } else if tokenizer.current == Some(b'@') {
        // An email cannot start with `@`.
        State::Nok
    } else {
        State::Retry(StateName::AutolinkEmailAtext)
    }
}
/// At second byte of protocol or atext.
///
/// ```markdown
/// > | a<https://example.com>b
///        ^
/// > | a<user@example.com>b
///        ^
/// ```
pub fn scheme_or_email_atext(tokenizer: &mut Tokenizer) -> State {
    // ASCII alphanumeric and `+`, `-`, and `.` can continue a scheme.
    if matches!(
        tokenizer.current,
        Some(b'+' | b'-' | b'.' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
    ) {
        // Count the previous alphabetical from `open` too.
        tokenizer.tokenize_state.size = 1;
        State::Retry(StateName::AutolinkSchemeInsideOrEmailAtext)
    } else {
        State::Retry(StateName::AutolinkEmailAtext)
    }
}
/// In ambiguous protocol or atext.
///
/// ```markdown
/// > | a<https://example.com>b
///         ^
/// > | a<user@example.com>b
///         ^
/// ```
pub fn scheme_inside_or_email_atext(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b':') {
        // A colon settles it: this is a URL.
        tokenizer.consume();
        tokenizer.tokenize_state.size = 0;
        State::Next(StateName::AutolinkUrlInside)
    } else if matches!(
        tokenizer.current,
        Some(b'+' | b'-' | b'.' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
    ) && tokenizer.tokenize_state.size < AUTOLINK_SCHEME_SIZE_MAX
    {
        // Still a possible scheme byte, within the scheme length limit.
        tokenizer.consume();
        tokenizer.tokenize_state.size += 1;
        State::Next(StateName::AutolinkSchemeInsideOrEmailAtext)
    } else {
        // Not a scheme after all: reset and fall back to email atext.
        tokenizer.tokenize_state.size = 0;
        State::Retry(StateName::AutolinkEmailAtext)
    }
}
/// After protocol, in URL.
///
/// ```markdown
/// > | a<https://example.com>b
///             ^
/// ```
pub fn url_inside(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'>') {
        // End of the autolink.
        tokenizer.exit(Name::AutolinkProtocol);
        tokenizer.enter(Name::AutolinkMarker);
        tokenizer.consume();
        tokenizer.exit(Name::AutolinkMarker);
        tokenizer.exit(Name::Autolink);
        State::Ok
    } else if matches!(tokenizer.current, None | Some(b'\0'..=0x1F | b' ' | b'<' | 0x7F)) {
        // EOF, ASCII control, space, or `<`: not allowed in a URL.
        State::Nok
    } else {
        tokenizer.consume();
        State::Next(StateName::AutolinkUrlInside)
    }
}
/// In email atext.
///
/// ```markdown
/// > | a<user.name@example.com>b
///              ^
/// ```
pub fn email_atext(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'@') {
        // Local part done; move on to the domain.
        tokenizer.consume();
        State::Next(StateName::AutolinkEmailAtSignOrDot)
    } else if matches!(
        tokenizer.current,
        Some(b'#'..=b'\'' | b'*' | b'+' | b'-'..=b'9' | b'=' | b'?' | b'A'..=b'Z' | b'^'..=b'~')
    ) {
        // ASCII atext.
        //
        // atext is an ASCII alphanumeric (see [`is_ascii_alphanumeric`][]), or
        // a byte in the inclusive ranges U+0023 NUMBER SIGN (`#`) to U+0027
        // APOSTROPHE (`'`), U+002A ASTERISK (`*`), U+002B PLUS SIGN (`+`),
        // U+002D DASH (`-`), U+002F SLASH (`/`), U+003D EQUALS TO (`=`),
        // U+003F QUESTION MARK (`?`), U+005E CARET (`^`) to U+0060 GRAVE
        // ACCENT (`` ` ``), or U+007B LEFT CURLY BRACE (`{`) to U+007E TILDE
        // (`~`).
        // Note that lowercase letters fall inside the `^..=~` range.
        //
        // See:
        // **\[RFC5322]**:
        // [Internet Message Format](https://tools.ietf.org/html/rfc5322).
        // P. Resnick.
        // IETF.
        //
        // [`is_ascii_alphanumeric`]: char::is_ascii_alphanumeric
        tokenizer.consume();
        State::Next(StateName::AutolinkEmailAtext)
    } else {
        State::Nok
    }
}
/// In label, after at-sign or dot.
///
/// ```markdown
/// > | a<user.name@example.com>b
///                 ^       ^
/// ```
pub fn email_at_sign_or_dot(tokenizer: &mut Tokenizer) -> State {
    // A domain label must start with an ASCII alphanumeric.
    if matches!(tokenizer.current, Some(b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')) {
        State::Retry(StateName::AutolinkEmailValue)
    } else {
        State::Nok
    }
}
/// In label, where `.` and `>` are allowed.
///
/// ```markdown
/// > | a<user.name@example.com>b
///                        ^
/// ```
pub fn email_label(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'.') {
        // Dot starts a new domain label; reset the label size counter.
        tokenizer.consume();
        tokenizer.tokenize_state.size = 0;
        State::Next(StateName::AutolinkEmailAtSignOrDot)
    } else if tokenizer.current == Some(b'>') {
        // End of the autolink: retroactively rename the protocol events,
        // because we now know this was an email, not a URL.
        let index = tokenizer.events.len();
        tokenizer.exit(Name::AutolinkProtocol);
        // Change the event name.
        tokenizer.events[index - 1].name = Name::AutolinkEmail;
        tokenizer.events[index].name = Name::AutolinkEmail;
        tokenizer.enter(Name::AutolinkMarker);
        tokenizer.consume();
        tokenizer.exit(Name::AutolinkMarker);
        tokenizer.exit(Name::Autolink);
        tokenizer.tokenize_state.size = 0;
        State::Ok
    } else {
        State::Retry(StateName::AutolinkEmailValue)
    }
}
/// In label, where `.` and `>` are *not* allowed.
///
/// Though, this is also used in `email_label` to parse other values.
///
/// ```markdown
/// > | a<user.name@ex-ample.com>b
///                    ^
/// ```
pub fn email_value(tokenizer: &mut Tokenizer) -> State {
    let within_limit = tokenizer.tokenize_state.size < AUTOLINK_DOMAIN_SIZE_MAX;

    // ASCII alphanumeric or `-`, while the label is short enough.
    if within_limit
        && matches!(
            tokenizer.current,
            Some(b'-' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
        )
    {
        // After a dash we must stay in “value”: a label cannot end on `-`.
        let next = if matches!(tokenizer.current, Some(b'-')) {
            StateName::AutolinkEmailValue
        } else {
            StateName::AutolinkEmailLabel
        };
        tokenizer.tokenize_state.size += 1;
        tokenizer.consume();
        State::Next(next)
    } else {
        tokenizer.tokenize_state.size = 0;
        State::Nok
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/blank_line.rs | Rust | //! Blank lines occur in the [flow][] content type.
//!
//! ## Grammar
//!
//! Blank lines form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! blank_line ::= *space_or_tab
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! Blank lines are sometimes needed, such as to differentiate a [paragraph][]
//! from a definition.
//! In several cases, blank lines are not needed between flow constructs,
//! such as between two [heading (atx)][heading_atx]s.
//! Sometimes, whether blank lines are present, changes the behavior of how
//! HTML is rendered, such as whether blank lines are present inside or between
//! [list items][list_item].
//! More than one blank line is never needed in `CommonMark`.
//!
//! Because blank lines can be empty (line endings are not considered part of
//! it), and events cannot be empty, blank lines are not present as an event.
//!
//! ## HTML
//!
//! Blank lines do not relate to an element in HTML, except for the role they
//! play when inside or between [list items][list_item].
//!
//! ## Recommendation
//!
//! It is recommended to always use a blank line between every flow construct,
//! to use blank lines (consistently) between list items as desired, and to
//! never use more than one blank line.
//!
//! ## Tokens
//!
//! * [`SpaceOrTab`][crate::event::Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`blank-line.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/blank-line.js)
//! * [*§ 4.9 Blank lines* in `CommonMark`](https://spec.commonmark.org/0.31/#blank-lines)
//!
//! [heading_atx]: crate::construct::heading_atx
//! [list_item]: crate::construct::list_item
//! [paragraph]: crate::construct::paragraph
//! [flow]: crate::construct::flow
use crate::construct::partial_space_or_tab::space_or_tab;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Start of blank line.
///
/// > 👉 **Note**: `␠` represents a space character.
///
/// ```markdown
/// > | ␠␠␊
///     ^
/// > | ␊
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Leading whitespace: eat it first, then check for eol/eof.
        Some(b'\t' | b' ') => {
            tokenizer.attempt(State::Next(StateName::BlankLineAfter), State::Nok);
            State::Retry(space_or_tab(tokenizer))
        }
        _ => State::Retry(StateName::BlankLineAfter),
    }
}
/// At eof/eol, after optional whitespace.
///
/// ```markdown
/// > | ␠␠␊
///       ^
/// > | ␊
///     ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // A blank line is only valid when the rest of the line is empty.
    if matches!(tokenizer.current, None | Some(b'\n')) {
        State::Ok
    } else {
        State::Nok
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/block_quote.rs | Rust | //! Block quotes occur in the [document][] content type.
//!
//! ## Grammar
//!
//! Block quotes form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! block_quote_start ::= '>' [ space_or_tab ]
//! block_quote_cont ::= '>' [ space_or_tab ]
//! ```
//!
//! Further lines that are not prefixed with `block_quote_cont` cause the block
//! quote to be exited, except when those lines are lazy continuation.
//! Like so many things in markdown, block quotes too are complex.
//! See [*§ Phase 1: block structure* in `CommonMark`][commonmark-block] for
//! more on parsing details.
//!
//! As block quote is a container, it takes several bytes from the start of the
//! line, while the rest of the line includes more containers or flow.
//!
//! ## HTML
//!
//! Block quote relates to the `<blockquote>` element in HTML.
//! See [*§ 4.4.4 The `blockquote` element*][html-blockquote] in the HTML spec
//! for more info.
//!
//! ## Recommendation
//!
//! Always use a single space after a block quote marker (`>`).
//! Never use lazy continuation.
//!
//! ## Tokens
//!
//! * [`BlockQuote`][Name::BlockQuote]
//! * [`BlockQuoteMarker`][Name::BlockQuoteMarker]
//! * [`BlockQuotePrefix`][Name::BlockQuotePrefix]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`block-quote.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/block-quote.js)
//! * [*§ 5.1 Block quotes* in `CommonMark`](https://spec.commonmark.org/0.31/#block-quotes)
//!
//! [document]: crate::construct::document
//! [html-blockquote]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-blockquote-element
//! [commonmark-block]: https://spec.commonmark.org/0.31/#phase-1-block-structure
use crate::construct::partial_space_or_tab::space_or_tab_min_max;
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::constant::TAB_SIZE;
/// Start of block quote.
///
/// ```markdown
/// > | > a
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Bail out early when the construct is disabled.
    if !tokenizer.parse_state.options.constructs.block_quote {
        return State::Nok;
    }

    tokenizer.enter(Name::BlockQuote);
    State::Retry(StateName::BlockQuoteContStart)
}
/// Start of block quote continuation.
///
/// Also used to parse the first block quote opening.
///
/// ```markdown
///   | > a
/// > | > b
///     ^
/// ```
pub fn cont_start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'\t' | b' ') => {
            tokenizer.attempt(State::Next(StateName::BlockQuoteContBefore), State::Nok);
            // With indented code enabled, at most `TAB_SIZE - 1` spaces of
            // indent are allowed before the marker; otherwise any amount.
            let max = if tokenizer.parse_state.options.constructs.code_indented {
                TAB_SIZE - 1
            } else {
                usize::MAX
            };
            State::Retry(space_or_tab_min_max(tokenizer, 1, max))
        }
        _ => State::Retry(StateName::BlockQuoteContBefore),
    }
}
/// At `>`, after optional whitespace.
///
/// Also used to parse the first block quote opening.
///
/// ```markdown
///   | > a
/// > | > b
///     ^
/// ```
pub fn cont_before(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'>') {
        tokenizer.enter(Name::BlockQuotePrefix);
        tokenizer.enter(Name::BlockQuoteMarker);
        tokenizer.consume();
        tokenizer.exit(Name::BlockQuoteMarker);
        State::Next(StateName::BlockQuoteContAfter)
    } else {
        State::Nok
    }
}
/// After `>`, before optional whitespace.
///
/// ```markdown
/// > | > a
///      ^
/// > | >b
///      ^
/// ```
pub fn cont_after(tokenizer: &mut Tokenizer) -> State {
    // Consume one optional space or tab as part of the prefix.
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.enter(Name::SpaceOrTab);
        tokenizer.consume();
        tokenizer.exit(Name::SpaceOrTab);
    }

    tokenizer.exit(Name::BlockQuotePrefix);
    State::Ok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/character_escape.rs | Rust | //! Character escapes occur in the [string][] and [text][] content types.
//!
//! ## Grammar
//!
//! Character escapes form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! character_escape ::= '\\' ascii_punctuation
//! ```
//!
//! Like much of markdown, there are no “invalid” character escapes: just a
//! slash, or a slash followed by anything other than an ASCII punctuation
//! character, is just a slash.
//!
//! To escape other characters, use a [character reference][character_reference]
//! instead (as in, `&`, `{`, or say `	`).
//!
//! It is also possible to escape a line ending in text with a similar
//! construct: a [hard break (escape)][hard_break_escape] is a backslash followed
//! by a line ending (that is part of the construct instead of ending it).
//!
//! ## Recommendation
//!
//! If possible, use a character escape.
//! Otherwise, use a character reference.
//!
//! ## Tokens
//!
//! * [`CharacterEscape`][Name::CharacterEscape]
//! * [`CharacterEscapeMarker`][Name::CharacterEscapeMarker]
//! * [`CharacterEscapeValue`][Name::CharacterEscapeValue]
//!
//! ## References
//!
//! * [`character-escape.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/character-escape.js)
//! * [*§ 2.4 Backslash escapes* in `CommonMark`](https://spec.commonmark.org/0.31/#backslash-escapes)
//!
//! [string]: crate::construct::string
//! [text]: crate::construct::text
//! [character_reference]: crate::construct::character_reference
//! [hard_break_escape]: crate::construct::hard_break_escape
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Start of character escape.
///
/// ```markdown
/// > | a\*b
///      ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    let enabled = tokenizer.parse_state.options.constructs.character_escape;

    if enabled && tokenizer.current == Some(b'\\') {
        tokenizer.enter(Name::CharacterEscape);
        tokenizer.enter(Name::CharacterEscapeMarker);
        tokenizer.consume();
        tokenizer.exit(Name::CharacterEscapeMarker);
        State::Next(StateName::CharacterEscapeInside)
    } else {
        State::Nok
    }
}
/// After `\`, at punctuation.
///
/// ```markdown
/// > | a\*b
///       ^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    // Only ASCII punctuation can be escaped; anything else leaves the
    // backslash as literal text (handled by `Nok`).
    if matches!(
        tokenizer.current,
        Some(b'!'..=b'/' | b':'..=b'@' | b'['..=b'`' | b'{'..=b'~')
    ) {
        tokenizer.enter(Name::CharacterEscapeValue);
        tokenizer.consume();
        tokenizer.exit(Name::CharacterEscapeValue);
        tokenizer.exit(Name::CharacterEscape);
        State::Ok
    } else {
        State::Nok
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/character_reference.rs | Rust | //! Character references occur in the [string][] and [text][] content types.
//!
//! ## Grammar
//!
//! Character references form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! character_reference ::= '&' (numeric | named) ';'
//!
//! numeric ::= '#' (hexadecimal | decimal)
//! ; Note: Limit of `6` imposed, as all bigger numbers are invalid.
//! hexadecimal ::= ('x' | 'X') 1*6(ascii_hexdigit)
//! ; Note: Limit of `7` imposed, as all bigger numbers are invalid.
//! decimal ::= 1*7(ascii_digit)
//! ; Note: Limit of `31` imposed, for `CounterClockwiseContourIntegral`.
//! ; Note: Limited to any known named character reference (see `constants.rs`)
//! named ::= 1*31(ascii_alphanumeric)
//! ```
//!
//! Like much of markdown, there are no “invalid” character references.
//! However, for security reasons, several numeric character references parse
//! fine but are not rendered as their corresponding character.
//! They are instead replaced by a U+FFFD REPLACEMENT CHARACTER (`�`).
//! See [`decode_numeric`][decode_numeric] for more info.
//!
//! To escape ASCII punctuation characters, use the terser
//! [character escape][character_escape] construct instead (as in, `\&`).
//!
//! Character references in markdown are not the same as character references
//! in HTML.
//! Notably, HTML allows several character references without a closing
//! semicolon.
//! See [*§ 13.2.5.72 Character reference state* in the HTML spec][html] for more info.
//!
//! Character references are parsed insensitive to casing.
//! The casing of hexadecimal numeric character references has no effect.
//! The casing of named character references does not matter when parsing, but
//! does affect whether they match.
//! Depending on the name, one or more cases are allowed, such as that `AMP`
//! and `amp` are both allowed but other cases are not.
//! See [`CHARACTER_REFERENCES`][character_references] for which
//! names match.
//!
//! ## Recommendation
//!
//! If possible, use a character escape.
//! Otherwise, use a character reference.
//!
//! ## Tokens
//!
//! * [`CharacterReference`][Name::CharacterReference]
//! * [`CharacterReferenceMarker`][Name::CharacterReferenceMarker]
//! * [`CharacterReferenceMarkerHexadecimal`][Name::CharacterReferenceMarkerHexadecimal]
//! * [`CharacterReferenceMarkerNumeric`][Name::CharacterReferenceMarkerNumeric]
//! * [`CharacterReferenceMarkerSemi`][Name::CharacterReferenceMarkerSemi]
//! * [`CharacterReferenceValue`][Name::CharacterReferenceValue]
//!
//! ## References
//!
//! * [`character-reference.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/character-reference.js)
//! * [*§ 2.5 Entity and numeric character references* in `CommonMark`](https://spec.commonmark.org/0.31/#entity-and-numeric-character-references)
//!
//! [string]: crate::construct::string
//! [text]: crate::construct::text
//! [character_escape]: crate::construct::character_escape
//! [decode_numeric]: crate::util::character_reference::decode_numeric
//! [character_references]: crate::util::constant::CHARACTER_REFERENCES
//! [html]: https://html.spec.whatwg.org/multipage/parsing.html#character-reference-state
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{
character_reference::{decode_named, value_max, value_test},
slice::Slice,
};
/// Start of character reference.
///
/// ```markdown
/// > | a&amp;b
///      ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Only applicable when the construct is enabled and we are at `&`.
    if !tokenizer.parse_state.options.constructs.character_reference
        || tokenizer.current != Some(b'&')
    {
        return State::Nok;
    }

    tokenizer.enter(Name::CharacterReference);
    tokenizer.enter(Name::CharacterReferenceMarker);
    tokenizer.consume();
    tokenizer.exit(Name::CharacterReferenceMarker);
    State::Next(StateName::CharacterReferenceOpen)
}
/// After `&`, at `#` for numeric references or alphanumeric for named
/// references.
///
/// ```markdown
/// > | a&amp;b
///       ^
/// ```
pub fn open(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Numeric reference: consume the `#` marker first.
        Some(b'#') => {
            tokenizer.enter(Name::CharacterReferenceMarkerNumeric);
            tokenizer.consume();
            tokenizer.exit(Name::CharacterReferenceMarkerNumeric);
            State::Next(StateName::CharacterReferenceNumeric)
        }
        // Named reference: remember the kind (`&`) and go straight to the
        // value without consuming anything here.
        _ => {
            tokenizer.tokenize_state.marker = b'&';
            tokenizer.enter(Name::CharacterReferenceValue);
            State::Retry(StateName::CharacterReferenceValue)
        }
    }
}
/// After `#`, at `x` for hexadecimals or digit for decimals.
///
/// ```markdown
/// > | a&#123;b
///        ^
/// ```
pub fn numeric(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Hexadecimal: consume the `x`/`X` marker and note the kind.
        Some(b'x' | b'X') => {
            tokenizer.enter(Name::CharacterReferenceMarkerHexadecimal);
            tokenizer.consume();
            tokenizer.exit(Name::CharacterReferenceMarkerHexadecimal);
            tokenizer.enter(Name::CharacterReferenceValue);
            tokenizer.tokenize_state.marker = b'x';
            State::Next(StateName::CharacterReferenceValue)
        }
        // Decimal: nothing to consume, retry in the value state.
        _ => {
            tokenizer.tokenize_state.marker = b'#';
            tokenizer.enter(Name::CharacterReferenceValue);
            State::Retry(StateName::CharacterReferenceValue)
        }
    }
}
/// After markers (`&#x`, `&#`, or `&`), in value, before `;`.
///
/// The character reference kind, stored in `tokenize_state.marker` as `&`
/// (named), `#` (decimal), or `x` (hexadecimal), defines which bytes are
/// allowed and how many (see `value_max`/`value_test`).
/// `tokenize_state.size` counts the value bytes consumed so far.
pub fn value(tokenizer: &mut Tokenizer) -> State {
    // A `;` ends the reference — but only if at least one value byte was seen.
    if matches!(tokenizer.current, Some(b';')) && tokenizer.tokenize_state.size > 0 {
        // Named references must additionally match a known name; numeric
        // references of any value parse fine.
        if tokenizer.tokenize_state.marker == b'&' {
            // Guaranteed to be valid ASCII bytes (only alphanumerics were
            // consumed), so `as_str` is safe.
            let slice = Slice::from_indices(
                tokenizer.parse_state.bytes,
                tokenizer.point.index - tokenizer.tokenize_state.size,
                tokenizer.point.index,
            );

            if decode_named(slice.as_str(), true).is_none() {
                // Unknown name: reset shared state before rejecting.
                tokenizer.tokenize_state.marker = 0;
                tokenizer.tokenize_state.size = 0;
                return State::Nok;
            }
        }

        tokenizer.exit(Name::CharacterReferenceValue);
        tokenizer.enter(Name::CharacterReferenceMarkerSemi);
        tokenizer.consume();
        tokenizer.exit(Name::CharacterReferenceMarkerSemi);
        tokenizer.exit(Name::CharacterReference);
        // Reset shared state for the next construct.
        tokenizer.tokenize_state.marker = 0;
        tokenizer.tokenize_state.size = 0;
        return State::Ok;
    }

    if let Some(byte) = tokenizer.current {
        // Keep consuming while within the per-kind length limit and the byte
        // is allowed for this kind.
        if tokenizer.tokenize_state.size < value_max(tokenizer.tokenize_state.marker)
            && value_test(tokenizer.tokenize_state.marker)(&byte)
        {
            tokenizer.tokenize_state.size += 1;
            tokenizer.consume();
            return State::Next(StateName::CharacterReferenceValue);
        }
    }

    // Anything else (eof, disallowed byte, or over the limit): not a
    // character reference; reset shared state.
    tokenizer.tokenize_state.marker = 0;
    tokenizer.tokenize_state.size = 0;
    State::Nok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/code_indented.rs | Rust | //! Code (indented) occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! Code (indented) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! code_indented ::= filled_line *( eol *( blank_line eol ) filled_line )
//!
//! ; Restriction: at least one `line` byte must be `text`.
//! filled_line ::= 4(space_or_tab) *line
//! blank_line ::= *space_or_tab
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! In markdown, it is also possible to use [code (text)][raw_text] in the
//! [text][] content type.
//! It is also possible to create code with the [code (fenced)][raw_flow]
//! construct.
//!
//! ## HTML
//!
//! Code (indented) relates to both the `<pre>` and the `<code>` elements in
//! HTML.
//! See [*§ 4.4.3 The `pre` element*][html_pre] and the [*§ 4.5.15 The `code`
//! element*][html_code] in the HTML spec for more info.
//!
//! ## Recommendation
//!
//! It is recommended to use code (fenced) instead of code (indented).
//! Code (fenced) is more explicit, similar to code (text), and has support
//! for specifying the programming language.
//!
//! ## Tokens
//!
//! * [`CodeIndented`][Name::CodeIndented]
//! * [`CodeFlowChunk`][Name::CodeFlowChunk]
//! * [`LineEnding`][Name::LineEnding]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`code-indented.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/code-indented.js)
//! * [*§ 4.4 Indented code blocks* in `CommonMark`](https://spec.commonmark.org/0.31/#indented-code-blocks)
//!
//! [flow]: crate::construct::flow
//! [text]: crate::construct::text
//! [raw_flow]: crate::construct::raw_flow
//! [raw_text]: crate::construct::raw_text
//! [html_code]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-code-element
//! [html_pre]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-pre-element
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::constant::TAB_SIZE;
/// Start of code (indented).
///
/// > **Parsing note**: it is not needed to check if this first line is a
/// > filled line (that it has a non-whitespace character), because blank
/// > lines are parsed already, so we never run into that.
///
/// ```markdown
/// > |     aaa
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Indented code can never interrupt a paragraph, must be enabled, and
    // must start with whitespace.
    if tokenizer.interrupt
        || !tokenizer.parse_state.options.constructs.code_indented
        || !matches!(tokenizer.current, Some(b'\t' | b' '))
    {
        return State::Nok;
    }

    tokenizer.enter(Name::CodeIndented);
    // Require a full indent (a tab or `TAB_SIZE` spaces) to succeed.
    tokenizer.attempt(State::Next(StateName::CodeIndentedAtBreak), State::Nok);
    State::Retry(space_or_tab_min_max(tokenizer, TAB_SIZE, TAB_SIZE))
}
/// At a break.
///
/// ```markdown
/// > |     aaa
///         ^  ^
/// ```
pub fn at_break(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current.is_none() {
        // Done at eof.
        State::Retry(StateName::CodeIndentedAfter)
    } else if tokenizer.current == Some(b'\n') {
        // Try to continue on a later line; otherwise wrap up.
        tokenizer.attempt(
            State::Next(StateName::CodeIndentedAtBreak),
            State::Next(StateName::CodeIndentedAfter),
        );
        State::Retry(StateName::CodeIndentedFurtherStart)
    } else {
        // Content on this line: start a chunk.
        tokenizer.enter(Name::CodeFlowChunk);
        State::Retry(StateName::CodeIndentedInside)
    }
}
/// In code content.
///
/// ```markdown
/// > |     aaa
///         ^^^^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    // A line ending or eof closes the current chunk; anything else is
    // consumed as code.
    if matches!(tokenizer.current, None | Some(b'\n')) {
        tokenizer.exit(Name::CodeFlowChunk);
        State::Retry(StateName::CodeIndentedAtBreak)
    } else {
        tokenizer.consume();
        State::Next(StateName::CodeIndentedInside)
    }
}
/// After indented code.
///
/// ```markdown
/// > |     aaa
///            ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // Whatever follows is free to interrupt.
    tokenizer.interrupt = false;
    tokenizer.exit(Name::CodeIndented);
    State::Ok
}
/// At eol, trying to parse another indent.
///
/// ```markdown
/// > |     aaa
///            ^
///   |     bbb
/// ```
pub fn further_start(tokenizer: &mut Tokenizer) -> State {
    // Lazy lines and lines piercing into concrete constructs never continue
    // indented code.
    if tokenizer.lazy || tokenizer.pierce {
        State::Nok
    } else if tokenizer.current == Some(b'\n') {
        // Eat the line ending (a blank-ish line) and look again.
        tokenizer.enter(Name::LineEnding);
        tokenizer.consume();
        tokenizer.exit(Name::LineEnding);
        State::Next(StateName::CodeIndentedFurtherStart)
    } else {
        // A fully indented line continues the code; otherwise check whether
        // it is blank.
        tokenizer.attempt(State::Ok, State::Next(StateName::CodeIndentedFurtherBegin));
        State::Retry(space_or_tab_min_max(tokenizer, TAB_SIZE, TAB_SIZE))
    }
}
/// At the beginning of a line that is not indented enough.
///
/// ```markdown
///   |     aaa
/// > |   bbb
///     ^
/// ```
pub fn further_begin(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Some whitespace: this may still be a blank line, which would keep
        // the code open.
        Some(b'\t' | b' ') => {
            tokenizer.attempt(State::Next(StateName::CodeIndentedFurtherAfter), State::Nok);
            State::Retry(space_or_tab(tokenizer))
        }
        _ => State::Nok,
    }
}
/// After whitespace, not indented enough.
///
/// ```markdown
///   |     aaa
/// > |   bbb
///       ^
/// ```
pub fn further_after(tokenizer: &mut Tokenizer) -> State {
    // Only a blank line (whitespace then eol) keeps the code open here.
    if tokenizer.current == Some(b'\n') {
        State::Retry(StateName::CodeIndentedFurtherStart)
    } else {
        State::Nok
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/content.rs | Rust | //! Content occurs in the [flow][] content type.
//!
//! Content contains zero or more [definition][definition]s, followed by zero
//! or one [paragraph][].
//!
//! The constructs found in flow are:
//!
//! * [Definition][crate::construct::definition]
//! * [Paragraph][crate::construct::paragraph]
//!
//! ## Tokens
//!
//! * [`Content`][Name::Content]
//!
//! > 👉 **Note**: while parsing, [`Content`][Name::Content]
//! > is used, which is later compiled away.
//!
//! ## References
//!
//! * [`content.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/content.js)
//!
//! [flow]: crate::construct::flow
//! [definition]: crate::construct::definition
//! [paragraph]: crate::construct::paragraph
use crate::event::{Content, Kind, Link, Name};
use crate::message;
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::{subtokenize, Subresult};
use crate::tokenizer::Tokenizer;
use alloc::vec;
/// Before a content chunk.
///
/// ```markdown
/// > | abc
///     ^
/// ```
pub fn chunk_start(tokenizer: &mut Tokenizer) -> State {
    // The flow parser never hands an empty line to content.
    if matches!(tokenizer.current, None | Some(b'\n')) {
        unreachable!("unexpected eol/eof")
    }

    // Open a linked `Content` chunk; `resolve` later connects the chunks of
    // subsequent lines through these links.
    tokenizer.enter_link(
        Name::Content,
        Link {
            previous: None,
            next: None,
            content: Content::Content,
        },
    );
    State::Retry(StateName::ContentChunkInside)
}
/// In a content chunk.
///
/// ```markdown
/// > | abc
///     ^^^
/// ```
pub fn chunk_inside(tokenizer: &mut Tokenizer) -> State {
    // A chunk spans a single line: eol/eof closes it.
    if matches!(tokenizer.current, None | Some(b'\n')) {
        tokenizer.exit(Name::Content);
        tokenizer.register_resolver_before(ResolveName::Content);
        // You’d be interrupting.
        tokenizer.interrupt = true;
        State::Ok
    } else {
        tokenizer.consume();
        State::Next(StateName::ContentChunkInside)
    }
}
/// Before a definition.
///
/// Try to parse a definition first; if that fails, the remaining content is
/// a paragraph instead.
///
/// ```markdown
/// > | [a]: b
///     ^
/// ```
pub fn definition_before(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::ContentDefinitionAfter),
        State::Next(StateName::ParagraphStart),
    );
    State::Retry(StateName::DefinitionStart)
}
/// After a definition.
///
/// Definitions end at eol/eof; when there is a next line, it may hold
/// another definition (or the start of a paragraph).
///
/// ```markdown
/// > | [a]: b
///           ^
///   | c
/// ```
pub fn definition_after(tokenizer: &mut Tokenizer) -> State {
    debug_assert!(matches!(tokenizer.current, None | Some(b'\n')));
    match tokenizer.current {
        None => State::Ok,
        _ => {
            // Consume the line ending and look for another definition.
            tokenizer.enter(Name::LineEnding);
            tokenizer.consume();
            tokenizer.exit(Name::LineEnding);
            State::Next(StateName::ContentDefinitionBefore)
        }
    }
}
/// Merge `Content` chunks, which currently span a single line, into actual
/// `Content`s that span multiple lines.
///
/// Each pair of adjacent chunks is connected through its `Link`, the line
/// ending events between them are removed, and finally the linked content is
/// subtokenized as one unit.
pub fn resolve(tokenizer: &mut Tokenizer) -> Result<Option<Subresult>, message::Message> {
    let mut index = 0;

    while index < tokenizer.events.len() {
        let event = &tokenizer.events[index];

        if event.kind == Kind::Enter && event.name == Name::Content {
            // Exit:Content
            let mut exit_index = index + 1;

            // Walk forward, absorbing one following line per iteration.
            loop {
                let mut enter_index = exit_index + 1;

                // The chunk must be directly followed by a line ending.
                if enter_index == tokenizer.events.len()
                    || tokenizer.events[enter_index].name != Name::LineEnding
                {
                    break;
                }

                // Skip past line ending (its enter and exit).
                enter_index += 2;

                // Skip past prefix (whitespace and block quote markers) on
                // the next line.
                while enter_index < tokenizer.events.len() {
                    let event = &tokenizer.events[enter_index];

                    if event.name != Name::SpaceOrTab
                        && event.name != Name::BlockQuotePrefix
                        && event.name != Name::BlockQuoteMarker
                    {
                        break;
                    }

                    enter_index += 1;
                }

                // The next line must start with another `Content` chunk.
                if enter_index == tokenizer.events.len()
                    || tokenizer.events[enter_index].name != Name::Content
                {
                    break;
                }

                // Set Exit:Content point to Exit:LineEnding, so the merged
                // chunk covers the line ending too.
                tokenizer.events[exit_index].point = tokenizer.events[exit_index + 2].point.clone();
                // Remove Enter:LineEnding, Exit:LineEnding (applied later via
                // the edit map).
                tokenizer.map.add(exit_index + 1, 2, vec![]);

                // Link Enter:Content to Enter:Content on this line and vice versa.
                tokenizer.events[exit_index - 1].link.as_mut().unwrap().next = Some(enter_index);
                tokenizer.events[enter_index]
                    .link
                    .as_mut()
                    .unwrap()
                    .previous = Some(exit_index - 1);

                // Potential next start.
                exit_index = enter_index + 1;
            }

            // Move to `Exit:Content`.
            index = exit_index;
        }

        index += 1;
    }

    // Apply the queued removals, then parse the merged content.
    tokenizer.map.consume(&mut tokenizer.events);

    let result = subtokenize(
        &mut tokenizer.events,
        tokenizer.parse_state,
        Some(&Content::Content),
    )?;

    Ok(Some(result))
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/definition.rs | Rust | //! Definition occurs in the [content] content type.
//!
//! ## Grammar
//!
//! Definition forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! definition ::= label ':' [ space_or_tab_eol ] destination [ space_or_tab_eol title ] [ space_or_tab ]
//!
//! ; See the `destination`, `title`, and `label` constructs for the BNF of
//! ; those parts.
//! ```
//!
//! This construct must be followed by an eol (line ending) or eof (end of
//! file), like flow constructs.
//!
//! See [`destination`][destination], [`label`][label], and [`title`][title]
//! for grammar, notes, and recommendations on each part.
//!
//! The `destination`, `label`, and `title` parts are interpreted as the
//! [string][] content type.
//! That means that [character escapes][character_escape] and
//! [character references][character_reference] are allowed.
//!
//! Definitions match to references through identifiers.
//! To match, both labels must be equal after normalizing with
//! [`normalize_identifier`][normalize_identifier].
//! One definition can match to multiple references.
//! Multiple definitions with the same, normalized, identifier are ignored: the
//! first definition is preferred.
//! To illustrate, the definition with a destination of `x` wins:
//!
//! ```markdown
//! [a]: x
//! [a]: y
//!
//! [a]
//! ```
//!
//! Importantly, while labels *can* include [string][] content (character
//! escapes and character references), these are not considered when matching.
//! To illustrate, neither definition matches the reference:
//!
//! ```markdown
//! [a&b]: x
//! [a\&b]: y
//!
//! [a&b]
//! ```
//!
//! For info on how to encode characters in URLs, see
//! [`destination`][destination].
//! For info on how characters are encoded as `href` on `<a>` or `src` on
//! `<img>` when compiling, see
//! [`sanitize_uri`][sanitize_uri].
//!
//! ## HTML
//!
//! Definitions in markdown do not, on their own, relate to anything in HTML.
//! When matched with a [label end (reference)][label_end], they together
//! relate to the `<a>` or `<img>` elements in HTML.
//! The definition forms its `href` or `src`, and optionally `title`,
//! attributes.
//! See [*§ 4.5.1 The `a` element*][html_a] and
//! [*§ 4.8.3 The `img` element*][html_img] in the HTML spec for more info.
//!
//! ## Tokens
//!
//! * [`Definition`][Name::Definition]
//! * [`DefinitionDestination`][Name::DefinitionDestination]
//! * [`DefinitionDestinationLiteral`][Name::DefinitionDestinationLiteral]
//! * [`DefinitionDestinationLiteralMarker`][Name::DefinitionDestinationLiteralMarker]
//! * [`DefinitionDestinationRaw`][Name::DefinitionDestinationRaw]
//! * [`DefinitionDestinationString`][Name::DefinitionDestinationString]
//! * [`DefinitionLabel`][Name::DefinitionLabel]
//! * [`DefinitionLabelMarker`][Name::DefinitionLabelMarker]
//! * [`DefinitionLabelString`][Name::DefinitionLabelString]
//! * [`DefinitionMarker`][Name::DefinitionMarker]
//! * [`DefinitionTitle`][Name::DefinitionTitle]
//! * [`DefinitionTitleMarker`][Name::DefinitionTitleMarker]
//! * [`DefinitionTitleString`][Name::DefinitionTitleString]
//! * [`LineEnding`][Name::LineEnding]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`definition.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/definition.js)
//! * [*§ 4.7 Link reference definitions* in `CommonMark`](https://spec.commonmark.org/0.31/#link-reference-definitions)
//!
//! [content]: crate::construct::content
//! [string]: crate::construct::string
//! [character_escape]: crate::construct::character_escape
//! [character_reference]: crate::construct::character_reference
//! [destination]: crate::construct::partial_destination
//! [label]: crate::construct::partial_label
//! [label_end]: crate::construct::label_end
//! [title]: crate::construct::partial_title
//! [sanitize_uri]: crate::util::sanitize_uri::sanitize
//! [normalize_identifier]: crate::util::normalize_identifier
//! [html_a]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-a-element
//! [html_img]: https://html.spec.whatwg.org/multipage/embedded-content.html#the-img-element
use crate::construct::partial_space_or_tab::space_or_tab;
use crate::construct::partial_space_or_tab_eol::space_or_tab_eol;
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{
normalize_identifier::normalize_identifier,
skip,
slice::{Position, Slice},
};
/// At start of a definition.
///
/// ```markdown
/// > | [a]: b "c"
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Do not interrupt paragraphs (but do follow definitions): when
    // interrupting, only continue if the previous event — skipping trailing
    // whitespace and line endings — was itself a definition.
    if tokenizer.parse_state.options.constructs.definition
        && (!tokenizer.interrupt
            || (!tokenizer.events.is_empty()
                && tokenizer.events[skip::opt_back(
                    &tokenizer.events,
                    tokenizer.events.len() - 1,
                    &[Name::LineEnding, Name::SpaceOrTab],
                )]
                .name
                == Name::Definition))
    {
        tokenizer.enter(Name::Definition);

        if matches!(tokenizer.current, Some(b'\t' | b' ')) {
            // Note: arbitrary whitespace allowed even if code (indented) is on.
            tokenizer.attempt(State::Next(StateName::DefinitionBefore), State::Nok);
            State::Retry(space_or_tab(tokenizer))
        } else {
            State::Retry(StateName::DefinitionBefore)
        }
    } else {
        State::Nok
    }
}
/// After optional whitespace, at `[`.
///
/// ```markdown
/// > | [a]: b "c"
///     ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current != Some(b'[') {
        return State::Nok;
    }

    // Tell the shared label subparser which token names to emit.
    tokenizer.tokenize_state.token_1 = Name::DefinitionLabel;
    tokenizer.tokenize_state.token_2 = Name::DefinitionLabelMarker;
    tokenizer.tokenize_state.token_3 = Name::DefinitionLabelString;
    tokenizer.attempt(
        State::Next(StateName::DefinitionLabelAfter),
        State::Next(StateName::DefinitionLabelNok),
    );
    State::Retry(StateName::LabelStart)
}
/// After label.
///
/// ```markdown
/// > | [a]: b "c"
///        ^
/// ```
pub fn label_after(tokenizer: &mut Tokenizer) -> State {
    // Restore the shared token names used by the label subparser.
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;

    match tokenizer.current {
        Some(b':') => {
            // Remember where the label string exited, so the identifier can
            // be extracted once the whole definition succeeds.
            tokenizer.tokenize_state.end = skip::to_back(
                &tokenizer.events,
                tokenizer.events.len() - 1,
                &[Name::DefinitionLabelString],
            );

            tokenizer.enter(Name::DefinitionMarker);
            tokenizer.consume();
            tokenizer.exit(Name::DefinitionMarker);
            State::Next(StateName::DefinitionMarkerAfter)
        }
        _ => State::Nok,
    }
}
/// At a non-label.
///
/// The label attempt failed: restore the shared token names and reject.
///
/// ```markdown
/// > | []
///     ^
/// ```
pub fn label_nok(tokenizer: &mut Tokenizer) -> State {
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    State::Nok
}
/// After marker.
///
/// Whitespace (possibly spanning a line ending) after the `:` is optional:
/// both the ok and nok branches continue at the destination.
///
/// ```markdown
/// > | [a]: b "c"
///         ^
/// ```
pub fn marker_after(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::DefinitionDestinationBefore),
        State::Next(StateName::DefinitionDestinationBefore),
    );
    State::Retry(space_or_tab_eol(tokenizer))
}
/// Before destination.
///
/// ```markdown
/// > | [a]: b "c"
///          ^
/// ```
pub fn destination_before(tokenizer: &mut Tokenizer) -> State {
    // Tell the shared destination subparser which token names to emit.
    tokenizer.tokenize_state.token_1 = Name::DefinitionDestination;
    tokenizer.tokenize_state.token_2 = Name::DefinitionDestinationLiteral;
    tokenizer.tokenize_state.token_3 = Name::DefinitionDestinationLiteralMarker;
    tokenizer.tokenize_state.token_4 = Name::DefinitionDestinationRaw;
    tokenizer.tokenize_state.token_5 = Name::DefinitionDestinationString;
    // NOTE(review): `size_b` appears to be a limit consumed by the
    // destination subparser; `usize::MAX` effectively disables it here —
    // confirm against `partial_destination`.
    tokenizer.tokenize_state.size_b = usize::MAX;
    tokenizer.attempt(
        State::Next(StateName::DefinitionDestinationAfter),
        State::Next(StateName::DefinitionDestinationMissing),
    );
    State::Retry(StateName::DestinationStart)
}
/// After destination.
///
/// ```markdown
/// > | [a]: b "c"
///           ^
/// ```
pub fn destination_after(tokenizer: &mut Tokenizer) -> State {
    // Restore shared state used by the destination subparser.
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    tokenizer.tokenize_state.token_4 = Name::Data;
    tokenizer.tokenize_state.token_5 = Name::Data;
    tokenizer.tokenize_state.size_b = 0;
    // A title is optional: continue at `after` whether one is found or not.
    tokenizer.attempt(
        State::Next(StateName::DefinitionAfter),
        State::Next(StateName::DefinitionAfter),
    );
    State::Retry(StateName::DefinitionTitleBefore)
}
/// Without destination.
///
/// The destination attempt failed: restore all shared state (including the
/// remembered label end index) and reject the definition.
pub fn destination_missing(tokenizer: &mut Tokenizer) -> State {
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    tokenizer.tokenize_state.token_4 = Name::Data;
    tokenizer.tokenize_state.token_5 = Name::Data;
    tokenizer.tokenize_state.size_b = 0;
    tokenizer.tokenize_state.end = 0;
    State::Nok
}
/// After definition.
///
/// ```markdown
/// > | [a]: b
///           ^
/// > | [a]: b "c"
///               ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Trailing whitespace is allowed before the line must end.
        Some(b'\t' | b' ') => {
            tokenizer.attempt(
                State::Next(StateName::DefinitionAfterWhitespace),
                State::Nok,
            );
            State::Retry(space_or_tab(tokenizer))
        }
        _ => State::Retry(StateName::DefinitionAfterWhitespace),
    }
}
/// After definition, after optional whitespace.
///
/// The definition must be followed by eol/eof; on success, its normalized
/// identifier is recorded so references can match it later.
///
/// ```markdown
/// > | [a]: b
///           ^
/// > | [a]: b "c"
///               ^
/// ```
pub fn after_whitespace(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        None | Some(b'\n') => {
            tokenizer.exit(Name::Definition);

            // Note: we don’t care about uniqueness.
            // It’s likely that that doesn’t happen very frequently.
            // It is more likely that it wastes precious time.
            tokenizer.tokenize_state.definitions.push(
                // Note: we don’t care about virtual spaces, so `as_str` is fine.
                normalize_identifier(
                    Slice::from_position(
                        tokenizer.parse_state.bytes,
                        // `end` was set in `label_after` to the exit of the
                        // label string.
                        &Position::from_exit_event(&tokenizer.events, tokenizer.tokenize_state.end),
                    )
                    .as_str(),
                ),
            );

            tokenizer.tokenize_state.end = 0;

            // You’d be interrupting.
            tokenizer.interrupt = true;
            State::Ok
        }
        _ => {
            // Stray content after the definition: reset and reject.
            tokenizer.tokenize_state.end = 0;
            State::Nok
        }
    }
}
/// After destination, at whitespace.
///
/// A title can only follow after whitespace (possibly spanning one line
/// ending).
///
/// ```markdown
/// > | [a]: b
///           ^
/// > | [a]: b "c"
///           ^
/// ```
pub fn title_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'\t' | b'\n' | b' ') => {
            tokenizer.attempt(
                State::Next(StateName::DefinitionTitleBeforeMarker),
                State::Nok,
            );
            State::Retry(space_or_tab_eol(tokenizer))
        }
        _ => State::Nok,
    }
}
/// At title.
///
/// Configure the shared title subparser token names, then try the title.
///
/// ```markdown
///   | [a]: b
/// > | "c"
///     ^
/// ```
pub fn title_before_marker(tokenizer: &mut Tokenizer) -> State {
    tokenizer.tokenize_state.token_1 = Name::DefinitionTitle;
    tokenizer.tokenize_state.token_2 = Name::DefinitionTitleMarker;
    tokenizer.tokenize_state.token_3 = Name::DefinitionTitleString;
    tokenizer.attempt(State::Next(StateName::DefinitionTitleAfter), State::Nok);
    State::Retry(StateName::TitleStart)
}
/// After title.
///
/// ```markdown
/// > | [a]: b "c"
///               ^
/// ```
pub fn title_after(tokenizer: &mut Tokenizer) -> State {
    // Restore the shared title subparser token names.
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;

    match tokenizer.current {
        // Trailing whitespace after the title is allowed.
        Some(b'\t' | b' ') => {
            tokenizer.attempt(
                State::Next(StateName::DefinitionTitleAfterOptionalWhitespace),
                State::Nok,
            );
            State::Retry(space_or_tab(tokenizer))
        }
        _ => State::Retry(StateName::DefinitionTitleAfterOptionalWhitespace),
    }
}
/// After title, after optional whitespace.
///
/// Only eol/eof may follow a titled definition.
///
/// ```markdown
/// > | [a]: b "c"
///               ^
/// ```
pub fn title_after_optional_whitespace(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        State::Ok
    } else {
        State::Nok
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/document.rs | Rust | //! The document content type.
//!
//! **Document** represents the containers, such as block quotes, list items,
//! or GFM footnotes, which structure the document and contain other sections.
//!
//! The constructs found in flow are:
//!
//! * [Block quote][crate::construct::block_quote]
//! * [List item][crate::construct::list_item]
//! * [GFM: Footnote definition][crate::construct::gfm_footnote_definition]
use crate::event::{Content, Event, Kind, Link, Name};
use crate::message;
use crate::state::{Name as StateName, State};
use crate::subtokenize::divide_events;
use crate::tokenizer::{Container, ContainerState, Tokenizer};
use crate::util::skip;
use alloc::{boxed::Box, vec::Vec};
/// Phases where we can exit containers.
///
/// Used to distinguish *when* the container stack is unwound, as the exact
/// moment affects where the exit events are placed.
#[derive(Debug, PartialEq)]
enum Phase {
    /// After parsing a line of lazy flow which resulted in something that
    /// exits containers before the line.
    ///
    /// ```markdown
    ///    | * a
    /// > | ```js
    ///     ^
    ///    |   b
    ///    | ```
    /// ```
    After,
    /// When a new container replaces an existing container.
    ///
    /// ```markdown
    ///    | * a
    /// > | > b
    ///     ^
    /// ```
    Prefix,
    /// After everything (at the end of the document).
    ///
    /// ```markdown
    /// > | * a
    ///        ^
    /// ```
    Eof,
}
/// Start of document, at an optional BOM.
///
/// ```markdown
/// > | a
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Create the child tokenizer that will parse the content remaining after
    // container prefixes are handled.
    tokenizer.tokenize_state.document_child = Some(Box::new(Tokenizer::new(
        tokenizer.point.clone(),
        tokenizer.parse_state,
    )));

    // A BOM is optional: both outcomes continue at frontmatter.
    tokenizer.attempt(
        State::Next(StateName::DocumentBeforeFrontmatter),
        State::Next(StateName::DocumentBeforeFrontmatter),
    );
    State::Retry(StateName::BomStart)
}
/// At optional frontmatter.
///
/// Frontmatter is optional too: both outcomes continue at the containers.
///
/// ```markdown
/// > | ---
///     ^
///   | title: Venus
///   | ---
/// ```
pub fn before_frontmatter(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::DocumentContainerNewBefore),
        State::Next(StateName::DocumentContainerNewBefore),
    );
    State::Retry(StateName::FrontmatterStart)
}
/// At optional existing containers.
//
/// ```markdown
///    | * a
/// > | > b
///     ^
/// ```
pub fn container_existing_before(tokenizer: &mut Tokenizer) -> State {
    // If there are more existing containers, check whether the next one continues.
    if tokenizer.tokenize_state.document_continued
        < tokenizer.tokenize_state.document_container_stack.len()
    {
        let container = &tokenizer.tokenize_state.document_container_stack
            [tokenizer.tokenize_state.document_continued];

        // Each container kind has its own continuation state.
        let name = match container.kind {
            Container::BlockQuote => StateName::BlockQuoteContStart,
            Container::GfmFootnoteDefinition => StateName::GfmFootnoteDefinitionContStart,
            Container::ListItem => StateName::ListItemContStart,
        };

        // If the container does not continue, fall through to new containers.
        tokenizer.attempt(
            State::Next(StateName::DocumentContainerExistingAfter),
            State::Next(StateName::DocumentContainerNewBefore),
        );

        State::Retry(name)
    }
    // Otherwise, check new containers.
    else {
        State::Retry(StateName::DocumentContainerNewBefore)
    }
}
/// After continued existing container.
//
/// ```markdown
///    | * a
/// > |   b
///     ^
/// ```
pub fn container_existing_after(tokenizer: &mut Tokenizer) -> State {
    // One more existing container continued on this line; loop to the next.
    tokenizer.tokenize_state.document_continued += 1;
    State::Retry(StateName::DocumentContainerExistingBefore)
}
/// At new containers.
//
/// ```markdown
/// > | * a
///     ^
/// > | > b
///     ^
/// ```
pub fn container_new_before(tokenizer: &mut Tokenizer) -> State {
    // If we have completely continued, restore the flow’s past `interrupt`
    // status.
    if tokenizer.tokenize_state.document_continued
        == tokenizer.tokenize_state.document_container_stack.len()
    {
        let child = tokenizer.tokenize_state.document_child.as_ref().unwrap();

        tokenizer.interrupt = child.interrupt;

        // …and if we’re in a concrete construct, new containers can’t “pierce”
        // into them.
        if child.concrete {
            return State::Retry(StateName::DocumentContainersAfter);
        }
    }

    // Check for a new container: first try a block quote.
    // Add a new container at the end of the stack.
    let tail = tokenizer.tokenize_state.document_container_stack.len();

    tokenizer
        .tokenize_state
        .document_container_stack
        .push(ContainerState {
            kind: Container::BlockQuote,
            blank_initial: false,
            size: 0,
        });

    // Swap the existing container with the new one, so the new container
    // sits at the current continuation depth.
    tokenizer
        .tokenize_state
        .document_container_stack
        .swap(tokenizer.tokenize_state.document_continued, tail);

    tokenizer.attempt(
        State::Next(StateName::DocumentContainerNewAfter),
        State::Next(StateName::DocumentContainerNewBeforeNotBlockQuote),
    );
    State::Retry(StateName::BlockQuoteStart)
}
/// At new container, but not a block quote.
///
/// ```markdown
/// > | * a
/// ^
/// ```
pub fn container_new_before_not_block_quote(tokenizer: &mut Tokenizer) -> State {
    // List item?
    // We replace the empty block quote container for this new list item one.
    tokenizer.tokenize_state.document_container_stack
        [tokenizer.tokenize_state.document_continued] = ContainerState {
        kind: Container::ListItem,
        blank_initial: false,
        size: 0,
    };
    tokenizer.attempt(
        State::Next(StateName::DocumentContainerNewAfter),
        State::Next(StateName::DocumentContainerNewBeforeNotList),
    );
    State::Retry(StateName::ListItemStart)
}
/// At new container, but not a block quote or list item.
///
/// ```markdown
/// > | a
/// ^
/// ```
pub fn container_new_before_not_list(tokenizer: &mut Tokenizer) -> State {
    // Footnote definition?
    // We replace the empty list item container for this new footnote
    // definition one.
    tokenizer.tokenize_state.document_container_stack
        [tokenizer.tokenize_state.document_continued] = ContainerState {
        kind: Container::GfmFootnoteDefinition,
        blank_initial: false,
        size: 0,
    };
    tokenizer.attempt(
        State::Next(StateName::DocumentContainerNewAfter),
        State::Next(StateName::DocumentContainerNewBeforeNotGfmFootnoteDefinition),
    );
    State::Retry(StateName::GfmFootnoteDefinitionStart)
}
/// At new container, but not a block quote, list item, or footnote definition.
///
/// ```markdown
/// > | a
/// ^
/// ```
pub fn container_new_before_not_footnote_definition(tokenizer: &mut Tokenizer) -> State {
    // It wasn’t a new block quote, list item, or footnote definition.
    // Swap the new container (in the middle) with the existing one (at the end).
    // Drop what was in the middle.
    tokenizer
        .tokenize_state
        .document_container_stack
        .swap_remove(tokenizer.tokenize_state.document_continued);
    State::Retry(StateName::DocumentContainersAfter)
}
/// After new container.
///
/// ```markdown
/// > | * a
/// ^
/// > | > b
/// ^
/// ```
pub fn container_new_after(tokenizer: &mut Tokenizer) -> State {
    // It was a new block quote, list item, or footnote definition.
    // Swap the new container (in the middle) with the existing one (at the end).
    // Take the new container.
    let container = tokenizer
        .tokenize_state
        .document_container_stack
        .swap_remove(tokenizer.tokenize_state.document_continued);
    // If we did not continue all existing containers, and there is a new one,
    // close the flow and those containers.
    if tokenizer.tokenize_state.document_continued
        != tokenizer.tokenize_state.document_container_stack.len()
    {
        if let Err(message) = exit_containers(tokenizer, &Phase::Prefix) {
            return State::Error(message);
        }
    }
    // We are “piercing” into the flow with a new container.
    tokenizer
        .tokenize_state
        .document_child
        .as_mut()
        .unwrap()
        .pierce = true;
    // Put the taken container back on top of the (possibly shortened) stack,
    // mark it as continued, and look for even more new containers.
    tokenizer
        .tokenize_state
        .document_container_stack
        .push(container);
    tokenizer.tokenize_state.document_continued += 1;
    tokenizer.interrupt = false;
    State::Retry(StateName::DocumentContainerNewBefore)
}
/// After containers, at flow.
///
/// ```markdown
/// > | * a
/// ^
/// > | > b
/// ^
/// ```
pub fn containers_after(tokenizer: &mut Tokenizer) -> State {
    // The line is lazy when not every open container was continued.
    let lazy = tokenizer.tokenize_state.document_continued
        != tokenizer.tokenize_state.document_container_stack.len();
    let child = tokenizer.tokenize_state.document_child.as_mut().unwrap();
    child.lazy = lazy;
    child.define_skip(tokenizer.point.clone());

    // At eof: wrap up.
    if tokenizer.current.is_none() {
        return State::Retry(StateName::DocumentFlowEnd);
    }

    // Link this chunk of flow data to the previous chunk (if any).
    let index = tokenizer.events.len();
    let previous = tokenizer.tokenize_state.document_data_index.replace(index);
    if let Some(previous) = previous {
        tokenizer.events[previous].link.as_mut().unwrap().next = Some(index);
    }
    tokenizer.enter_link(
        Name::Data,
        Link {
            previous,
            next: None,
            content: Content::Flow,
        },
    );
    State::Retry(StateName::DocumentFlowInside)
}
/// In flow.
///
/// ```markdown
/// > | * ab
/// ^
/// ```
pub fn flow_inside(tokenizer: &mut Tokenizer) -> State {
    // At eof: close the data and finish the line.
    if tokenizer.current.is_none() {
        tokenizer.exit(Name::Data);
        return State::Retry(StateName::DocumentFlowEnd);
    }

    // The eol itself is part of the data, but it ends this line.
    let at_eol = tokenizer.current == Some(b'\n');
    tokenizer.consume();
    if at_eol {
        tokenizer.exit(Name::Data);
        State::Next(StateName::DocumentFlowEnd)
    } else {
        State::Next(StateName::DocumentFlowInside)
    }
}
/// After flow (after eol or at eof).
///
/// ```markdown
/// | * a
/// > | > b
/// ^ ^
/// ```
pub fn flow_end(tokenizer: &mut Tokenizer) -> State {
    let child = tokenizer.tokenize_state.document_child.as_mut().unwrap();
    // Resume the child tokenizer where it left off, or at the start of flow.
    let state = tokenizer
        .tokenize_state
        .document_child_state
        .take()
        .unwrap_or(State::Next(StateName::FlowStart));
    // Reserve a slot for container exits generated for this line.
    tokenizer.tokenize_state.document_exits.push(None);
    // Feed this line (from the child’s current point up to ours) into the
    // child tokenizer, and remember where it stopped for the next line.
    let state = child.push(
        (child.point.index, child.point.vs),
        (tokenizer.point.index, tokenizer.point.vs),
        state,
    );
    tokenizer.tokenize_state.document_child_state = Some(state);
    // If we’re in a lazy line, and the previous (lazy or not) line is something
    // that can be lazy, and this line is that too, allow it.
    //
    // Accept:
    //
    // ```markdown
    // | * a
    // > | b
    // ^
    // | ```
    // ```
    //
    // Do not accept:
    //
    // ```markdown
    // | * # a
    // > | b
    // ^
    // | ```
    // ```
    //
    // Do not accept:
    //
    // ```markdown
    // | * a
    // > | # b
    // ^
    // | ```
    // ```
    let mut document_lazy_continuation_current = false;
    let mut stack_index = child.stack.len();
    // Use two algorithms: one for when we’re suspended or in multiline things
    // like definitions, another for when we fed the line ending and closed.
    while !document_lazy_continuation_current && stack_index > 0 {
        stack_index -= 1;
        let name = &child.stack[stack_index];
        if name == &Name::Content || name == &Name::GfmTableHead {
            document_lazy_continuation_current = true;
        }
    }
    // …another because we parse each “rest” line as a paragraph, and we passed
    // an EOL already.
    if !document_lazy_continuation_current && !child.events.is_empty() {
        let before = skip::opt_back(&child.events, child.events.len() - 1, &[Name::LineEnding]);
        let name = &child.events[before].name;
        if name == &Name::Content || name == &Name::HeadingSetextUnderline {
            document_lazy_continuation_current = true;
        }
    }
    // Reset “piercing”.
    child.pierce = false;
    // A lazy continuation counts as if every container continued.
    if child.lazy
        && tokenizer.tokenize_state.document_lazy_accepting_before
        && document_lazy_continuation_current
    {
        tokenizer.tokenize_state.document_continued =
            tokenizer.tokenize_state.document_container_stack.len();
    }
    // Close containers that did not continue on this line.
    if tokenizer.tokenize_state.document_continued
        != tokenizer.tokenize_state.document_container_stack.len()
    {
        let result = exit_containers(tokenizer, &Phase::After);
        // `Phase::After` doesn’t deal with flow: it only generates exits for
        // containers.
        // And that never errors.
        debug_assert!(result.is_ok(), "did not expect error when exiting");
    }
    if tokenizer.current.is_none() {
        // At eof: close everything that is still open and stitch the child
        // events into the parent.
        tokenizer.tokenize_state.document_continued = 0;
        if let Err(message) = exit_containers(tokenizer, &Phase::Eof) {
            return State::Error(message);
        }
        resolve(tokenizer);
        State::Ok
    } else {
        // Otherwise: start the next line at the existing containers again.
        tokenizer.tokenize_state.document_continued = 0;
        tokenizer.tokenize_state.document_lazy_accepting_before =
            document_lazy_continuation_current;
        // Containers would only be interrupting if we’ve continued.
        tokenizer.interrupt = false;
        State::Retry(StateName::DocumentContainerExistingBefore)
    }
}
/// Close containers (and flow if needed).
///
/// Splits all containers past `document_continued` off the stack, flushes the
/// child tokenizer (except in `Phase::After`, which only generates container
/// exits), and records the exit events so `resolve` can inject them later.
///
/// # Errors
///
/// Forwards errors from flushing the child tokenizer.
fn exit_containers(tokenizer: &mut Tokenizer, phase: &Phase) -> Result<(), message::Message> {
    let mut stack_close = tokenizer
        .tokenize_state
        .document_container_stack
        .split_off(tokenizer.tokenize_state.document_continued);
    let child = tokenizer.tokenize_state.document_child.as_mut().unwrap();
    // Flush if needed.
    if *phase != Phase::After {
        let state = tokenizer
            .tokenize_state
            .document_child_state
            .take()
            .unwrap_or(State::Next(StateName::FlowStart));
        child.flush(state, false)?;
    }
    if !stack_close.is_empty() {
        // `Phase::After` targets the previous line’s slot; otherwise the
        // current line’s slot.
        let index = tokenizer.tokenize_state.document_exits.len()
            - (if *phase == Phase::After { 2 } else { 1 });
        let mut exits = Vec::with_capacity(stack_close.len());
        // Pop from the top so exits come out innermost-first.
        while let Some(container) = stack_close.pop() {
            let name = match container.kind {
                Container::BlockQuote => Name::BlockQuote,
                Container::GfmFootnoteDefinition => Name::GfmFootnoteDefinition,
                Container::ListItem => Name::ListItem,
            };
            exits.push(Event {
                kind: Kind::Exit,
                name: name.clone(),
                point: tokenizer.point.clone(),
                link: None,
            });
            // Remove the matching open event name from the tokenizer stack.
            let mut stack_index = tokenizer.stack.len();
            let mut found = false;
            while stack_index > 0 {
                stack_index -= 1;
                if tokenizer.stack[stack_index] == name {
                    tokenizer.stack.remove(stack_index);
                    found = true;
                    break;
                }
            }
            debug_assert!(found, "expected to find container event to exit");
        }
        debug_assert!(
            tokenizer.tokenize_state.document_exits[index].is_none(),
            "expected no exits yet"
        );
        tokenizer.tokenize_state.document_exits[index] = Some(exits);
    }
    child.interrupt = false;
    Ok(())
}
/// Inject everything together: splice the child (flow) events, plus the
/// recorded container exits, into the parent document tokenizer.
fn resolve(tokenizer: &mut Tokenizer) {
    let child = tokenizer.tokenize_state.document_child.as_mut().unwrap();
    // First, add the container exits into `child`.
    let mut child_index = 0;
    // Each eol in the child events corresponds to one line, and hence one
    // slot in `document_exits`.
    let mut line = 0;
    while child_index < child.events.len() {
        if child.events[child_index].kind == Kind::Exit
            && matches!(
                child.events[child_index].name,
                Name::LineEnding | Name::BlankLineEnding
            )
        {
            // Inject before `Enter:LineEnding`.
            let mut inject_index = child_index - 1;
            let mut point = &child.events[inject_index].point;
            // But after any trailing `Exit:*` events on the same line.
            while child_index + 1 < child.events.len()
                && child.events[child_index + 1].kind == Kind::Exit
            {
                child_index += 1;
                point = &child.events[child_index].point;
                // Inject after `Exit:*`.
                inject_index = child_index + 1;
            }
            if line < tokenizer.tokenize_state.document_exits.len() {
                if let Some(mut exits) = tokenizer.tokenize_state.document_exits[line].take() {
                    // Rewrite the exit points to where this line actually ends.
                    let mut exit_index = 0;
                    while exit_index < exits.len() {
                        exits[exit_index].point = point.clone();
                        exit_index += 1;
                    }
                    child.map.add(inject_index, 0, exits);
                }
            }
            line += 1;
        }
        child_index += 1;
    }
    child.map.consume(&mut child.events);
    // Find the first `Data` event linked as flow content in the parent.
    let mut flow_index = skip::to(&tokenizer.events, 0, &[Name::Data]);
    while flow_index < tokenizer.events.len()
        // To do: use `!is_some_and()` when that’s stable.
        && (tokenizer.events[flow_index].link.is_none()
            || tokenizer.events[flow_index].link.as_ref().unwrap().content != Content::Flow)
    {
        flow_index = skip::to(&tokenizer.events, flow_index + 1, &[Name::Data]);
    }
    // Now, add all child events into our parent document tokenizer.
    divide_events(
        &mut tokenizer.map,
        &tokenizer.events,
        flow_index,
        &mut child.events,
        (0, 0),
    );
    // Replace the flow data with actual events.
    tokenizer.map.consume(&mut tokenizer.events);
    // Now, add some final container exits due to the EOF.
    // We can’t inject them into the child earlier, as they are “outside” its
    // linked data.
    if line < tokenizer.tokenize_state.document_exits.len() {
        if let Some(mut exits) = tokenizer.tokenize_state.document_exits[line].take() {
            let mut exit_index = 0;
            while exit_index < exits.len() {
                exits[exit_index].point = tokenizer.point.clone();
                exit_index += 1;
            }
            tokenizer.events.append(&mut exits);
        }
    }
    // Add the resolvers from child.
    tokenizer
        .resolvers
        .append(&mut child.resolvers.split_off(0));
    // And take over its definitions too.
    tokenizer
        .tokenize_state
        .definitions
        .append(&mut child.tokenize_state.definitions.split_off(0));
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/flow.rs | Rust | //! The flow content type.
//!
//! **Flow** represents the sections, such as headings and code, which are
//! parsed per line.
//! An example is HTML, which has a certain starting condition (such as
//! `<script>` on its own line), then continues for a while, until an end
//! condition is found (such as `</script>`).
//! If that line with an end condition is never found, that flow goes until
//! the end.
//!
//! The constructs found in flow are:
//!
//! * [Blank line][crate::construct::blank_line]
//! * [Code (indented)][crate::construct::code_indented]
//! * [Heading (atx)][crate::construct::heading_atx]
//! * [Heading (setext)][crate::construct::heading_setext]
//! * [HTML (flow)][crate::construct::html_flow]
//! * [MDX esm][crate::construct::mdx_esm]
//! * [MDX expression (flow)][crate::construct::mdx_expression_flow]
//! * [MDX JSX (flow)][crate::construct::mdx_jsx_flow]
//! * [Raw (flow)][crate::construct::raw_flow] (code (fenced), math (flow))
//! * [Thematic break][crate::construct::thematic_break]
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Start of flow.
//
/// ```markdown
/// > | ## alpha
/// ^
/// > | bravo
/// ^
/// > | ***
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'#') => {
tokenizer.attempt(
State::Next(StateName::FlowAfter),
State::Next(StateName::FlowBeforeContent),
);
State::Retry(StateName::HeadingAtxStart)
}
Some(b'$' | b'`' | b'~') => {
tokenizer.attempt(
State::Next(StateName::FlowAfter),
State::Next(StateName::FlowBeforeContent),
);
State::Retry(StateName::RawFlowStart)
}
// Note: `-` is also used in setext heading underline so it’s not
// included here.
Some(b'*' | b'_') => {
tokenizer.attempt(
State::Next(StateName::FlowAfter),
State::Next(StateName::FlowBeforeContent),
);
State::Retry(StateName::ThematicBreakStart)
}
Some(b'<') => {
tokenizer.attempt(
State::Next(StateName::FlowAfter),
State::Next(StateName::FlowBeforeMdxJsx),
);
State::Retry(StateName::HtmlFlowStart)
}
Some(b'e' | b'i') => {
tokenizer.attempt(
State::Next(StateName::FlowAfter),
State::Next(StateName::FlowBeforeContent),
);
State::Retry(StateName::MdxEsmStart)
}
Some(b'{') => {
tokenizer.attempt(
State::Next(StateName::FlowAfter),
State::Next(StateName::FlowBeforeContent),
);
State::Retry(StateName::MdxExpressionFlowStart)
}
// Actual parsing: blank line? Indented code? Indented anything?
// Tables, setext heading underlines, definitions, and Contents are
// particularly weird.
_ => State::Retry(StateName::FlowBlankLineBefore),
}
}
/// At blank line.
///
/// Tries the blank line construct; when that fails, code (indented) is
/// tried next.
///
/// ```markdown
/// > | ␠␠␊
/// ^
/// ```
pub fn blank_line_before(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowBlankLineAfter),
        State::Next(StateName::FlowBeforeCodeIndented),
    );
    State::Retry(StateName::BlankLineStart)
}
/// At code (indented).
///
/// Tries code (indented); when that fails, raw (flow) is tried next.
///
/// ```markdown
/// > | ␠␠␠␠a
/// ^
/// ```
pub fn before_code_indented(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeRaw),
    );
    State::Retry(StateName::CodeIndentedStart)
}
/// At raw.
///
/// Tries raw (flow, i.e. code (fenced) or math (flow)); when that fails,
/// HTML (flow) is tried next.
///
/// ````markdown
/// > | ```
/// ^
/// ````
pub fn before_raw(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeHtml),
    );
    State::Retry(StateName::RawFlowStart)
}
/// At html (flow).
///
/// Tries HTML (flow); when that fails, MDX JSX (flow) is tried next.
///
/// ```markdown
/// > | <a>
/// ^
/// ```
pub fn before_html(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeMdxJsx),
    );
    State::Retry(StateName::HtmlFlowStart)
}
/// At mdx jsx (flow).
///
/// Tries MDX JSX (flow); when that fails, heading (atx) is tried next.
///
/// ```markdown
/// > | <A />
/// ^
/// ```
pub fn before_mdx_jsx(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeHeadingAtx),
    );
    State::Retry(StateName::MdxJsxFlowStart)
}
/// At heading (atx).
///
/// Tries heading (atx); when that fails, heading (setext) is tried next.
///
/// ```markdown
/// > | # a
/// ^
/// ```
pub fn before_heading_atx(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeHeadingSetext),
    );
    State::Retry(StateName::HeadingAtxStart)
}
/// At heading (setext).
///
/// Tries heading (setext); when that fails, thematic break is tried next.
///
/// ```markdown
/// | a
/// > | =
/// ^
/// ```
pub fn before_heading_setext(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeThematicBreak),
    );
    State::Retry(StateName::HeadingSetextStart)
}
/// At thematic break.
///
/// Tries the thematic break; when that fails, MDX expression (flow) is
/// tried next.
///
/// ```markdown
/// > | ***
/// ^
/// ```
pub fn before_thematic_break(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeMdxExpression),
    );
    State::Retry(StateName::ThematicBreakStart)
}
/// At MDX expression (flow).
///
/// Tries MDX expression (flow); when that fails, GFM table is tried next.
///
/// ```markdown
/// > | {Math.PI}
/// ^
/// ```
pub fn before_mdx_expression(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeGfmTable),
    );
    State::Retry(StateName::MdxExpressionFlowStart)
}
/// At GFM table.
///
/// Tries a GFM table; when that fails, content is tried last.
///
/// ```markdown
/// > | | a |
/// ^
/// ```
pub fn before_gfm_table(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::FlowAfter),
        State::Next(StateName::FlowBeforeContent),
    );
    State::Retry(StateName::GfmTableStart)
}
/// At content.
///
/// Content is the fallback: it always matches, so failure here is `Nok`.
///
/// ```markdown
/// > | a
/// ^
/// ```
pub fn before_content(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(State::Next(StateName::FlowAfter), State::Nok);
    State::Retry(StateName::ContentChunkStart)
}
/// After blank line.
///
/// ```markdown
/// > | ␠␠␊
/// ^
/// ```
pub fn blank_line_after(tokenizer: &mut Tokenizer) -> State {
    // At eof: done.
    if tokenizer.current.is_none() {
        return State::Ok;
    }
    // This state is only reached at eol or eof.
    if tokenizer.current != Some(b'\n') {
        unreachable!("expected eol/eof")
    }
    tokenizer.enter(Name::BlankLineEnding);
    tokenizer.consume();
    tokenizer.exit(Name::BlankLineEnding);
    // Feel free to interrupt.
    tokenizer.interrupt = false;
    State::Next(StateName::FlowStart)
}
/// After flow.
///
/// ```markdown
/// > | # a␊
/// ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // At eof: done.
    if tokenizer.current.is_none() {
        return State::Ok;
    }
    // This state is only reached at eol or eof.
    if tokenizer.current != Some(b'\n') {
        unreachable!("expected eol/eof")
    }
    tokenizer.enter(Name::LineEnding);
    tokenizer.consume();
    tokenizer.exit(Name::LineEnding);
    State::Next(StateName::FlowStart)
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/frontmatter.rs | Rust | //! Frontmatter occurs at the start of the document.
//!
//! ## Grammar
//!
//! Frontmatter forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! frontmatter ::= fence_open *( eol *byte ) eol fence_close
//! fence_open ::= sequence *space_or_tab
//! ; Restriction: markers in `sequence` must match markers in opening sequence.
//! fence_close ::= sequence *space_or_tab
//! sequence ::= 3'+' | 3'-'
//! ```
//!
//! Frontmatter can only occur once.
//! It cannot occur in a container.
//! It must have a closing fence.
//! Like flow constructs, it must be followed by an eol (line ending) or
//! eof (end of file).
//!
//! ## Extension
//!
//! > 👉 **Note**: frontmatter is not part of `CommonMark`, so frontmatter is
//! > not enabled by default.
//! > You need to enable it manually.
//! > See [`Constructs`][constructs] for more info.
//!
//! As there is no spec for frontmatter in markdown, this extension follows how
//! YAML frontmatter works on `github.com`.
//! It also parses TOML frontmatter, just like YAML except that it uses a `+`.
//!
//! ## Recommendation
//!
//! When authoring markdown with frontmatter, it’s recommended to use YAML
//! frontmatter if possible.
//! While YAML has some warts, it works in the most places, so using it
//! guarantees the highest chance of portability.
//!
//! In certain ecosystems, other flavors are widely used.
//! For example, in the Rust ecosystem, TOML is often used.
//! In such cases, using TOML is an okay choice.
//!
//! ## Tokens
//!
//! * [`Frontmatter`][Name::Frontmatter]
//! * [`FrontmatterFence`][Name::FrontmatterFence]
//! * [`FrontmatterSequence`][Name::FrontmatterSequence]
//! * [`FrontmatterChunk`][Name::FrontmatterChunk]
//! * [`LineEnding`][Name::LineEnding]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`micromark-extension-frontmatter`](https://github.com/micromark/micromark-extension-frontmatter)
//!
//! [constructs]: crate::Constructs
use crate::construct::partial_space_or_tab::space_or_tab;
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::constant::FRONTMATTER_SEQUENCE_SIZE;
/// Start of frontmatter.
///
/// ```markdown
/// > | ---
/// ^
/// | title: "Venus"
/// | ---
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // The construct must be enabled.
    // Indent not allowed.
    if !tokenizer.parse_state.options.constructs.frontmatter {
        return State::Nok;
    }
    match tokenizer.current {
        Some(byte @ (b'+' | b'-')) => {
            // Remember the marker so the rest of the fence must match it.
            tokenizer.tokenize_state.marker = byte;
            tokenizer.enter(Name::Frontmatter);
            tokenizer.enter(Name::FrontmatterFence);
            tokenizer.enter(Name::FrontmatterSequence);
            State::Retry(StateName::FrontmatterOpenSequence)
        }
        _ => State::Nok,
    }
}
/// In open sequence.
///
/// ```markdown
/// > | ---
/// ^
/// | title: "Venus"
/// | ---
/// ```
pub fn open_sequence(tokenizer: &mut Tokenizer) -> State {
    // Still at a marker: count it and continue.
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        tokenizer.tokenize_state.size += 1;
        tokenizer.consume();
        return State::Next(StateName::FrontmatterOpenSequence);
    }
    // The sequence must be exactly three markers long.
    if tokenizer.tokenize_state.size != FRONTMATTER_SEQUENCE_SIZE {
        tokenizer.tokenize_state.marker = 0;
        tokenizer.tokenize_state.size = 0;
        return State::Nok;
    }
    tokenizer.tokenize_state.size = 0;
    tokenizer.exit(Name::FrontmatterSequence);
    // Optional trailing whitespace after the sequence.
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.attempt(State::Next(StateName::FrontmatterOpenAfter), State::Nok);
        State::Retry(space_or_tab(tokenizer))
    } else {
        State::Retry(StateName::FrontmatterOpenAfter)
    }
}
/// After open sequence.
///
/// ```markdown
/// > | ---
/// ^
/// | title: "Venus"
/// | ---
/// ```
pub fn open_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // The opening fence must end in an eol.
        Some(b'\n') => {
            tokenizer.exit(Name::FrontmatterFence);
            tokenizer.enter(Name::LineEnding);
            tokenizer.consume();
            tokenizer.exit(Name::LineEnding);
            // Try the closing fence first; otherwise parse a content line.
            tokenizer.attempt(
                State::Next(StateName::FrontmatterAfter),
                State::Next(StateName::FrontmatterContentStart),
            );
            State::Next(StateName::FrontmatterCloseStart)
        }
        _ => {
            tokenizer.tokenize_state.marker = 0;
            State::Nok
        }
    }
}
/// Start of close sequence.
///
/// The closing fence must start with the same marker as the opening fence.
///
/// ```markdown
/// | ---
/// | title: "Venus"
/// > | ---
/// ^
/// ```
pub fn close_start(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        tokenizer.enter(Name::FrontmatterFence);
        tokenizer.enter(Name::FrontmatterSequence);
        State::Retry(StateName::FrontmatterCloseSequence)
    } else {
        State::Nok
    }
}
/// In close sequence.
///
/// ```markdown
/// | ---
/// | title: "Venus"
/// > | ---
/// ^
/// ```
pub fn close_sequence(tokenizer: &mut Tokenizer) -> State {
    // Still at a marker: count it and continue.
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        tokenizer.tokenize_state.size += 1;
        tokenizer.consume();
        return State::Next(StateName::FrontmatterCloseSequence);
    }
    // The sequence must be exactly three markers long.
    if tokenizer.tokenize_state.size != FRONTMATTER_SEQUENCE_SIZE {
        tokenizer.tokenize_state.size = 0;
        return State::Nok;
    }
    tokenizer.tokenize_state.size = 0;
    tokenizer.exit(Name::FrontmatterSequence);
    // Optional trailing whitespace after the sequence.
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.attempt(State::Next(StateName::FrontmatterCloseAfter), State::Nok);
        State::Retry(space_or_tab(tokenizer))
    } else {
        State::Retry(StateName::FrontmatterCloseAfter)
    }
}
/// After close sequence.
///
/// The closing fence must be followed by an eol or eof.
///
/// ```markdown
/// | ---
/// | title: "Venus"
/// > | ---
/// ^
/// ```
pub fn close_after(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        tokenizer.exit(Name::FrontmatterFence);
        State::Ok
    } else {
        State::Nok
    }
}
/// Start of content chunk.
///
/// ```markdown
/// | ---
/// > | title: "Venus"
/// ^
/// | ---
/// ```
pub fn content_start(tokenizer: &mut Tokenizer) -> State {
    // An empty line has no chunk.
    if matches!(tokenizer.current, None | Some(b'\n')) {
        State::Retry(StateName::FrontmatterContentEnd)
    } else {
        tokenizer.enter(Name::FrontmatterChunk);
        State::Retry(StateName::FrontmatterContentInside)
    }
}
/// In content chunk.
///
/// ```markdown
/// | ---
/// > | title: "Venus"
/// ^
/// | ---
/// ```
pub fn content_inside(tokenizer: &mut Tokenizer) -> State {
    // Chunks run until eol or eof.
    if matches!(tokenizer.current, None | Some(b'\n')) {
        tokenizer.exit(Name::FrontmatterChunk);
        State::Retry(StateName::FrontmatterContentEnd)
    } else {
        tokenizer.consume();
        State::Next(StateName::FrontmatterContentInside)
    }
}
/// End of content chunk.
///
/// ```markdown
/// | ---
/// > | title: "Venus"
/// ^
/// | ---
/// ```
pub fn content_end(tokenizer: &mut Tokenizer) -> State {
    // Eof without a closing fence: not frontmatter after all.
    if tokenizer.current.is_none() {
        tokenizer.tokenize_state.marker = 0;
        return State::Nok;
    }
    // This state is only reached at eol or eof.
    if tokenizer.current != Some(b'\n') {
        unreachable!("expected eof/eol")
    }
    tokenizer.enter(Name::LineEnding);
    tokenizer.consume();
    tokenizer.exit(Name::LineEnding);
    // Try the closing fence first; otherwise parse another content line.
    tokenizer.attempt(
        State::Next(StateName::FrontmatterAfter),
        State::Next(StateName::FrontmatterContentStart),
    );
    State::Next(StateName::FrontmatterCloseStart)
}
/// After frontmatter.
///
/// ```markdown
/// | ---
/// | title: "Venus"
/// > | ---
/// ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // `close_after` only succeeds at eol/eof, so nothing else can follow.
    debug_assert!(
        matches!(tokenizer.current, None | Some(b'\n')),
        "expected eol/eof after closing fence"
    );
    tokenizer.exit(Name::Frontmatter);
    State::Ok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/gfm_autolink_literal.rs | Rust | //! GFM: autolink literal occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! Autolink literals form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! gfm_autolink_literal ::= gfm_protocol_autolink | gfm_www_autolink | gfm_email_autolink
//!
//! ; Restriction: the code before must be `www_autolink_before`.
//! ; Restriction: the code after `.` must not be eof.
//! www_autolink ::= 3('w' | 'W') '.' [domain [path]]
//! www_autolink_before ::= eof | eol | space_or_tab | '(' | '*' | '_' | '[' | ']' | '~'
//!
//! ; Restriction: the code before must be `http_autolink_before`.
//! ; Restriction: the code after the protocol must be `http_autolink_protocol_after`.
//! http_autolink ::= ('h' | 'H') 2('t' | 'T') ('p' | 'P') ['s' | 'S'] ':' 2'/' domain [path]
//! http_autolink_before ::= byte - ascii_alpha
//! http_autolink_protocol_after ::= byte - eof - eol - ascii_control - unicode_whitespace - unicode_punctuation
//!
//! ; Restriction: the code before must be `email_autolink_before`.
//! ; Restriction: `ascii_digit` may not occur in the last label part of the label.
//! email_autolink ::= 1*('+' | '-' | '.' | '_' | ascii_alphanumeric) '@' 1*(1*label_segment label_dot_cont) 1*label_segment
//! email_autolink_before ::= byte - ascii_alpha - '/'
//!
//! ; Restriction: `_` may not occur in the last two domain parts.
//! domain ::= 1*(url_ampt_cont | domain_punct_cont | '-' | byte - eof - ascii_control - unicode_whitespace - unicode_punctuation)
//! ; Restriction: must not be followed by `punct`.
//! domain_punct_cont ::= '.' | '_'
//! ; Restriction: must not be followed by `char-ref`.
//! url_ampt_cont ::= '&'
//!
//! ; Restriction: a counter `balance = 0` is increased for every `(`, and decreased for every `)`.
//! ; Restriction: `)` must not be `paren_at_end`.
//! path ::= 1*(url_ampt_cont | path_punctuation_cont | '(' | ')' | byte - eof - eol - space_or_tab)
//! ; Restriction: must not be followed by `punct`.
//! path_punctuation_cont ::= trailing_punctuation - '<'
//! ; Restriction: must be followed by `punct` and `balance` must be less than `0`.
//! paren_at_end ::= ')'
//!
//! label_segment ::= label_dash_underscore_cont | ascii_alpha | ascii_digit
//! ; Restriction: if followed by `punct`, the whole email autolink is invalid.
//! label_dash_underscore_cont ::= '-' | '_'
//! ; Restriction: must not be followed by `punct`.
//! label_dot_cont ::= '.'
//!
//! punct ::= *trailing_punctuation ( byte - eof - eol - space_or_tab - '<' )
//! char_ref ::= *ascii_alpha ';' path_end
//! trailing_punctuation ::= '!' | '"' | '\'' | ')' | '*' | ',' | '.' | ':' | ';' | '<' | '?' | '_' | '~'
//! ```
//!
//! The grammar for GFM autolink literal is very relaxed: basically anything
//! except for whitespace is allowed after a prefix.
//! To use whitespace characters and otherwise impossible characters, in URLs,
//! you can use percent encoding:
//!
//! ```markdown
//! https://example.com/alpha%20bravo
//! ```
//!
//! Yields:
//!
//! ```html
//! <p><a href="https://example.com/alpha%20bravo">https://example.com/alpha%20bravo</a></p>
//! ```
//!
//! There are several cases where incorrect encoding of URLs would, in other
//! languages, result in a parse error.
//! In markdown, there are no errors, and URLs are normalized.
//! In addition, many characters are percent encoded
//! ([`sanitize_uri`][sanitize_uri]).
//! For example:
//!
//! ```markdown
//! www.a👍b%
//! ```
//!
//! Yields:
//!
//! ```html
//! <p><a href="http://www.a%F0%9F%91%8Db%25">www.a👍b%</a></p>
//! ```
//!
//! There is a big difference between how www and protocol literals work
//! compared to how email literals work.
//! The first two are done when parsing, and work like anything else in
//! markdown.
//! But email literals are handled afterwards: when everything is parsed, we
//! look back at the events to figure out if there were email addresses.
//! This particularly affects how they interleave with character escapes and
//! character references.
//!
//! ## HTML
//!
//! GFM autolink literals relate to the `<a>` element in HTML.
//! See [*§ 4.5.1 The `a` element*][html_a] in the HTML spec for more info.
//! When an email autolink is used, the string `mailto:` is prepended when
//! generating the `href` attribute of the hyperlink.
//! When a www autolink is used, the string `http:` is prepended.
//!
//! ## Recommendation
//!
//! It is recommended to use labels ([label start link][label_start_link],
//! [label end][label_end]), either with a resource or a definition
//! ([definition][]), instead of autolink literals, as those allow relative
//! URLs and descriptive text to explain the URL in prose.
//!
//! ## Bugs
//!
//! GitHub’s own algorithm to parse autolink literals contains three bugs.
//! A smaller bug is left unfixed in this project for consistency.
//! Two main bugs are not present in this project.
//! The issues relating to autolink literals are:
//!
//! * [GFM autolink extension (`www.`, `https?://` parts): links don’t work when after bracket](https://github.com/github/cmark-gfm/issues/278)\
//! fixed here ✅
//! * [GFM autolink extension (`www.` part): uppercase does not match on issues/PRs/comments](https://github.com/github/cmark-gfm/issues/280)\
//! fixed here ✅
//! * [GFM autolink extension (`www.` part): the word `www` matches](https://github.com/github/cmark-gfm/issues/279)\
//! present here for consistency
//!
//! ## Tokens
//!
//! * [`GfmAutolinkLiteralEmail`][Name::GfmAutolinkLiteralEmail]
//! * [`GfmAutolinkLiteralMailto`][Name::GfmAutolinkLiteralMailto]
//! * [`GfmAutolinkLiteralProtocol`][Name::GfmAutolinkLiteralProtocol]
//! * [`GfmAutolinkLiteralWww`][Name::GfmAutolinkLiteralWww]
//! * [`GfmAutolinkLiteralXmpp`][Name::GfmAutolinkLiteralXmpp]
//!
//! ## References
//!
//! * [`micromark-extension-gfm-autolink-literal`](https://github.com/micromark/micromark-extension-gfm-autolink-literal)
//! * [*§ 6.9 Autolinks (extension)* in `GFM`](https://github.github.com/gfm/#autolinks-extension-)
//!
//! > 👉 **Note**: `mailto:` and `xmpp:` protocols before email autolinks were
//! > added in `cmark-gfm@0.29.0.gfm.5` and are as of yet undocumented.
//!
//! [text]: crate::construct::text
//! [definition]: crate::construct::definition
//! [attention]: crate::construct::attention
//! [label_start_link]: crate::construct::label_start_link
//! [label_end]: crate::construct::label_end
//! [sanitize_uri]: crate::util::sanitize_uri
//! [html_a]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-a-element
use crate::event::{Event, Kind, Name};
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{
char::{kind_after_index, Kind as CharacterKind},
slice::{Position, Slice},
};
use alloc::vec::Vec;
/// Start of protocol autolink literal.
///
/// ```markdown
/// > | https://example.com/a?b#c
///     ^
/// ```
pub fn protocol_start(tokenizer: &mut Tokenizer) -> State {
    // Only look further when the construct is on, the current byte could start
    // `http`/`https` (case-insensitive), and the previous byte is not a
    // letter — otherwise we would match in the middle of a word.
    if tokenizer
        .parse_state
        .options
        .constructs
        .gfm_autolink_literal &&
        matches!(tokenizer.current, Some(b'H' | b'h'))
        // Source: <https://github.com/github/cmark-gfm/blob/ef1cfcb/extensions/autolink.c#L214>.
        && !matches!(tokenizer.previous, Some(b'A'..=b'Z' | b'a'..=b'z'))
    {
        tokenizer.enter(Name::GfmAutolinkLiteralProtocol);
        // Two attempts are stacked: when the `http(s)://` prefix parse
        // succeeds, parsing continues in the domain; when the whole literal
        // succeeds, `protocol_after` closes the token.
        tokenizer.attempt(
            State::Next(StateName::GfmAutolinkLiteralProtocolAfter),
            State::Nok,
        );
        tokenizer.attempt(
            State::Next(StateName::GfmAutolinkLiteralDomainInside),
            State::Nok,
        );
        // Remember where the scheme starts, so its name can be verified when
        // the `:` is reached (see `protocol_prefix_inside`).
        tokenizer.tokenize_state.start = tokenizer.point.index;
        State::Retry(StateName::GfmAutolinkLiteralProtocolPrefixInside)
    } else {
        State::Nok
    }
}
/// After a whole protocol autolink literal: close the token.
///
/// ```markdown
/// > | https://example.com/a?b#c
///                              ^
/// ```
pub fn protocol_after(tokenizer: &mut Tokenizer) -> State {
    // Everything (scheme, domain, path) was consumed: seal the literal.
    tokenizer.exit(Name::GfmAutolinkLiteralProtocol);
    State::Ok
}
/// In the scheme (`http` / `https`) of a protocol autolink literal.
///
/// ```markdown
/// > | https://example.com/a?b#c
///     ^^^^^
/// ```
pub fn protocol_prefix_inside(tokenizer: &mut Tokenizer) -> State {
    // Keep taking letters while we are still within the longest scheme
    // (`https`, 5 bytes).
    if matches!(tokenizer.current, Some(b'A'..=b'Z' | b'a'..=b'z'))
        && tokenizer.point.index - tokenizer.tokenize_state.start < 5
    {
        tokenizer.consume();
        State::Next(StateName::GfmAutolinkLiteralProtocolPrefixInside)
    } else if tokenizer.current == Some(b':') {
        // At the scheme separator: verify the collected name.
        let scheme = Slice::from_indices(
            tokenizer.parse_state.bytes,
            tokenizer.tokenize_state.start,
            tokenizer.point.index,
        )
        .as_str()
        .to_ascii_lowercase();
        tokenizer.tokenize_state.start = 0;
        if scheme == "http" || scheme == "https" {
            tokenizer.consume();
            State::Next(StateName::GfmAutolinkLiteralProtocolSlashesInside)
        } else {
            State::Nok
        }
    } else {
        // Too long, EOF, or something that is not a scheme: give up.
        tokenizer.tokenize_state.start = 0;
        State::Nok
    }
}
/// In the two slashes after the scheme (`//`).
///
/// ```markdown
/// > | https://example.com/a?b#c
///          ^^
/// ```
pub fn protocol_slashes_inside(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'/') => {
            tokenizer.consume();
            if tokenizer.tokenize_state.size == 0 {
                // First slash: expect exactly one more.
                tokenizer.tokenize_state.size += 1;
                State::Next(StateName::GfmAutolinkLiteralProtocolSlashesInside)
            } else {
                // Second slash: prefix done.
                tokenizer.tokenize_state.size = 0;
                State::Ok
            }
        }
        _ => {
            tokenizer.tokenize_state.size = 0;
            State::Nok
        }
    }
}
/// Start of www autolink literal.
///
/// ```markdown
/// > | www.example.com/a?b#c
///     ^
/// ```
pub fn www_start(tokenizer: &mut Tokenizer) -> State {
    // Only look further when the construct is on, the current byte could
    // start `www.` (case-insensitive), and the previous byte is one of the
    // few allowed boundary characters (or the start of input).
    if tokenizer
        .parse_state
        .options
        .constructs
        .gfm_autolink_literal &&
        matches!(tokenizer.current, Some(b'W' | b'w'))
        // Source: <https://github.com/github/cmark-gfm/blob/ef1cfcb/extensions/autolink.c#L156>.
        && matches!(tokenizer.previous, None | Some(b'\t' | b'\n' | b' ' | b'(' | b'*' | b'_' | b'[' | b']' | b'~'))
    {
        tokenizer.enter(Name::GfmAutolinkLiteralWww);
        // When the whole literal succeeds, `www_after` closes the token.
        tokenizer.attempt(
            State::Next(StateName::GfmAutolinkLiteralWwwAfter),
            State::Nok,
        );
        // Note: we *check*, so we can discard the `www.` we parsed.
        // If it worked, we consider it as a part of the domain.
        tokenizer.check(
            State::Next(StateName::GfmAutolinkLiteralDomainInside),
            State::Nok,
        );
        State::Retry(StateName::GfmAutolinkLiteralWwwPrefixInside)
    } else {
        State::Nok
    }
}
/// After a whole www autolink literal: close the token.
///
/// ```markdown
/// > | www.example.com/a?b#c
///                          ^
/// ```
pub fn www_after(tokenizer: &mut Tokenizer) -> State {
    // The `www.` prefix, domain, and path were all consumed.
    tokenizer.exit(Name::GfmAutolinkLiteralWww);
    State::Ok
}
/// In the `www.` prefix.
///
/// ```markdown
/// > | www.example.com
///     ^^^^
/// ```
pub fn www_prefix_inside(tokenizer: &mut Tokenizer) -> State {
    let seen = tokenizer.tokenize_state.size;
    if seen == 3 && tokenizer.current == Some(b'.') {
        // Exactly three `w`s followed by a dot: prefix complete.
        tokenizer.tokenize_state.size = 0;
        tokenizer.consume();
        State::Next(StateName::GfmAutolinkLiteralWwwPrefixAfter)
    } else if seen < 3 && matches!(tokenizer.current, Some(b'W' | b'w')) {
        // Still collecting `w`s (case-insensitive).
        tokenizer.tokenize_state.size += 1;
        tokenizer.consume();
        State::Next(StateName::GfmAutolinkLiteralWwwPrefixInside)
    } else {
        // Anything else (too many `w`s, wrong byte) is not a `www.` prefix.
        tokenizer.tokenize_state.size = 0;
        State::Nok
    }
}
/// After the `www.` prefix.
///
/// ```markdown
/// > | www.example.com
///         ^
/// ```
pub fn www_prefix_after(tokenizer: &mut Tokenizer) -> State {
    // Any byte at all after `www.` is enough to form a literal; only the end
    // of input fails.
    match tokenizer.current {
        None => State::Nok,
        Some(_) => State::Ok,
    }
}
/// In domain.
///
/// ```markdown
/// > | https://example.com/a
///             ^^^^^^^^^^^
/// ```
pub fn domain_inside(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Check whether this marker, which is a trailing punctuation
        // marker, optionally followed by more trailing markers, and then
        // followed by an end.
        // If it is trailing, the domain ends before it (`DomainAfter`);
        // otherwise the punctuation is part of the domain
        // (`DomainAtPunctuation`).
        Some(b'.' | b'_') => {
            tokenizer.check(
                State::Next(StateName::GfmAutolinkLiteralDomainAfter),
                State::Next(StateName::GfmAutolinkLiteralDomainAtPunctuation),
            );
            State::Retry(StateName::GfmAutolinkLiteralTrail)
        }
        // Dashes and continuation bytes are fine.
        Some(b'-' | 0x80..=0xBF) => {
            tokenizer.consume();
            State::Next(StateName::GfmAutolinkLiteralDomainInside)
        }
        _ => {
            // Source: <https://github.com/github/cmark-gfm/blob/ef1cfcb/extensions/autolink.c#L12>.
            // Any non-whitespace, non-punctuation character continues the
            // domain; mark that we saw at least one such character.
            if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
                == CharacterKind::Other
            {
                tokenizer.tokenize_state.seen = true;
                tokenizer.consume();
                State::Next(StateName::GfmAutolinkLiteralDomainInside)
            } else {
                // Whitespace or punctuation: the domain ends here.
                State::Retry(StateName::GfmAutolinkLiteralDomainAfter)
            }
        }
    }
}
/// In domain, at a `.` or `_` that turned out *not* to be trailing
/// punctuation.
///
/// ```markdown
/// > | https://example.com
///                    ^
/// ```
pub fn domain_at_punctuation(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'_') {
        // Record an underscore in the current (last) domain segment.
        tokenizer.tokenize_state.marker = b'_';
    } else {
        // A `.` starts a new segment: the last segment's underscore state
        // moves into the penultimate slot, and the last slot resets.
        tokenizer.tokenize_state.marker_b = tokenizer.tokenize_state.marker;
        tokenizer.tokenize_state.marker = 0;
    }
    tokenizer.consume();
    State::Next(StateName::GfmAutolinkLiteralDomainInside)
}
/// After the domain: decide whether it is a valid autolink domain.
///
/// ```markdown
/// > | https://example.com/a
///                        ^
/// ```
pub fn domain_after(tokenizer: &mut Tokenizer) -> State {
    // GFM disallows underscores in the last two domain segments, and the
    // domain must contain at least one non-punctuation character.
    // Note: GH’s docs say a dot is needed, but that is not actually true:
    // <https://github.com/github/cmark-gfm/issues/279>
    let underscore_in_last_two = tokenizer.tokenize_state.marker == b'_'
        || tokenizer.tokenize_state.marker_b == b'_';
    let valid = tokenizer.tokenize_state.seen && !underscore_in_last_two;
    // Reset scratch state either way.
    tokenizer.tokenize_state.seen = false;
    tokenizer.tokenize_state.marker = 0;
    tokenizer.tokenize_state.marker_b = 0;
    if valid {
        State::Retry(StateName::GfmAutolinkLiteralPathInside)
    } else {
        State::Nok
    }
}
/// In path.
///
/// ```markdown
/// > | https://example.com/a
///                        ^^
/// ```
pub fn path_inside(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Continuation bytes are fine, we’ve already checked the first one.
        Some(0x80..=0xBF) => {
            tokenizer.consume();
            State::Next(StateName::GfmAutolinkLiteralPathInside)
        }
        // Count opening parens (`size`), so closing parens can be balanced
        // against them.
        Some(b'(') => {
            tokenizer.tokenize_state.size += 1;
            tokenizer.consume();
            State::Next(StateName::GfmAutolinkLiteralPathInside)
        }
        // Check whether this trailing punctuation marker is optionally
        // followed by more trailing markers, and then followed
        // by an end.
        // If this is a paren (followed by trailing, then the end), we
        // *continue* if we saw less closing parens than opening parens.
        Some(
            b'!' | b'"' | b'&' | b'\'' | b')' | b'*' | b',' | b'.' | b':' | b';' | b'<' | b'?'
            | b']' | b'_' | b'~',
        ) => {
            // `size_b` counts closing parens seen so far.
            let next = if tokenizer.current == Some(b')')
                && tokenizer.tokenize_state.size_b < tokenizer.tokenize_state.size
            {
                StateName::GfmAutolinkLiteralPathAtPunctuation
            } else {
                StateName::GfmAutolinkLiteralPathAfter
            };
            tokenizer.check(
                State::Next(next),
                State::Next(StateName::GfmAutolinkLiteralPathAtPunctuation),
            );
            State::Retry(StateName::GfmAutolinkLiteralTrail)
        }
        _ => {
            // Source: <https://github.com/github/cmark-gfm/blob/ef1cfcb/extensions/autolink.c#L12>.
            // Whitespace (or the end of input) ends the path; any other
            // character continues it.
            if tokenizer.current.is_none()
                || kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
                    == CharacterKind::Whitespace
            {
                State::Retry(StateName::GfmAutolinkLiteralPathAfter)
            } else {
                tokenizer.consume();
                State::Next(StateName::GfmAutolinkLiteralPathInside)
            }
        }
    }
}
/// In path, at punctuation that turned out *not* to be trailing.
///
/// ```markdown
/// > | https://example.com/a"b
///                          ^
/// ```
pub fn path_at_punctuation(tokenizer: &mut Tokenizer) -> State {
    // A closing paren that is part of the path still counts towards the
    // paren balance.
    if matches!(tokenizer.current, Some(b')')) {
        tokenizer.tokenize_state.size_b += 1;
    }
    tokenizer.consume();
    State::Next(StateName::GfmAutolinkLiteralPathInside)
}
/// At the end of a path: reset the paren counters.
///
/// ```markdown
/// > | https://example.com/asd(qwe).
///                                 ^
/// ```
pub fn path_after(tokenizer: &mut Tokenizer) -> State {
    // `size_b` tracked closing parens, `size` opening ones.
    tokenizer.tokenize_state.size_b = 0;
    tokenizer.tokenize_state.size = 0;
    State::Ok
}
/// In the (potential) trail of a domain or path.
///
/// ```markdown
/// > | https://example.com").
///                        ^
/// ```
pub fn trail(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // `<` always ends the URL.
        Some(b'<') => State::Ok,
        // `&` followed by one or more alphabeticals and a `;` is, as a
        // whole, trailing punctuation (an entity-like sequence); in every
        // other case the `&` is part of the URL.
        Some(b'&') => {
            tokenizer.consume();
            State::Next(StateName::GfmAutolinkLiteralTrailCharRefStart)
        }
        // `]` needs a lookahead because we allow literals after `[` (we fix
        // <https://github.com/github/cmark-gfm/issues/278>): it must not be
        // followed by `(` or `[`.
        Some(b']') => {
            tokenizer.consume();
            State::Next(StateName::GfmAutolinkLiteralTrailBracketAfter)
        }
        // Plain trailing punctuation: keep going.
        Some(
            b'!' | b'"' | b'\'' | b')' | b'*' | b',' | b'.' | b':' | b';' | b'?' | b'_' | b'~',
        ) => {
            tokenizer.consume();
            State::Next(StateName::GfmAutolinkLiteralTrail)
        }
        _ => {
            // Whitespace (or EOF) ends the URL; anything else means the
            // presumed trail was URL content after all.
            let at_whitespace = kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
                == CharacterKind::Whitespace;
            if at_whitespace {
                State::Ok
            } else {
                State::Nok
            }
        }
    }
}
/// In trail, after `]`.
///
/// > 👉 **Note**: this deviates from `cmark-gfm` to fix a bug.
/// > See end of <https://github.com/github/cmark-gfm/issues/278> for more.
///
/// ```markdown
/// > | https://example.com](
///                         ^
/// ```
pub fn trail_bracket_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Whitespace, EOF, or something that could start a resource (`(`) or
        // a reference (`[`): the `]` really was trailing, so the URL ends.
        None | Some(b'\t' | b'\n' | b' ' | b'(' | b'[') => State::Ok,
        // Otherwise the `]` was URL content: go back to the trail.
        _ => State::Retry(StateName::GfmAutolinkLiteralTrail),
    }
}
/// In a character-reference-like trail, after `&`.
///
/// ```markdown
/// > | https://example.com&amp;).
///                         ^
/// ```
pub fn trail_char_ref_start(tokenizer: &mut Tokenizer) -> State {
    // A candidate entity must begin with a letter.
    match tokenizer.current {
        Some(b'A'..=b'Z' | b'a'..=b'z') => {
            State::Retry(StateName::GfmAutolinkLiteralTrailCharRefInside)
        }
        _ => State::Nok,
    }
}
/// In a character-reference-like trail.
///
/// ```markdown
/// > | https://example.com&amp;).
///                          ^
/// ```
pub fn trail_char_ref_inside(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'A'..=b'Z' | b'a'..=b'z')) {
        // More letters of the entity name.
        tokenizer.consume();
        State::Next(StateName::GfmAutolinkLiteralTrailCharRefInside)
    } else if tokenizer.current == Some(b';') {
        // Well-formed entity: treat it as trailing, switch back to the trail.
        tokenizer.consume();
        State::Next(StateName::GfmAutolinkLiteralTrail)
    } else {
        // Malformed entity: the `&…` was URL content.
        State::Nok
    }
}
/// Resolve: postprocess text to find email autolink literals.
///
/// Scans every top-level `Data` event for `@`, peeks backwards for atext and
/// an optional `mailto:`/`xmpp:` protocol, peeks forwards for a domain, and
/// splits the `Data` event into data/link/data pieces where matches occur.
pub fn resolve(tokenizer: &mut Tokenizer) {
    // Apply pending map edits first so the event indices below are stable.
    tokenizer.map.consume(&mut tokenizer.events);
    let mut index = 0;
    // Number of currently open links: literals must not nest inside links.
    let mut links = 0;
    while index < tokenizer.events.len() {
        let event = &tokenizer.events[index];
        if event.kind == Kind::Enter {
            if event.name == Name::Link {
                links += 1;
            }
        } else {
            if event.name == Name::Data && links == 0 {
                let slice = Slice::from_position(
                    tokenizer.parse_state.bytes,
                    &Position::from_exit_event(&tokenizer.events, index),
                );
                let bytes = slice.bytes;
                let mut byte_index = 0;
                // Replacement events for this data span, if any emails match.
                let mut replace = Vec::new();
                // `point` walks through the span; starts at the data enter.
                let mut point = tokenizer.events[index - 1].point.clone();
                let start_index = point.index;
                // Lower bound in `bytes`: nothing before `min` (already
                // emitted as data or link) may be reused.
                let mut min = 0;
                while byte_index < bytes.len() {
                    // Every `@` is a candidate email autolink literal.
                    if bytes[byte_index] == b'@' {
                        let mut range = (0, 0, Name::GfmAutolinkLiteralEmail);
                        // Peek back for atext (the part before `@`)…
                        if let Some(start) = peek_bytes_atext(bytes, min, byte_index) {
                            // …and for an optional `mailto:`/`xmpp:` prefix.
                            let (start, kind) = peek_protocol(bytes, min, start);
                            // Peek forward for the domain (after the `@`).
                            if let Some(end) = peek_bytes_email_domain(
                                bytes,
                                byte_index + 1,
                                kind == Name::GfmAutolinkLiteralXmpp,
                            ) {
                                // Note: normally we’d truncate trailing
                                // punctuation from the link.
                                // However, email autolink literals cannot
                                // contain any of those markers, except for
                                // `.`, but that can only occur if it isn’t
                                // trailing.
                                // So we can ignore truncating while
                                // postprocessing!
                                range = (start, end, kind);
                            }
                        }
                        if range.1 != 0 {
                            // Continue scanning after this literal.
                            byte_index = range.1;
                            // If there is something between the last link
                            // (or `min`) and this link, emit it as data.
                            if min != range.0 {
                                replace.push(Event {
                                    kind: Kind::Enter,
                                    name: Name::Data,
                                    point: point.clone(),
                                    link: None,
                                });
                                point = point
                                    .shift_to(tokenizer.parse_state.bytes, start_index + range.0);
                                replace.push(Event {
                                    kind: Kind::Exit,
                                    name: Name::Data,
                                    point: point.clone(),
                                    link: None,
                                });
                            }
                            // Add the link.
                            replace.push(Event {
                                kind: Kind::Enter,
                                name: range.2.clone(),
                                point: point.clone(),
                                link: None,
                            });
                            point =
                                point.shift_to(tokenizer.parse_state.bytes, start_index + range.1);
                            replace.push(Event {
                                kind: Kind::Exit,
                                name: range.2.clone(),
                                point: point.clone(),
                                link: None,
                            });
                            min = range.1;
                        }
                    }
                    byte_index += 1;
                }
                // If there was a link, and we have more bytes left, the rest
                // of the span becomes trailing data.
                if min != 0 && min < bytes.len() {
                    replace.push(Event {
                        kind: Kind::Enter,
                        name: Name::Data,
                        point: point.clone(),
                        link: None,
                    });
                    replace.push(Event {
                        kind: Kind::Exit,
                        name: Name::Data,
                        point: event.point.clone(),
                        link: None,
                    });
                }
                // If there were links, swap the original data enter/exit
                // pair (2 events, starting at `index - 1`) for the new list.
                if !replace.is_empty() {
                    tokenizer.map.add(index - 1, 2, replace);
                }
            }
            if event.name == Name::Link {
                links -= 1;
            }
        }
        index += 1;
    }
}
/// Move back past atext (the part of an email address before the `@`).
///
/// Moving back is only used when post processing text: so for the email
/// address algorithm.
///
/// ```markdown
/// > | a contact@example.org b
///       ^-- to
///              ^-- from
/// ```
fn peek_bytes_atext(bytes: &[u8], min: usize, end: usize) -> Option<usize> {
    // Simplified atext, matching `cmark-gfm`.
    // See `email_atext` in `autolink.rs` for a similar algorithm.
    // Source: <https://github.com/github/cmark-gfm/blob/ef1cfcb/extensions/autolink.c#L301>.
    fn is_atext(byte: u8) -> bool {
        matches!(
            byte,
            b'+' | b'-' | b'.' | b'0'..=b'9' | b'A'..=b'Z' | b'_' | b'a'..=b'z'
        )
    }
    let mut start = end;
    while start > min && is_atext(bytes[start - 1]) {
        start -= 1;
    }
    // Empty atext is not an address.
    // A `/` directly before the atext also rules it out — the reference code
    // is a bit weird, but that’s what it results in; every other preceding
    // character is allowed.
    // Source: <https://github.com/github/cmark-gfm/blob/ef1cfcb/extensions/autolink.c#L307>.
    if start == end || (start > min && bytes[start - 1] == b'/') {
        None
    } else {
        Some(start)
    }
}
/// Move back past a `mailto:` or `xmpp:` protocol, if present.
///
/// Moving back is only used when post processing text: so for the email
/// address algorithm.
///
/// ```markdown
/// > | a mailto:contact@example.org b
///       ^-- to
///              ^-- from
/// ```
fn peek_protocol(bytes: &[u8], min: usize, end: usize) -> (usize, Name) {
    let mut index = end;
    // A protocol must sit directly before the atext, separated by a `:`.
    if index == min || bytes[index - 1] != b':' {
        return (end, Name::GfmAutolinkLiteralEmail);
    }
    index -= 1;
    // Take the alphanumerical protocol name backwards.
    while index > min && bytes[index - 1].is_ascii_alphanumeric() {
        index -= 1;
    }
    let name = Slice::from_indices(bytes, index, end - 1)
        .as_str()
        .to_ascii_lowercase();
    match name.as_str() {
        "mailto" => (index, Name::GfmAutolinkLiteralMailto),
        "xmpp" => (index, Name::GfmAutolinkLiteralXmpp),
        // Unknown protocol: keep the plain email, starting at the atext.
        _ => (end, Name::GfmAutolinkLiteralEmail),
    }
}
/// Move forward past an email domain (the part after the `@`).
///
/// Peeking like this is only used when post processing text: so for the
/// email address algorithm.
///
/// ```markdown
/// > | a contact@example.org b
///               ^-- from
///                          ^-- to
/// ```
fn peek_bytes_email_domain(bytes: &[u8], start: usize, xmpp: bool) -> Option<usize> {
    let mut index = start;
    let mut dot = false;
    // Move past “domain”.
    // The reference code is a bit overly complex as it handles the `@`, of
    // which there may be just one.
    // Source: <https://github.com/github/cmark-gfm/blob/ef1cfcb/extensions/autolink.c#L318>
    while index < bytes.len() {
        let byte = bytes[index];
        if byte == b'-' || byte == b'_' || byte.is_ascii_alphanumeric() || (xmpp && byte == b'/') {
            // Alphanumericals, `-`, `_`, and (for xmpp) `/` continue it.
        } else if byte == b'.'
            && bytes
                .get(index + 1)
                .map_or(false, |next| next.is_ascii_alphanumeric())
        {
            // A dot counts only when followed by an alphanumerical
            // (not `-` or `_`).
            dot = true;
        } else {
            break;
        }
        index += 1;
    }
    // A domain must be non-empty, must include a dot, and must end in an
    // alphabetical or `.`.
    // Source: <https://github.com/github/cmark-gfm/blob/ef1cfcb/extensions/autolink.c#L332>.
    let last_ok = index > start && (bytes[index - 1] == b'.' || bytes[index - 1].is_ascii_alphabetic());
    if dot && last_ok {
        Some(index)
    } else {
        None
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
//! GFM: Footnote definition occurs in the [document][] content type.
//!
//! ## Grammar
//!
//! Footnote definitions form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: `label` must start with `^` (and not be empty after it).
//! ; See the `label` construct for the BNF of that part.
//! gfm_footnote_definition_start ::= label ':' *space_or_tab
//!
//! ; Restriction: blank line allowed.
//! gfm_footnote_definition_cont ::= 4(space_or_tab)
//! ```
//!
//! Further lines that are not prefixed with `gfm_footnote_definition_cont`
//! cause the footnote definition to be exited, except when those lines are
//! lazy continuation or blank.
//! Like so many things in markdown, footnote definition too are complex.
//! See [*§ Phase 1: block structure* in `CommonMark`][commonmark_block] for
//! more on parsing details.
//!
//! See [`label`][label] for grammar, notes, and recommendations on that part.
//!
//! The `label` part is interpreted as the [string][] content type.
//! That means that [character escapes][character_escape] and
//! [character references][character_reference] are allowed.
//!
//! Definitions match to calls through identifiers.
//! To match, both labels must be equal after normalizing with
//! [`normalize_identifier`][].
//! One definition can match to multiple calls.
//! Multiple definitions with the same, normalized, identifier are ignored: the
//! first definition is preferred.
//! To illustrate, the definition with the content of `x` wins:
//!
//! ```markdown
//! [^a]: x
//! [^a]: y
//!
//! [^a]
//! ```
//!
//! Importantly, while labels *can* include [string][] content (character
//! escapes and character references), these are not considered when matching.
//! To illustrate, neither definition matches the call:
//!
//! ```markdown
//! [^a&b]: x
//! [^a\&b]: y
//!
//! [^a&b]
//! ```
//!
//! Because footnote definitions are containers (like block quotes and list
//! items), they can contain more footnote definitions, and they can include
//! calls to themselves.
//!
//! ## HTML
//!
//! GFM footnote definitions do not, on their own, relate to anything in HTML.
//! When matched with a [label end][label_end], which in turns matches to a
//! [GFM label start (footnote)][gfm_label_start_footnote], the definition
//! relates to several elements in HTML.
//!
//! When one or more definitions are called, a footnote section is generated
//! at the end of the document, using `<section>`, `<h2>`, and `<ol>` elements:
//!
//! ```html
//! <section data-footnotes="" class="footnotes"><h2 id="footnote-label" class="sr-only">Footnotes</h2>
//! <ol>…</ol>
//! </section>
//! ```
//!
//! Each definition is generated as a `<li>` in the `<ol>`, in the order they
//! were first called:
//!
//! ```html
//! <li id="user-content-fn-1">…</li>
//! ```
//!
//! Backreferences are injected at the end of the first paragraph, or, when
//! there is no paragraph, at the end of the definition.
//! When a definition is called multiple times, multiple backreferences are
//! generated.
//! Further backreferences use an extra counter in the `href` attribute and
//! visually in a `<span>` after `↩`.
//!
//! ```html
//! <a href="#user-content-fnref-1" data-footnote-backref="" class="data-footnote-backref" aria-label="Back to content">↩</a> <a href="#user-content-fnref-1-2" data-footnote-backref="" class="data-footnote-backref" aria-label="Back to content">↩<sup>2</sup></a>
//! ```
//!
//! See
//! [*§ 4.5.1 The `a` element*][html_a],
//! [*§ 4.3.6 The `h1`, `h2`, `h3`, `h4`, `h5`, and `h6` elements*][html_h],
//! [*§ 4.4.8 The `li` element*][html_li],
//! [*§ 4.4.5 The `ol` element*][html_ol],
//! [*§ 4.4.1 The `p` element*][html_p],
//! [*§ 4.3.3 The `section` element*][html_section], and
//! [*§ 4.5.19 The `sub` and `sup` elements*][html_sup]
//! in the HTML spec for more info.
//!
//! ## Recommendation
//!
//! When authoring markdown with footnotes, it’s recommended to use words
//! instead of numbers (or letters or anything with an order) as calls.
//! That makes it easier to reuse and reorder footnotes.
//!
//! It’s recommended to place footnotes definitions at the bottom of the document.
//!
//! ## Bugs
//!
//! GitHub’s own algorithm to parse footnote definitions contains several bugs.
//! These are not present in this project.
//! The issues relating to footnote definitions are:
//!
//! * [Footnote reference call identifiers are trimmed, but definition identifiers aren’t](https://github.com/github/cmark-gfm/issues/237)\
//! — initial and final whitespace in labels causes them not to match
//! * [Footnotes are matched case-insensitive, but links keep their casing, breaking them](https://github.com/github/cmark-gfm/issues/239)\
//! — using uppercase (or any character that will be percent encoded) in identifiers breaks links
//! * [Colons in footnotes generate links w/o `href`](https://github.com/github/cmark-gfm/issues/250)\
//! — colons in identifiers generate broken links
//! * [Character escape of `]` does not work in footnote identifiers](https://github.com/github/cmark-gfm/issues/240)\
//! — some character escapes don’t work
//! * [Footnotes in links are broken](https://github.com/github/cmark-gfm/issues/249)\
//! — while `CommonMark` prevents links in links, GitHub does not prevent footnotes (which turn into links) in links
//! * [Footnote-like brackets around image, break that image](https://github.com/github/cmark-gfm/issues/275)\
//! — images can’t be used in what looks like a footnote call
//! * [GFM footnotes: line ending in footnote definition label causes text to disappear](https://github.com/github/cmark-gfm/issues/282)\
//! — line endings in footnote definitions cause text to disappear
//!
//! ## Tokens
//!
//! * [`DefinitionMarker`][Name::DefinitionMarker]
//! * [`GfmFootnoteDefinition`][Name::GfmFootnoteDefinition]
//! * [`GfmFootnoteDefinitionLabel`][Name::GfmFootnoteDefinitionLabel]
//! * [`GfmFootnoteDefinitionLabelMarker`][Name::GfmFootnoteDefinitionLabelMarker]
//! * [`GfmFootnoteDefinitionLabelString`][Name::GfmFootnoteDefinitionLabelString]
//! * [`GfmFootnoteDefinitionMarker`][Name::GfmFootnoteDefinitionMarker]
//! * [`GfmFootnoteDefinitionPrefix`][Name::GfmFootnoteDefinitionPrefix]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`micromark-extension-gfm-footnote`](https://github.com/micromark/micromark-extension-gfm-footnote)
//!
//! > 👉 **Note**: Footnotes are not specified in GFM yet.
//! > See [`github/cmark-gfm#270`](https://github.com/github/cmark-gfm/issues/270)
//! > for the related issue.
//!
//! [document]: crate::construct::document
//! [string]: crate::construct::string
//! [character_reference]: crate::construct::character_reference
//! [character_escape]: crate::construct::character_escape
//! [label]: crate::construct::partial_label
//! [label_end]: crate::construct::label_end
//! [gfm_label_start_footnote]: crate::construct::gfm_label_start_footnote
//! [commonmark_block]: https://spec.commonmark.org/0.31/#phase-1-block-structure
//! [html_a]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-a-element
//! [html_h]: https://html.spec.whatwg.org/multipage/sections.html#the-h1,-h2,-h3,-h4,-h5,-and-h6-elements
//! [html_li]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-li-element
//! [html_ol]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-ol-element
//! [html_p]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-p-element
//! [html_section]: https://html.spec.whatwg.org/multipage/sections.html#the-section-element
//! [html_sup]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-sub-and-sup-elements
use crate::construct::partial_space_or_tab::space_or_tab_min_max;
use crate::event::{Content, Link, Name};
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{
constant::{LINK_REFERENCE_SIZE_MAX, TAB_SIZE},
normalize_identifier::normalize_identifier,
skip,
slice::{Position, Slice},
};
/// Start of GFM footnote definition.
///
/// ```markdown
/// > | [^a]: b
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if tokenizer
        .parse_state
        .options
        .constructs
        .gfm_footnote_definition
    {
        tokenizer.enter(Name::GfmFootnoteDefinition);
        if matches!(tokenizer.current, Some(b'\t' | b' ')) {
            tokenizer.attempt(
                State::Next(StateName::GfmFootnoteDefinitionLabelBefore),
                State::Nok,
            );
            // Eat leading whitespace: at most `TAB_SIZE - 1` columns when
            // indented code is on (more would form indented code instead),
            // otherwise unlimited.
            State::Retry(space_or_tab_min_max(
                tokenizer,
                1,
                if tokenizer.parse_state.options.constructs.code_indented {
                    TAB_SIZE - 1
                } else {
                    usize::MAX
                },
            ))
        } else {
            // No leading whitespace: go straight to the label.
            State::Retry(StateName::GfmFootnoteDefinitionLabelBefore)
        }
    } else {
        State::Nok
    }
}
/// Before the definition label (after optional whitespace).
///
/// ```markdown
/// > | [^a]: b
///     ^
/// ```
pub fn label_before(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'[') {
        // Open the prefix and label, and take the `[` marker.
        tokenizer.enter(Name::GfmFootnoteDefinitionPrefix);
        tokenizer.enter(Name::GfmFootnoteDefinitionLabel);
        tokenizer.enter(Name::GfmFootnoteDefinitionLabelMarker);
        tokenizer.consume();
        tokenizer.exit(Name::GfmFootnoteDefinitionLabelMarker);
        State::Next(StateName::GfmFootnoteDefinitionLabelAtMarker)
    } else {
        State::Nok
    }
}
/// In the label, at the caret.
///
/// ```markdown
/// > | [^a]: b
///      ^
/// ```
pub fn label_at_marker(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'^') => {
            // Take the `^` marker, then start the label text, which is
            // parsed as string content (escapes and references allowed).
            tokenizer.enter(Name::GfmFootnoteDefinitionMarker);
            tokenizer.consume();
            tokenizer.exit(Name::GfmFootnoteDefinitionMarker);
            tokenizer.enter(Name::GfmFootnoteDefinitionLabelString);
            tokenizer.enter_link(
                Name::Data,
                Link {
                    previous: None,
                    next: None,
                    content: Content::String,
                },
            );
            State::Next(StateName::GfmFootnoteDefinitionLabelInside)
        }
        _ => State::Nok,
    }
}
/// In label.
///
/// > 👉 **Note**: `cmark-gfm` prevents whitespace from occurring in footnote
/// > definition labels.
///
/// ```markdown
/// > | [^a]: b
///       ^
/// ```
pub fn label_inside(tokenizer: &mut Tokenizer) -> State {
    // Too long.
    if tokenizer.tokenize_state.size > LINK_REFERENCE_SIZE_MAX
        // Space or tab is not supported by GFM for some reason (`\n` and
        // `[` make sense).
        || matches!(tokenizer.current, None | Some(b'\t' | b'\n' | b' ' | b'['))
        // Closing brace with nothing.
        || (matches!(tokenizer.current, Some(b']')) && tokenizer.tokenize_state.size == 0)
    {
        tokenizer.tokenize_state.size = 0;
        State::Nok
    } else if matches!(tokenizer.current, Some(b']')) {
        // Non-empty label closed by `]`: seal the string and label tokens.
        tokenizer.tokenize_state.size = 0;
        tokenizer.exit(Name::Data);
        tokenizer.exit(Name::GfmFootnoteDefinitionLabelString);
        tokenizer.enter(Name::GfmFootnoteDefinitionLabelMarker);
        tokenizer.consume();
        tokenizer.exit(Name::GfmFootnoteDefinitionLabelMarker);
        tokenizer.exit(Name::GfmFootnoteDefinitionLabel);
        State::Next(StateName::GfmFootnoteDefinitionLabelAfter)
    } else {
        // Any other byte continues the label; a `\` may escape the next
        // byte (see `label_escape`).
        // `unwrap` is safe: `None` was handled in the first branch above.
        let next = if matches!(tokenizer.current.unwrap(), b'\\') {
            StateName::GfmFootnoteDefinitionLabelEscape
        } else {
            StateName::GfmFootnoteDefinitionLabelInside
        };
        tokenizer.consume();
        tokenizer.tokenize_state.size += 1;
        State::Next(next)
    }
}
/// After `\`, at a possibly-escaped special character.
///
/// > 👉 **Note**: `cmark-gfm` currently does not support escaped brackets:
/// > <https://github.com/github/cmark-gfm/issues/240>
///
/// ```markdown
/// > | [^a\*b]: c
///         ^
/// ```
pub fn label_escape(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'[' | b'\\' | b']')) {
        // An escapable byte: take it as part of the label.
        tokenizer.tokenize_state.size += 1;
        tokenizer.consume();
        State::Next(StateName::GfmFootnoteDefinitionLabelInside)
    } else {
        // Not escapable: reprocess the byte as plain label content.
        State::Retry(StateName::GfmFootnoteDefinitionLabelInside)
    }
}
/// After definition label.
///
/// ```markdown
/// > | [^a]: b
///         ^
/// ```
pub fn label_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b':') => {
            // Find the exit event of the label string we just parsed, to
            // read the identifier back out of the byte slice.
            let end = skip::to_back(
                &tokenizer.events,
                tokenizer.events.len() - 1,
                &[Name::GfmFootnoteDefinitionLabelString],
            );
            // Note: we don’t care about virtual spaces, so `as_str` is fine.
            let id = normalize_identifier(
                Slice::from_position(
                    tokenizer.parse_state.bytes,
                    &Position::from_exit_event(&tokenizer.events, end),
                )
                .as_str(),
            );
            // Note: we don’t care about uniqueness.
            // It’s likely that that doesn’t happen very frequently.
            // It is more likely that it wastes precious time.
            tokenizer.tokenize_state.gfm_footnote_definitions.push(id);
            tokenizer.enter(Name::DefinitionMarker);
            tokenizer.consume();
            tokenizer.exit(Name::DefinitionMarker);
            tokenizer.attempt(
                State::Next(StateName::GfmFootnoteDefinitionWhitespaceAfter),
                State::Nok,
            );
            // Any whitespace after the marker is eaten, forming indented code
            // is not possible.
            // No space is also fine, just like a block quote marker.
            State::Next(space_or_tab_min_max(tokenizer, 0, usize::MAX))
        }
        _ => State::Nok,
    }
}
/// After the whole definition prefix (`[^a]: ` and trailing whitespace).
///
/// ```markdown
/// > | [^a]: b
///           ^
/// ```
pub fn whitespace_after(tokenizer: &mut Tokenizer) -> State {
    // The prefix is complete; the definition content follows.
    tokenizer.exit(Name::GfmFootnoteDefinitionPrefix);
    State::Ok
}
/// Start of a footnote definition continuation line.
///
/// ```markdown
///   | [^a]: b
/// > |     c
///     ^
/// ```
pub fn cont_start(tokenizer: &mut Tokenizer) -> State {
    // Peek ahead: blank lines and filled lines continue differently.
    tokenizer.check(
        State::Next(StateName::GfmFootnoteDefinitionContBlank),
        State::Next(StateName::GfmFootnoteDefinitionContFilled),
    );
    State::Retry(StateName::BlankLineStart)
}
/// Continuation line that is blank.
///
/// ```markdown
///   | [^a]: b
/// > | ␠␠␊
///     ^
/// ```
pub fn cont_blank(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Eat up to a tab stop of whitespace; a blank line always continues.
        Some(b'\t' | b' ') => State::Retry(space_or_tab_min_max(tokenizer, 0, TAB_SIZE)),
        _ => State::Ok,
    }
}
/// Continuation line with content.
///
/// ```markdown
///   | [^a]: b
/// > |     c
///     ^
/// ```
pub fn cont_filled(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // A filled line must be indented by exactly `TAB_SIZE` to continue.
        Some(b'\t' | b' ') => State::Retry(space_or_tab_min_max(tokenizer, TAB_SIZE, TAB_SIZE)),
        _ => State::Nok,
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/gfm_label_start_footnote.rs | Rust | //! Label start (footnote) occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! Label start (footnote) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! gfm_label_start_footnote ::= '[' '^'
//! ```
//!
//! ## HTML
//!
//! Label start (footnote) does not, on its own, relate to anything in HTML.
//! When matched with a [label end][label_end], they together relate to `<sup>`
//! and `<a>` elements in HTML.
//! See [*§ 4.5.19 The `sub` and `sup` elements*][html_sup] and
//! [*§ 4.5.1 The `a` element*][html_a] in the HTML spec for more info.
//! Without an end, the characters (`[^`) are output.
//!
//! ## Tokens
//!
//! * [`GfmFootnoteCallLabel`][Name::GfmFootnoteCallLabel]
//! * [`GfmFootnoteCallMarker`][Name::GfmFootnoteCallMarker]
//! * [`LabelMarker`][Name::LabelMarker]
//!
//! ## References
//!
//! * [`micromark-extension-gfm-footnote`](https://github.com/micromark/micromark-extension-gfm-footnote)
//!
//! > 👉 **Note**: Footnotes are not specified in GFM yet.
//! > See [`github/cmark-gfm#270`](https://github.com/github/cmark-gfm/issues/270)
//! > for the related issue.
//!
//! [text]: crate::construct::text
//! [label_end]: crate::construct::label_end
//! [html_a]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-a-element
//! [html_sup]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-sub-and-sup-elements
use crate::event::Name;
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::tokenizer::{LabelKind, LabelStart, Tokenizer};
/// Start of label (footnote) start.
///
/// ```markdown
/// > | a [^b] c
///       ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    let enabled = tokenizer
        .parse_state
        .options
        .constructs
        .gfm_label_start_footnote;
    // Bail out unless the construct is turned on and we sit on the opening
    // bracket.
    if !enabled || tokenizer.current != Some(b'[') {
        return State::Nok;
    }
    tokenizer.enter(Name::GfmFootnoteCallLabel);
    tokenizer.enter(Name::LabelMarker);
    tokenizer.consume();
    tokenizer.exit(Name::LabelMarker);
    State::Next(StateName::GfmLabelStartFootnoteOpen)
}
/// After `[`, at `^`.
///
/// ```markdown
/// > | a [^b] c
///        ^
/// ```
pub fn open(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current != Some(b'^') {
        return State::Nok;
    }
    tokenizer.enter(Name::GfmFootnoteCallMarker);
    tokenizer.consume();
    tokenizer.exit(Name::GfmFootnoteCallMarker);
    tokenizer.exit(Name::GfmFootnoteCallLabel);
    // Record this as a potential label start; the span `len - 6 .. len - 1`
    // covers the three enter/exit pairs just produced.
    let len = tokenizer.events.len();
    tokenizer.tokenize_state.label_starts.push(LabelStart {
        kind: LabelKind::GfmFootnote,
        start: (len - 6, len - 1),
        inactive: false,
    });
    tokenizer.register_resolver_before(ResolveName::Label);
    State::Ok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/gfm_table.rs | Rust | //! GFM: table occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! Tables form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! gfm_table ::= gfm_table_head 0*(eol gfm_table_body_row)
//!
//! ; Restriction: both rows must have the same number of cells.
//! gfm_table_head ::= gfm_table_row eol gfm_table_delimiter_row
//!
//! gfm_table_row ::= ['|'] gfm_table_cell 0*('|' gfm_table_cell) ['|'] *space_or_tab
//! gfm_table_cell ::= *space_or_tab gfm_table_text *space_or_tab
//! gfm_table_text ::= 0*(line - '\\' - '|' | '\\' ['\\' | '|'])
//!
//! gfm_table_delimiter_row ::= ['|'] gfm_table_delimiter_cell 0*('|' gfm_table_delimiter_cell) ['|'] *space_or_tab
//! gfm_table_delimiter_cell ::= *space_or_tab gfm_table_delimiter_value *space_or_tab
//! gfm_table_delimiter_value ::= [':'] 1*'-' [':']
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! The above grammar shows that basically anything can be a cell or a row.
//! The main thing that makes something a row, is that it occurs directly before
//! or after a delimiter row, or after another row.
//!
//! It is not required for a table to have a body: it can end right after the
//! delimiter row.
//!
//! Each column can be marked with an alignment.
//! The alignment marker is a colon (`:`) used before and/or after delimiter row
//! filler.
//! To illustrate:
//!
//! ```markdown
//! | none | left | right | center |
//! | ---- | :--- | ----: | :----: |
//! ```
//!
//! The number of cells in the delimiter row, is the number of columns of the
//! table.
//! Only the head row is required to have the same number of cells.
//! Body rows are not required to have a certain number of cells.
//! For body rows that have less cells than the number of columns of the table,
//! empty cells are injected.
//! When a row has more cells than the number of columns of the table, the
//! superfluous cells are dropped.
//! To illustrate:
//!
//! ```markdown
//! | a | b |
//! | - | - |
//! | c |
//! | d | e | f |
//! ```
//!
//! Yields:
//!
//! ```html
//! <table>
//! <thead>
//! <tr>
//! <th>a</th>
//! <th>b</th>
//! </tr>
//! </thead>
//! <tbody>
//! <tr>
//! <td>c</td>
//! <td></td>
//! </tr>
//! <tr>
//! <td>d</td>
//! <td>e</td>
//! </tr>
//! </tbody>
//! </table>
//! ```
//!
//! Each cell’s text is interpreted as the [text][] content type.
//! That means that it can include constructs such as [attention][attention].
//!
//! The grammar for cells prohibits the use of `|` in them.
//! To use pipes in cells, encode them as a character reference or character
//! escape: `|` (or `|`, `|`, `|`, `|`) or
//! `\|`.
//!
//! Escapes will typically work, but they are not supported in
//! [code (text)][raw_text] (and the math (text) extension).
//! To work around this, GitHub came up with a rather weird “trick”.
//! When inside a table cell *and* inside code, escaped pipes *are* decoded.
//! To illustrate:
//!
//! ```markdown
//! | Name | Character |
//! | - | - |
//! | Left curly brace | `{` |
//! | Pipe | `\|` |
//! | Right curly brace | `}` |
//! ```
//!
//! Yields:
//!
//! ```html
//! <table>
//! <thead>
//! <tr>
//! <th>Name</th>
//! <th>Character</th>
//! </tr>
//! </thead>
//! <tbody>
//! <tr>
//! <td>Left curly brace</td>
//! <td><code>{</code></td>
//! </tr>
//! <tr>
//! <td>Pipe</td>
//! <td><code>|</code></td>
//! </tr>
//! <tr>
//! <td>Right curly brace</td>
//! <td><code>}</code></td>
//! </tr>
//! </tbody>
//! </table>
//! ```
//!
//! > 👉 **Note**: no other character can be escaped like this.
//! > Escaping pipes in code does not work when not inside a table, either.
//!
//! ## HTML
//!
//! GFM tables relate to several HTML elements: `<table>`, `<tbody>`, `<td>`,
//! `<th>`, `<thead>`, and `<tr>`.
//! See
//! [*§ 4.9.1 The `table` element*][html_table],
//! [*§ 4.9.5 The `tbody` element*][html_tbody],
//! [*§ 4.9.9 The `td` element*][html_td],
//! [*§ 4.9.10 The `th` element*][html_th],
//! [*§ 4.9.6 The `thead` element*][html_thead], and
//! [*§ 4.9.8 The `tr` element*][html_tr]
//! in the HTML spec for more info.
//!
//! If the alignment of a column is left, right, or center, a deprecated
//! `align` attribute is added to each `<th>` and `<td>` element belonging to
//! that column.
//! That attribute is interpreted by browsers as if a CSS `text-align` property
//! was included, with its value set to that same keyword.
//!
//! ## Recommendation
//!
//! When authoring markdown with GFM tables, it’s recommended to *always* put
//! pipes around cells.
//! Without them, it can be hard to infer whether the table will work, how many
//! columns there are, and which column you are currently editing.
//!
//! It is recommended to not use many columns, as it results in very long lines,
//! making it hard to infer which column you are currently editing.
//!
//! For larger tables, particularly when cells vary in size, it is recommended
//! *not* to manually “pad” cell text.
//! While it can look better, it results in a lot of time spent realigning
//! everything when a new, longer cell is added or the longest cell removed, as
//! every row then must be changed.
//! Other than costing time, it also causes large diffs in Git.
//!
//! To illustrate, when authoring large tables, it is discouraged to pad cells
//! like this:
//!
//! ```markdown
//! | Alpha bravo charlie | delta |
//! | ------------------- | -----------------: |
//! | Echo | Foxtrot golf hotel |
//! ```
//!
//! Instead, use single spaces (and single filler dashes):
//!
//! ```markdown
//! | Alpha bravo charlie | delta |
//! | - | -: |
//! | Echo | Foxtrot golf hotel |
//! ```
//!
//! ## Bugs
//!
//! GitHub’s own algorithm to parse tables contains a bug.
//! This bug is not present in this project.
//! The issue relating to tables is:
//!
//! * [GFM tables: escaped escapes are incorrectly treated as escapes](https://github.com/github/cmark-gfm/issues/277)
//!
//! ## Tokens
//!
//! * [`GfmTable`][Name::GfmTable]
//! * [`GfmTableBody`][Name::GfmTableBody]
//! * [`GfmTableCell`][Name::GfmTableCell]
//! * [`GfmTableCellDivider`][Name::GfmTableCellDivider]
//! * [`GfmTableCellText`][Name::GfmTableCellText]
//! * [`GfmTableDelimiterCell`][Name::GfmTableDelimiterCell]
//! * [`GfmTableDelimiterCellValue`][Name::GfmTableDelimiterCellValue]
//! * [`GfmTableDelimiterFiller`][Name::GfmTableDelimiterFiller]
//! * [`GfmTableDelimiterMarker`][Name::GfmTableDelimiterMarker]
//! * [`GfmTableDelimiterRow`][Name::GfmTableDelimiterRow]
//! * [`GfmTableHead`][Name::GfmTableHead]
//! * [`GfmTableRow`][Name::GfmTableRow]
//! * [`LineEnding`][Name::LineEnding]
//!
//! ## References
//!
//! * [`micromark-extension-gfm-table`](https://github.com/micromark/micromark-extension-gfm-table)
//! * [*§ 4.10 Tables (extension)* in `GFM`](https://github.github.com/gfm/#tables-extension-)
//!
//! [flow]: crate::construct::flow
//! [text]: crate::construct::text
//! [attention]: crate::construct::attention
//! [raw_text]: crate::construct::raw_text
//! [html_table]: https://html.spec.whatwg.org/multipage/tables.html#the-table-element
//! [html_tbody]: https://html.spec.whatwg.org/multipage/tables.html#the-tbody-element
//! [html_td]: https://html.spec.whatwg.org/multipage/tables.html#the-td-element
//! [html_th]: https://html.spec.whatwg.org/multipage/tables.html#the-th-element
//! [html_thead]: https://html.spec.whatwg.org/multipage/tables.html#the-thead-element
//! [html_tr]: https://html.spec.whatwg.org/multipage/tables.html#the-tr-element
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::event::{Content, Event, Kind, Link, Name};
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
use crate::util::{constant::TAB_SIZE, skip::opt_back as skip_opt_back};
use alloc::vec;
/// Start of a GFM table.
///
/// If there is a valid table row or table head before, then we try to parse
/// another row.
/// Otherwise, we try to parse a head.
///
/// ```markdown
/// > | | a |
///     ^
///   | | - |
/// > | | b |
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if !tokenizer.parse_state.options.constructs.gfm_table {
        return State::Nok;
    }
    // Look back past eols/whitespace: when the previous flow event is a
    // table head or a table row, this line continues that table.
    let continues_table = !tokenizer.pierce
        && !tokenizer.events.is_empty()
        && matches!(
            tokenizer.events[skip_opt_back(
                &tokenizer.events,
                tokenizer.events.len() - 1,
                &[Name::LineEnding, Name::SpaceOrTab],
            )]
            .name,
            Name::GfmTableHead | Name::GfmTableRow
        );
    if continues_table {
        State::Retry(StateName::GfmTableBodyRowStart)
    } else {
        State::Retry(StateName::GfmTableHeadRowBefore)
    }
}
/// Before table head row.
///
/// ```markdown
/// > | | a |
///     ^
///   | | - |
///   | | b |
/// ```
pub fn head_row_before(tokenizer: &mut Tokenizer) -> State {
    tokenizer.enter(Name::GfmTableHead);
    tokenizer.enter(Name::GfmTableRow);
    match tokenizer.current {
        Some(b'\t' | b' ') => {
            // Eat leading indent first, then re-check in `head_row_start`.
            tokenizer.attempt(State::Next(StateName::GfmTableHeadRowStart), State::Nok);
            // When indented code is enabled, at most 3 spaces are allowed
            // (4 would be code); otherwise indent is unbounded.
            let max = if tokenizer.parse_state.options.constructs.code_indented {
                TAB_SIZE - 1
            } else {
                usize::MAX
            };
            State::Retry(space_or_tab_min_max(tokenizer, 0, max))
        }
        _ => State::Retry(StateName::GfmTableHeadRowStart),
    }
}
/// Before table head row, after whitespace.
///
/// ```markdown
/// > | | a |
///     ^
///   | | - |
///   | | b |
/// ```
pub fn head_row_start(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        // Whitespace remains after the bounded indent attempt: that means
        // 4+ spaces, which is not a table.
        State::Nok
    } else if tokenizer.current == Some(b'|') {
        State::Retry(StateName::GfmTableHeadRowBreak)
    } else {
        tokenizer.tokenize_state.seen = true;
        // A first byte that is not a pipe counts double.
        tokenizer.tokenize_state.size_b += 1;
        State::Retry(StateName::GfmTableHeadRowBreak)
    }
}
/// At break in table head row.
///
/// ```markdown
/// > | | a |
///     ^
///         ^
///           ^
///   | | - |
///   | | b |
/// ```
pub fn head_row_break(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        None => {
            // Eof before an eol: cannot be a table head; reset bookkeeping.
            tokenizer.tokenize_state.seen = false;
            tokenizer.tokenize_state.size = 0;
            tokenizer.tokenize_state.size_b = 0;
            State::Nok
        }
        Some(b'\n') => {
            // If anything other than one pipe (ignoring whitespace) was used, it’s fine.
            if tokenizer.tokenize_state.size_b > 1 {
                tokenizer.tokenize_state.size_b = 0;
                // Feel free to interrupt:
                tokenizer.interrupt = true;
                tokenizer.exit(Name::GfmTableRow);
                tokenizer.enter(Name::LineEnding);
                tokenizer.consume();
                tokenizer.exit(Name::LineEnding);
                State::Next(StateName::GfmTableHeadDelimiterStart)
            } else {
                // A lone pipe is not a row; reset bookkeeping.
                tokenizer.tokenize_state.seen = false;
                tokenizer.tokenize_state.size = 0;
                tokenizer.tokenize_state.size_b = 0;
                State::Nok
            }
        }
        Some(b'\t' | b' ') => {
            // Skip whitespace between cells, then come back here.
            tokenizer.attempt(State::Next(StateName::GfmTableHeadRowBreak), State::Nok);
            State::Retry(space_or_tab(tokenizer))
        }
        _ => {
            // `size_b` counts meaningful (non-whitespace) bytes-ish markers.
            tokenizer.tokenize_state.size_b += 1;
            // Whether a delimiter was seen.
            if tokenizer.tokenize_state.seen {
                tokenizer.tokenize_state.seen = false;
                // Header cell count.
                tokenizer.tokenize_state.size += 1;
            }
            if tokenizer.current == Some(b'|') {
                tokenizer.enter(Name::GfmTableCellDivider);
                tokenizer.consume();
                tokenizer.exit(Name::GfmTableCellDivider);
                // Whether a delimiter was seen.
                tokenizer.tokenize_state.seen = true;
                State::Next(StateName::GfmTableHeadRowBreak)
            } else {
                // Anything else is cell data.
                tokenizer.enter(Name::Data);
                State::Retry(StateName::GfmTableHeadRowData)
            }
        }
    }
}
/// In table head row data.
///
/// ```markdown
/// > | | a |
///       ^
///   | | - |
///   | | b |
/// ```
pub fn head_row_data(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Whitespace, a pipe, an eol, or the eof all end cell data.
        None | Some(b'\t' | b'\n' | b' ' | b'|') => {
            tokenizer.exit(Name::Data);
            State::Retry(StateName::GfmTableHeadRowBreak)
        }
        // A backslash switches to the escape sub-state, so `\\` and `\|`
        // are handled together.
        Some(b'\\') => {
            tokenizer.consume();
            State::Next(StateName::GfmTableHeadRowEscape)
        }
        _ => {
            tokenizer.consume();
            State::Next(StateName::GfmTableHeadRowData)
        }
    }
}
/// In table head row escape.
///
/// ```markdown
/// > | | a\-b |
///         ^
///   | | ---- |
///   | | c    |
/// ```
pub fn head_row_escape(tokenizer: &mut Tokenizer) -> State {
    // Only backslashes and pipes can be escaped; eat one of those.
    if matches!(tokenizer.current, Some(b'\\' | b'|')) {
        tokenizer.consume();
        State::Next(StateName::GfmTableHeadRowData)
    } else {
        // Not an escapable byte: fall back to plain data handling.
        State::Retry(StateName::GfmTableHeadRowData)
    }
}
/// Before delimiter row.
///
/// ```markdown
///   | | a |
/// > | | - |
///     ^
///   | | b |
/// ```
pub fn head_delimiter_start(tokenizer: &mut Tokenizer) -> State {
    // Reset `interrupt`.
    tokenizer.interrupt = false;
    // A lazy or pierced continuation line cannot carry the delimiter row.
    if tokenizer.lazy || tokenizer.pierce {
        tokenizer.tokenize_state.size = 0;
        State::Nok
    } else {
        tokenizer.enter(Name::GfmTableDelimiterRow);
        // Track if we’ve seen a `:` or `|`.
        tokenizer.tokenize_state.seen = false;
        match tokenizer.current {
            Some(b'\t' | b' ') => {
                // On failure, go through the `Nok` state so bookkeeping is
                // reset there.
                tokenizer.attempt(
                    State::Next(StateName::GfmTableHeadDelimiterBefore),
                    State::Next(StateName::GfmTableHeadDelimiterNok),
                );
                // Allow at most 3 spaces of indent when indented code is on
                // (4 would be code), otherwise any amount.
                State::Retry(space_or_tab_min_max(
                    tokenizer,
                    0,
                    if tokenizer.parse_state.options.constructs.code_indented {
                        TAB_SIZE - 1
                    } else {
                        usize::MAX
                    },
                ))
            }
            _ => State::Retry(StateName::GfmTableHeadDelimiterBefore),
        }
    }
}
/// Before delimiter row, after optional whitespace.
///
/// Reused when a `|` is found later, to parse another cell.
///
/// ```markdown
///   | | a |
/// > | | - |
///     ^
///   | | b |
/// ```
pub fn head_delimiter_before(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'-' | b':')) {
        State::Retry(StateName::GfmTableHeadDelimiterValueBefore)
    } else if tokenizer.current == Some(b'|') {
        tokenizer.tokenize_state.seen = true;
        // A leading pipe opens a cell divider marker.
        tokenizer.enter(Name::GfmTableCellDivider);
        tokenizer.consume();
        tokenizer.exit(Name::GfmTableCellDivider);
        State::Next(StateName::GfmTableHeadDelimiterCellBefore)
    } else {
        // More whitespace / empty row not allowed at start.
        State::Retry(StateName::GfmTableHeadDelimiterNok)
    }
}
/// After `|`, before delimiter cell.
///
/// ```markdown
///   | | a |
/// > | | - |
///      ^
/// ```
pub fn head_delimiter_cell_before(tokenizer: &mut Tokenizer) -> State {
    // Optional padding before the cell value.
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.attempt(
            State::Next(StateName::GfmTableHeadDelimiterValueBefore),
            State::Nok,
        );
        State::Retry(space_or_tab(tokenizer))
    } else {
        State::Retry(StateName::GfmTableHeadDelimiterValueBefore)
    }
}
/// Before delimiter cell value.
///
/// ```markdown
///   | | a |
/// > | | - |
///       ^
/// ```
pub fn head_delimiter_value_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        None | Some(b'\n') => State::Retry(StateName::GfmTableHeadDelimiterCellAfter),
        Some(b':') => {
            // Align: left.
            // `size_b` counts delimiter cells here; `seen` marks that an
            // alignment/pipe marker occurred at all.
            tokenizer.tokenize_state.size_b += 1;
            tokenizer.tokenize_state.seen = true;
            tokenizer.enter(Name::GfmTableDelimiterMarker);
            tokenizer.consume();
            tokenizer.exit(Name::GfmTableDelimiterMarker);
            State::Next(StateName::GfmTableHeadDelimiterLeftAlignmentAfter)
        }
        Some(b'-') => {
            // Align: none.
            tokenizer.tokenize_state.size_b += 1;
            State::Retry(StateName::GfmTableHeadDelimiterLeftAlignmentAfter)
        }
        _ => State::Retry(StateName::GfmTableHeadDelimiterNok),
    }
}
/// After delimiter cell left alignment marker.
///
/// ```markdown
///   | | a  |
/// > | | :- |
///        ^
/// ```
pub fn head_delimiter_left_alignment_after(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'-') {
        // Filler dashes must follow.
        tokenizer.enter(Name::GfmTableDelimiterFiller);
        State::Retry(StateName::GfmTableHeadDelimiterFiller)
    } else {
        // Anything else is not ok after the left-align colon.
        State::Retry(StateName::GfmTableHeadDelimiterNok)
    }
}
/// In delimiter cell filler.
///
/// ```markdown
///   | | a |
/// > | | - |
///       ^
/// ```
pub fn head_delimiter_filler(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'-') => {
            // Keep eating filler dashes.
            tokenizer.consume();
            State::Next(StateName::GfmTableHeadDelimiterFiller)
        }
        Some(b':') => {
            // Align is `center` if it was `left`, `right` otherwise.
            tokenizer.tokenize_state.seen = true;
            // Close the filler before opening the marker.
            tokenizer.exit(Name::GfmTableDelimiterFiller);
            tokenizer.enter(Name::GfmTableDelimiterMarker);
            tokenizer.consume();
            tokenizer.exit(Name::GfmTableDelimiterMarker);
            State::Next(StateName::GfmTableHeadDelimiterRightAlignmentAfter)
        }
        _ => {
            // End of filler without a right-align colon.
            tokenizer.exit(Name::GfmTableDelimiterFiller);
            State::Retry(StateName::GfmTableHeadDelimiterRightAlignmentAfter)
        }
    }
}
/// After delimiter cell right alignment marker.
///
/// ```markdown
///   | |  a |
/// > | | -: |
///         ^
/// ```
pub fn head_delimiter_right_alignment_after(tokenizer: &mut Tokenizer) -> State {
    // Optional padding after the cell value.
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.attempt(
            State::Next(StateName::GfmTableHeadDelimiterCellAfter),
            State::Nok,
        );
        State::Retry(space_or_tab(tokenizer))
    } else {
        State::Retry(StateName::GfmTableHeadDelimiterCellAfter)
    }
}
/// After delimiter cell.
///
/// ```markdown
///   | |  a |
/// > | | -: |
///          ^
/// ```
pub fn head_delimiter_cell_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        None | Some(b'\n') => {
            // Exit when:
            // * there was no `:` or `|` at all (it’s a thematic break or setext
            //   underline instead)
            // * the header cell count is not the delimiter cell count
            if !tokenizer.tokenize_state.seen
                || tokenizer.tokenize_state.size != tokenizer.tokenize_state.size_b
            {
                State::Retry(StateName::GfmTableHeadDelimiterNok)
            } else {
                // Reset.
                tokenizer.tokenize_state.seen = false;
                tokenizer.tokenize_state.size = 0;
                tokenizer.tokenize_state.size_b = 0;
                // Head (row + delimiter row) complete; the resolver will
                // later wrap everything in `GfmTable`/`GfmTableBody` events.
                tokenizer.exit(Name::GfmTableDelimiterRow);
                tokenizer.exit(Name::GfmTableHead);
                tokenizer.register_resolver(ResolveName::GfmTable);
                State::Ok
            }
        }
        // Another pipe: parse another delimiter cell.
        Some(b'|') => State::Retry(StateName::GfmTableHeadDelimiterBefore),
        _ => State::Retry(StateName::GfmTableHeadDelimiterNok),
    }
}
/// In delimiter row, at a disallowed byte.
///
/// ```markdown
///   | | a |
/// > | | x |
///       ^
/// ```
pub fn head_delimiter_nok(tokenizer: &mut Tokenizer) -> State {
    // Drop all delimiter-row bookkeeping before giving up.
    let tokenize_state = &mut tokenizer.tokenize_state;
    tokenize_state.size_b = 0;
    tokenize_state.size = 0;
    tokenize_state.seen = false;
    State::Nok
}
/// Before table body row.
///
/// ```markdown
///   | | a |
///   | | - |
/// > | | b |
///     ^
/// ```
pub fn body_row_start(tokenizer: &mut Tokenizer) -> State {
    // A lazy continuation line cannot be a body row.
    if tokenizer.lazy {
        return State::Nok;
    }
    tokenizer.enter(Name::GfmTableRow);
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.attempt(State::Next(StateName::GfmTableBodyRowBreak), State::Nok);
        // Blank lines and indented code were already attempted before we
        // got here, so any amount of leading whitespace is acceptable:
        State::Retry(space_or_tab_min_max(tokenizer, 0, usize::MAX))
    } else {
        State::Retry(StateName::GfmTableBodyRowBreak)
    }
}
/// At break in table body row.
///
/// ```markdown
///   | | a |
///   | | - |
/// > | | b |
///     ^
///       ^
///         ^
/// ```
pub fn body_row_break(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // The row ends at an eol or the eof.
        tokenizer.exit(Name::GfmTableRow);
        State::Ok
    } else if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        // Skip whitespace between cells, then come back here.
        tokenizer.attempt(State::Next(StateName::GfmTableBodyRowBreak), State::Nok);
        State::Retry(space_or_tab(tokenizer))
    } else if tokenizer.current == Some(b'|') {
        tokenizer.enter(Name::GfmTableCellDivider);
        tokenizer.consume();
        tokenizer.exit(Name::GfmTableCellDivider);
        State::Next(StateName::GfmTableBodyRowBreak)
    } else {
        // Anything else is cell content.
        tokenizer.enter(Name::Data);
        State::Retry(StateName::GfmTableBodyRowData)
    }
}
/// In table body row data.
///
/// ```markdown
///   | | a |
///   | | - |
/// > | | b |
///       ^
/// ```
pub fn body_row_data(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Whitespace, a pipe, an eol, or the eof all end cell data.
        None | Some(b'\t' | b'\n' | b' ' | b'|') => {
            tokenizer.exit(Name::Data);
            State::Retry(StateName::GfmTableBodyRowBreak)
        }
        // A backslash switches to the escape sub-state, so `\\` and `\|`
        // are handled together.
        Some(b'\\') => {
            tokenizer.consume();
            State::Next(StateName::GfmTableBodyRowEscape)
        }
        _ => {
            tokenizer.consume();
            State::Next(StateName::GfmTableBodyRowData)
        }
    }
}
/// In table body row escape.
///
/// ```markdown
///   | | a    |
///   | | ---- |
/// > | | b\-c |
///         ^
/// ```
pub fn body_row_escape(tokenizer: &mut Tokenizer) -> State {
    // Only backslashes and pipes can be escaped; eat one of those.
    if matches!(tokenizer.current, Some(b'\\' | b'|')) {
        tokenizer.consume();
        State::Next(StateName::GfmTableBodyRowData)
    } else {
        // Not an escapable byte: fall back to plain data handling.
        State::Retry(StateName::GfmTableBodyRowData)
    }
}
/// Resolve GFM table.
///
/// Walks the flat event list once and injects `GfmTable`, `GfmTableBody`,
/// and per-cell grouping events around the row/cell events produced while
/// tokenizing.
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    let mut index = 0;
    // Whether we are before the first cell content/divider of a row.
    let mut in_first_cell_awaiting_pipe = true;
    let mut in_row = false;
    let mut in_delimiter_row = false;
    // Cell slices as event indices:
    // `(exit-of-previous-cell, cell start, value start, value end)`,
    // with `0` meaning “unset” (see `flush_cell`).
    let mut last_cell = (0, 0, 0, 0);
    let mut cell = (0, 0, 0, 0);
    // Set between a head’s exit and the first body row’s enter.
    let mut after_head_awaiting_first_body_row = false;
    // Event index of the most recent head/row exit, used to close the table.
    let mut last_table_end = 0;
    let mut last_table_has_body = false;
    while index < tokenizer.events.len() {
        let event = &tokenizer.events[index];
        if event.kind == Kind::Enter {
            // Start of head.
            if event.name == Name::GfmTableHead {
                after_head_awaiting_first_body_row = false;
                // Inject previous (body end and) table end.
                if last_table_end != 0 {
                    flush_table_end(tokenizer, last_table_end, last_table_has_body);
                    last_table_has_body = false;
                    last_table_end = 0;
                }
                // Inject table start.
                let enter = Event {
                    kind: Kind::Enter,
                    name: Name::GfmTable,
                    point: tokenizer.events[index].point.clone(),
                    link: None,
                };
                tokenizer.map.add(index, 0, vec![enter]);
            } else if matches!(event.name, Name::GfmTableRow | Name::GfmTableDelimiterRow) {
                in_delimiter_row = event.name == Name::GfmTableDelimiterRow;
                in_row = true;
                in_first_cell_awaiting_pipe = true;
                last_cell = (0, 0, 0, 0);
                cell = (0, index + 1, 0, 0);
                // Inject table body start.
                if after_head_awaiting_first_body_row {
                    after_head_awaiting_first_body_row = false;
                    last_table_has_body = true;
                    let enter = Event {
                        kind: Kind::Enter,
                        name: Name::GfmTableBody,
                        point: tokenizer.events[index].point.clone(),
                        link: None,
                    };
                    tokenizer.map.add(index, 0, vec![enter]);
                }
            }
            // Cell data.
            else if in_row
                && matches!(
                    event.name,
                    Name::Data | Name::GfmTableDelimiterMarker | Name::GfmTableDelimiterFiller
                )
            {
                in_first_cell_awaiting_pipe = false;
                // First value in cell.
                if cell.2 == 0 {
                    // A pending previous cell can now be flushed: its exit
                    // goes where this cell started.
                    if last_cell.1 != 0 {
                        cell.0 = cell.1;
                        flush_cell(tokenizer, last_cell, in_delimiter_row, None);
                        last_cell = (0, 0, 0, 0);
                    }
                    cell.2 = index;
                }
            } else if event.name == Name::GfmTableCellDivider {
                if in_first_cell_awaiting_pipe {
                    // A leading pipe opens the first cell; nothing to flush.
                    in_first_cell_awaiting_pipe = false;
                } else {
                    if last_cell.1 != 0 {
                        cell.0 = cell.1;
                        flush_cell(tokenizer, last_cell, in_delimiter_row, None);
                    }
                    // The current cell becomes pending; a new one starts at
                    // this divider.
                    last_cell = cell;
                    cell = (last_cell.1, index, 0, 0);
                }
            }
            // Exit events.
        } else if event.name == Name::GfmTableHead {
            after_head_awaiting_first_body_row = true;
            last_table_end = index;
        } else if matches!(event.name, Name::GfmTableRow | Name::GfmTableDelimiterRow) {
            in_row = false;
            last_table_end = index;
            // Flush whichever cell is still open at the row’s end.
            if last_cell.1 != 0 {
                cell.0 = cell.1;
                flush_cell(tokenizer, last_cell, in_delimiter_row, Some(index));
            } else if cell.1 != 0 {
                flush_cell(tokenizer, cell, in_delimiter_row, Some(index));
            }
        } else if in_row
            && (matches!(
                event.name,
                Name::Data | Name::GfmTableDelimiterMarker | Name::GfmTableDelimiterFiller
            ))
        {
            // Track the latest value exit seen in this cell.
            cell.3 = index;
        }
        index += 1;
    }
    // Close the trailing table, if any.
    if last_table_end != 0 {
        flush_table_end(tokenizer, last_table_end, last_table_has_body);
    }
    tokenizer.map.consume(&mut tokenizer.events);
    None
}
/// Generate a cell.
///
/// `range` holds event indices:
/// `(exit-of-previous-cell, cell start, value start, value end)`, with `0`
/// meaning “unset”.
/// `row_end` is the row’s exit event index when this is the last cell.
fn flush_cell(
    tokenizer: &mut Tokenizer,
    range: (usize, usize, usize, usize),
    in_delimiter_row: bool,
    row_end: Option<usize>,
) {
    // Delimiter rows and data rows use different cell/value token names.
    let group_name = if in_delimiter_row {
        Name::GfmTableDelimiterCell
    } else {
        Name::GfmTableCell
    };
    let value_name = if in_delimiter_row {
        Name::GfmTableDelimiterCellValue
    } else {
        Name::GfmTableCellText
    };
    // Insert an exit for the previous cell, if there is one.
    //
    // ```markdown
    // > | | aa | bb | cc |
    //          ^-- exit
    //           ^^^^-- this cell
    // ```
    if range.0 != 0 {
        tokenizer.map.add(
            range.0,
            0,
            vec![Event {
                kind: Kind::Exit,
                name: group_name.clone(),
                point: tokenizer.events[range.0].point.clone(),
                link: None,
            }],
        );
    }
    // Insert enter of this cell.
    //
    // ```markdown
    // > | | aa | bb | cc |
    //           ^-- enter
    //           ^^^^-- this cell
    // ```
    tokenizer.map.add(
        range.1,
        0,
        vec![Event {
            kind: Kind::Enter,
            name: group_name.clone(),
            point: tokenizer.events[range.1].point.clone(),
            link: None,
        }],
    );
    // Insert text start at first data start and end at last data end, and
    // remove events between.
    //
    // ```markdown
    // > | | aa | bb | cc |
    //            ^-- enter
    //             ^-- exit
    //           ^^^^-- this cell
    // ```
    if range.2 != 0 {
        tokenizer.map.add(
            range.2,
            0,
            vec![Event {
                kind: Kind::Enter,
                name: value_name.clone(),
                point: tokenizer.events[range.2].point.clone(),
                link: None,
            }],
        );
        // A value start implies a value end was recorded too.
        debug_assert_ne!(range.3, 0);
        if !in_delimiter_row {
            // Mark the cell text for `text` subtokenization by linking it.
            tokenizer.events[range.2].link = Some(Link {
                previous: None,
                next: None,
                content: Content::Text,
            });
            // To do: positional info of the remaining `data` nodes likely have
            // to be fixed.
            // Drop the events between the first data enter and the last data
            // exit; subtokenization regenerates them.
            if range.3 > range.2 + 1 {
                let a = range.2 + 1;
                let b = range.3 - range.2 - 1;
                tokenizer.map.add(a, b, vec![]);
            }
        }
        tokenizer.map.add(
            range.3 + 1,
            0,
            vec![Event {
                kind: Kind::Exit,
                name: value_name,
                point: tokenizer.events[range.3].point.clone(),
                link: None,
            }],
        );
    }
    // Insert an exit for the last cell, if at the row end.
    //
    // ```markdown
    // > | | aa | bb | cc |
    //                    ^-- exit
    //               ^^^^^^-- this cell (the last one contains two “between” parts)
    // ```
    if let Some(row_end) = row_end {
        tokenizer.map.add(
            row_end,
            0,
            vec![Event {
                kind: Kind::Exit,
                name: group_name,
                point: tokenizer.events[row_end].point.clone(),
                link: None,
            }],
        );
    }
}
/// Generate table end (and table body end).
///
/// `index` is the exit event of the last head/row; `body` is whether a
/// `GfmTableBody` was opened and must be closed too.
fn flush_table_end(tokenizer: &mut Tokenizer, index: usize, body: bool) {
    // All injected exits share the position of the event at `index`.
    let point = tokenizer.events[index].point.clone();
    let mut exits = vec![];
    if body {
        // Close the body before closing the table itself.
        exits.push(Event {
            kind: Kind::Exit,
            name: Name::GfmTableBody,
            point: point.clone(),
            link: None,
        });
    }
    exits.push(Event {
        kind: Kind::Exit,
        name: Name::GfmTable,
        point,
        link: None,
    });
    tokenizer.map.add(index + 1, 0, exits);
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/gfm_task_list_item_check.rs | Rust | //! GFM: Task list item check occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! Checks form with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! gfm_task_list_item_check ::= '[' (0x09 | ' ' | 'X' | 'x') ']'
//! ```
//!
//! The check is only allowed at the start of the first paragraph, optionally
//! following zero or more definitions or a blank line, in a list item.
//! The check must be followed by whitespace, which is in turn followed by
//! non-whitespace.
//!
//! ## HTML
//!
//! Checks relate to the `<input>` element, in the checkbox state
//! (`type=checkbox`), in HTML.
//! See [*§ 4.10.5.1.15 Checkbox state (`type=checkbox`)*][html-input-checkbox]
//! in the HTML spec for more info.
//!
//! ## Recommendation
//!
//! It is recommended to use lowercase `x` (instead of uppercase `X`), because
//! in markdown, it is more common to use lowercase in places where casing does
//! not matter.
//! It is also recommended to use a space (instead of a tab), as there is no
//! benefit of using tabs in this case.
//!
//! ## Tokens
//!
//! * [`GfmTaskListItemCheck`][Name::GfmTaskListItemCheck]
//! * [`GfmTaskListItemMarker`][Name::GfmTaskListItemMarker]
//! * [`GfmTaskListItemValueChecked`][Name::GfmTaskListItemValueChecked]
//! * [`GfmTaskListItemValueUnchecked`][Name::GfmTaskListItemValueUnchecked]
//!
//! ## References
//!
//! * [`micromark-extension-gfm-task-list-item`](https://github.com/micromark/micromark-extension-gfm-task-list-item)
//! * [*§ 5.3 Task list items (extension)* in `GFM`](https://github.github.com/gfm/#task-list-items-extension-)
//!
//! [text]: crate::construct::text
//! [html-input-checkbox]: https://html.spec.whatwg.org/multipage/input.html#checkbox-state-(type=checkbox)
use crate::construct::partial_space_or_tab::space_or_tab;
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// At start of task list item check.
///
/// ```markdown
/// > | * [x] y.
///       ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // The check must be enabled, at the very start (`previous` is none) of
    // the first paragraph of a list item, and on an opening bracket.
    let applicable = tokenizer.parse_state.options.constructs.gfm_task_list_item
        && tokenizer
            .tokenize_state
            .document_at_first_paragraph_of_list_item
        && tokenizer.current == Some(b'[')
        && tokenizer.previous.is_none();
    if !applicable {
        return State::Nok;
    }
    tokenizer.enter(Name::GfmTaskListItemCheck);
    tokenizer.enter(Name::GfmTaskListItemMarker);
    tokenizer.consume();
    tokenizer.exit(Name::GfmTaskListItemMarker);
    State::Next(StateName::GfmTaskListItemCheckInside)
}
/// In task list item check.
///
/// ```markdown
/// > | * [x] y.
///        ^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    // Whitespace/eol means unchecked, `x`/`X` means checked; pick the token
    // name once, then tokenize the single byte the same way for both.
    let value = match tokenizer.current {
        Some(b'\t' | b'\n' | b' ') => Name::GfmTaskListItemValueUnchecked,
        Some(b'X' | b'x') => Name::GfmTaskListItemValueChecked,
        _ => return State::Nok,
    };
    tokenizer.enter(value.clone());
    tokenizer.consume();
    tokenizer.exit(value);
    State::Next(StateName::GfmTaskListItemCheckClose)
}
/// At close of task list item check, expecting `]`.
///
/// ```markdown
/// > | * [x] y.
///         ^
/// ```
pub fn close(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b']') {
        // Consume the closing bracket and finish the whole check.
        tokenizer.enter(Name::GfmTaskListItemMarker);
        tokenizer.consume();
        tokenizer.exit(Name::GfmTaskListItemMarker);
        tokenizer.exit(Name::GfmTaskListItemCheck);
        State::Next(StateName::GfmTaskListItemCheckAfter)
    } else {
        State::Nok
    }
}
/// After task list item check.
///
/// A check must be followed by an eol (more paragraph content on the next
/// line) or by whitespace with something after it on the same line.
///
/// ```markdown
/// > | * [x] y.
///          ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // EOL in paragraph means there must be something else after it.
        Some(b'\n') => State::Ok,
        // Space or tab?
        // Check what comes after.
        Some(b'\t' | b' ') => {
            // Register a single `check` frame whose ok-state continues at
            // `after_space_or_tab`, then retry into the whitespace sub-state
            // machine.
            // Fix: the previous code pushed a `check(Ok, Nok)` *and* an
            // `attempt(...)` before one `State::Retry`, leaving an extra
            // unresolved frame on the tokenizer's attempt stack.
            tokenizer.check(
                State::Next(StateName::GfmTaskListItemCheckAfterSpaceOrTab),
                State::Nok,
            );
            State::Retry(space_or_tab(tokenizer))
        }
        // EOF, or non-whitespace, both wrong.
        _ => State::Nok,
    }
}
/// After whitespace, after task list item check.
///
/// ```markdown
/// > | * [x] y.
///           ^
/// ```
pub fn after_space_or_tab(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // End of paragraph, after whitespace, after check, is not okay.
        None => State::Nok,
        // Something follows on the same line: fine.
        Some(_) => State::Ok,
    }
}
// Repo: wooorm/markdown-rs — CommonMark compliant markdown parser in Rust with ASTs and extensions.
// File: src/construct/hard_break_escape.rs
//! Hard break (escape) occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! Hard break (escape) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: followed by a line ending (that is part of the content
//! ; instead of ending it).
//! hard_break_escape ::= '\\'
//! ```
//!
//! It is also possible to create a hard break with a
//! [hard break (trailing)][hard_break_trailing].
//!
//! Punctuation characters can be escaped with a similar
//! construct: a [character escape][character_escape] is a backslash followed
//! by an ASCII punctuation character.
//! Arbitrary characters can be escaped with
//! [character references][character_reference].
//!
//! ## HTML
//!
//! Hard breaks in markdown relate to the HTML element `<br>`.
//! See [*§ 4.5.27 The `br` element* in the HTML spec][html] for more info.
//!
//! ## Recommendation
//!
//! Always use hard break (escape), never hard break (trailing).
//!
//! ## Tokens
//!
//! * [`HardBreakEscape`][Name::HardBreakEscape]
//!
//! ## References
//!
//! * [`hard-break-escape.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/hard-break-escape.js)
//! * [*§ 6.7 Hard line breaks* in `CommonMark`](https://spec.commonmark.org/0.31/#hard-line-breaks)
//!
//! [text]: crate::construct::text
//! [character_escape]: crate::construct::character_escape
//! [character_reference]: crate::construct::character_reference
//! [hard_break_trailing]: crate::construct::partial_whitespace
//! [html]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-br-element
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Start of hard break (escape), at `\`.
///
/// ```markdown
/// > | a\
///      ^
///   | b
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Bail out when the construct is disabled.
    if !tokenizer.parse_state.options.constructs.hard_break_escape {
        return State::Nok;
    }
    // Bail out when not at a backslash.
    if tokenizer.current != Some(b'\\') {
        return State::Nok;
    }
    tokenizer.enter(Name::HardBreakEscape);
    tokenizer.consume();
    State::Next(StateName::HardBreakEscapeAfter)
}
/// After `\`, at eol.
///
/// The backslash only counts as a hard break when a line ending follows.
///
/// ```markdown
/// > | a\
///       ^
///   | b
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'\n') {
        tokenizer.exit(Name::HardBreakEscape);
        State::Ok
    } else {
        State::Nok
    }
}
// Repo: wooorm/markdown-rs — CommonMark compliant markdown parser in Rust with ASTs and extensions.
// File: src/construct/heading_atx.rs
//! Heading (atx) occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! Heading (atx) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! heading_atx ::= 1*6'#' [ 1*space_or_tab line [ 1*space_or_tab 1*'#' ] ] *space_or_tab
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! `CommonMark` introduced the requirement on whitespace existing after the
//! opening sequence and before text.
//! In older markdown versions, this was not required, and headings would form
//! without it.
//!
//! In markdown, it is also possible to create headings with a
//! [heading (setext)][heading_setext] construct.
//! The benefit of setext headings is that their text can include line endings,
//! and by extensions also hard breaks (e.g., with
//! [hard break (escape)][hard_break_escape]).
//! However, their limit is that they cannot form `<h3>` through `<h6>`
//! headings.
//!
//! > 🏛 **Background**: the word *setext* originates from a small markup
//! > language by Ian Feldman from 1991.
//! > See [*§ Setext* on Wikipedia][wiki_setext] for more info.
//! > The word *atx* originates from a tiny markup language by Aaron Swartz
//! > from 2002.
//! > See [*§ atx, the true structured text format* on `aaronsw.com`][atx] for
//! > more info.
//!
//! ## HTML
//!
//! Headings in markdown relate to the `<h1>` through `<h6>` elements in HTML.
//! See [*§ 4.3.6 The `h1`, `h2`, `h3`, `h4`, `h5`, and `h6` elements* in the
//! HTML spec][html] for more info.
//!
//! ## Recommendation
//!
//! Always use heading (atx), never heading (setext).
//!
//! ## Tokens
//!
//! * [`HeadingAtx`][Name::HeadingAtx]
//! * [`HeadingAtxSequence`][Name::HeadingAtxSequence]
//! * [`HeadingAtxText`][Name::HeadingAtxText]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`heading-atx.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/heading-atx.js)
//! * [*§ 4.2 ATX headings* in `CommonMark`](https://spec.commonmark.org/0.31/#atx-headings)
//!
//! [flow]: crate::construct::flow
//! [heading_setext]: crate::construct::heading_setext
//! [hard_break_escape]: crate::construct::hard_break_escape
//! [html]: https://html.spec.whatwg.org/multipage/sections.html#the-h1,-h2,-h3,-h4,-h5,-and-h6-elements
//! [wiki_setext]: https://en.wikipedia.org/wiki/Setext
//! [atx]: http://www.aaronsw.com/2002/atx/
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::event::{Content, Event, Kind, Link, Name};
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
use crate::util::constant::{HEADING_ATX_OPENING_FENCE_SIZE_MAX, TAB_SIZE};
use alloc::vec;
/// Start of a heading (atx).
///
/// ```markdown
/// > | ## aa
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if !tokenizer.parse_state.options.constructs.heading_atx {
        return State::Nok;
    }

    tokenizer.enter(Name::HeadingAtx);

    // No leading indent: go straight to the opening sequence.
    if !matches!(tokenizer.current, Some(b'\t' | b' ')) {
        return State::Retry(StateName::HeadingAtxBefore);
    }

    // Allow up to `TAB_SIZE - 1` spaces of indent, or any amount when
    // indented code is turned off.
    let max = if tokenizer.parse_state.options.constructs.code_indented {
        TAB_SIZE - 1
    } else {
        usize::MAX
    };
    tokenizer.attempt(State::Next(StateName::HeadingAtxBefore), State::Nok);
    State::Retry(space_or_tab_min_max(tokenizer, 0, max))
}
/// After optional whitespace, at `#`.
///
/// ```markdown
/// > | ## aa
///     ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'#') => {
            tokenizer.enter(Name::HeadingAtxSequence);
            State::Retry(StateName::HeadingAtxSequenceOpen)
        }
        _ => State::Nok,
    }
}
/// In opening sequence.
///
/// ```markdown
/// > | ## aa
///     ^
/// ```
pub fn sequence_open(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Another `#`, as long as the opening fence stays within the maximum.
        Some(b'#') if tokenizer.tokenize_state.size < HEADING_ATX_OPENING_FENCE_SIZE_MAX => {
            tokenizer.tokenize_state.size += 1;
            tokenizer.consume();
            State::Next(StateName::HeadingAtxSequenceOpen)
        }
        // Whitespace, eol, or eof: the sequence is done.
        // Always at least one `#`.
        None | Some(b'\t' | b'\n' | b' ') => {
            tokenizer.tokenize_state.size = 0;
            tokenizer.exit(Name::HeadingAtxSequence);
            State::Retry(StateName::HeadingAtxAtBreak)
        }
        // Anything else (including a `#` past the maximum): not a heading.
        _ => {
            tokenizer.tokenize_state.size = 0;
            State::Nok
        }
    }
}
/// After something, before something else.
///
/// ```markdown
/// > | ## aa
///       ^
/// ```
pub fn at_break(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // End of line or file: the heading is done.
        tokenizer.exit(Name::HeadingAtx);
        tokenizer.register_resolver(ResolveName::HeadingAtx);
        // Feel free to interrupt.
        tokenizer.interrupt = false;
        State::Ok
    } else if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        // Whitespace between pieces of the heading.
        tokenizer.attempt(State::Next(StateName::HeadingAtxAtBreak), State::Nok);
        State::Retry(space_or_tab(tokenizer))
    } else if tokenizer.current == Some(b'#') {
        // A further (possibly closing) sequence.
        tokenizer.enter(Name::HeadingAtxSequence);
        State::Retry(StateName::HeadingAtxSequenceFurther)
    } else {
        // Text, tokenized further as `Text` content.
        tokenizer.enter_link(
            Name::Data,
            Link {
                previous: None,
                next: None,
                content: Content::Text,
            },
        );
        State::Retry(StateName::HeadingAtxData)
    }
}
/// In further sequence (after whitespace).
///
/// Could be normal “visible” hashes in the heading or a final sequence.
///
/// ```markdown
/// > | ## aa ##
///           ^
/// ```
pub fn sequence_further(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'#') => {
            tokenizer.consume();
            State::Next(StateName::HeadingAtxSequenceFurther)
        }
        _ => {
            tokenizer.exit(Name::HeadingAtxSequence);
            State::Retry(StateName::HeadingAtxAtBreak)
        }
    }
}
/// In text.
///
/// ```markdown
/// > | ## aa
///        ^
/// ```
pub fn data(tokenizer: &mut Tokenizer) -> State {
    // Note: `#` for closing sequence must be preceded by whitespace,
    // otherwise it’s just text.
    if matches!(tokenizer.current, None | Some(b'\t' | b'\n' | b' ')) {
        tokenizer.exit(Name::Data);
        State::Retry(StateName::HeadingAtxAtBreak)
    } else {
        tokenizer.consume();
        State::Next(StateName::HeadingAtxData)
    }
}
/// Resolve heading (atx).
///
/// Walks all events once and, for each heading, wraps the span of `Data`
/// events between the opening and closing sequences in a single
/// `HeadingAtxText` group, removing the events in between (the whitespace
/// and `#` sequences inside the text span).
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    let mut index = 0;
    // Whether we are currently between `HeadingAtx` enter/exit.
    let mut heading_inside = false;
    // Index of the first `Data` enter inside the current heading, if any.
    let mut data_start: Option<usize> = None;
    // Index of the last `Data` exit inside the current heading, if any.
    let mut data_end: Option<usize> = None;
    while index < tokenizer.events.len() {
        let event = &tokenizer.events[index];
        if event.name == Name::HeadingAtx {
            if event.kind == Kind::Enter {
                heading_inside = true;
            } else {
                // Exit of a heading: wrap the collected data span, if any.
                if let Some(start) = data_start {
                    // If `start` is some, `end` is too.
                    let end = data_end.unwrap();
                    // Inject `HeadingAtxText:Enter` before the first data.
                    tokenizer.map.add(
                        start,
                        0,
                        vec![Event {
                            kind: Kind::Enter,
                            name: Name::HeadingAtxText,
                            point: tokenizer.events[start].point.clone(),
                            link: None,
                        }],
                    );
                    // Remove everything between the start and the end.
                    tokenizer.map.add(start + 1, end - start - 1, vec![]);
                    // Inject `HeadingAtxText:Exit` after the last data.
                    tokenizer.map.add(
                        end + 1,
                        0,
                        vec![Event {
                            kind: Kind::Exit,
                            name: Name::HeadingAtxText,
                            point: tokenizer.events[end].point.clone(),
                            link: None,
                        }],
                    );
                }
                // Reset for the next heading.
                heading_inside = false;
                data_start = None;
                data_end = None;
            }
        } else if heading_inside && event.name == Name::Data {
            if event.kind == Kind::Enter {
                // Only the first data enter counts as the start.
                if data_start.is_none() {
                    data_start = Some(index);
                }
            } else {
                // The last data exit wins as the end.
                data_end = Some(index);
            }
        }
        index += 1;
    }
    // Apply all queued map edits in one pass.
    tokenizer.map.consume(&mut tokenizer.events);
    None
}
// Repo: wooorm/markdown-rs — CommonMark compliant markdown parser in Rust with ASTs and extensions.
// File: src/construct/heading_setext.rs
//! Heading (setext) occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! Heading (setext) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! heading_setext ::= paragraph eol *space_or_tab (1*'-' | 1*'=') *space_or_tab
//!
//! ; See the `paragraph` construct for the BNF of that part.
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! See [`paragraph`][paragraph] for grammar, notes, and recommendations on
//! that part.
//!
//! In markdown, it is also possible to create headings with a
//! [heading (atx)][heading_atx] construct.
//! The benefit of setext headings is that their text can include line endings,
//! and by extensions also hard breaks (e.g., with
//! [hard break (escape)][hard_break_escape]).
//! However, their limit is that they cannot form `<h3>` through `<h6>`
//! headings.
//!
//! [Thematic breaks][thematic_break] formed with dashes and without whitespace
//! could be interpreted as a heading (setext).
//! Which one forms depends on whether there is text directly in front of the
//! sequence.
//!
//! > 🏛 **Background**: the word *setext* originates from a small markup
//! > language by Ian Feldman from 1991.
//! > See [*§ Setext* on Wikipedia][wiki_setext] for more info.
//! > The word *atx* originates from a tiny markup language by Aaron Swartz
//! > from 2002.
//! > See [*§ atx, the true structured text format* on `aaronsw.com`][atx] for
//! > more info.
//!
//! ## HTML
//!
//! Heading (setext) in markdown relates to the `<h1>` and `<h2>` elements in
//! HTML.
//! See [*§ 4.3.6 The `h1`, `h2`, `h3`, `h4`, `h5`, and `h6` elements* in the
//! HTML spec][html] for more info.
//!
//! ## Recommendation
//!
//! Always use heading (atx), never heading (setext).
//!
//! ## Tokens
//!
//! * [`HeadingSetext`][Name::HeadingSetext]
//! * [`HeadingSetextText`][Name::HeadingSetextText]
//! * [`HeadingSetextUnderline`][Name::HeadingSetextUnderline]
//! * [`HeadingSetextUnderlineSequence`][Name::HeadingSetextUnderlineSequence]
//!
//! ## References
//!
//! * [`setext-underline.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/setext-underline.js)
//! * [*§ 4.3 Setext headings* in `CommonMark`](https://spec.commonmark.org/0.31/#setext-headings)
//!
//! [flow]: crate::construct::flow
//! [paragraph]: crate::construct::paragraph
//! [heading_atx]: crate::construct::heading_atx
//! [thematic_break]: crate::construct::thematic_break
//! [hard_break_escape]: crate::construct::hard_break_escape
//! [html]: https://html.spec.whatwg.org/multipage/sections.html#the-h1,-h2,-h3,-h4,-h5,-and-h6-elements
//! [wiki_setext]: https://en.wikipedia.org/wiki/Setext
//! [atx]: http://www.aaronsw.com/2002/atx/
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::event::{Content, Event, Kind, Link, Name};
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
use crate::util::{constant::TAB_SIZE, skip};
use alloc::vec;
/// At start of heading (setext) underline.
///
/// Only starts when the construct is enabled, we are not in lazy or pierced
/// continuation, and the previous non-whitespace event is something an
/// underline can attach to (content, or another underline).
///
/// ```markdown
///   | aa
/// > | ==
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.parse_state.options.constructs.heading_setext
        && !tokenizer.lazy
        && !tokenizer.pierce
        // Require a paragraph before.
        && (!tokenizer.events.is_empty()
            && matches!(tokenizer.events[skip::opt_back(
                &tokenizer.events,
                tokenizer.events.len() - 1,
                &[Name::LineEnding, Name::SpaceOrTab],
            )]
            .name, Name::Content | Name::HeadingSetextUnderline))
    {
        tokenizer.enter(Name::HeadingSetextUnderline);
        // Like other flow constructs, the underline may be indented:
        // up to `TAB_SIZE - 1` spaces, or unlimited when indented code is
        // turned off.
        if matches!(tokenizer.current, Some(b'\t' | b' ')) {
            tokenizer.attempt(State::Next(StateName::HeadingSetextBefore), State::Nok);
            State::Retry(space_or_tab_min_max(
                tokenizer,
                0,
                if tokenizer.parse_state.options.constructs.code_indented {
                    TAB_SIZE - 1
                } else {
                    usize::MAX
                },
            ))
        } else {
            State::Retry(StateName::HeadingSetextBefore)
        }
    } else {
        State::Nok
    }
}
/// After optional whitespace, at `-` or `=`.
///
/// ```markdown
///   | aa
/// > | ==
///     ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    if let Some(byte @ (b'-' | b'=')) = tokenizer.current {
        // Remember which marker opened the sequence; the rest must match it.
        tokenizer.tokenize_state.marker = byte;
        tokenizer.enter(Name::HeadingSetextUnderlineSequence);
        State::Retry(StateName::HeadingSetextInside)
    } else {
        State::Nok
    }
}
/// In sequence.
///
/// ```markdown
///   | aa
/// > | ==
///     ^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    // More of the same marker: keep consuming.
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        tokenizer.consume();
        return State::Next(StateName::HeadingSetextInside);
    }

    // Sequence done: reset the marker and close the sequence.
    tokenizer.tokenize_state.marker = 0;
    tokenizer.exit(Name::HeadingSetextUnderlineSequence);

    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        // Trailing whitespace after the sequence is allowed.
        tokenizer.attempt(State::Next(StateName::HeadingSetextAfter), State::Nok);
        State::Retry(space_or_tab(tokenizer))
    } else {
        State::Retry(StateName::HeadingSetextAfter)
    }
}
/// After sequence, after optional whitespace.
///
/// ```markdown
///   | aa
/// > | ==
///       ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // Feel free to interrupt.
        tokenizer.interrupt = false;
        tokenizer.register_resolver(ResolveName::HeadingSetext);
        tokenizer.exit(Name::HeadingSetextUnderline);
        State::Ok
    } else {
        State::Nok
    }
}
/// Resolve heading (setext).
///
/// For each `HeadingSetextUnderline`, either turn the preceding paragraph
/// plus underline into a `HeadingSetext`, or — when there is no paragraph
/// before it — turn the underline itself back into paragraph content.
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    let mut enter = skip::to(&tokenizer.events, 0, &[Name::HeadingSetextUnderline]);
    while enter < tokenizer.events.len() {
        // Matching exit for this underline.
        let exit = skip::to(
            &tokenizer.events,
            enter + 1,
            &[Name::HeadingSetextUnderline],
        );
        // Find paragraph before
        let paragraph_exit_before = skip::opt_back(
            &tokenizer.events,
            enter - 1,
            &[Name::SpaceOrTab, Name::LineEnding, Name::BlockQuotePrefix],
        );
        // There’s a paragraph before: this is a setext heading.
        if tokenizer.events[paragraph_exit_before].name == Name::Paragraph {
            let paragraph_enter = skip::to_back(
                &tokenizer.events,
                paragraph_exit_before - 1,
                &[Name::Paragraph],
            );
            // Change types of Enter:Paragraph, Exit:Paragraph.
            tokenizer.events[paragraph_enter].name = Name::HeadingSetextText;
            tokenizer.events[paragraph_exit_before].name = Name::HeadingSetextText;
            // Add Enter:HeadingSetext, Exit:HeadingSetext.
            let mut heading_enter = tokenizer.events[paragraph_enter].clone();
            heading_enter.name = Name::HeadingSetext;
            tokenizer.map.add(paragraph_enter, 0, vec![heading_enter]);
            let mut heading_exit = tokenizer.events[exit].clone();
            heading_exit.name = Name::HeadingSetext;
            tokenizer.map.add(exit + 1, 0, vec![heading_exit]);
        } else {
            // No paragraph before: the underline is actually plain content.
            // There’s a following paragraph, move this underline inside it.
            if exit + 3 < tokenizer.events.len()
                && tokenizer.events[exit + 1].name == Name::LineEnding
                && tokenizer.events[exit + 3].name == Name::Paragraph
            {
                // Swap type, HeadingSetextUnderline:Enter -> Paragraph:Enter.
                tokenizer.events[enter].name = Name::Paragraph;
                // Swap type, LineEnding -> Data.
                tokenizer.events[exit + 1].name = Name::Data;
                tokenizer.events[exit + 2].name = Name::Data;
                // Move new data (was line ending) back to include whole line,
                // and link data together.
                tokenizer.events[exit + 1].point = tokenizer.events[enter].point.clone();
                tokenizer.events[exit + 1].link = Some(Link {
                    previous: None,
                    next: Some(exit + 4),
                    content: Content::Text,
                });
                tokenizer.events[exit + 4].link.as_mut().unwrap().previous = Some(exit + 1);
                // Remove *including* HeadingSetextUnderline:Exit, until the line ending.
                tokenizer.map.add(enter + 1, exit - enter, vec![]);
                // Remove old Paragraph:Enter.
                tokenizer.map.add(exit + 3, 1, vec![]);
            } else {
                // No following paragraph: turn the underline into a
                // standalone paragraph with a single data event.
                // Swap type.
                tokenizer.events[enter].name = Name::Paragraph;
                tokenizer.events[exit].name = Name::Paragraph;
                // Replace what’s inside the underline (whitespace, sequence).
                tokenizer.map.add(
                    enter + 1,
                    exit - enter - 1,
                    vec![
                        Event {
                            name: Name::Data,
                            kind: Kind::Enter,
                            point: tokenizer.events[enter].point.clone(),
                            link: Some(Link {
                                previous: None,
                                next: None,
                                content: Content::Text,
                            }),
                        },
                        Event {
                            name: Name::Data,
                            kind: Kind::Exit,
                            point: tokenizer.events[exit].point.clone(),
                            link: None,
                        },
                    ],
                );
            }
        }
        // On to the next underline, if any.
        enter = skip::to(&tokenizer.events, exit + 1, &[Name::HeadingSetextUnderline]);
    }
    // Apply all queued map edits in one pass.
    tokenizer.map.consume(&mut tokenizer.events);
    None
}
// Repo: wooorm/markdown-rs — CommonMark compliant markdown parser in Rust with ASTs and extensions.
// File: src/construct/html_flow.rs
//! HTML (flow) occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! HTML (flow) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! html_flow ::= raw | comment | instruction | declaration | cdata | basic | complete
//!
//! ; Note: closing tag name does not need to match opening tag name.
//! raw ::= '<' raw_tag_name [[space_or_tab *line | '>' *line] eol] *(*line eol) ['</' raw_tag_name *line]
//! comment ::= '<!--' [*'-' '>' *line | *line *(eol *line) ['-->' *line]]
//! instruction ::= '<?' ['>' *line | *line *(eol *line) ['?>' *line]]
//! declaration ::= '<!' ascii_alphabetic *line *(eol *line) ['>' *line]
//! cdata ::= '<![CDATA[' *line *(eol *line) [']]>' *line]
//! basic ::= '< ['/'] basic_tag_name [['/'] '>' *line *(eol 1*line)]
//! complete ::= (opening_tag | closing_tag) [*space_or_tab *(eol 1*line)]
//!
//! raw_tag_name ::= 'pre' | 'script' | 'style' | 'textarea' ; Note: case-insensitive.
//! basic_tag_name ::= 'address' | 'article' | 'aside' | ... ; See `constants.rs`, and note: case-insensitive.
//! opening_tag ::= '<' tag_name *(1*space_or_tab attribute) [*space_or_tab '/'] *space_or_tab '>'
//! closing_tag ::= '</' tag_name *space_or_tab '>'
//! tag_name ::= ascii_alphabetic *('-' | ascii_alphanumeric)
//! attribute ::= attribute_name [*space_or_tab '=' *space_or_tab attribute_value]
//! attribute_name ::= (':' | '_' | ascii_alphabetic) *('-' | '.' | ':' | '_' | ascii_alphanumeric)
//! attribute_value ::= '"' *(line - '"') '"' | "'" *(line - "'") "'" | 1*(text - '"' - "'" - '/' - '<' - '=' - '>' - '`')
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! The grammar for HTML in markdown does not follow the rules of parsing
//! HTML according to the [*§ 13.2 Parsing HTML documents* in the HTML
//! spec][html_parsing].
//! As such, HTML in markdown *resembles* HTML, but is instead a (naïve?)
//! attempt to parse an XML-like language.
//! By extension, another notable property of the grammar is that it can
//! result in invalid HTML, in that it allows things that wouldn’t work or
//! wouldn’t work well in HTML, such as mismatched tags.
//!
//! Interestingly, most of the productions above have a clear opening and
//! closing condition (raw, comment, instruction, declaration, cdata), but the
//! closing condition does not need to be satisfied.
//! In this case, the parser never has to backtrack.
//!
//! Because the **basic** and **complete** productions in the grammar form with
//! a tag, followed by more stuff, and stop at a blank line, it is possible to
//! interleave (a word for switching between languages) markdown and HTML
//! together, by placing the opening and closing tags on their own lines,
//! with blank lines between them and markdown.
//! For example:
//!
//! ```markdown
//! <div>This is <code>code</code> but this is not *emphasis*.</div>
//!
//! <div>
//!
//! This is a paragraph in a `div` and with `code` and *emphasis*.
//!
//! </div>
//! ```
//!
//! The **complete** production of HTML (flow) is not allowed to interrupt
//! content.
//! That means that a blank line is needed between a [paragraph][] and it.
//! However, [HTML (text)][html_text] has a similar production, which will
//! typically kick-in instead.
//!
//! The list of tag names allowed in the **raw** production are defined in
//! [`HTML_RAW_NAMES`][].
//! This production exists because there are a few cases where markdown
//! *inside* some elements, and hence interleaving, does not make sense.
//!
//! The list of tag names allowed in the **basic** production are defined in
//! [`HTML_BLOCK_NAMES`][].
//! This production exists because there are a few cases where we can decide
//! early that something is going to be a flow (block) element instead of a
//! phrasing (inline) element.
//! We *can* interrupt and don’t have to care too much about it being
//! well-formed.
//!
//! ## Tokens
//!
//! * [`HtmlFlow`][Name::HtmlFlow]
//! * [`HtmlFlowData`][Name::HtmlFlowData]
//! * [`LineEnding`][Name::LineEnding]
//!
//! ## References
//!
//! * [`html-flow.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/html-flow.js)
//! * [*§ 4.6 HTML blocks* in `CommonMark`](https://spec.commonmark.org/0.31/#html-blocks)
//!
//! [flow]: crate::construct::flow
//! [html_text]: crate::construct::html_text
//! [paragraph]: crate::construct::paragraph
//! [html_raw_names]: crate::util::constant::HTML_RAW_NAMES
//! [html_block_names]: crate::util::constant::HTML_BLOCK_NAMES
//! [html_parsing]: https://html.spec.whatwg.org/multipage/parsing.html#parsing
use crate::construct::partial_space_or_tab::{
space_or_tab_with_options, Options as SpaceOrTabOptions,
};
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{
constant::{HTML_BLOCK_NAMES, HTML_CDATA_PREFIX, HTML_RAW_NAMES, HTML_RAW_SIZE_MAX, TAB_SIZE},
slice::Slice,
};
// Marker values stored in `tokenize_state.marker` to remember which of the
// seven HTML (flow) conditions (per the CommonMark spec) is being parsed.
/// Symbol for `<script>` (condition 1).
const RAW: u8 = 1;
/// Symbol for `<!---->` (condition 2).
const COMMENT: u8 = 2;
/// Symbol for `<?php?>` (condition 3).
const INSTRUCTION: u8 = 3;
/// Symbol for `<!doctype>` (condition 4).
const DECLARATION: u8 = 4;
/// Symbol for `<![CDATA[]]>` (condition 5).
const CDATA: u8 = 5;
/// Symbol for `<div` (condition 6).
const BASIC: u8 = 6;
/// Symbol for `<x>` (condition 7).
const COMPLETE: u8 = 7;
/// Start of HTML (flow).
///
/// ```markdown
/// > | <x />
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if !tokenizer.parse_state.options.constructs.html_flow {
        return State::Nok;
    }

    tokenizer.enter(Name::HtmlFlow);

    // No leading indent: go straight to the `<`.
    if !matches!(tokenizer.current, Some(b'\t' | b' ')) {
        return State::Retry(StateName::HtmlFlowBefore);
    }

    // Eat the indent as `HtmlFlowData`: up to `TAB_SIZE - 1` spaces, or
    // unlimited when indented code is turned off.
    tokenizer.attempt(State::Next(StateName::HtmlFlowBefore), State::Nok);
    State::Retry(space_or_tab_with_options(
        tokenizer,
        SpaceOrTabOptions {
            kind: Name::HtmlFlowData,
            min: 0,
            max: if tokenizer.parse_state.options.constructs.code_indented {
                TAB_SIZE - 1
            } else {
                usize::MAX
            },
            connect: false,
            content: None,
        },
    ))
}
/// At `<`, after optional whitespace.
///
/// ```markdown
/// > | <x />
///     ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'<') => {
            tokenizer.enter(Name::HtmlFlowData);
            tokenizer.consume();
            State::Next(StateName::HtmlFlowOpen)
        }
        _ => State::Nok,
    }
}
/// After `<`, at tag name or other stuff.
///
/// ```markdown
/// > | <x />
///      ^
/// > | <!doctype>
///      ^
/// > | <!--xxx-->
///      ^
/// ```
pub fn open(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'!') => {
            // `<!`: a declaration, comment, or CDATA.
            tokenizer.consume();
            State::Next(StateName::HtmlFlowDeclarationOpen)
        }
        Some(b'/') => {
            // `</`: a closing tag.
            tokenizer.consume();
            // `seen` marks this as a closing tag; `tag_name` reads it later.
            tokenizer.tokenize_state.seen = true;
            // Record where the tag name starts (captured after consuming
            // the `/`).
            tokenizer.tokenize_state.start = tokenizer.point.index;
            State::Next(StateName::HtmlFlowTagCloseStart)
        }
        Some(b'?') => {
            // `<?`: a processing instruction (condition 3).
            tokenizer.consume();
            tokenizer.tokenize_state.marker = INSTRUCTION;
            // Do not form containers.
            tokenizer.concrete = true;
            // While we’re in an instruction instead of a declaration, we’re on a `?`
            // right now, so we do need to search for `>`, similar to declarations.
            State::Next(StateName::HtmlFlowContinuationDeclarationInside)
        }
        // ASCII alphabetical.
        Some(b'A'..=b'Z' | b'a'..=b'z') => {
            // An opening tag name starts here; not consumed yet, `tag_name`
            // retries at this byte.
            tokenizer.tokenize_state.start = tokenizer.point.index;
            State::Retry(StateName::HtmlFlowTagName)
        }
        _ => State::Nok,
    }
}
/// After `<!`, at declaration, comment, or CDATA.
///
/// ```markdown
/// > | <!doctype>
///       ^
/// > | <!--xxx-->
///       ^
/// > | <![CDATA[>&<]]>
///       ^
/// ```
pub fn declaration_open(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'-') {
        // `<!-`: possibly a comment (condition 2); one more `-` must follow.
        tokenizer.tokenize_state.marker = COMMENT;
        tokenizer.consume();
        State::Next(StateName::HtmlFlowCommentOpenInside)
    } else if matches!(tokenizer.current, Some(b'A'..=b'Z' | b'a'..=b'z')) {
        // `<!a`: a declaration (condition 4).
        tokenizer.tokenize_state.marker = DECLARATION;
        tokenizer.consume();
        // Do not form containers.
        tokenizer.concrete = true;
        State::Next(StateName::HtmlFlowContinuationDeclarationInside)
    } else if tokenizer.current == Some(b'[') {
        // `<![`: possibly CDATA (condition 5); `CDATA[` must follow.
        tokenizer.tokenize_state.marker = CDATA;
        tokenizer.consume();
        State::Next(StateName::HtmlFlowCdataOpenInside)
    } else {
        State::Nok
    }
}
/// After `<!-`, inside a comment, at another `-`.
///
/// ```markdown
/// > | <!--xxx-->
///        ^
/// ```
pub fn comment_open_inside(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'-') => {
            tokenizer.consume();
            // Do not form containers.
            tokenizer.concrete = true;
            State::Next(StateName::HtmlFlowContinuationDeclarationInside)
        }
        _ => {
            // Not a comment after all: reset the marker.
            tokenizer.tokenize_state.marker = 0;
            State::Nok
        }
    }
}
/// After `<![`, inside CDATA, expecting `CDATA[`.
///
/// ```markdown
/// > | <![CDATA[>&<]]>
///        ^^^^^^
/// ```
pub fn cdata_open_inside(tokenizer: &mut Tokenizer) -> State {
    // `size` tracks how much of the `CDATA[` prefix was matched so far.
    let expected = HTML_CDATA_PREFIX[tokenizer.tokenize_state.size];
    if tokenizer.current != Some(expected) {
        // Mismatch: not CDATA; reset state.
        tokenizer.tokenize_state.marker = 0;
        tokenizer.tokenize_state.size = 0;
        return State::Nok;
    }

    tokenizer.consume();
    tokenizer.tokenize_state.size += 1;

    if tokenizer.tokenize_state.size == HTML_CDATA_PREFIX.len() {
        // Whole prefix matched: this is CDATA.
        tokenizer.tokenize_state.size = 0;
        // Do not form containers.
        tokenizer.concrete = true;
        State::Next(StateName::HtmlFlowContinuation)
    } else {
        State::Next(StateName::HtmlFlowCdataOpenInside)
    }
}
/// After `</`, in closing tag, at tag name.
///
/// ```markdown
/// > | </x>
///       ^
/// ```
pub fn tag_close_start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Tag names must start with an ASCII letter.
        Some(b'A'..=b'Z' | b'a'..=b'z') => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowTagName)
        }
        _ => {
            // Not a tag: reset the closing-tag bookkeeping.
            tokenizer.tokenize_state.seen = false;
            tokenizer.tokenize_state.start = 0;
            State::Nok
        }
    }
}
/// In tag name.
///
/// At the end of the name, decides which HTML (flow) kind this is:
/// raw (condition 1), basic (condition 6), or complete (condition 7),
/// based on the (case-insensitive) tag name collected so far.
///
/// ```markdown
/// > | <ab>
///      ^^
/// > | </ab>
///       ^^
/// ```
pub fn tag_name(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        None | Some(b'\t' | b'\n' | b' ' | b'/' | b'>') => {
            // End of the name: classify it.
            let closing_tag = tokenizer.tokenize_state.seen;
            let slash = matches!(tokenizer.current, Some(b'/'));
            // Guaranteed to be valid ASCII bytes.
            let slice = Slice::from_indices(
                tokenizer.parse_state.bytes,
                tokenizer.tokenize_state.start,
                tokenizer.point.index,
            );
            let name = slice
                .as_str()
                // The line ending case might result in a `\r` that is already accounted for.
                .trim()
                .to_ascii_lowercase();
            tokenizer.tokenize_state.seen = false;
            tokenizer.tokenize_state.start = 0;
            if !slash && !closing_tag && HTML_RAW_NAMES.contains(&name.as_str()) {
                // Raw (condition 1): `<script>` etc.; only as opening tag
                // without a slash.
                tokenizer.tokenize_state.marker = RAW;
                // Do not form containers.
                tokenizer.concrete = true;
                State::Retry(StateName::HtmlFlowContinuation)
            } else if HTML_BLOCK_NAMES.contains(&name.as_str()) {
                // Basic (condition 6): known block-level names.
                tokenizer.tokenize_state.marker = BASIC;
                if slash {
                    tokenizer.consume();
                    State::Next(StateName::HtmlFlowBasicSelfClosing)
                } else {
                    // Do not form containers.
                    tokenizer.concrete = true;
                    State::Retry(StateName::HtmlFlowContinuation)
                }
            } else {
                // Complete (condition 7): any other tag; attributes follow.
                tokenizer.tokenize_state.marker = COMPLETE;
                // Do not support complete HTML when interrupting.
                if tokenizer.interrupt && !tokenizer.lazy {
                    tokenizer.tokenize_state.marker = 0;
                    State::Nok
                } else if closing_tag {
                    State::Retry(StateName::HtmlFlowCompleteClosingTagAfter)
                } else {
                    State::Retry(StateName::HtmlFlowCompleteAttributeNameBefore)
                }
            }
        }
        // ASCII alphanumerical and `-`.
        Some(b'-' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z') => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowTagName)
        }
        Some(_) => {
            // Invalid byte in a tag name.
            tokenizer.tokenize_state.seen = false;
            State::Nok
        }
    }
}
/// After closing slash of a basic tag name.
///
/// ```markdown
/// > | <div/>
///          ^
/// ```
pub fn basic_self_closing(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'>') => {
            tokenizer.consume();
            // Do not form containers.
            tokenizer.concrete = true;
            State::Next(StateName::HtmlFlowContinuation)
        }
        _ => {
            // A `/` in a basic tag must be followed by `>`.
            tokenizer.tokenize_state.marker = 0;
            State::Nok
        }
    }
}
/// After closing slash of a complete tag name.
///
/// ```markdown
/// > | <x/>
///        ^
/// ```
pub fn complete_closing_tag_after(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        // Trailing whitespace is fine.
        tokenizer.consume();
        State::Next(StateName::HtmlFlowCompleteClosingTagAfter)
    } else {
        State::Retry(StateName::HtmlFlowCompleteEnd)
    }
}
/// At an attribute name.
///
/// Used right after a complete tag name plus whitespace, where optional
/// attributes or the end of the tag are expected, and again after each
/// attribute for further optional attributes.
///
/// ```markdown
/// > | <a />
///        ^
/// > | <a :b>
///        ^
/// > | <a _b>
///        ^
/// > | <a b>
///        ^
/// > | <a >
///        ^
/// ```
pub fn complete_attribute_name_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // ASCII alphanumerical and `:` and `_` start an attribute name.
        Some(b'0'..=b'9' | b':' | b'A'..=b'Z' | b'_' | b'a'..=b'z') => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowCompleteAttributeName)
        }
        // Self-closing slash: only `>` may follow.
        Some(b'/') => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowCompleteEnd)
        }
        // Whitespace between attributes.
        Some(b'\t' | b' ') => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowCompleteAttributeNameBefore)
        }
        // Anything else must be the end of the tag.
        _ => State::Retry(StateName::HtmlFlowCompleteEnd),
    }
}
/// In an attribute name.
///
/// ```markdown
/// > | <a :b>
///         ^
/// > | <a _b>
///         ^
/// > | <a b>
///         ^
/// ```
pub fn complete_attribute_name(tokenizer: &mut Tokenizer) -> State {
    // ASCII alphanumerical and `-`, `.`, `:`, and `_` continue the name.
    if matches!(
        tokenizer.current,
        Some(b'-' | b'.' | b'0'..=b'9' | b':' | b'A'..=b'Z' | b'_' | b'a'..=b'z')
    ) {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowCompleteAttributeName)
    } else {
        State::Retry(StateName::HtmlFlowCompleteAttributeNameAfter)
    }
}
/// After an attribute name: an optional initializer, the end of the tag, or
/// whitespace may follow.
///
/// ```markdown
/// > | <a b>
///         ^
/// > | <a b=c>
///         ^
/// ```
pub fn complete_attribute_name_after(tokenizer: &mut Tokenizer) -> State {
    if let Some(b'=') = tokenizer.current {
        // Initializer: a value follows.
        tokenizer.consume();
        State::Next(StateName::HtmlFlowCompleteAttributeValueBefore)
    } else if let Some(b'\t' | b' ') = tokenizer.current {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowCompleteAttributeNameAfter)
    } else {
        // No value: look for another attribute or the end of the tag.
        State::Retry(StateName::HtmlFlowCompleteAttributeNameBefore)
    }
}
/// Before an unquoted, double quoted, or single quoted attribute value;
/// whitespace is allowed.
///
/// ```markdown
/// > | <a b=c>
///          ^
/// > | <a b="c">
///          ^
/// ```
pub fn complete_attribute_value_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Quote: remember which one so the value can be closed with it.
        Some(byte @ (b'"' | b'\'')) => {
            tokenizer.tokenize_state.marker_b = byte;
            tokenizer.consume();
            State::Next(StateName::HtmlFlowCompleteAttributeValueQuoted)
        }
        Some(b'\t' | b' ') => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowCompleteAttributeValueBefore)
        }
        // EOF or a byte that may not start an unquoted value.
        None | Some(b'<' | b'=' | b'>' | b'`') => {
            tokenizer.tokenize_state.marker = 0;
            State::Nok
        }
        _ => State::Retry(StateName::HtmlFlowCompleteAttributeValueUnquoted),
    }
}
/// In a double or single quoted attribute value.
///
/// ```markdown
/// > | <a b="c">
///           ^
/// > | <a b='c'>
///           ^
/// ```
pub fn complete_attribute_value_quoted(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // The matching quote (kept in `marker_b`) closes the value.
        Some(byte) if byte == tokenizer.tokenize_state.marker_b => {
            tokenizer.consume();
            tokenizer.tokenize_state.marker_b = 0;
            State::Next(StateName::HtmlFlowCompleteAttributeValueQuotedAfter)
        }
        // Quoted values may not span lines or hit EOF.
        None | Some(b'\n') => {
            tokenizer.tokenize_state.marker = 0;
            tokenizer.tokenize_state.marker_b = 0;
            State::Nok
        }
        _ => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowCompleteAttributeValueQuoted)
        }
    }
}
/// In an unquoted attribute value.
///
/// ```markdown
/// > | <a b=c>
///          ^
/// ```
pub fn complete_attribute_value_unquoted(tokenizer: &mut Tokenizer) -> State {
    // Whitespace, EOF, or a byte not allowed in unquoted values ends it.
    if matches!(
        tokenizer.current,
        None | Some(b'\t' | b'\n' | b' ' | b'"' | b'\'' | b'/' | b'<' | b'=' | b'>' | b'`')
    ) {
        State::Retry(StateName::HtmlFlowCompleteAttributeNameAfter)
    } else {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowCompleteAttributeValueUnquoted)
    }
}
/// After a double or single quoted attribute value, before whitespace or the
/// end of the tag.
///
/// ```markdown
/// > | <a b="c">
///            ^
/// ```
pub fn complete_attribute_value_quoted_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Only whitespace, a self-closing slash, or `>` may follow a quote.
        Some(b'\t' | b' ' | b'/' | b'>') => {
            State::Retry(StateName::HtmlFlowCompleteAttributeNameBefore)
        }
        _ => {
            tokenizer.tokenize_state.marker = 0;
            State::Nok
        }
    }
}
/// In certain circumstances of a complete tag, where only `>` is allowed.
///
/// ```markdown
/// > | <a b="c">
///             ^
/// ```
pub fn complete_end(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'>') => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowCompleteAfter)
        }
        _ => {
            tokenizer.tokenize_state.marker = 0;
            State::Nok
        }
    }
}
/// After `>` in a complete tag.
///
/// ```markdown
/// > | <x>
///        ^
/// ```
pub fn complete_after(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // End of line: the tag is valid complete HTML.
        // Do not form containers.
        tokenizer.concrete = true;
        State::Retry(StateName::HtmlFlowContinuation)
    } else if let Some(b'\t' | b' ') = tokenizer.current {
        // Trailing whitespace is fine.
        tokenizer.consume();
        State::Next(StateName::HtmlFlowCompleteAfter)
    } else {
        // Anything else after the tag invalidates complete HTML.
        tokenizer.tokenize_state.marker = 0;
        State::Nok
    }
}
/// In continuation of any HTML kind.
///
/// Each kind has its own closing sequence; on the first byte of that
/// sequence, move to a state that checks the rest of it.
///
/// ```markdown
/// > | <!--xxx-->
///          ^
/// ```
pub fn continuation(tokenizer: &mut Tokenizer) -> State {
    // `-` may start the `-->` that closes a comment.
    if tokenizer.tokenize_state.marker == COMMENT && tokenizer.current == Some(b'-') {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuationCommentInside)
    // `<` may start an end tag (such as `</script>`) that closes raw HTML.
    } else if tokenizer.tokenize_state.marker == RAW && tokenizer.current == Some(b'<') {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuationRawTagOpen)
    // `>` closes a declaration directly.
    } else if tokenizer.tokenize_state.marker == DECLARATION && tokenizer.current == Some(b'>') {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuationClose)
    // `?` may start the `?>` that closes an instruction.
    } else if tokenizer.tokenize_state.marker == INSTRUCTION && tokenizer.current == Some(b'?') {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuationDeclarationInside)
    // `]` may start the `]]>` that closes CDATA.
    } else if tokenizer.tokenize_state.marker == CDATA && tokenizer.current == Some(b']') {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuationCdataInside)
    // Basic and complete HTML end at a blank line: check whether one follows.
    } else if matches!(tokenizer.tokenize_state.marker, BASIC | COMPLETE)
        && tokenizer.current == Some(b'\n')
    {
        tokenizer.exit(Name::HtmlFlowData);
        tokenizer.check(
            State::Next(StateName::HtmlFlowContinuationAfter),
            State::Next(StateName::HtmlFlowContinuationStart),
        );
        State::Retry(StateName::HtmlFlowBlankLineBefore)
    // Other kinds continue across line endings.
    } else if matches!(tokenizer.current, None | Some(b'\n')) {
        tokenizer.exit(Name::HtmlFlowData);
        State::Retry(StateName::HtmlFlowContinuationStart)
    // Everything else is data.
    } else {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuation)
    }
}
/// In continuation, at eol.
///
/// ```markdown
/// > | <x>
///        ^
///   | asd
/// ```
pub fn continuation_start(tokenizer: &mut Tokenizer) -> State {
    // Queue both outcomes, then retry at the non-lazy check: if the next
    // line is not lazy the HTML continues, otherwise it ends here.
    tokenizer.check(
        State::Next(StateName::HtmlFlowContinuationStartNonLazy),
        State::Next(StateName::HtmlFlowContinuationAfter),
    );
    State::Retry(StateName::NonLazyContinuationStart)
}
/// In continuation, at eol, before non-lazy content.
///
/// ```markdown
/// > | <x>
///        ^
///   | asd
/// ```
pub fn continuation_start_non_lazy(tokenizer: &mut Tokenizer) -> State {
    // This state is only entered after the non-lazy check passed at an eol.
    if let Some(b'\n') = tokenizer.current {
        tokenizer.enter(Name::LineEnding);
        tokenizer.consume();
        tokenizer.exit(Name::LineEnding);
        State::Next(StateName::HtmlFlowContinuationBefore)
    } else {
        unreachable!("expected eol")
    }
}
/// In continuation, before non-lazy content.
///
/// ```markdown
///   | <x>
/// > | asd
///     ^
/// ```
pub fn continuation_before(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // Empty line: loop back to decide whether the HTML continues.
        State::Retry(StateName::HtmlFlowContinuationStart)
    } else {
        // Open a fresh data token for this line.
        tokenizer.enter(Name::HtmlFlowData);
        State::Retry(StateName::HtmlFlowContinuation)
    }
}
/// In comment continuation, after one `-`, expecting another.
///
/// ```markdown
/// > | <!--xxx-->
///             ^
/// ```
pub fn continuation_comment_inside(tokenizer: &mut Tokenizer) -> State {
    if let Some(b'-') = tokenizer.current {
        // `--`: only `>` remains to close the comment.
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuationDeclarationInside)
    } else {
        State::Retry(StateName::HtmlFlowContinuation)
    }
}
/// In raw continuation, after `<`, at `/`.
///
/// ```markdown
/// > | <script>console.log(1)</script>
///                            ^
/// ```
pub fn continuation_raw_tag_open(tokenizer: &mut Tokenizer) -> State {
    if let Some(b'/') = tokenizer.current {
        tokenizer.consume();
        // Record where the tag name begins so it can be compared later.
        tokenizer.tokenize_state.start = tokenizer.point.index;
        State::Next(StateName::HtmlFlowContinuationRawEndTag)
    } else {
        State::Retry(StateName::HtmlFlowContinuation)
    }
}
/// In raw continuation, after `</`, in a raw tag name.
///
/// ```markdown
/// > | <script>console.log(1)</script>
///                             ^^^^^^
/// ```
pub fn continuation_raw_end_tag(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'>') => {
            // End of a closing tag: compare the collected name (from
            // `tokenize_state.start` to here) against the raw tag names.
            // Guaranteed to be valid ASCII bytes.
            let slice = Slice::from_indices(
                tokenizer.parse_state.bytes,
                tokenizer.tokenize_state.start,
                tokenizer.point.index,
            );
            let name = slice.as_str().to_ascii_lowercase();
            tokenizer.tokenize_state.start = 0;
            if HTML_RAW_NAMES.contains(&name.as_str()) {
                // E.g. `</script>`: this closes the raw HTML.
                tokenizer.consume();
                State::Next(StateName::HtmlFlowContinuationClose)
            } else {
                // Some other tag: plain continuation data.
                State::Retry(StateName::HtmlFlowContinuation)
            }
        }
        // ASCII alphabetic continues the name, bounded by the length of the
        // longest raw tag name so we never collect more than needed.
        Some(b'A'..=b'Z' | b'a'..=b'z')
            if tokenizer.point.index - tokenizer.tokenize_state.start < HTML_RAW_SIZE_MAX =>
        {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowContinuationRawEndTag)
        }
        _ => {
            // Not a raw closing tag after all.
            tokenizer.tokenize_state.start = 0;
            State::Retry(StateName::HtmlFlowContinuation)
        }
    }
}
/// In cdata continuation, after `]`, expecting `]>`.
///
/// ```markdown
/// > | <![CDATA[>&<]]>
///                  ^
/// ```
pub fn continuation_cdata_inside(tokenizer: &mut Tokenizer) -> State {
    if let Some(b']') = tokenizer.current {
        // `]]`: only `>` remains to close the CDATA section.
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuationDeclarationInside)
    } else {
        State::Retry(StateName::HtmlFlowContinuation)
    }
}
/// In declaration or instruction continuation, at `>`.
///
/// ```markdown
/// > | <!-->
///         ^
/// > | <?>
///       ^
/// > | <!q>
///        ^
/// > | <!--ab-->
///             ^
/// > | <![CDATA[>&<]]>
///                   ^
/// ```
pub fn continuation_declaration_inside(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // In comments, further `-`s are allowed before the closing `>`.
        Some(b'-') if tokenizer.tokenize_state.marker == COMMENT => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowContinuationDeclarationInside)
        }
        Some(b'>') => {
            tokenizer.consume();
            State::Next(StateName::HtmlFlowContinuationClose)
        }
        _ => State::Retry(StateName::HtmlFlowContinuation),
    }
}
/// In closed continuation: everything up to the eol/eof still belongs to it.
///
/// ```markdown
/// > | <!doctype>
///               ^
/// ```
pub fn continuation_close(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        tokenizer.exit(Name::HtmlFlowData);
        State::Retry(StateName::HtmlFlowContinuationAfter)
    } else {
        tokenizer.consume();
        State::Next(StateName::HtmlFlowContinuationClose)
    }
}
/// Done.
///
/// ```markdown
/// > | <!doctype>
///               ^
/// ```
pub fn continuation_after(tokenizer: &mut Tokenizer) -> State {
    tokenizer.exit(Name::HtmlFlow);
    // Reset: the HTML is done, so its content is no longer concrete and
    // interrupting is fine again.
    tokenizer.tokenize_state.marker = 0;
    tokenizer.concrete = false;
    tokenizer.interrupt = false;
    State::Ok
}
/// Before eol, expecting blank line.
///
/// ```markdown
/// > | <div>
///          ^
///   |
/// ```
pub fn blank_line_before(tokenizer: &mut Tokenizer) -> State {
    // Consume the line ending, then defer to the blank line construct to
    // decide whether the following line is blank.
    tokenizer.enter(Name::LineEnding);
    tokenizer.consume();
    tokenizer.exit(Name::LineEnding);
    State::Next(StateName::BlankLineStart)
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/html_text.rs | Rust | //! HTML (text) occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! HTML (text) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! html_text ::= comment | instruction | declaration | cdata | tag_close | tag_open
//!
//! ; Restriction: the text is not allowed to start with `>`, `->`, or to contain `--`.
//! comment ::= '<!--' *byte '-->'
//! instruction ::= '<?' *byte '?>'
//! declaration ::= '<!' ascii_alphabetic *byte '>'
//! ; Restriction: the text is not allowed to contain `]]`.
//! cdata ::= '<![CDATA[' *byte ']]>'
//! tag_close ::= '</' tag_name [space_or_tab_eol] '>'
//! opening_tag ::= '<' tag_name *(space_or_tab_eol attribute) [[space_or_tab_eol] '/'] [space_or_tab_eol] '>'
//!
//! tag_name ::= ascii_alphabetic *( '-' | ascii_alphanumeric )
//! attribute ::= attribute_name [[space_or_tab_eol] '=' [space_or_tab_eol] attribute_value]
//! attribute_name ::= (':' | '_' | ascii_alphabetic) *('-' | '.' | ':' | '_' | ascii_alphanumeric)
//! attribute_value ::= '"' *(byte - '"') '"' | "'" *(byte - "'") "'" | 1*(text - '"' - "'" - '/' - '<' - '=' - '>' - '`')
//! ```
//!
//! The grammar for HTML in markdown does not follow the rules of parsing
//! HTML according to the [*§ 13.2 Parsing HTML documents* in the HTML
//! spec][html_parsing].
//! See the related flow construct [HTML (flow)][html_flow] for more info.
//!
//! Because the **tag open** and **tag close** productions in the grammar form
//! with just tags instead of complete elements, it is possible to interleave
//! (a word for switching between languages) markdown and HTML together.
//! For example:
//!
//! ```markdown
//! This is equivalent to <code>*emphasised* code</code>.
//! ```
//!
//! ## Tokens
//!
//! * [`HtmlText`][Name::HtmlText]
//! * [`HtmlTextData`][Name::HtmlTextData]
//!
//! ## References
//!
//! * [`html-text.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/html-text.js)
//! * [*§ 6.6 Raw HTML* in `CommonMark`](https://spec.commonmark.org/0.31/#raw-html)
//!
//! [text]: crate::construct::text
//! [html_flow]: crate::construct::html_flow
//! [html_parsing]: https://html.spec.whatwg.org/multipage/parsing.html#parsing
use crate::construct::partial_space_or_tab::space_or_tab;
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::constant::HTML_CDATA_PREFIX;
/// Start of HTML (text).
///
/// ```markdown
/// > | a <b> c
///       ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Only `<` starts HTML (text), and only when the construct is on.
        Some(b'<') if tokenizer.parse_state.options.constructs.html_text => {
            tokenizer.enter(Name::HtmlText);
            tokenizer.enter(Name::HtmlTextData);
            tokenizer.consume();
            State::Next(StateName::HtmlTextOpen)
        }
        _ => State::Nok,
    }
}
/// After `<`, at a tag name or other stuff.
///
/// ```markdown
/// > | a <b> c
///        ^
/// > | a <!doctype> c
///        ^
/// > | a <!--b--> c
///        ^
/// ```
pub fn open(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // ASCII alphabetical: opening tag.
        Some(b'A'..=b'Z' | b'a'..=b'z') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpen)
        }
        // Closing tag.
        Some(b'/') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagCloseStart)
        }
        // Declaration, comment, or CDATA.
        Some(b'!') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextDeclarationOpen)
        }
        // Instruction.
        Some(b'?') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextInstruction)
        }
        _ => State::Nok,
    }
}
/// After `<!`, at a declaration, comment, or CDATA.
///
/// ```markdown
/// > | a <!doctype> c
///         ^
/// > | a <!--b--> c
///         ^
/// > | a <![CDATA[>&<]]> c
///         ^
/// ```
pub fn declaration_open(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // ASCII alphabetical: declaration.
        Some(b'A'..=b'Z' | b'a'..=b'z') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextDeclaration)
        }
        // Comment.
        Some(b'-') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextCommentOpenInside)
        }
        // CDATA.
        Some(b'[') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextCdataOpenInside)
        }
        _ => State::Nok,
    }
}
/// In a comment, after `<!-`, at another `-`.
///
/// ```markdown
/// > | a <!--b--> c
///          ^
/// ```
pub fn comment_open_inside(tokenizer: &mut Tokenizer) -> State {
    if let Some(b'-') = tokenizer.current {
        tokenizer.consume();
        State::Next(StateName::HtmlTextCommentEnd)
    } else {
        State::Nok
    }
}
/// In a comment.
///
/// ```markdown
/// > | a <!--b--> c
///           ^
/// ```
pub fn comment(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // `-` may start the closing `-->`.
        Some(b'-') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextCommentClose)
        }
        // Comments in text may span lines.
        Some(b'\n') => {
            tokenizer.attempt(State::Next(StateName::HtmlTextComment), State::Nok);
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        None => State::Nok,
        Some(_) => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextComment)
        }
    }
}
/// In a comment, after `-`.
///
/// ```markdown
/// > | a <!--b--> c
///             ^
/// ```
pub fn comment_close(tokenizer: &mut Tokenizer) -> State {
    if let Some(b'-') = tokenizer.current {
        tokenizer.consume();
        State::Next(StateName::HtmlTextCommentEnd)
    } else {
        State::Retry(StateName::HtmlTextComment)
    }
}
/// In a comment, after `--`, at the closing `>` (or more).
///
/// ```markdown
/// > | a <!--b--> c
///              ^
/// ```
pub fn comment_end(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Further `-`s stay in the closing sequence.
        Some(b'-') => State::Retry(StateName::HtmlTextCommentClose),
        Some(b'>') => State::Retry(StateName::HtmlTextEnd),
        _ => State::Retry(StateName::HtmlTextComment),
    }
}
/// After `<![`, in CDATA, expecting `CDATA[`.
///
/// ```markdown
/// > | a <![CDATA[>&<]]> b
///          ^^^^^^
/// ```
pub fn cdata_open_inside(tokenizer: &mut Tokenizer) -> State {
    // Compare byte by byte against the expected `CDATA[` prefix;
    // `tokenize_state.size` tracks how far we got.
    if tokenizer.current != Some(HTML_CDATA_PREFIX[tokenizer.tokenize_state.size]) {
        return State::Nok;
    }
    tokenizer.tokenize_state.size += 1;
    tokenizer.consume();
    if tokenizer.tokenize_state.size < HTML_CDATA_PREFIX.len() {
        State::Next(StateName::HtmlTextCdataOpenInside)
    } else {
        // Whole prefix matched: reset the counter and enter the body.
        tokenizer.tokenize_state.size = 0;
        State::Next(StateName::HtmlTextCdata)
    }
}
/// In CDATA.
///
/// ```markdown
/// > | a <![CDATA[>&<]]> b
///                ^^^
/// ```
pub fn cdata(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // `]` may start the closing `]]>`.
        Some(b']') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextCdataClose)
        }
        // CDATA in text may span lines.
        Some(b'\n') => {
            tokenizer.attempt(State::Next(StateName::HtmlTextCdata), State::Nok);
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        None => State::Nok,
        Some(_) => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextCdata)
        }
    }
}
/// In CDATA, after `]`, at another `]`.
///
/// ```markdown
/// > | a <![CDATA[>&<]]> b
///                    ^
/// ```
pub fn cdata_close(tokenizer: &mut Tokenizer) -> State {
    if let Some(b']') = tokenizer.current {
        tokenizer.consume();
        State::Next(StateName::HtmlTextCdataEnd)
    } else {
        State::Retry(StateName::HtmlTextCdata)
    }
}
/// In CDATA, after `]]`, at `>`.
///
/// ```markdown
/// > | a <![CDATA[>&<]]> b
///                     ^
/// ```
pub fn cdata_end(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Further `]`s stay in the closing sequence.
        Some(b']') => State::Retry(StateName::HtmlTextCdataClose),
        Some(b'>') => State::Retry(StateName::HtmlTextEnd),
        _ => State::Retry(StateName::HtmlTextCdata),
    }
}
/// In a declaration.
///
/// ```markdown
/// > | a <!b> c
///          ^
/// ```
pub fn declaration(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Declarations in text may span lines.
        Some(b'\n') => {
            tokenizer.attempt(State::Next(StateName::HtmlTextDeclaration), State::Nok);
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        // `>` closes a declaration; eof ends one too (handled by `end`).
        None | Some(b'>') => State::Retry(StateName::HtmlTextEnd),
        _ => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextDeclaration)
        }
    }
}
/// In an instruction.
///
/// ```markdown
/// > | a <?b?> c
///         ^
/// ```
pub fn instruction(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // `?` may start the closing `?>`.
        Some(b'?') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextInstructionClose)
        }
        // Instructions in text may span lines.
        Some(b'\n') => {
            tokenizer.attempt(State::Next(StateName::HtmlTextInstruction), State::Nok);
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        None => State::Nok,
        Some(_) => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextInstruction)
        }
    }
}
/// In an instruction, after `?`, at `>`.
///
/// ```markdown
/// > | a <?b?> c
///           ^
/// ```
pub fn instruction_close(tokenizer: &mut Tokenizer) -> State {
    if let Some(b'>') = tokenizer.current {
        State::Retry(StateName::HtmlTextEnd)
    } else {
        State::Retry(StateName::HtmlTextInstruction)
    }
}
/// After `</`, in a closing tag, at the tag name.
///
/// ```markdown
/// > | a </b> c
///         ^
/// ```
pub fn tag_close_start(tokenizer: &mut Tokenizer) -> State {
    // Tag names must start with an ASCII alphabetic byte.
    if matches!(tokenizer.current, Some(b'A'..=b'Z' | b'a'..=b'z')) {
        tokenizer.consume();
        State::Next(StateName::HtmlTextTagClose)
    } else {
        State::Nok
    }
}
/// After `</x`, in a tag name.
///
/// ```markdown
/// > | a </b> c
///          ^
/// ```
pub fn tag_close(tokenizer: &mut Tokenizer) -> State {
    // ASCII alphanumerical and `-` continue the name.
    if matches!(
        tokenizer.current,
        Some(b'-' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
    ) {
        tokenizer.consume();
        State::Next(StateName::HtmlTextTagClose)
    } else {
        State::Retry(StateName::HtmlTextTagCloseBetween)
    }
}
/// In a closing tag, after the tag name.
///
/// ```markdown
/// > | a </b> c
///          ^
/// ```
pub fn tag_close_between(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'\t' | b' ') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagCloseBetween)
        }
        // Closing tags may span lines.
        Some(b'\n') => {
            tokenizer.attempt(State::Next(StateName::HtmlTextTagCloseBetween), State::Nok);
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        _ => State::Retry(StateName::HtmlTextEnd),
    }
}
/// After `<x`, in an opening tag name.
///
/// ```markdown
/// > | a <b> c
///         ^
/// ```
pub fn tag_open(tokenizer: &mut Tokenizer) -> State {
    // ASCII alphanumerical and `-` continue the name.
    if matches!(
        tokenizer.current,
        Some(b'-' | b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
    ) {
        tokenizer.consume();
        State::Next(StateName::HtmlTextTagOpen)
    } else if matches!(tokenizer.current, Some(b'\t' | b'\n' | b' ' | b'/' | b'>')) {
        // Whitespace, slash, or `>` end the name.
        State::Retry(StateName::HtmlTextTagOpenBetween)
    } else {
        State::Nok
    }
}
/// In an opening tag, after the tag name.
///
/// ```markdown
/// > | a <b> c
///         ^
/// ```
pub fn tag_open_between(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // ASCII alphabetical and `:` and `_` start an attribute name.
        Some(b':' | b'A'..=b'Z' | b'_' | b'a'..=b'z') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenAttributeName)
        }
        // Self-closing slash: only `>` may follow.
        Some(b'/') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextEnd)
        }
        Some(b'\t' | b' ') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenBetween)
        }
        // Opening tags may span lines.
        Some(b'\n') => {
            tokenizer.attempt(State::Next(StateName::HtmlTextTagOpenBetween), State::Nok);
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        _ => State::Retry(StateName::HtmlTextEnd),
    }
}
/// In an attribute name.
///
/// ```markdown
/// > | a <b c> d
///          ^
/// ```
pub fn tag_open_attribute_name(tokenizer: &mut Tokenizer) -> State {
    // ASCII alphanumerical and `-`, `.`, `:`, and `_` continue the name.
    if matches!(
        tokenizer.current,
        Some(b'-' | b'.' | b'0'..=b'9' | b':' | b'A'..=b'Z' | b'_' | b'a'..=b'z')
    ) {
        tokenizer.consume();
        State::Next(StateName::HtmlTextTagOpenAttributeName)
    } else {
        State::Retry(StateName::HtmlTextTagOpenAttributeNameAfter)
    }
}
/// After an attribute name: an optional initializer, the end of the tag, or
/// whitespace may follow.
///
/// ```markdown
/// > | a <b c> d
///           ^
/// ```
pub fn tag_open_attribute_name_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Initializer: a value follows.
        Some(b'=') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenAttributeValueBefore)
        }
        Some(b'\t' | b' ') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenAttributeNameAfter)
        }
        // Tags may span lines here.
        Some(b'\n') => {
            tokenizer.attempt(
                State::Next(StateName::HtmlTextTagOpenAttributeNameAfter),
                State::Nok,
            );
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        _ => State::Retry(StateName::HtmlTextTagOpenBetween),
    }
}
/// Before an unquoted, double quoted, or single quoted attribute value;
/// whitespace is allowed.
///
/// ```markdown
/// > | a <b c=d> e
///            ^
/// ```
pub fn tag_open_attribute_value_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Quote: remember which one so the value can be closed with it.
        Some(byte @ (b'"' | b'\'')) => {
            tokenizer.tokenize_state.marker = byte;
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenAttributeValueQuoted)
        }
        Some(b'\t' | b' ') => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenAttributeValueBefore)
        }
        // Tags may span lines here.
        Some(b'\n') => {
            tokenizer.attempt(
                State::Next(StateName::HtmlTextTagOpenAttributeValueBefore),
                State::Nok,
            );
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        // EOF or a byte that may not start an unquoted value.
        None | Some(b'<' | b'=' | b'>' | b'`') => State::Nok,
        Some(_) => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenAttributeValueUnquoted)
        }
    }
}
/// In a double or single quoted attribute value.
///
/// ```markdown
/// > | a <b c="d"> e
///             ^
/// ```
pub fn tag_open_attribute_value_quoted(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // The matching quote (kept in `marker`) closes the value.
        Some(byte) if byte == tokenizer.tokenize_state.marker => {
            tokenizer.tokenize_state.marker = 0;
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenAttributeValueQuotedAfter)
        }
        None => {
            tokenizer.tokenize_state.marker = 0;
            State::Nok
        }
        // Quoted values in text may span lines.
        Some(b'\n') => {
            tokenizer.attempt(
                State::Next(StateName::HtmlTextTagOpenAttributeValueQuoted),
                State::Nok,
            );
            State::Retry(StateName::HtmlTextLineEndingBefore)
        }
        _ => {
            tokenizer.consume();
            State::Next(StateName::HtmlTextTagOpenAttributeValueQuoted)
        }
    }
}
/// In an unquoted attribute value.
///
/// ```markdown
/// > | a <b c=d> e
///            ^
/// ```
pub fn tag_open_attribute_value_unquoted(tokenizer: &mut Tokenizer) -> State {
    if matches!(
        tokenizer.current,
        None | Some(b'"' | b'\'' | b'<' | b'=' | b'`')
    ) {
        // EOF or a byte never allowed in unquoted values.
        State::Nok
    } else if matches!(tokenizer.current, Some(b'\t' | b'\n' | b' ' | b'/' | b'>')) {
        // Whitespace, slash, or `>` end the value.
        State::Retry(StateName::HtmlTextTagOpenBetween)
    } else {
        tokenizer.consume();
        State::Next(StateName::HtmlTextTagOpenAttributeValueUnquoted)
    }
}
/// After a double or single quoted attribute value, before whitespace or the
/// end of the tag.
///
/// ```markdown
/// > | a <b c="d"> e
///               ^
/// ```
pub fn tag_open_attribute_value_quoted_after(tokenizer: &mut Tokenizer) -> State {
    // Only whitespace, a slash, or `>` may follow the closing quote.
    if matches!(tokenizer.current, Some(b'\t' | b'\n' | b' ' | b'/' | b'>')) {
        State::Retry(StateName::HtmlTextTagOpenBetween)
    } else {
        State::Nok
    }
}
/// In certain circumstances of a tag, where only `>` is allowed.
///
/// ```markdown
/// > | a <b c="d"> e
///               ^
/// ```
pub fn end(tokenizer: &mut Tokenizer) -> State {
    if let Some(b'>') = tokenizer.current {
        tokenizer.consume();
        tokenizer.exit(Name::HtmlTextData);
        tokenizer.exit(Name::HtmlText);
        State::Ok
    } else {
        State::Nok
    }
}
/// At eol.
///
/// > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
/// > empty tokens.
///
/// ```markdown
/// > | a <!--a
///            ^
///   | b-->
/// ```
pub fn line_ending_before(tokenizer: &mut Tokenizer) -> State {
    // Close the data, tokenize the line ending, and continue after it.
    if let Some(b'\n') = tokenizer.current {
        tokenizer.exit(Name::HtmlTextData);
        tokenizer.enter(Name::LineEnding);
        tokenizer.consume();
        tokenizer.exit(Name::LineEnding);
        State::Next(StateName::HtmlTextLineEndingAfter)
    } else {
        unreachable!("expected eol")
    }
}
/// After eol, at optional whitespace.
///
/// > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
/// > empty tokens.
///
/// ```markdown
///   | a <!--a
/// > | b-->
///     ^
/// ```
pub fn line_ending_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Skip an indentation prefix first.
        Some(b'\t' | b' ') => {
            tokenizer.attempt(
                State::Next(StateName::HtmlTextLineEndingAfterPrefix),
                State::Nok,
            );
            State::Retry(space_or_tab(tokenizer))
        }
        _ => State::Retry(StateName::HtmlTextLineEndingAfterPrefix),
    }
}
/// After eol, after optional whitespace.
///
/// > 👉 **Note**: we can’t have blank lines in text, so no need to worry about
/// > empty tokens.
///
/// ```markdown
///   | a <!--a
/// > | b-->
///     ^
/// ```
pub fn line_ending_after_prefix(tokenizer: &mut Tokenizer) -> State {
    // Re-open a data token for whatever follows on the new line.
    tokenizer.enter(Name::HtmlTextData);
    State::Ok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/label_end.rs | Rust | //! Label end occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! Label end forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! label_end ::= ']' [resource | reference_full | reference_collapsed]
//!
//! resource ::= '(' [space_or_tab_eol] destination [space_or_tab_eol title] [space_or_tab_eol] ')'
//! reference_full ::= '[' label ']'
//! reference_collapsed ::= '[' ']'
//!
//! ; See the `destination`, `title`, and `label` constructs for the BNF of
//! ; those parts.
//! ```
//!
//! See [`destination`][destination], [`label`][label], and [`title`][title]
//! for grammar, notes, and recommendations on each part.
//!
//! In the case of a resource, the destination and title are given directly
//! with the label end.
//! In the case of a reference, this information is provided by a matched
//! [definition][].
//! Full references (`[x][y]`) match to definitions through their explicit,
//! second, label (`y`).
//! Collapsed references (`[x][]`) and shortcut references (`[x]`) match by
//! interpreting the text provided between the first, implicit, label (`x`).
//! To match, the effective label of the reference must be equal to the label
//! of the definition after normalizing with
//! [`normalize_identifier`][].
//!
//! Importantly, while the label of a full reference *can* include [string][]
//! content, and in case of collapsed and shortcut references even [text][]
//! content, that content is not considered when matching.
//! To illustrate, neither label matches the definition:
//!
//! ```markdown
//! [a&b]: https://example.com
//!
//! [x][a&b], [a\&b][]
//! ```
//!
//! When the resource or reference matches, the destination forms the `href`
//! attribute in case of a [label start (link)][label_start_link], and an
//! `src` attribute in case of a [label start (image)][label_start_image].
//! The title is formed, optionally, on either `<a>` or `<img>`.
//! When matched with a [gfm label start (footnote)][gfm_label_start_footnote],
//! no reference or resource can follow the label end.
//!
//! For info on how to encode characters in URLs, see
//! [`destination`][destination].
//! For info on how characters are encoded as `href` on `<a>` or `src` on
//! `<img>` when compiling, see
//! [`sanitize_uri`][sanitize_uri].
//!
//! In case of a matched [gfm label start (footnote)][gfm_label_start_footnote],
//! a counter is injected.
//! In case of a matched [label start (link)][label_start_link], the interpreted
//! content between it and the label end, is placed between the opening and
//! closing tags.
//! In case of a matched [label start (image)][label_start_image], the text is
//! also interpreted, but used *without* the resulting tags:
//!
//! ```markdown
//! [a *b* c](#)
//!
//! 
//! ```
//!
//! Yields:
//!
//! ```html
//! <p><a href="#">a <em>b</em> c</a></p>
//! <p><img src="#" alt="a b c" /></p>
//! ```
//!
//! It is possible to use images in links.
//! It’s somewhat possible to have links in images (the text will be used, not
//! the HTML, see above).
//! But it’s not possible to use links (or footnotes, which result in links)
//! in links.
//! The “deepest” link (or footnote) wins.
//! To illustrate:
//!
//! ```markdown
//! a [b [c](#) d](#) e
//! ```
//!
//! Yields:
//!
//! ```html
//! <p>a [b <a href="#">c</a> d](#) e</p>
//! ```
//!
//! This limitation is imposed because links in links is invalid according to
//! HTML.
//! Technically though, in markdown it is still possible to construct them by
//! using an [autolink][] in a link.
//! You definitely should not do that.
//!
//! ## HTML
//!
//! Label end does not, on its own, relate to anything in HTML.
//! When matched with a [label start (link)][label_start_link], they together
//! relate to the `<a>` element in HTML.
//! See [*§ 4.5.1 The `a` element*][html_a] in the HTML spec for more info.
//! It can also match with [label start (image)][label_start_image], in which
//! case they form an `<img>` element.
//! See [*§ 4.8.3 The `img` element*][html_img] in the HTML spec for more info.
//! It can also match with [gfm label start (footnote)][gfm_label_start_footnote],
//! in which case they form `<sup>` and `<a>` elements in HTML.
//! See [*§ 4.5.19 The `sub` and `sup` elements*][html_sup] and
//! [*§ 4.5.1 The `a` element*][html_a] in the HTML spec for more info.
//!
//! ## Recommendation
//!
//! It is recommended to use labels for links instead of [autolinks][autolink].
//! Labels allow more characters in URLs, and allow relative URLs and `www.`
//! URLs.
//! They also allow for descriptive text to explain the URL in prose.
//!
//! In footnotes, it’s recommended to use words instead of numbers (or letters
//! or anything with an order) as calls.
//! That makes it easier to reuse and reorder footnotes.
//!
//! ## Tokens
//!
//! * [`Data`][Name::Data]
//! * [`GfmFootnoteCall`][Name::GfmFootnoteCall]
//! * [`Image`][Name::Image]
//! * [`Label`][Name::Label]
//! * [`LabelEnd`][Name::LabelEnd]
//! * [`LabelMarker`][Name::LabelMarker]
//! * [`LabelText`][Name::LabelText]
//! * [`LineEnding`][Name::LineEnding]
//! * [`Link`][Name::Link]
//! * [`Reference`][Name::Reference]
//! * [`ReferenceMarker`][Name::ReferenceMarker]
//! * [`ReferenceString`][Name::ReferenceString]
//! * [`Resource`][Name::Resource]
//! * [`ResourceDestination`][Name::ResourceDestination]
//! * [`ResourceDestinationLiteral`][Name::ResourceDestinationLiteral]
//! * [`ResourceDestinationLiteralMarker`][Name::ResourceDestinationLiteralMarker]
//! * [`ResourceDestinationRaw`][Name::ResourceDestinationRaw]
//! * [`ResourceDestinationString`][Name::ResourceDestinationString]
//! * [`ResourceMarker`][Name::ResourceMarker]
//! * [`ResourceTitle`][Name::ResourceTitle]
//! * [`ResourceTitleMarker`][Name::ResourceTitleMarker]
//! * [`ResourceTitleString`][Name::ResourceTitleString]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`label-end.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/label-end.js)
//! * [`micromark-extension-gfm-footnote`](https://github.com/micromark/micromark-extension-gfm-footnote)
//! * [*§ 4.7 Link reference definitions* in `CommonMark`](https://spec.commonmark.org/0.31/#link-reference-definitions)
//! * [*§ 6.3 Links* in `CommonMark`](https://spec.commonmark.org/0.31/#links)
//! * [*§ 6.4 Images* in `CommonMark`](https://spec.commonmark.org/0.31/#images)
//!
//! > 👉 **Note**: Footnotes are not specified in GFM yet.
//! > See [`github/cmark-gfm#270`](https://github.com/github/cmark-gfm/issues/270)
//! > for the related issue.
//!
//! [string]: crate::construct::string
//! [text]: crate::construct::text
//! [destination]: crate::construct::partial_destination
//! [title]: crate::construct::partial_title
//! [label]: crate::construct::partial_label
//! [label_start_image]: crate::construct::label_start_image
//! [label_start_link]: crate::construct::label_start_link
//! [gfm_label_start_footnote]: crate::construct::gfm_label_start_footnote
//! [definition]: crate::construct::definition
//! [autolink]: crate::construct::autolink
//! [sanitize_uri]: crate::util::sanitize_uri::sanitize
//! [normalize_identifier]: crate::util::normalize_identifier::normalize_identifier
//! [html_a]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-a-element
//! [html_img]: https://html.spec.whatwg.org/multipage/embedded-content.html#the-img-element
//! [html_sup]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-sub-and-sup-elements
use crate::construct::partial_space_or_tab_eol::space_or_tab_eol;
use crate::event::{Event, Kind, Name};
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::{Label, LabelKind, LabelStart, Tokenizer};
use crate::util::{
constant::RESOURCE_DESTINATION_BALANCE_MAX,
normalize_identifier::normalize_identifier,
skip,
slice::{Position, Slice},
};
use alloc::{string::String, vec};
/// Start of label end.
///
/// ```markdown
/// > | [a](b) c
///       ^
/// > | [a][b] c
///       ^
/// > | [a][] b
///       ^
/// > | [a] b
///       ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if Some(b']') == tokenizer.current && tokenizer.parse_state.options.constructs.label_end {
        // If there is an okay opening:
        if !tokenizer.tokenize_state.label_starts.is_empty() {
            let label_start = tokenizer.tokenize_state.label_starts.last().unwrap();
            // Remember the event index of this `]`, used later by `after`
            // (to slice the label text) and by `ok` (to record the match).
            tokenizer.tokenize_state.end = tokenizer.events.len();
            // If the corresponding label (link) start is marked as inactive,
            // it means we’d be wrapping a link, like this:
            //
            // ```markdown
            // > | a [b [c](d) e](f) g.
            //                  ^
            // ```
            //
            // We can’t have that, so it’s just balanced brackets.
            if label_start.inactive {
                return State::Retry(StateName::LabelEndNok);
            }
            tokenizer.enter(Name::LabelEnd);
            tokenizer.enter(Name::LabelMarker);
            tokenizer.consume();
            tokenizer.exit(Name::LabelMarker);
            tokenizer.exit(Name::LabelEnd);
            return State::Next(StateName::LabelEndAfter);
        }
    }
    State::Nok
}
/// After `]`.
///
/// Figures out whether the matched label is a resource, a full/collapsed
/// reference, or a shortcut reference, and dispatches accordingly.
///
/// ```markdown
/// > | [a](b) c
///        ^
/// > | [a][b] c
///        ^
/// > | [a][] b
///        ^
/// > | [a] b
///        ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // `start` only continues when there is at least one label start, so this
    // cannot underflow.
    let start_index = tokenizer.tokenize_state.label_starts.len() - 1;
    let start = &tokenizer.tokenize_state.label_starts[start_index];
    // Byte range of the label text: from just after the opening marker to
    // just before this closing `]`.
    let indices = (
        tokenizer.events[start.start.1].point.index,
        tokenizer.events[tokenizer.tokenize_state.end].point.index,
    );
    // We don’t care about virtual spaces, so `indices` and `as_str` are fine.
    let mut id = normalize_identifier(
        Slice::from_indices(tokenizer.parse_state.bytes, indices.0, indices.1).as_str(),
    );
    // See if this matches a footnote definition.
    if start.kind == LabelKind::GfmFootnote {
        if tokenizer.parse_state.gfm_footnote_definitions.contains(&id) {
            return State::Retry(StateName::LabelEndOk);
        }
        // Nope, this might be a normal link?
        // Mark the start as a footnote that did not match, and put the `^`
        // back into the identifier so it can match a regular definition.
        tokenizer.tokenize_state.label_starts[start_index].kind = LabelKind::GfmUndefinedFootnote;
        let mut new_id = String::new();
        new_id.push('^');
        new_id.push_str(&id);
        id = new_id;
    }
    let defined = tokenizer.parse_state.definitions.contains(&id);
    match tokenizer.current {
        // Resource (`[asd](fgh)`)?
        Some(b'(') => {
            // If the resource fails but the label is defined, it still works
            // as a shortcut reference.
            tokenizer.attempt(
                State::Next(StateName::LabelEndOk),
                State::Next(if defined {
                    StateName::LabelEndOk
                } else {
                    StateName::LabelEndNok
                }),
            );
            State::Retry(StateName::LabelEndResourceStart)
        }
        // Full (`[asd][fgh]`) or collapsed (`[asd][]`) reference?
        Some(b'[') => {
            // If a full reference fails but the label is defined, try a
            // collapsed (or shortcut) reference next.
            tokenizer.attempt(
                State::Next(StateName::LabelEndOk),
                State::Next(if defined {
                    StateName::LabelEndReferenceNotFull
                } else {
                    StateName::LabelEndNok
                }),
            );
            State::Retry(StateName::LabelEndReferenceFull)
        }
        // Shortcut (`[asd]`) reference?
        _ => State::Retry(if defined {
            StateName::LabelEndOk
        } else {
            StateName::LabelEndNok
        }),
    }
}
/// After `]`, at `[`, but not at a full reference.
///
/// Attempts a collapsed reference (`[a][]`); if that fails the label end
/// fails too (the shortcut case was already covered by being defined).
///
/// > 👉 **Note**: we only get here if the label is defined.
///
/// ```markdown
/// > | [a][] b
///        ^
/// > | [a] b
///        ^
/// ```
pub fn reference_not_full(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::LabelEndOk),
        State::Next(StateName::LabelEndNok),
    );
    State::Retry(StateName::LabelEndReferenceCollapsed)
}
/// Done, we found something.
///
/// Pops the matched start, deactivates enclosing link starts (links may not
/// nest), records the match as a [`Label`], and schedules the resolver.
///
/// ```markdown
/// > | [a](b) c
///          ^
/// > | [a][b] c
///          ^
/// > | [a][] b
///         ^
/// > | [a] b
///       ^
/// ```
pub fn ok(tokenizer: &mut Tokenizer) -> State {
    // Remove the start.
    let label_start = tokenizer.tokenize_state.label_starts.pop().unwrap();
    // If this is a link or footnote, we need to mark earlier link starts as no
    // longer viable for use (as they would otherwise contain a link).
    // These link starts are still looking for balanced closing brackets, so
    // we can’t remove them, but we can mark them.
    if label_start.kind != LabelKind::Image {
        for earlier in &mut tokenizer.tokenize_state.label_starts {
            if earlier.kind != LabelKind::Image {
                earlier.inactive = true;
            }
        }
    }
    tokenizer.tokenize_state.labels.push(Label {
        kind: label_start.kind,
        start: label_start.start,
        end: (tokenizer.tokenize_state.end, tokenizer.events.len() - 1),
    });
    tokenizer.tokenize_state.end = 0;
    tokenizer.register_resolver_before(ResolveName::Label);
    State::Ok
}
/// Done, it’s nothing.
///
/// There was an okay opening, but we didn’t match anything.
/// The start is kept as a “loose” start so the resolver can later turn it
/// back into plain data.
///
/// ```markdown
/// > | [a](b c
///        ^
/// > | [a][b c
///        ^
/// > | [a] b
///        ^
/// ```
pub fn nok(tokenizer: &mut Tokenizer) -> State {
    let start = tokenizer.tokenize_state.label_starts.pop().unwrap();
    tokenizer.tokenize_state.label_starts_loose.push(start);
    tokenizer.tokenize_state.end = 0;
    State::Nok
}
/// At a resource.
///
/// Opens the resource and consumes the `(` marker.
///
/// ```markdown
/// > | [a](b) c
///        ^
/// ```
pub fn resource_start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'(') => {
            tokenizer.enter(Name::Resource);
            tokenizer.enter(Name::ResourceMarker);
            tokenizer.consume();
            tokenizer.exit(Name::ResourceMarker);
            State::Next(StateName::LabelEndResourceBefore)
        }
        // `after` only retries this state when at `(`.
        _ => unreachable!("expected `(`"),
    }
}
/// In resource, after `(`, at optional whitespace.
///
/// ```markdown
/// > | [a](b) c
///         ^
/// ```
pub fn resource_before(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'\t' | b'\n' | b' ')) {
        // Whitespace here is optional: continue either way.
        tokenizer.attempt(
            State::Next(StateName::LabelEndResourceOpen),
            State::Next(StateName::LabelEndResourceOpen),
        );
        State::Retry(space_or_tab_eol(tokenizer))
    } else {
        State::Retry(StateName::LabelEndResourceOpen)
    }
}
/// In resource, after optional whitespace, at `)` or a destination.
///
/// ```markdown
/// > | [a](b) c
///         ^
/// ```
pub fn resource_open(tokenizer: &mut Tokenizer) -> State {
    if let Some(b')') = tokenizer.current {
        // Empty resource (`[a]()`): go straight to the end.
        State::Retry(StateName::LabelEndResourceEnd)
    } else {
        // Configure the shared destination parser with resource-specific
        // token names and the maximum paren balance.
        tokenizer.tokenize_state.token_1 = Name::ResourceDestination;
        tokenizer.tokenize_state.token_2 = Name::ResourceDestinationLiteral;
        tokenizer.tokenize_state.token_3 = Name::ResourceDestinationLiteralMarker;
        tokenizer.tokenize_state.token_4 = Name::ResourceDestinationRaw;
        tokenizer.tokenize_state.token_5 = Name::ResourceDestinationString;
        tokenizer.tokenize_state.size_b = RESOURCE_DESTINATION_BALANCE_MAX;
        tokenizer.attempt(
            State::Next(StateName::LabelEndResourceDestinationAfter),
            State::Next(StateName::LabelEndResourceDestinationMissing),
        );
        State::Retry(StateName::DestinationStart)
    }
}
/// In resource, after destination, at optional whitespace.
///
/// Resets the shared destination-parser state before continuing.
///
/// ```markdown
/// > | [a](b) c
///          ^
/// ```
pub fn resource_destination_after(tokenizer: &mut Tokenizer) -> State {
    // Clear the token names and balance configured in `resource_open`.
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    tokenizer.tokenize_state.token_4 = Name::Data;
    tokenizer.tokenize_state.token_5 = Name::Data;
    tokenizer.tokenize_state.size_b = 0;
    if matches!(tokenizer.current, Some(b'\t' | b'\n' | b' ')) {
        // Whitespace may be followed by a title (`between`) or the end.
        tokenizer.attempt(
            State::Next(StateName::LabelEndResourceBetween),
            State::Next(StateName::LabelEndResourceEnd),
        );
        State::Retry(space_or_tab_eol(tokenizer))
    } else {
        State::Retry(StateName::LabelEndResourceEnd)
    }
}
/// At invalid destination.
///
/// Resets the shared destination-parser state, then fails.
///
/// ```markdown
/// > | [a](<<) b
///         ^
/// ```
pub fn resource_destination_missing(tokenizer: &mut Tokenizer) -> State {
    // Clear the token names and balance configured in `resource_open`.
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    tokenizer.tokenize_state.token_4 = Name::Data;
    tokenizer.tokenize_state.token_5 = Name::Data;
    tokenizer.tokenize_state.size_b = 0;
    State::Nok
}
/// In resource, after destination and whitespace, at `(` or title.
///
/// ```markdown
/// > | [a](b ) c
///           ^
/// ```
pub fn resource_between(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // A title can be delimited by double quotes, single quotes, or parens.
        Some(b'"' | b'\'' | b'(') => {
            // Configure the shared title parser with resource-specific names.
            tokenizer.tokenize_state.token_1 = Name::ResourceTitle;
            tokenizer.tokenize_state.token_2 = Name::ResourceTitleMarker;
            tokenizer.tokenize_state.token_3 = Name::ResourceTitleString;
            tokenizer.attempt(
                State::Next(StateName::LabelEndResourceTitleAfter),
                State::Nok,
            );
            State::Retry(StateName::TitleStart)
        }
        _ => State::Retry(StateName::LabelEndResourceEnd),
    }
}
/// In resource, after title, at optional whitespace.
///
/// Resets the shared title-parser state before continuing.
///
/// ```markdown
/// > | [a](b "c") d
///              ^
/// ```
pub fn resource_title_after(tokenizer: &mut Tokenizer) -> State {
    // Clear the token names configured in `resource_between`.
    tokenizer.tokenize_state.token_1 = Name::Data
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    if matches!(tokenizer.current, Some(b'\t' | b'\n' | b' ')) {
        // Trailing whitespace is optional: continue either way.
        tokenizer.attempt(
            State::Next(StateName::LabelEndResourceEnd),
            State::Next(StateName::LabelEndResourceEnd),
        );
        State::Retry(space_or_tab_eol(tokenizer))
    } else {
        State::Retry(StateName::LabelEndResourceEnd)
    }
}
/// In resource, at `)`.
///
/// Consumes the closing marker and closes the resource.
///
/// ```markdown
/// > | [a](b) d
///          ^
/// ```
pub fn resource_end(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b')') {
        tokenizer.enter(Name::ResourceMarker);
        tokenizer.consume();
        tokenizer.exit(Name::ResourceMarker);
        tokenizer.exit(Name::Resource);
        State::Ok
    } else {
        State::Nok
    }
}
/// In reference (full), at `[`.
///
/// ```markdown
/// > | [a][b] d
///        ^
/// ```
pub fn reference_full(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'[') => {
            // Configure the shared label parser with reference-specific names.
            tokenizer.tokenize_state.token_1 = Name::Reference;
            tokenizer.tokenize_state.token_2 = Name::ReferenceMarker;
            tokenizer.tokenize_state.token_3 = Name::ReferenceString;
            tokenizer.attempt(
                State::Next(StateName::LabelEndReferenceFullAfter),
                State::Next(StateName::LabelEndReferenceFullMissing),
            );
            State::Retry(StateName::LabelStart)
        }
        // `after` only retries this state when at `[`.
        _ => unreachable!("expected `[`"),
    }
}
/// In reference (full), after `]`.
///
/// Checks whether the reference string matches a known definition.
///
/// ```markdown
/// > | [a][b] d
///          ^
/// ```
pub fn reference_full_after(tokenizer: &mut Tokenizer) -> State {
    // Clear the token names configured in `reference_full`.
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    if tokenizer
        .parse_state
        .definitions
        // We don’t care about virtual spaces, so `as_str` is fine.
        .contains(&normalize_identifier(
            Slice::from_position(
                tokenizer.parse_state.bytes,
                // Walk backwards through the events to the exit of the
                // reference string we just parsed.
                &Position::from_exit_event(
                    &tokenizer.events,
                    skip::to_back(
                        &tokenizer.events,
                        tokenizer.events.len() - 1,
                        &[Name::ReferenceString],
                    ),
                ),
            )
            .as_str(),
        ))
    {
        State::Ok
    } else {
        State::Nok
    }
}
/// In reference (full) that was missing.
///
/// Resets the shared label-parser state, then fails.
///
/// ```markdown
/// > | [a][b d
///        ^
/// ```
pub fn reference_full_missing(tokenizer: &mut Tokenizer) -> State {
    // Clear the token names configured in `reference_full`.
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    State::Nok
}
/// In reference (collapsed), at `[`.
///
/// > 👉 **Note**: we only get here if the label is defined.
///
/// ```markdown
/// > | [a][] d
///        ^
/// ```
pub fn reference_collapsed(tokenizer: &mut Tokenizer) -> State {
    // We only attempt a collapsed label if there’s a `[`.
    debug_assert_eq!(tokenizer.current, Some(b'['), "expected opening bracket");
    tokenizer.enter(Name::Reference);
    tokenizer.enter(Name::ReferenceMarker);
    tokenizer.consume();
    tokenizer.exit(Name::ReferenceMarker);
    State::Next(StateName::LabelEndReferenceCollapsedOpen)
}
/// In reference (collapsed), at `]`.
///
/// A collapsed reference must be exactly `[]`: anything other than an
/// immediate `]` fails.
///
/// > 👉 **Note**: we only get here if the label is defined.
///
/// ```markdown
/// > | [a][] d
///         ^
/// ```
pub fn reference_collapsed_open(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b']') {
        tokenizer.enter(Name::ReferenceMarker);
        tokenizer.consume();
        tokenizer.exit(Name::ReferenceMarker);
        tokenizer.exit(Name::Reference);
        State::Ok
    } else {
        State::Nok
    }
}
/// Resolve images, links, and footnotes.
///
/// This turns matching label starts and label ends into links, images, and
/// footnotes, and turns unmatched label starts back into data.
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    // Inject labels.
    // `split_off(0)` moves the collected items out so we can borrow
    // `tokenizer` mutably while iterating them.
    let labels = tokenizer.tokenize_state.labels.split_off(0);
    inject_labels(tokenizer, &labels);
    // Handle loose starts: both the never-closed ones and the ones whose
    // label end did not match become plain data.
    let starts = tokenizer.tokenize_state.label_starts.split_off(0);
    mark_as_data(tokenizer, &starts);
    let starts = tokenizer.tokenize_state.label_starts_loose.split_off(0);
    mark_as_data(tokenizer, &starts);
    // Apply all queued event edits.
    tokenizer.map.consume(&mut tokenizer.events);
    None
}
/// Inject links/images/footnotes.
///
/// For each recorded [`Label`], wraps the events between its start and end
/// in a group (`GfmFootnoteCall`, `Image`, or `Link`) plus a `Label`, adds
/// `LabelText` around the text when present, and — for footnote starts that
/// did not match a definition — re-injects the `^` as data so the construct
/// renders as a regular link.
fn inject_labels(tokenizer: &mut Tokenizer, labels: &[Label]) {
    // Add grouping events.
    for label in labels {
        let group_name = if label.kind == LabelKind::GfmFootnote {
            Name::GfmFootnoteCall
        } else if label.kind == LabelKind::Image {
            Name::Image
        } else {
            Name::Link
        };
        // If this is a fine link, which starts with a footnote start that did
        // not match, we need to inject the caret as data.
        let mut caret = vec![];
        if label.kind == LabelKind::GfmUndefinedFootnote {
            // Add caret.
            caret.push(Event {
                kind: Kind::Enter,
                name: Name::Data,
                // Enter:GfmFootnoteCallMarker.
                point: tokenizer.events[label.start.1 - 2].point.clone(),
                link: None,
            });
            caret.push(Event {
                kind: Kind::Exit,
                name: Name::Data,
                // Exit:GfmFootnoteCallMarker.
                point: tokenizer.events[label.start.1 - 1].point.clone(),
                link: None,
            });
            // Change and move label end.
            tokenizer.events[label.start.0].name = Name::LabelLink;
            tokenizer.events[label.start.1].name = Name::LabelLink;
            tokenizer.events[label.start.1].point = caret[0].point.clone();
            // Remove the caret.
            // Enter:GfmFootnoteCallMarker, Exit:GfmFootnoteCallMarker.
            tokenizer.map.add(label.start.1 - 2, 2, vec![]);
        }
        // Insert a group enter and label enter.
        tokenizer.map.add(
            label.start.0,
            0,
            vec![
                Event {
                    kind: Kind::Enter,
                    name: group_name.clone(),
                    point: tokenizer.events[label.start.0].point.clone(),
                    link: None,
                },
                Event {
                    kind: Kind::Enter,
                    name: Name::Label,
                    point: tokenizer.events[label.start.0].point.clone(),
                    link: None,
                },
            ],
        );
        // Empty events not allowed.
        // Though: if this was what looked like a footnote, but didn’t match,
        // it’s a link instead, and we need to inject the `^`.
        if label.start.1 != label.end.0 || !caret.is_empty() {
            tokenizer.map.add_before(
                label.start.1 + 1,
                0,
                vec![Event {
                    kind: Kind::Enter,
                    name: Name::LabelText,
                    point: tokenizer.events[label.start.1].point.clone(),
                    link: None,
                }],
            );
            tokenizer.map.add(
                label.end.0,
                0,
                vec![Event {
                    kind: Kind::Exit,
                    name: Name::LabelText,
                    point: tokenizer.events[label.end.0].point.clone(),
                    link: None,
                }],
            );
        }
        if !caret.is_empty() {
            tokenizer.map.add(label.start.1 + 1, 0, caret);
        }
        // Insert a label exit.
        tokenizer.map.add(
            label.end.0 + 4,
            0,
            vec![Event {
                kind: Kind::Exit,
                name: Name::Label,
                point: tokenizer.events[label.end.0 + 3].point.clone(),
                link: None,
            }],
        );
        // Insert a group exit.
        tokenizer.map.add(
            label.end.1 + 1,
            0,
            vec![Event {
                kind: Kind::Exit,
                name: group_name,
                point: tokenizer.events[label.end.1].point.clone(),
                link: None,
            }],
        );
    }
}
/// Remove loose label starts.
///
/// Each given start is replaced with a plain `Data` enter/exit pair
/// covering the same events, so its brackets show up as literal text.
fn mark_as_data(tokenizer: &mut Tokenizer, events: &[LabelStart]) {
    for label_start in events {
        let (enter, exit) = label_start.start;
        tokenizer.map.add(
            enter,
            exit - enter + 1,
            vec![
                Event {
                    kind: Kind::Enter,
                    name: Name::Data,
                    point: tokenizer.events[enter].point.clone(),
                    link: None,
                },
                Event {
                    kind: Kind::Exit,
                    name: Name::Data,
                    point: tokenizer.events[exit].point.clone(),
                    link: None,
                },
            ],
        );
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/label_start_image.rs | Rust | //! Label start (image) occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! Label start (image) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! label_start_image ::= '!' '['
//! ```
//!
//! ## HTML
//!
//! Label start (image) does not, on its own, relate to anything in HTML.
//! When matched with a [label end][label_end], they together relate to the
//! `<img>` element in HTML.
//! See [*§ 4.8.3 The `img` element*][html_img] in the HTML spec for more info.
//! Without an end, the characters (`![`) are output.
//!
//! ## Tokens
//!
//! * [`LabelImage`][Name::LabelImage]
//! * [`LabelImageMarker`][Name::LabelImageMarker]
//! * [`LabelMarker`][Name::LabelMarker]
//!
//! ## References
//!
//! * [`label-start-image.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/label-start-image.js)
//! * [*§ 6.4 Images* in `CommonMark`](https://spec.commonmark.org/0.31/#images)
//!
//! [text]: crate::construct::text
//! [label_end]: crate::construct::label_end
//! [html_img]: https://html.spec.whatwg.org/multipage/embedded-content.html#the-img-element
use crate::event::Name;
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::tokenizer::{LabelKind, LabelStart, Tokenizer};
/// Start of label (image) start.
///
/// ```markdown
/// > | a ![b] c
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.options.constructs.label_start_image && tokenizer.current == Some(b'!')
{
tokenizer.enter(Name::LabelImage);
tokenizer.enter(Name::LabelImageMarker);
tokenizer.consume();
tokenizer.exit(Name::LabelImageMarker);
State::Next(StateName::LabelStartImageOpen)
} else {
State::Nok
}
}
/// After `!`, at `[`.
///
/// ```markdown
/// > | a ![b] c
///        ^
/// ```
pub fn open(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'[') => {
            tokenizer.enter(Name::LabelMarker);
            tokenizer.consume();
            tokenizer.exit(Name::LabelMarker);
            tokenizer.exit(Name::LabelImage);
            State::Next(StateName::LabelStartImageAfter)
        }
        // A bare `!` is not a label start.
        _ => State::Nok,
    }
}
/// After `![`.
///
/// ```markdown
/// > | a ![b] c
///         ^
/// ```
///
/// This is needed because, when GFM footnotes are enabled, images never
/// form when started with a `^`.
/// Instead, links form:
///
/// ```markdown
/// 
///
/// ![^a][b]
///
/// [b]: c
/// ```
///
/// ```html
/// <p>!<a href=\"b\">^a</a></p>
/// <p>!<a href=\"c\">^a</a></p>
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    if tokenizer
        .parse_state
        .options
        .constructs
        .gfm_label_start_footnote
        && tokenizer.current == Some(b'^')
    {
        // With footnotes on, `![^` can never be an image: fail so the `[`
        // can be tried as a (footnote/link) label start on its own.
        State::Nok
    } else {
        tokenizer.tokenize_state.label_starts.push(LabelStart {
            kind: LabelKind::Image,
            // The six events just added: enter/exit of `LabelImage`,
            // `LabelImageMarker`, and `LabelMarker`.
            start: (tokenizer.events.len() - 6, tokenizer.events.len() - 1),
            inactive: false,
        });
        tokenizer.register_resolver_before(ResolveName::Label);
        State::Ok
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/label_start_link.rs | Rust | //! Label start (link) occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! Label start (link) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! label_start_link ::= '['
//! ```
//!
//! ## HTML
//!
//! Label start (link) does not, on its own, relate to anything in HTML.
//! When matched with a [label end][label_end], they together relate to the
//! `<a>` element in HTML.
//! See [*§ 4.5.1 The `a` element*][html_a] in the HTML spec for more info.
//! Without an end, the character (`[`) is output.
//!
//! ## Tokens
//!
//! * [`LabelLink`][Name::LabelLink]
//! * [`LabelMarker`][Name::LabelMarker]
//!
//! ## References
//!
//! * [`label-start-link.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/label-start-link.js)
//! * [*§ 6.3 Links* in `CommonMark`](https://spec.commonmark.org/0.31/#links)
//!
//! [text]: crate::construct::text
//! [label_end]: crate::construct::label_end
//! [html_a]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-a-element
use crate::event::Name;
use crate::resolve::Name as ResolveName;
use crate::state::State;
use crate::tokenizer::{LabelKind, LabelStart, Tokenizer};
/// Start of label (link) start.
///
/// ```markdown
/// > | a [b] c
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.options.constructs.label_start_link && tokenizer.current == Some(b'[')
{
let start = tokenizer.events.len();
tokenizer.enter(Name::LabelLink);
tokenizer.enter(Name::LabelMarker);
tokenizer.consume();
tokenizer.exit(Name::LabelMarker);
tokenizer.exit(Name::LabelLink);
tokenizer.tokenize_state.label_starts.push(LabelStart {
kind: LabelKind::Link,
start: (start, tokenizer.events.len() - 1),
inactive: false,
});
tokenizer.register_resolver_before(ResolveName::Label);
State::Ok
} else {
State::Nok
}
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/list_item.rs | Rust | //! List item occurs in the [document][] content type.
//!
//! ## Grammar
//!
//! List item forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: if there is no space after the marker, the start must be followed by an `eol`.
//! ; Restriction: if the first line after the marker is not blank and starts with `5(space_or_tab)`,
//! ; only the first `space_or_tab` is part of the start.
//! list_item_start ::= '*' | '+' | '-' | 1*9(ascii_decimal) ('.' | ')') [1*4 space_or_tab]
//!
//! ; Restriction: blank line allowed, except when this is the first continuation after a blank start.
//! ; Restriction: if not blank, the line must be indented, exactly `n` times.
//! list_item_cont ::= [n(space_or_tab)]
//! ```
//!
//! Further lines that are not prefixed with `list_item_cont` cause the list
//! item to be exited, except when those lines are lazy continuation or blank.
//! Like so many things in markdown, list items too are complex.
//! See [*§ Phase 1: block structure* in `CommonMark`][commonmark_block] for
//! more on parsing details.
//!
//! As list item is a container, it takes several bytes from the start of the
//! line, while the rest of the line includes more containers or flow.
//!
//! ## HTML
//!
//! List item relates to the `<li>`, `<ol>`, and `<ul>` elements in HTML.
//! See [*§ 4.4.8 The `li` element*][html_li],
//! [*§ 4.4.5 The `ol` element*][html_ol], and
//! [*§ 4.4.7 The `ul` element*][html_ul] in the HTML spec for more info.
//!
//! ## Recommendation
//!
//! Use a single space after a marker.
//! Never use lazy continuation.
//!
//! ## Tokens
//!
//! * [`ListItem`][Name::ListItem]
//! * [`ListItemMarker`][Name::ListItemMarker]
//! * [`ListItemPrefix`][Name::ListItemPrefix]
//! * [`ListItemValue`][Name::ListItemValue]
//! * [`ListOrdered`][Name::ListOrdered]
//! * [`ListUnordered`][Name::ListUnordered]
//!
//! ## References
//!
//! * [`list.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/list.js)
//! * [*§ 5.2 List items* in `CommonMark`](https://spec.commonmark.org/0.31/#list-items)
//! * [*§ 5.3 Lists* in `CommonMark`](https://spec.commonmark.org/0.31/#lists)
//!
//! [document]: crate::construct::document
//! [html_li]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-li-element
//! [html_ol]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-ol-element
//! [html_ul]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-ul-element
//! [commonmark_block]: https://spec.commonmark.org/0.31/#phase-1-block-structure
use crate::construct::partial_space_or_tab::space_or_tab_min_max;
use crate::event::{Kind, Name};
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
use crate::util::{
constant::{LIST_ITEM_VALUE_SIZE_MAX, TAB_SIZE},
skip,
slice::{Position, Slice},
};
use alloc::{vec, vec::Vec};
/// Start of list item.
///
/// ```markdown
/// > | * a
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
if tokenizer.parse_state.options.constructs.list_item {
tokenizer.enter(Name::ListItem);
if matches!(tokenizer.current, Some(b'\t' | b' ')) {
tokenizer.attempt(State::Next(StateName::ListItemBefore), State::Nok);
State::Retry(space_or_tab_min_max(
tokenizer,
0,
if tokenizer.parse_state.options.constructs.code_indented {
TAB_SIZE - 1
} else {
usize::MAX
},
))
} else {
State::Retry(StateName::ListItemBefore)
}
} else {
State::Nok
}
}
/// After optional whitespace, at list item prefix.
///
/// ```markdown
/// > | * a
///     ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    // Unordered.
    if matches!(tokenizer.current, Some(b'*' | b'-')) {
        // `*` and `-` could also start a thematic break (`***`); if the line
        // is one, the thematic break wins and this list item fails.
        tokenizer.check(State::Nok, State::Next(StateName::ListItemBeforeUnordered));
        State::Retry(StateName::ThematicBreakStart)
    } else if tokenizer.current == Some(b'+') {
        // `+` cannot start a thematic break, so no check is needed.
        State::Retry(StateName::ListItemBeforeUnordered)
    }
    // Ordered.
    else if tokenizer.current == Some(b'1')
        // When interrupting a paragraph, only a list starting at `1` counts.
        || (matches!(tokenizer.current, Some(b'0'..=b'9')) && !tokenizer.interrupt)
    {
        State::Retry(StateName::ListItemBeforeOrdered)
    } else {
        State::Nok
    }
}
/// At unordered list item marker.
///
/// The line is not a thematic break.
///
/// ```markdown
/// > | * a
///     ^
/// ```
pub fn before_unordered(tokenizer: &mut Tokenizer) -> State {
    tokenizer.enter(Name::ListItemPrefix);
    State::Retry(StateName::ListItemMarker)
}
/// At ordered list item value.
///
/// ```markdown
/// > | 1. a
///     ^
/// ```
pub fn before_ordered(tokenizer: &mut Tokenizer) -> State {
    tokenizer.enter(Name::ListItemPrefix);
    tokenizer.enter(Name::ListItemValue);
    State::Retry(StateName::ListItemValue)
}
/// In ordered list item value.
///
/// ```markdown
/// > | 1. a
///     ^
/// ```
pub fn value(tokenizer: &mut Tokenizer) -> State {
    // At the `.`/`)` that ends the value; when interrupting a paragraph the
    // value must be a single digit (`1`).
    if matches!(tokenizer.current, Some(b'.' | b')'))
        && (!tokenizer.interrupt || tokenizer.tokenize_state.size < 2)
    {
        tokenizer.exit(Name::ListItemValue);
        State::Retry(StateName::ListItemMarker)
    } else if matches!(tokenizer.current, Some(b'0'..=b'9'))
        // At most `LIST_ITEM_VALUE_SIZE_MAX` digits are allowed.
        && tokenizer.tokenize_state.size + 1 < LIST_ITEM_VALUE_SIZE_MAX
    {
        tokenizer.tokenize_state.size += 1;
        tokenizer.consume();
        State::Next(StateName::ListItemValue)
    } else {
        tokenizer.tokenize_state.size = 0;
        State::Nok
    }
}
/// At list item marker.
///
/// Consumes the `*`, `+`, `-`, `.`, or `)`.
///
/// ```markdown
/// > | * a
///     ^
/// > | 1. b
///      ^
/// ```
pub fn marker(tokenizer: &mut Tokenizer) -> State {
    tokenizer.enter(Name::ListItemMarker);
    tokenizer.consume();
    tokenizer.exit(Name::ListItemMarker);
    State::Next(StateName::ListItemMarkerAfter)
}
/// After list item marker.
///
/// Checks whether the rest of the line is blank.
///
/// ```markdown
/// > | * a
///      ^
/// > | 1. b
///       ^
/// ```
pub fn marker_after(tokenizer: &mut Tokenizer) -> State {
    // `size == 1` signals to `after` that the item starts with a blank line.
    tokenizer.tokenize_state.size = 1;
    tokenizer.check(
        State::Next(StateName::ListItemAfter),
        State::Next(StateName::ListItemMarkerAfterFilled),
    );
    State::Retry(StateName::BlankLineStart)
}
/// After list item marker.
///
/// The marker is not followed by a blank line.
///
/// ```markdown
/// > | * a
///      ^
/// ```
pub fn marker_after_filled(tokenizer: &mut Tokenizer) -> State {
    // Not blank after all: reset the flag set by `marker_after`.
    tokenizer.tokenize_state.size = 0;
    // Attempt to parse up to the largest allowed indent, `nok` if there is more whitespace.
    tokenizer.attempt(
        State::Next(StateName::ListItemAfter),
        State::Next(StateName::ListItemPrefixOther),
    );
    State::Retry(StateName::ListItemWhitespace)
}
/// After marker, at whitespace.
///
/// Accepts between 1 and `TAB_SIZE` spaces/tabs after the marker.
///
/// ```markdown
/// > | * a
///      ^
/// ```
pub fn whitespace(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(State::Next(StateName::ListItemWhitespaceAfter), State::Nok);
    State::Retry(space_or_tab_min_max(tokenizer, 1, TAB_SIZE))
}
/// After acceptable whitespace.
///
/// Even more whitespace here means the item starts with indented code
/// instead of the whitespace counting as part of the prefix, so fail and
/// let `prefix_other` take a single space/tab.
///
/// ```markdown
/// > | * a
///       ^
/// ```
pub fn whitespace_after(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        State::Nok
    } else {
        State::Ok
    }
}
/// After marker, followed by no indent or more indent that needed.
///
/// Takes exactly one space or tab as the prefix whitespace.
///
/// ```markdown
/// > | * a
///      ^
/// ```
pub fn prefix_other(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'\t' | b' ') => {
            tokenizer.enter(Name::SpaceOrTab);
            tokenizer.consume();
            tokenizer.exit(Name::SpaceOrTab);
            State::Next(StateName::ListItemAfter)
        }
        _ => State::Nok,
    }
}
/// After list item prefix.
///
/// Records the prefix width on the container so continuation lines know how
/// much indent to expect.
///
/// ```markdown
/// > | * a
///       ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // `size == 1` was set by `marker_after` when the line is blank.
    let blank = tokenizer.tokenize_state.size == 1;
    tokenizer.tokenize_state.size = 0;
    // A blank list item cannot interrupt a paragraph.
    if blank && tokenizer.interrupt {
        State::Nok
    } else {
        // Find the `Enter:ListItem` to measure the whole prefix from.
        let start = skip::to_back(
            &tokenizer.events,
            tokenizer.events.len() - 1,
            &[Name::ListItem],
        );
        let mut prefix = Slice::from_position(
            tokenizer.parse_state.bytes,
            &Position {
                start: &tokenizer.events[start].point,
                end: &tokenizer.point,
            },
        )
        .len();
        if blank {
            // Count the space that would have followed the marker.
            prefix += 1;
        }
        let container = &mut tokenizer.tokenize_state.document_container_stack
            [tokenizer.tokenize_state.document_continued];
        container.blank_initial = blank;
        container.size = prefix;
        tokenizer.exit(Name::ListItemPrefix);
        tokenizer.register_resolver_before(ResolveName::ListItem);
        State::Ok
    }
}
/// Start of list item continuation.
///
/// ```markdown
/// | * a
/// > | b
/// ^
/// ```
pub fn cont_start(tokenizer: &mut Tokenizer) -> State {
    // Check (without consuming) whether this continuation line is blank.
    tokenizer.check(
        State::Next(StateName::ListItemContBlank),
        State::Next(StateName::ListItemContFilled),
    );
    State::Retry(StateName::BlankLineStart)
}
/// Start of blank list item continuation.
///
/// ```markdown
/// | * a
/// > |
/// ^
/// | b
/// ```
pub fn cont_blank(tokenizer: &mut Tokenizer) -> State {
    let container = &mut tokenizer.tokenize_state.document_container_stack
        [tokenizer.tokenize_state.document_continued];
    let size = container.size;
    // An item that started with a blank line cannot be continued by blank lines.
    if container.blank_initial {
        return State::Nok;
    }
    match tokenizer.current {
        // Consume, optionally, at most `size`.
        Some(b'\t' | b' ') => State::Retry(space_or_tab_min_max(tokenizer, 0, size)),
        _ => State::Ok,
    }
}
/// Start of non-blank list item continuation.
///
/// ```markdown
/// | * a
/// > | b
/// ^
/// ```
pub fn cont_filled(tokenizer: &mut Tokenizer) -> State {
    let container = &mut tokenizer.tokenize_state.document_container_stack
        [tokenizer.tokenize_state.document_continued];
    let size = container.size;
    // There is further content, so the item no longer counts as starting blank.
    container.blank_initial = false;
    if let Some(b'\t' | b' ') = tokenizer.current {
        // Consume exactly `size`.
        State::Retry(space_or_tab_min_max(tokenizer, size, size))
    } else {
        State::Nok
    }
}
/// Find adjacent list items with the same marker.
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    // Each tuple is `(marker byte, balance, enter index, exit index)` for one list.
    let mut lists_wip: Vec<(u8, usize, usize, usize)> = vec![];
    let mut lists: Vec<(u8, usize, usize, usize)> = vec![];
    let mut index = 0;
    let mut balance = 0;
    // Merge list items.
    while index < tokenizer.events.len() {
        let event = &tokenizer.events[index];
        if event.name == Name::ListItem {
            if event.kind == Kind::Enter {
                // Index of the matching exit event of this item.
                let end = skip::opt(&tokenizer.events, index, &[Name::ListItem]) - 1;
                let marker = skip::to(&tokenizer.events, index, &[Name::ListItemMarker]);
                // Guaranteed to be a valid ASCII byte.
                let marker = tokenizer.parse_state.bytes[tokenizer.events[marker].point.index];
                let current = (marker, balance, index, end);
                let mut list_index = lists_wip.len();
                let mut matched = false;
                // Try to attach this item to an open list with the same marker
                // and nesting depth that it directly follows.
                while list_index > 0 {
                    list_index -= 1;
                    let previous = &lists_wip[list_index];
                    // First event after the previous item, skipping inter-item filler.
                    let before = skip::opt(
                        &tokenizer.events,
                        previous.3 + 1,
                        &[
                            Name::SpaceOrTab,
                            Name::LineEnding,
                            Name::BlankLineEnding,
                            Name::BlockQuotePrefix,
                        ],
                    );
                    if previous.0 == current.0 && previous.1 == current.1 && before == current.2 {
                        // Adjacent: extend the previous list to cover this item.
                        let previous_mut = &mut lists_wip[list_index];
                        previous_mut.3 = current.3;
                        // Anything above the matched list can no longer be extended.
                        lists.append(&mut lists_wip.split_off(list_index + 1));
                        matched = true;
                        break;
                    }
                }
                if !matched {
                    let mut index = lists_wip.len();
                    let mut exit = None;
                    while index > 0 {
                        index -= 1;
                        // If the current (new) item starts after where this
                        // item on the stack ends, we can remove it from the
                        // stack.
                        if current.2 > lists_wip[index].3 {
                            exit = Some(index);
                        } else {
                            break;
                        }
                    }
                    if let Some(exit) = exit {
                        lists.append(&mut lists_wip.split_off(exit));
                    }
                    lists_wip.push(current);
                }
                balance += 1;
            } else {
                balance -= 1;
            }
        }
        index += 1;
    }
    // Whatever is still open is done now.
    lists.append(&mut lists_wip);
    // Inject events.
    let mut index = 0;
    while index < lists.len() {
        let list_item = &lists[index];
        let mut list_start = tokenizer.events[list_item.2].clone();
        let mut list_end = tokenizer.events[list_item.3].clone();
        // `.` and `)` markers come from ordered items; the rest is unordered.
        let name = match list_item.0 {
            b'.' | b')' => Name::ListOrdered,
            _ => Name::ListUnordered,
        };
        list_start.name = name.clone();
        list_end.name = name;
        tokenizer.map.add(list_item.2, 0, vec![list_start]);
        tokenizer.map.add(list_item.3 + 1, 0, vec![list_end]);
        index += 1;
    }
    tokenizer.map.consume(&mut tokenizer.events);
    None
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/mdx_esm.rs | Rust | //! MDX ESM occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! MDX expression (flow) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! mdx_esm ::= word *line *(eol *line)
//!
//! word ::= 'e' 'x' 'p' 'o' 'r' 't' | 'i' 'm' 'p' 'o' 'r' 't'
//! ```
//!
//! This construct must be followed by a blank line or eof (end of file).
//! It can include blank lines if [`MdxEsmParse`][crate::MdxEsmParse] passed in
//! [`ParseOptions`][parse_options] allows it.
//!
//! ## Tokens
//!
//! * [`LineEnding`][Name::LineEnding]
//! * [`MdxEsm`][Name::MdxEsm]
//! * [`MdxEsmData`][Name::MdxEsmData]
//!
//! ## References
//!
//! * [`syntax.js` in `micromark-extension-mdxjs-esm`](https://github.com/micromark/micromark-extension-mdxjs-esm/blob/main/dev/lib/syntax.js)
//! * [`mdxjs.com`](https://mdxjs.com)
//!
//! [flow]: crate::construct::flow
//! [parse_options]: crate::ParseOptions
use crate::event::Name;
use crate::message;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{mdx_collect::collect, slice::Slice};
use crate::MdxSignal;
use alloc::boxed::Box;
/// Start of MDX ESM.
///
/// ```markdown
/// > | import a from 'b'
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // If it’s turned on.
    if tokenizer.parse_state.options.constructs.mdx_esm
    // If there is a gnostic parser.
    && tokenizer.parse_state.options.mdx_esm_parse.is_some()
    // When not interrupting.
    && !tokenizer.interrupt
    // Only at the start of a line, not at whitespace or in a container.
    && tokenizer.point.column == 1
    && matches!(tokenizer.current, Some(b'e' | b'i'))
    {
        // Place where keyword starts; `word` slices from here to verify it.
        tokenizer.tokenize_state.start = tokenizer.point.index;
        tokenizer.enter(Name::MdxEsm);
        tokenizer.enter(Name::MdxEsmData);
        tokenizer.consume();
        State::Next(StateName::MdxEsmWord)
    } else {
        State::Nok
    }
}
/// In keyword.
///
/// ```markdown
/// > | import a from 'b'
/// ^^^^^^
/// ```
pub fn word(tokenizer: &mut Tokenizer) -> State {
    // Keep eating lowercase letters while the keyword lasts.
    if let Some(b'a'..=b'z') = tokenizer.current {
        tokenizer.consume();
        return State::Next(StateName::MdxEsmWord);
    }
    // Keyword done: check what was collected.
    let slice = Slice::from_indices(
        tokenizer.parse_state.bytes,
        tokenizer.tokenize_state.start,
        tokenizer.point.index,
    );
    let known = matches!(slice.as_str(), "export" | "import");
    if known && tokenizer.current == Some(b' ') {
        tokenizer.concrete = true;
        tokenizer.tokenize_state.start = tokenizer.events.len() - 1;
        tokenizer.consume();
        State::Next(StateName::MdxEsmInside)
    } else {
        tokenizer.tokenize_state.start = 0;
        State::Nok
    }
}
/// In data.
///
/// ```markdown
/// > | import a from 'b'
/// ^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    // Data runs until an eol or eof.
    if matches!(tokenizer.current, None | Some(b'\n')) {
        tokenizer.exit(Name::MdxEsmData);
        State::Retry(StateName::MdxEsmLineStart)
    } else {
        tokenizer.consume();
        State::Next(StateName::MdxEsmInside)
    }
}
/// At start of line.
///
/// ```markdown
/// | import a from 'b'
/// > | export {a}
/// ^
/// ```
pub fn line_start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Eof always ends the ESM.
        None => State::Retry(StateName::MdxEsmAtEnd),
        Some(b'\n') => {
            // A blank line ends the ESM; otherwise it continues on the next line.
            tokenizer.check(
                State::Next(StateName::MdxEsmAtEnd),
                State::Next(StateName::MdxEsmContinuationStart),
            );
            State::Retry(StateName::MdxEsmBlankLineBefore)
        }
        _ => {
            // More data on this line.
            tokenizer.enter(Name::MdxEsmData);
            tokenizer.consume();
            State::Next(StateName::MdxEsmInside)
        }
    }
}
/// At start of line that continues.
///
/// ```markdown
/// | import a from 'b'
/// > | export {a}
/// ^
/// ```
pub fn continuation_start(tokenizer: &mut Tokenizer) -> State {
    // Consume the eol, then parse the next line.
    tokenizer.enter(Name::LineEnding);
    tokenizer.consume();
    tokenizer.exit(Name::LineEnding);
    State::Next(StateName::MdxEsmLineStart)
}
/// At start of a potentially blank line.
///
/// ```markdown
/// | import a from 'b'
/// > | export {a}
/// ^
/// ```
pub fn blank_line_before(tokenizer: &mut Tokenizer) -> State {
    // Consume the eol, then let the blank line construct look at the rest.
    tokenizer.enter(Name::LineEnding);
    tokenizer.consume();
    tokenizer.exit(Name::LineEnding);
    State::Next(StateName::BlankLineStart)
}
/// At end of line (blank or eof).
///
/// ```markdown
/// > | import a from 'b'
/// ^
/// ```
pub fn at_end(tokenizer: &mut Tokenizer) -> State {
    let state = parse_esm(tokenizer);
    // Only on success: leave concrete parsing and close the token.
    if let State::Ok = state {
        tokenizer.concrete = false;
        tokenizer.exit(Name::MdxEsm);
    }
    state
}
/// Parse ESM with a given function.
fn parse_esm(tokenizer: &mut Tokenizer) -> State {
    // We can `unwrap` because we don’t parse if this is `None`.
    let parse = tokenizer
        .parse_state
        .options
        .mdx_esm_parse
        .as_ref()
        .unwrap();
    // Collect the body of the ESM and positional info for each run of it.
    let result = collect(
        &tokenizer.events,
        tokenizer.parse_state.bytes,
        tokenizer.tokenize_state.start,
        &[Name::MdxEsmData, Name::LineEnding],
        &[],
    );
    // Parse and handle what was signaled back.
    match parse(&result.value) {
        MdxSignal::Ok => State::Ok,
        MdxSignal::Error(message, relative, source, rule_id) => {
            // Map the parser’s relative offset back onto a point in the document.
            let point = tokenizer
                .parse_state
                .location
                .as_ref()
                .expect("expected location index if aware mdx is on")
                .relative_to_point(&result.stops, relative)
                .expect("expected non-empty string");
            State::Error(message::Message {
                place: Some(Box::new(message::Place::Point(point))),
                reason: message,
                source,
                rule_id,
            })
        }
        MdxSignal::Eof(message, source, rule_id) => {
            if tokenizer.current.is_none() {
                // Truly at the end of the document: the error is final.
                State::Error(message::Message {
                    place: Some(Box::new(message::Place::Point(tokenizer.point.to_unist()))),
                    reason: message,
                    source,
                    rule_id,
                })
            } else {
                // More content follows: remember the error, try again with more lines.
                tokenizer.tokenize_state.mdx_last_parse_error = Some((message, *source, *rule_id));
                State::Retry(StateName::MdxEsmContinuationStart)
            }
        }
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/mdx_expression_flow.rs | Rust | //! MDX expression (flow) occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! MDX expression (flow) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! mdx_expression_flow ::= mdx_expression *space_or_tab
//!
//! ; See the `partial_mdx_expression` construct for the BNF of that part.
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! See [`mdx_expression`][mdx_expression] for more info.
//!
//! ## Tokens
//!
//! * [`MdxFlowExpression`][Name::MdxFlowExpression]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//! * see [`mdx_expression`][mdx_expression] for more
//!
//! ## Recommendation
//!
//! See [`mdx_expression`][mdx_expression] for recommendations.
//!
//! ## References
//!
//! * [`syntax.js` in `micromark-extension-mdx-expression`](https://github.com/micromark/micromark-extension-mdx-expression/blob/main/packages/micromark-extension-mdx-expression/dev/lib/syntax.js)
//! * [`mdxjs.com`](https://mdxjs.com)
//!
//! [flow]: crate::construct::flow
//! [mdx_expression]: crate::construct::partial_mdx_expression
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::constant::TAB_SIZE;
/// Start of an MDX expression (flow).
///
/// ```markdown
/// > | {Math.PI}
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.parse_state.options.constructs.mdx_expression_flow {
        // Mark subsequent events as a flow expression; reset in `reset`.
        tokenizer.tokenize_state.token_1 = Name::MdxFlowExpression;
        if matches!(tokenizer.current, Some(b'\t' | b' ')) {
            tokenizer.attempt(State::Next(StateName::MdxExpressionFlowBefore), State::Nok);
            // NOTE(review): when indented code is on, at most `TAB_SIZE - 1`
            // columns of indent are allowed — presumably more would be code
            // (indented); otherwise any amount of indent is fine.
            State::Retry(space_or_tab_min_max(
                tokenizer,
                0,
                if tokenizer.parse_state.options.constructs.code_indented {
                    TAB_SIZE - 1
                } else {
                    usize::MAX
                },
            ))
        } else {
            State::Retry(StateName::MdxExpressionFlowBefore)
        }
    } else {
        State::Nok
    }
}
/// After optional whitespace, before expression.
///
/// ```markdown
/// > | {Math.PI}
/// ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Expressions start with `{`.
        Some(b'{') => {
            tokenizer.concrete = true;
            tokenizer.attempt(State::Next(StateName::MdxExpressionFlowAfter), State::Nok);
            State::Retry(StateName::MdxExpressionStart)
        }
        _ => State::Nok,
    }
}
/// After expression.
///
/// ```markdown
/// > | {Math.PI}
/// ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // Eat trailing whitespace, if any, before ending the line.
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.attempt(State::Next(StateName::MdxExpressionFlowEnd), State::Nok);
        State::Retry(space_or_tab(tokenizer))
    } else {
        State::Retry(StateName::MdxExpressionFlowEnd)
    }
}
/// After expression, after optional whitespace.
///
/// ```markdown
/// > | {Math.PI}␠␊
/// ^
/// ```
pub fn end(tokenizer: &mut Tokenizer) -> State {
    // We want to allow tags directly after expressions.
    //
    // This case is useful:
    //
    // ```mdx
    // <a>{b}</a>
    // ```
    //
    // This case is not (very?) useful:
    //
    // ```mdx
    // {a}<b/>
    // ```
    //
    // …but it would be tougher than needed to disallow.
    //
    // To allow that, here we call the MDX JSX flow construct, and there we
    // call this one.
    //
    // It would introduce a cyclical interdependency if we test JSX and
    // expressions here.
    // Because the JSX extension already uses parts of this monorepo, we
    // instead test it there.
    //
    // Note: in the JS version of micromark, arbitrary extensions could be
    // loaded.
    // Here we know that only our own construct `mdx_expression_flow` can be
    // enabled.
    // if matches!(tokenizer.current, None | Some(b'\n')) {
    //     State::Ok
    // } else {
    //     State::Nok
    // }
    match tokenizer.current {
        // Eol/eof: the expression line is done.
        None | Some(b'\n') => {
            reset(tokenizer);
            State::Ok
        }
        // Tag.
        Some(b'<') if tokenizer.parse_state.options.constructs.mdx_jsx_flow => {
            // We can’t just say: fine.
            // Lines of blocks have to be parsed until an eol/eof.
            tokenizer.tokenize_state.token_1 = Name::MdxJsxFlowTag;
            tokenizer.attempt(
                State::Next(StateName::MdxJsxFlowAfter),
                State::Next(StateName::MdxJsxFlowNok),
            );
            State::Retry(StateName::MdxJsxStart)
        }
        // // An expression.
        // Some(b'{') if tokenizer.parse_state.options.constructs.mdx_expression_flow => {
        //     tokenizer.attempt(
        //         State::Next(StateName::MdxExpressionFlowAfter),
        //         State::Next(StateName::MdxExpressionFlowNok),
        //     );
        //     State::Retry(StateName::MdxExpressionFlowStart)
        // }
        // Anything else after a flow expression is not allowed.
        _ => {
            reset(tokenizer);
            State::Nok
        }
    }
}
/// Reset state.
/// Reset state: leave concrete parsing and drop the token override.
fn reset(tokenizer: &mut Tokenizer) {
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.concrete = false;
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/mdx_expression_text.rs | Rust | //! MDX expression (text) occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! MDX expression (text) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! mdx_expression_text ::= mdx_expression
//!
//! ; See the `partial_mdx_expression` construct for the BNF of that part.
//! ```
//!
//! See [`mdx_expression`][mdx_expression] for more info.
//!
//! ## Tokens
//!
//! * [`MdxTextExpression`][Name::MdxTextExpression]
//! * see [`mdx_expression`][mdx_expression] for more
//!
//! ## Recommendation
//!
//! See [`mdx_expression`][mdx_expression] for recommendations.
//!
//! ## References
//!
//! * [`syntax.js` in `micromark-extension-mdx-expression`](https://github.com/micromark/micromark-extension-mdx-expression/blob/main/packages/micromark-extension-mdx-expression/dev/lib/syntax.js)
//! * [`mdxjs.com`](https://mdxjs.com)
//!
//! [text]: crate::construct::text
//! [mdx_expression]: crate::construct::partial_mdx_expression
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Start of an MDX expression (text).
///
/// ```markdown
/// > | a {Math.PI} c
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Only when the construct is on and we’re at a `{`.
    if tokenizer.parse_state.options.constructs.mdx_expression_text
        && tokenizer.current == Some(b'{')
    {
        tokenizer.tokenize_state.token_1 = Name::MdxTextExpression;
        tokenizer.attempt(State::Next(StateName::MdxExpressionTextAfter), State::Nok);
        State::Retry(StateName::MdxExpressionStart)
    } else {
        State::Nok
    }
}
/// After expression.
///
/// ```markdown
/// > | a {Math.PI} c
/// ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // Drop the token override set in `start`.
    tokenizer.tokenize_state.token_1 = Name::Data;
    State::Ok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/mdx_jsx_flow.rs | Rust | //! MDX JSX (flow) occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! MDX JSX (flow) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! mdx_jsx_flow ::= mdx_jsx *space_or_tab [mdx_jsx *space_or_tab]
//!
//! ; See the `partial_mdx_jsx` construct for the BNF of that part.
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//! It is allowed to use multiple tags after each other, optionally with only
//! whitespace between them.
//!
//! See [`mdx_jsx`][mdx_jsx] for more info.
//!
//! ## Tokens
//!
//! * [`MdxJsxFlowTag`][Name::MdxJsxFlowTag]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//! * see [`mdx_jsx`][mdx_jsx] for more
//!
//! ## Recommendation
//!
//! See [`mdx_jsx`][mdx_jsx] for recommendations.
//!
//! ## References
//!
//! * [`jsx-flow.js` in `micromark-extension-mdx-jsx`](https://github.com/micromark/micromark-extension-mdx-jsx/blob/main/dev/lib/jsx-flow.js)
//! * [`mdxjs.com`](https://mdxjs.com)
//!
//! [flow]: crate::construct::flow
//! [mdx_jsx]: crate::construct::partial_mdx_jsx
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::constant::TAB_SIZE;
/// Start of MDX: JSX (flow).
///
/// ```markdown
/// > | <A />
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.parse_state.options.constructs.mdx_jsx_flow {
        // Mark subsequent events as a flow tag; turned off again in `reset`.
        tokenizer.tokenize_state.token_1 = Name::MdxJsxFlowTag;
        tokenizer.concrete = true;
        if matches!(tokenizer.current, Some(b'\t' | b' ')) {
            tokenizer.attempt(State::Next(StateName::MdxJsxFlowBefore), State::Nok);
            // NOTE(review): when indented code is on, at most `TAB_SIZE - 1`
            // columns of indent are allowed — presumably more would be code
            // (indented); otherwise any amount of indent is fine.
            State::Retry(space_or_tab_min_max(
                tokenizer,
                0,
                if tokenizer.parse_state.options.constructs.code_indented {
                    TAB_SIZE - 1
                } else {
                    usize::MAX
                },
            ))
        } else {
            State::Retry(StateName::MdxJsxFlowBefore)
        }
    } else {
        State::Nok
    }
}
/// After optional whitespace, before MDX JSX (flow).
///
/// ```markdown
/// > | <A />
/// ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Tags start with `<`.
        Some(b'<') => {
            tokenizer.attempt(
                State::Next(StateName::MdxJsxFlowAfter),
                State::Next(StateName::MdxJsxFlowNok),
            );
            State::Retry(StateName::MdxJsxStart)
        }
        _ => State::Retry(StateName::MdxJsxFlowNok),
    }
}
/// After an MDX JSX (flow) tag.
///
/// ```markdown
/// > | <A>
/// ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // Eat trailing whitespace, if any, before ending the line.
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.attempt(State::Next(StateName::MdxJsxFlowEnd), State::Nok);
        State::Retry(space_or_tab(tokenizer))
    } else {
        State::Retry(StateName::MdxJsxFlowEnd)
    }
}
/// After an MDX JSX (flow) tag, after optional whitespace.
///
/// ```markdown
/// > | <A> <B>
/// ^
/// ```
pub fn end(tokenizer: &mut Tokenizer) -> State {
    // We want to allow expressions directly after tags.
    // See <https://github.com/micromark/micromark-extension-mdx-expression/blob/d5d92b9/packages/micromark-extension-mdx-expression/dev/lib/syntax.js#L183>
    // for more info.
    //
    // Note: in the JS version of micromark, arbitrary extensions could be
    // loaded.
    // Here we know that only our own construct `mdx_expression_flow` can be
    // enabled.
    match tokenizer.current {
        // Eol/eof: the tag line is done.
        None | Some(b'\n') => {
            reset(tokenizer);
            State::Ok
        }
        // Another tag.
        Some(b'<') => {
            // We can’t just say: fine.
            // Lines of blocks have to be parsed until an eol/eof.
            tokenizer.attempt(
                State::Next(StateName::MdxJsxFlowAfter),
                State::Next(StateName::MdxJsxFlowNok),
            );
            State::Retry(StateName::MdxJsxStart)
        }
        // An expression.
        Some(b'{') if tokenizer.parse_state.options.constructs.mdx_expression_flow => {
            tokenizer.attempt(
                State::Next(StateName::MdxJsxFlowAfter),
                State::Next(StateName::MdxJsxFlowNok),
            );
            State::Retry(StateName::MdxExpressionFlowStart)
        }
        // Anything else after a flow tag is not allowed.
        _ => {
            reset(tokenizer);
            State::Nok
        }
    }
}
/// At something that wasn’t an MDX JSX (flow) tag.
///
/// ```markdown
/// > | <A> x
/// ^
/// ```
pub fn nok(tokenizer: &mut Tokenizer) -> State {
    // Clean up shared state before giving up.
    reset(tokenizer);
    State::Nok
}
/// Reset state.
/// Reset state: leave concrete parsing and drop the token override.
fn reset(tokenizer: &mut Tokenizer) {
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.concrete = false;
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/mdx_jsx_text.rs | Rust | //! MDX JSX (text) occurs in the [text][] content type.
//!
//! ## Grammar
//!
//! MDX JSX (text) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! mdx_jsx_text ::= mdx_jsx
//!
//! ; See the `partial_mdx_jsx` construct for the BNF of that part.
//! ```
//!
//! See [`mdx_jsx`][mdx_jsx] for more info.
//!
//! ## Tokens
//!
//! * [`MdxJsxTextTag`][Name::MdxJsxTextTag]
//! * see [`mdx_jsx`][mdx_jsx] for more
//!
//! ## Recommendation
//!
//! See [`mdx_jsx`][mdx_jsx] for recommendations.
//!
//! ## References
//!
//! * [`jsx-text.js` in `micromark-extension-mdx-jsx`](https://github.com/micromark/micromark-extension-mdx-jsx/blob/main/dev/lib/jsx-text.js)
//! * [`mdxjs.com`](https://mdxjs.com)
//!
//! [text]: crate::construct::text
//! [mdx_jsx]: crate::construct::partial_mdx_jsx
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Start of MDX: JSX (text).
///
/// ```markdown
/// > | a <B /> c
/// ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Only when the construct is on and we’re at a `<`.
    if tokenizer.parse_state.options.constructs.mdx_jsx_text && tokenizer.current == Some(b'<') {
        tokenizer.tokenize_state.token_1 = Name::MdxJsxTextTag;
        tokenizer.attempt(
            State::Next(StateName::MdxJsxTextAfter),
            State::Next(StateName::MdxJsxTextNok),
        );
        State::Retry(StateName::MdxJsxStart)
    } else {
        State::Nok
    }
}
/// After an MDX JSX (text) tag.
///
/// ```markdown
/// > | a <b> c
/// ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // Drop the token override set in `start`.
    tokenizer.tokenize_state.token_1 = Name::Data;
    State::Ok
}
/// At something that wasn’t an MDX JSX (text) tag.
///
/// ```markdown
/// > | a < b
/// ^
/// ```
pub fn nok(tokenizer: &mut Tokenizer) -> State {
    // Drop the token override set in `start` before giving up.
    tokenizer.tokenize_state.token_1 = Name::Data;
    State::Nok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/mod.rs | Rust | //! Constructs found in markdown.
//!
//! Constructs are grouped by content type.
//! Which content type is allowed somewhere, prescribes which constructs are
//! allowed there.
//!
//! ## Content type
//!
//! The following content types are found in markdown:
//!
//! * [document][]
//! * [flow][]
//! * [string][]
//! * [text][]
//!
//! Content types also have a *rest* thing: after all things are parsed,
//! there’s something left.
//! In document, that is [flow][].
//! In flow, that is [content][].
//! In string and text, that is [data][partial_data].
//!
//! ## Construct
//!
//! There are several *things* found when parsing markdown, such as, say, a
//! thematic break.
//! These things are called constructs here.
//!
//! Sometimes, there are several constructs that result in an equivalent thing.
//! For example, [code (fenced)][raw_flow] and
//! [code (indented)][code_indented] are considered different constructs.
//! Sometimes, constructs on their own don’t result in anything.
//! For example, a `*` is parsed as an attention sequence, but later when we
//! didn’t find another sequence, it’s turned back into plain data.
//!
//! The following constructs are found in markdown (`CommonMark`):
//!
//! * [attention][] (strong, emphasis, extension: GFM strikethrough)
//! * [autolink][]
//! * [blank line][blank_line]
//! * [block quote][block_quote]
//! * [character escape][character_escape]
//! * [character reference][character_reference]
//! * [code (indented)][code_indented]
//! * [content][]
//! * [definition][]
//! * [hard break (escape)][hard_break_escape]
//! * [heading (atx)][heading_atx]
//! * [heading (setext)][heading_setext]
//! * [html (flow)][html_flow]
//! * [html (text)][html_text]
//! * [label end][label_end]
//! * [label start (image)][label_start_image]
//! * [label start (link)][label_start_link]
//! * [list item][list_item]
//! * [paragraph][]
//! * [raw (flow)][raw_flow] (code (fenced), extensions: math (flow))
//! * [raw (text)][raw_text] (code (text), extensions: math (text))
//! * [thematic break][thematic_break]
//!
//! > 👉 **Note**: for performance reasons, hard break (trailing) is formed by
//! > [whitespace][partial_whitespace].
//!
//! The following constructs are extensions found in markdown:
//!
//! * [frontmatter][]
//! * [gfm autolink literal][gfm_autolink_literal]
//! * [gfm footnote definition][gfm_footnote_definition]
//! * [gfm label start footnote][gfm_label_start_footnote]
//! * [gfm table][gfm_table]
//! * [gfm task list item check][gfm_task_list_item_check]
//! * [mdx esm][mdx_esm]
//! * [mdx expression (flow)][mdx_expression_flow]
//! * [mdx expression (text)][mdx_expression_text]
//! * [mdx jsx (flow)][mdx_jsx_flow]
//! * [mdx jsx (text)][mdx_jsx_text]
//!
//! There are also several small subroutines typically used in different places:
//!
//! * [bom][partial_bom]
//! * [data][partial_data]
//! * [destination][partial_destination]
//! * [label][partial_label]
//! * [mdx expression][partial_mdx_expression]
//! * [mdx jsx][partial_mdx_jsx]
//! * [non lazy continuation][partial_non_lazy_continuation]
//! * [space or tab][partial_space_or_tab]
//! * [space or tab, eol][partial_space_or_tab_eol]
//! * [title][partial_title]
//! * [whitespace][partial_whitespace]
//!
//! ## Grammar
//!
//! Each construct maintained here is explained with a BNF diagram.
//!
//! Such diagrams are considered to be *non-normative*.
//! That is to say, they form illustrative, imperfect, but useful, examples.
//! The code, in Rust, is considered to be normative.
//!
//! The actual syntax of markdown can be described in Backus–Naur form (BNF) as:
//!
//! ```bnf
//! markdown = .*
//! ```
//!
//! No, that’s [not a typo][bnf]: markdown has no syntax errors; anything
//! thrown at it renders *something*.
//!
//! These diagrams contain references to character group as defined by Rust on
//! for example [char][], but also often on [u8][], which is what `micromark-rs`
//! typically works on.
//! So, for example, `ascii_punctuation` refers to
//! [`u8::is_ascii_punctuation`][u8::is_ascii_punctuation].
//!
//! For clarity, the productions used throughout are:
//!
//! ```bnf
//! ; Rust / ASCII groups:
//! ; 'a'..='z'
//! ascii_lowercase ::= 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z'
//! ; 'A'..='Z'
//! ascii_uppercase ::= 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z'
//! ; 'A'..='Z', 'a'..='z'
//! ascii_alphabetic ::= ascii_lowercase | ascii_uppercase
//! ; '0'..='9'
//! ascii_digit ::= '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9'
//! ; '0'..='9', 'A'..='F', 'a'..='f'
//! ascii_hexdigit ::= ascii_digit | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
//! ; '0'..='9', 'A'..='Z', 'a'..='z'
//! ascii_alphanumeric ::= ascii_digit | ascii_alphabetic
//! ; '!'..='/', ':'..='@', '['..='`', '{'..='~'
//! ascii_punctuation ::= '!' | '"' | '#' | '$' | '%' | '&' | '\'' | '(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | ':' | ';' | '<' | '=' | '>' | '?' | '@' | '[' | '\' | ']' | '^' | '_' | '`' | '{' | '|' | '}' | '~'
//! ; 0x00..=0x1F, 0x7F
//! ascii_control ::= 0x00 | 0x01 | 0x02 | 0x03 | 0x04 | 0x05 | 0x06 | 0x07 | 0x08 | 0x09 | 0x0A | 0x0B | 0x0C | 0x0D | 0x0E | 0x0F | 0x10 | 0x11 | 0x12 | 0x13 | 0x14 | 0x15 | 0x16 | 0x17 | 0x18 | 0x19 | 0x1A | 0x1B | 0x1C | 0x1D | 0x1E | 0x1F | 0x7F
//!
//! ; Markdown groups:
//! ; Any byte (u8)
//! byte ::= 0x00..=0xFFFF
//! space_or_tab ::= '\t' | ' '
//! eol ::= '\n' | '\r' | '\r\n'
//! line ::= byte - eol
//! text ::= line - space_or_tab
//! space_or_tab_eol ::= 1*space_or_tab | *space_or_tab eol *space_or_tab
//!
//! ; Unicode groups:
//! unicode_whitespace ::= ? ; See `char::is_whitespace`.
//! unicode_punctuation ::= ? ; See `src/unicode.rs`.
//! ```
//!
//! [bnf]: http://trevorjim.com/a-specification-for-markdown/
pub mod attention;
pub mod autolink;
pub mod blank_line;
pub mod block_quote;
pub mod character_escape;
pub mod character_reference;
pub mod code_indented;
pub mod content;
pub mod definition;
pub mod document;
pub mod flow;
pub mod frontmatter;
pub mod gfm_autolink_literal;
pub mod gfm_footnote_definition;
pub mod gfm_label_start_footnote;
pub mod gfm_table;
pub mod gfm_task_list_item_check;
pub mod hard_break_escape;
pub mod heading_atx;
pub mod heading_setext;
pub mod html_flow;
pub mod html_text;
pub mod label_end;
pub mod label_start_image;
pub mod label_start_link;
pub mod list_item;
pub mod mdx_esm;
pub mod mdx_expression_flow;
pub mod mdx_expression_text;
pub mod mdx_jsx_flow;
pub mod mdx_jsx_text;
pub mod paragraph;
pub mod partial_bom;
pub mod partial_data;
pub mod partial_destination;
pub mod partial_label;
pub mod partial_mdx_expression;
pub mod partial_mdx_jsx;
pub mod partial_non_lazy_continuation;
pub mod partial_space_or_tab;
pub mod partial_space_or_tab_eol;
pub mod partial_title;
pub mod partial_whitespace;
pub mod raw_flow;
pub mod raw_text;
pub mod string;
pub mod text;
pub mod thematic_break;
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/paragraph.rs | Rust | //! Paragraph occurs in the [content][] content type.
//!
//! ## Grammar
//!
//! Paragraph forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: lines cannot start other flow constructs.
//! ; Restriction: lines cannot be blank.
//! paragraph ::= 1*line *(eol 1*line)
//! ```
//!
//! This construct must be followed by an eol (line ending) or eof (end of
//! file), like flow constructs.
//!
//! Paragraphs can contain line endings and whitespace, but they are not
//! allowed to contain blank lines, or to be blank themselves.
//!
//! The paragraph is interpreted as the [text][] content type.
//! That means that [autolinks][autolink], [code (text)][raw_text], etc are
//! allowed.
//!
//! ## HTML
//!
//! Paragraphs in markdown relate to the `<p>` element in HTML.
//! See [*§ 4.4.1 The `p` element* in the HTML spec][html] for more info.
//!
//! ## Tokens
//!
//! * [`Paragraph`][Name::Paragraph]
//!
//! ## References
//!
//! * [`content.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/content.js)
//! * [*§ 4.8 Paragraphs* in `CommonMark`](https://spec.commonmark.org/0.31/#paragraphs)
//!
//! [content]: crate::construct::content
//! [text]: crate::construct::text
//! [autolink]: crate::construct::autolink
//! [raw_text]: crate::construct::raw_text
//! [html]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-p-element
use crate::event::{Content, Link, Name};
use crate::state::{Name as StateName, State};
use crate::subtokenize::link;
use crate::tokenizer::Tokenizer;
/// Paragraph start.
///
/// Opens the `Paragraph` event and defers to the per-line state; the
/// paragraph itself is closed in `inside` when eof is reached.
///
/// ```markdown
/// > | abc
///     ^
///   | def
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Flow guarantees a byte is present when a paragraph starts.
    debug_assert!(tokenizer.current.is_some());
    tokenizer.enter(Name::Paragraph);
    State::Retry(StateName::ParagraphLineStart)
}
/// Start of a line in a paragraph.
///
/// Each line becomes its own `Data` chunk of text content; chunks after the
/// first are connected to the previous one so subtokenization sees a single
/// stream.
///
/// ```markdown
/// > | abc
///     ^
/// > | def
///     ^
/// ```
pub fn line_start(tokenizer: &mut Tokenizer) -> State {
    debug_assert!(tokenizer.current.is_some());
    let chunk = Link {
        previous: None,
        next: None,
        content: Content::Text,
    };
    tokenizer.enter_link(Name::Data, chunk);
    if tokenizer.tokenize_state.connect {
        // Not the first line: link this chunk to the previous one.
        let last = tokenizer.events.len() - 1;
        link(&mut tokenizer.events, last);
    } else {
        // First line: remember to connect from the next line on.
        tokenizer.tokenize_state.connect = true;
    }
    State::Retry(StateName::ParagraphInside)
}
/// In paragraph.
///
/// Consumes bytes until eof (close the paragraph) or an eol (start a new
/// line chunk).
///
/// ```markdown
/// > | abc
///     ^^^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current.is_none() {
        // Eof: the paragraph is done; reset the connect flag for reuse.
        tokenizer.tokenize_state.connect = false;
        tokenizer.exit(Name::Data);
        tokenizer.exit(Name::Paragraph);
        State::Ok
    } else if tokenizer.current == Some(b'\n') {
        // Eol: the line ending belongs to this chunk; continue on the next line.
        tokenizer.consume();
        tokenizer.exit(Name::Data);
        State::Next(StateName::ParagraphLineStart)
    } else {
        // Anything else is paragraph data.
        tokenizer.consume();
        State::Next(StateName::ParagraphInside)
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
// file: src/construct/partial_bom.rs
//! Byte order mark occurs at the start of the document.
//!
//! ## Grammar
//!
//! Byte order mark forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! byte_order_mark ::= 0xEF 0xBB 0xBF
//! ```
//!
//! ## Recommendation
//!
//! Don’t use BOMs.
//!
//! ## Tokens
//!
//! * [`ByteOrderMark`][Name::ByteOrderMark]
//!
//! ## References
//!
//! * [`micromark/lib/preprocess.js` in `micromark`](https://github.com/micromark/micromark/blob/ed23453/packages/micromark/dev/lib/preprocess.js#L54-L60)
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Bytes of a BOM: the UTF-8 encoding of U+FEFF.
const BOM: [u8; 3] = [0xEF, 0xBB, 0xBF];
/// Before BOM.
///
/// Succeeds only when the current byte is the first BOM byte (`0xEF`);
/// the remaining bytes are checked in `inside`.
///
/// ```text
/// > | 0xEF 0xBB 0xBF
///     ^^^^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(byte) if byte == BOM[0] => {
            tokenizer.enter(Name::ByteOrderMark);
            State::Retry(StateName::BomInside)
        }
        _ => State::Nok,
    }
}
/// In BOM.
///
/// Walks through the BOM bytes one at a time, tracking progress in
/// `tokenize_state.size`; bails out (and resets the counter) on a mismatch.
///
/// ```text
/// > | 0xEF 0xBB 0xBF
///     ^^^^ ^^^^ ^^^^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    let expected = BOM[tokenizer.tokenize_state.size];
    if tokenizer.current != Some(expected) {
        // Mismatch: not a BOM after all.
        tokenizer.tokenize_state.size = 0;
        return State::Nok;
    }
    tokenizer.tokenize_state.size += 1;
    tokenizer.consume();
    if tokenizer.tokenize_state.size < BOM.len() {
        State::Next(StateName::BomInside)
    } else {
        // All three bytes matched.
        tokenizer.exit(Name::ByteOrderMark);
        tokenizer.tokenize_state.size = 0;
        State::Ok
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
// file: src/construct/partial_data.rs
//! Data occurs in the [string][] and [text][] content types.
//!
//! It can include anything (except for line endings) and stops at certain
//! characters.
//!
//! [string]: crate::construct::string
//! [text]: crate::construct::text
use crate::event::{Kind, Name};
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
use alloc::vec;
/// At beginning of data.
///
/// A leading marker byte is deliberately swallowed into the data here, so
/// constructs that failed on it do not retry forever.
///
/// ```markdown
/// > | abc
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Make sure to eat the first marker byte.
        Some(byte) if tokenizer.tokenize_state.markers.contains(&byte) => {
            tokenizer.enter(Name::Data);
            tokenizer.consume();
            State::Next(StateName::DataInside)
        }
        _ => State::Retry(StateName::DataAtBreak),
    }
}
/// Before something.
///
/// Stops (`Ok`) at eof or at a marker byte; otherwise tokenizes either a
/// line ending or a run of data.
///
/// ```markdown
/// > | abc
///     ^
/// ```
pub fn at_break(tokenizer: &mut Tokenizer) -> State {
    // Done at eof or when the byte is one of the caller's stop markers.
    let byte = match tokenizer.current {
        Some(byte) if !tokenizer.tokenize_state.markers.contains(&byte) => byte,
        _ => return State::Ok,
    };
    if byte == b'\n' {
        // Line endings are their own events, not data.
        tokenizer.enter(Name::LineEnding);
        tokenizer.consume();
        tokenizer.exit(Name::LineEnding);
        State::Next(StateName::DataAtBreak)
    } else {
        tokenizer.enter(Name::Data);
        State::Retry(StateName::DataInside)
    }
}
/// In data.
///
/// Consumes everything up to an eol, a marker, or eof.
///
/// ```markdown
/// > | abc
///     ^^^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    let more = matches!(
        tokenizer.current,
        Some(byte) if byte != b'\n' && !tokenizer.tokenize_state.markers.contains(&byte)
    );
    if more {
        tokenizer.consume();
        State::Next(StateName::DataInside)
    } else {
        tokenizer.exit(Name::Data);
        State::Retry(StateName::DataAtBreak)
    }
}
/// Merge adjacent data events.
///
/// Tokenizing produces many small back-to-back `Data` events; this resolver
/// collapses each run into a single enter/exit pair so later passes see one
/// contiguous span.
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    let mut index = 0;
    // Loop through events and merge adjacent data events.
    while index < tokenizer.events.len() {
        let event = &tokenizer.events[index];
        if event.kind == Kind::Enter && event.name == Name::Data {
            // Move to exit.
            index += 1;
            let mut exit_index = index;
            // Find the farthest `data` event exit event.
            // Events come in enter/exit pairs, so if the event right after an
            // exit is another `Data` event (its enter), both can be skipped.
            while exit_index + 1 < tokenizer.events.len()
                && tokenizer.events[exit_index + 1].name == Name::Data
            {
                exit_index += 2;
            }
            if exit_index > index {
                // Queue removal of everything between the first exit and the
                // last one (intermediate exits/enters).
                tokenizer.map.add(index, exit_index - index, vec![]);
                // Change positional info.
                // The surviving exit must end where the last exit ended.
                tokenizer.events[index].point = tokenizer.events[exit_index].point.clone();
                // Move to the end.
                index = exit_index;
            }
        }
        index += 1;
    }
    // Apply the queued removals.
    tokenizer.map.consume(&mut tokenizer.events);
    None
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
// file: src/construct/partial_destination.rs
//! Destination occurs in [definition][] and [label end][label_end].
//!
//! ## Grammar
//!
//! Destination forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! destination ::= destination_enclosed | destination_raw
//!
//! destination_enclosed ::= '<' *(destination_enclosed_byte | destination_enclosed_escape) '>'
//! destination_enclosed_byte ::= line - '<' - '\\' - '>'
//! destination_enclosed_escape ::= '\\' ['<' | '\\' | '>']
//!
//! destination_raw ::= 1*(destination_raw_byte | destination_raw_escape)
//! ; Restriction: unbalanced `)` characters are not allowed.
//! destination_raw_byte ::= text - '\\' - ascii_control
//! destination_raw_escape ::= '\\' ['(' | ')' | '\\']
//! ```
//!
//! Balanced parens allowed in raw destinations.
//! They are counted with a counter that starts at `0`, and is incremented
//! every time `(` occurs and decremented every time `)` occurs.
//! If `)` is found when the counter is `0`, the destination closes immediately
//! before it.
//! Escaped parens do not count in balancing.
//!
//! The destination is interpreted as the [string][] content type.
//! That means that [character escapes][character_escape] and
//! [character references][character_reference] are allowed.
//!
//! The grammar for enclosed destinations (`<x>`) prohibits the use of `<`,
//! `>`, and line endings to form URLs.
//! The angle brackets can be encoded as a character reference, character
//! escape, or percent encoding:
//!
//! * `<` as `<`, `\<`, or `%3c`
//! * `>` as `>`, `\>`, or `%3e`
//!
//! The grammar for raw destinations (`x`) prohibits space (` `) and all
//! [ASCII control][u8::is_ascii_control] characters, which thus must be
//! encoded.
//! Unbalanced parens can be encoded as a character reference, character escape,
//! or percent encoding:
//!
//! * `(` as `(`, `\(`, or `%28`
//! * `)` as `)`, `\)`, or `%29`
//!
//! There are several cases where incorrect encoding of URLs would, in other
//! languages, result in a parse error.
//! In markdown, there are no errors, and URLs are normalized.
//! In addition, unicode characters are percent encoded
//! ([`sanitize_uri`][sanitize_uri]).
//! For example:
//!
//! ```markdown
//! [x]
//!
//! [x]: <https://a👍b%>
//! ```
//!
//! Yields:
//!
//! ```html
//! <p><a href="https://a%F0%9F%91%8Db%25">x</a></p>
//! ```
//!
//! ## Recommendation
//!
//! It is recommended to use the enclosed variant of destinations, as it allows
//! the most characters, including arbitrary parens, in URLs.
//!
//! ## References
//!
//! * [`micromark-factory-destination/index.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-factory-destination/dev/index.js)
//!
//! [definition]: crate::construct::definition
//! [string]: crate::construct::string
//! [character_escape]: crate::construct::character_escape
//! [character_reference]: crate::construct::character_reference
//! [label_end]: crate::construct::label_end
//! [sanitize_uri]: crate::util::sanitize_uri
use crate::event::{Content, Link, Name};
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Start of destination.
///
/// Dispatches to the enclosed (`<…>`) or raw form.
/// `token_1`…`token_5` are generic token slots filled in by the calling
/// construct (definition or label end), so the same factory serves both.
///
/// ```markdown
/// > | <aa>
///     ^
/// > | aa
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'<') => {
            // Enclosed: `token_1` wraps the whole destination, `token_2` the
            // enclosed variant, `token_3` a single `<`/`>` marker.
            tokenizer.enter(tokenizer.tokenize_state.token_1.clone());
            tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
            tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
            tokenizer.consume();
            tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
            State::Next(StateName::DestinationEnclosedBefore)
        }
        // ASCII control, space, closing paren, but *not* `\0`.
        None | Some(0x01..=0x1F | b' ' | b')' | 0x7F) => State::Nok,
        Some(_) => {
            // Raw: `token_4` is the raw variant, `token_5` its string content.
            tokenizer.enter(tokenizer.tokenize_state.token_1.clone());
            tokenizer.enter(tokenizer.tokenize_state.token_4.clone());
            tokenizer.enter(tokenizer.tokenize_state.token_5.clone());
            // The content is parsed as the `string` content type (escapes and
            // character references allowed).
            tokenizer.enter_link(
                Name::Data,
                Link {
                    previous: None,
                    next: None,
                    content: Content::String,
                },
            );
            State::Retry(StateName::DestinationRaw)
        }
    }
}
/// After `<`, at an enclosed destination.
///
/// `<>` (immediately closed) is a valid, empty destination; anything else
/// starts the enclosed content.
///
/// ```markdown
/// > | <aa>
///      ^
/// ```
pub fn enclosed_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'>') => {
            // Empty destination: consume the closing marker and finish.
            tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
            tokenizer.consume();
            tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
            tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
            tokenizer.exit(tokenizer.tokenize_state.token_1.clone());
            State::Ok
        }
        _ => {
            // Open the string content of the enclosed destination.
            tokenizer.enter(tokenizer.tokenize_state.token_5.clone());
            tokenizer.enter_link(
                Name::Data,
                Link {
                    previous: None,
                    next: None,
                    content: Content::String,
                },
            );
            State::Retry(StateName::DestinationEnclosed)
        }
    }
}
/// In enclosed destination.
///
/// Eof, eol, and a literal `<` are disallowed; `>` closes; `\` starts an
/// escape; everything else is content.
///
/// ```markdown
/// > | <aa>
///      ^
/// ```
pub fn enclosed(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n' | b'<')) {
        State::Nok
    } else if tokenizer.current == Some(b'>') {
        tokenizer.exit(Name::Data);
        tokenizer.exit(tokenizer.tokenize_state.token_5.clone());
        State::Retry(StateName::DestinationEnclosedBefore)
    } else if tokenizer.current == Some(b'\\') {
        tokenizer.consume();
        State::Next(StateName::DestinationEnclosedEscape)
    } else {
        tokenizer.consume();
        State::Next(StateName::DestinationEnclosed)
    }
}
/// After `\`, at a special character.
///
/// Only `<`, `>`, and `\` can be escaped here; any other byte means the
/// backslash was literal, so reprocess the byte as plain content.
///
/// ```markdown
/// > | <a\*a>
///        ^
/// ```
pub fn enclosed_escape(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'<' | b'>' | b'\\')) {
        tokenizer.consume();
        State::Next(StateName::DestinationEnclosed)
    } else {
        State::Retry(StateName::DestinationEnclosed)
    }
}
/// In raw destination.
///
/// `tokenize_state.size` tracks the current paren-nesting depth; the branch
/// order below is load-bearing (see the per-branch notes).
///
/// ```markdown
/// > | aa
///     ^
/// ```
pub fn raw(tokenizer: &mut Tokenizer) -> State {
    // Done: eof, whitespace, or a `)` while no parens are open — the
    // destination closes immediately before it.
    if tokenizer.tokenize_state.size == 0
        && matches!(tokenizer.current, None | Some(b'\t' | b'\n' | b' ' | b')'))
    {
        tokenizer.exit(Name::Data);
        tokenizer.exit(tokenizer.tokenize_state.token_5.clone());
        tokenizer.exit(tokenizer.tokenize_state.token_4.clone());
        tokenizer.exit(tokenizer.tokenize_state.token_1.clone());
        tokenizer.tokenize_state.size = 0;
        State::Ok
    // `(` opens a paren, but only below the maximum depth in `size_b`
    // (presumably configured by the calling construct — confirm).
    } else if tokenizer.tokenize_state.size < tokenizer.tokenize_state.size_b
        && tokenizer.current == Some(b'(')
    {
        tokenizer.consume();
        tokenizer.tokenize_state.size += 1;
        State::Next(StateName::DestinationRaw)
    // `)` closes a paren; depth is necessarily > 0 here because the `== 0`
    // case was handled by the first branch, so this cannot underflow.
    } else if tokenizer.current == Some(b')') {
        tokenizer.consume();
        tokenizer.tokenize_state.size -= 1;
        State::Next(StateName::DestinationRaw)
    }
    // ASCII control (but *not* `\0`) and space and `(`.
    // Note: `(` only reaches this branch when the depth limit above was hit,
    // which makes an over-deep destination fail outright.
    else if matches!(
        tokenizer.current,
        None | Some(0x01..=0x1F | b' ' | b'(' | 0x7F)
    ) {
        tokenizer.tokenize_state.size = 0;
        State::Nok
    } else if tokenizer.current == Some(b'\\') {
        tokenizer.consume();
        State::Next(StateName::DestinationRawEscape)
    } else {
        tokenizer.consume();
        State::Next(StateName::DestinationRaw)
    }
}
/// After `\`, at special character.
///
/// Only `(`, `)`, and `\` can be escaped in a raw destination; escaped parens
/// do not take part in balancing. Other bytes fall back to plain content.
///
/// ```markdown
/// > | a\*a
///       ^
/// ```
pub fn raw_escape(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'(' | b')' | b'\\')) {
        tokenizer.consume();
        State::Next(StateName::DestinationRaw)
    } else {
        State::Retry(StateName::DestinationRaw)
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
// file: src/construct/partial_label.rs
//! Label occurs in [definition][] and [label end][label_end].
//!
//! ## Grammar
//!
//! Label forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: maximum `999` codes allowed between brackets.
//! ; Restriction: no blank lines.
//! ; Restriction: at least 1 `text` byte must exist.
//! label ::= '[' *(label_byte | label_escape) ']'
//! label_byte ::= code - '[' - '\\' - ']'
//! label_escape ::= '\\' ['[' | '\\' | ']']
//! ```
//!
//! The maximum allowed size of the label, without the brackets, is `999`
//! (inclusive), which is defined in
//! [`LINK_REFERENCE_SIZE_MAX`][].
//!
//! Labels can contain line endings and whitespace, but they are not allowed to
//! contain blank lines, and they must not be blank themselves.
//!
//! The label is interpreted as the [string][] content type.
//! That means that [character escapes][character_escape] and
//! [character references][character_reference] are allowed.
//!
//! > 👉 **Note**: this label relates to, but is not, the initial “label” of
//! > what is know as a reference in markdown:
//! >
//! > | Kind | Link | Image |
//! > | --------- | -------- | --------- |
//! > | Shortcut | `[x]` | `![x]` |
//! > | Collapsed | `[x][]` | `![x][]` |
//! > | Full | `[x][y]` | `![x][y]` |
//! >
//! > The 6 above things are references, in the three kinds they come in, as
//! > links and images.
//! > The label that this module focusses on is only the thing that contains
//! > `y`.
//! >
//! > The thing that contains `x` is not a single thing when parsing markdown,
//! > but instead constists of an opening
//! > ([label start (image)][label_start_image] or
//! > [label start (link)][label_start_link]) and a closing
//! > ([label end][label_end]), so as to allow further phrasing such as
//! > [code (text)][raw_text] or [attention][].
//!
//! ## References
//!
//! * [`micromark-factory-label/index.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-factory-label/dev/index.js)
//!
//! [definition]: crate::construct::definition
//! [string]: crate::construct::string
//! [attention]: crate::construct::attention
//! [character_escape]: crate::construct::character_escape
//! [character_reference]: crate::construct::character_reference
//! [label_start_image]: crate::construct::label_start_image
//! [label_start_link]: crate::construct::label_start_link
//! [label_end]: crate::construct::label_end
//! [raw_text]: crate::construct::raw_text
//! [link_reference_size_max]: crate::util::constant::LINK_REFERENCE_SIZE_MAX
use crate::construct::partial_space_or_tab_eol::{space_or_tab_eol_with_options, Options};
use crate::event::{Content, Link, Name};
use crate::state::{Name as StateName, State};
use crate::subtokenize::link;
use crate::tokenizer::Tokenizer;
use crate::util::constant::LINK_REFERENCE_SIZE_MAX;
/// Start of label.
///
/// `token_1` wraps the whole label, `token_2` a single `[`/`]` marker, and
/// `token_3` the text between the brackets; the calling construct decides
/// what those tokens are.
///
/// ```markdown
/// > | [a]
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    debug_assert_eq!(tokenizer.current, Some(b'['), "expected `[`");
    tokenizer.enter(tokenizer.tokenize_state.token_1.clone());
    tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
    tokenizer.consume();
    tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
    tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
    State::Next(StateName::LabelAtBreak)
}
/// In label, at something, before something else.
///
/// Tracks three pieces of shared state: `size` (bytes seen, capped at
/// `LINK_REFERENCE_SIZE_MAX`), `seen` (whether any non-whitespace content
/// occurred), and `connect` (whether the next `Data` chunk must be linked to
/// the previous one).
///
/// ```markdown
/// > | [a]
///      ^
/// ```
pub fn at_break(tokenizer: &mut Tokenizer) -> State {
    // Fail when the label is too long, at eof, at a nested `[`, or at `]`
    // while no non-whitespace content was seen (labels must not be blank).
    if tokenizer.tokenize_state.size > LINK_REFERENCE_SIZE_MAX
        || matches!(tokenizer.current, None | Some(b'['))
        || (matches!(tokenizer.current, Some(b']')) && !tokenizer.tokenize_state.seen)
    {
        State::Retry(StateName::LabelNok)
    } else {
        match tokenizer.current {
            Some(b'\n') => {
                // Try to take an eol plus surrounding whitespace; if that
                // attempt fails (per module docs, e.g. on a blank line —
                // confirm in `space_or_tab_eol`), the label fails too.
                tokenizer.attempt(
                    State::Next(StateName::LabelEolAfter),
                    State::Next(StateName::LabelNok),
                );
                State::Retry(space_or_tab_eol_with_options(
                    tokenizer,
                    Options {
                        content: Some(Content::String),
                        connect: tokenizer.tokenize_state.connect,
                    },
                ))
            }
            Some(b']') => {
                // Done: close the text, consume the `]` marker, close the
                // label, and reset the shared state for the next use.
                tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
                tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
                tokenizer.consume();
                tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
                tokenizer.exit(tokenizer.tokenize_state.token_1.clone());
                tokenizer.tokenize_state.connect = false;
                tokenizer.tokenize_state.seen = false;
                tokenizer.tokenize_state.size = 0;
                State::Ok
            }
            _ => {
                // Text: open a `Data` chunk (string content), linking it to
                // the previous chunk when one exists.
                tokenizer.enter_link(
                    Name::Data,
                    Link {
                        previous: None,
                        next: None,
                        content: Content::String,
                    },
                );
                if tokenizer.tokenize_state.connect {
                    let index = tokenizer.events.len() - 1;
                    link(&mut tokenizer.events, index);
                } else {
                    tokenizer.tokenize_state.connect = true;
                }
                State::Retry(StateName::LabelInside)
            }
        }
    }
}
/// In label, after whitespace.
///
/// The successful whitespace attempt produced events, so subsequent `Data`
/// chunks must be connected to the earlier content.
///
/// ```markdown
///   | [a␊
/// > | b]
///     ^
/// ```
pub fn eol_after(tokenizer: &mut Tokenizer) -> State {
    tokenizer.tokenize_state.connect = true;
    State::Retry(StateName::LabelAtBreak)
}
/// In label, on something disallowed.
///
/// Resets the shared tokenize state before failing so the next construct
/// starts clean.
///
/// ```markdown
/// > | []
///      ^
/// ```
pub fn nok(tokenizer: &mut Tokenizer) -> State {
    tokenizer.tokenize_state.connect = false;
    tokenizer.tokenize_state.seen = false;
    tokenizer.tokenize_state.size = 0;
    State::Nok
}
/// In label, in text.
///
/// Consumes label bytes while tracking the size limit, whether real (non
/// whitespace) content was seen, and whether an escape sequence starts.
///
/// ```markdown
/// > | [a]
///      ^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    // Eof, eol, `[`, and `]` all end the data; `at_break` decides what next.
    let byte = match tokenizer.current {
        Some(byte) if !matches!(byte, b'\n' | b'[' | b']') => byte,
        _ => {
            tokenizer.exit(Name::Data);
            return State::Retry(StateName::LabelAtBreak);
        }
    };
    // Over the size limit: stop the data here and let `at_break` fail it.
    if tokenizer.tokenize_state.size > LINK_REFERENCE_SIZE_MAX {
        tokenizer.exit(Name::Data);
        return State::Retry(StateName::LabelAtBreak);
    }
    tokenizer.consume();
    tokenizer.tokenize_state.size += 1;
    // Anything other than a tab or space counts as real content.
    if !matches!(byte, b'\t' | b' ') {
        tokenizer.tokenize_state.seen = true;
    }
    State::Next(if byte == b'\\' {
        StateName::LabelEscape
    } else {
        StateName::LabelInside
    })
}
/// After `\`, at a special character.
///
/// Only `[`, `\`, and `]` can be escaped inside a label; other bytes fall
/// back to regular label text (the backslash was literal).
///
/// ```markdown
/// > | [a\*a]
///        ^
/// ```
pub fn escape(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'[' | b'\\' | b']')) {
        tokenizer.consume();
        tokenizer.tokenize_state.size += 1;
        State::Next(StateName::LabelInside)
    } else {
        State::Retry(StateName::LabelInside)
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
// file: src/construct/partial_mdx_expression.rs
//! MDX expression occurs in [MDX expression (flow)][mdx_expression_flow] and
//! [MDX expression (text)][mdx_expression_text].
//!
//! ## Grammar
//!
//! MDX expression forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! mdx_expression ::= '{' *(expression_text | expression) '}'
//! expression_text ::= char - '{' - '}'
//! ```
//!
//! ## Tokens
//!
//! * [`LineEnding`][Name::LineEnding]
//! * [`MdxExpressionMarker`][Name::MdxExpressionMarker]
//! * [`MdxExpressionData`][Name::MdxExpressionData]
//!
//! ## Recommendation
//!
//! When authoring markdown with JavaScript, keep in mind that MDX is a
//! whitespace sensitive and line-based language, while JavaScript is
//! insensitive to whitespace.
//! This affects how markdown and JavaScript interleave with eachother in MDX.
//! For more info on how it works, see [§ Interleaving][interleaving] on the
//! MDX site.
//!
//! ## Errors
//!
//! ### Unexpected end of file in expression, expected a corresponding closing brace for `{`
//!
//! This error occurs if a `{` was seen without a `}`.
//! For example:
//!
//! ```markdown
//! a { b
//! ```
//!
//! ### Unexpected lazy line in expression in container, expected line to be prefixed with `>` when in a block quote, whitespace when in a list, etc
//!
//! This error occurs if a a lazy line (of a container) is found in an expression.
//! For example:
//!
//! ```markdown
//! > {a +
//! b}
//! ```
//!
//! ## References
//!
//! * [`micromark-factory-mdx-expression`](https://github.com/micromark/micromark-extension-mdx-expression/blob/main/packages/micromark-factory-mdx-expression/dev/index.js)
//! * [`mdxjs.com`](https://mdxjs.com)
//!
//! [mdx_expression_flow]: crate::construct::mdx_expression_flow
//! [mdx_expression_text]: crate::construct::mdx_expression_text
//! [interleaving]: https://mdxjs.com/docs/what-is-mdx/#interleaving
use crate::event::Name;
use crate::message;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::mdx_collect::collect;
use crate::{MdxExpressionKind, MdxExpressionParse, MdxSignal};
use alloc::boxed::Box;
/// Start of an MDX expression.
///
/// ```markdown
/// > | a {Math.PI} c
///       ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    debug_assert_eq!(tokenizer.current, Some(b'{'));
    // `token_1` is the concrete expression kind (flow/text/attribute…), set
    // by the calling construct.
    tokenizer.enter(tokenizer.tokenize_state.token_1.clone());
    tokenizer.enter(Name::MdxExpressionMarker);
    tokenizer.consume();
    tokenizer.exit(Name::MdxExpressionMarker);
    // Remember where the content begins: `parse_expression` later collects
    // the expression body starting from this event index.
    tokenizer.tokenize_state.start = tokenizer.events.len() - 1;
    State::Next(StateName::MdxExpressionBefore)
}
/// Before data.
///
/// ```markdown
/// > | a {Math.PI} c
///        ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        None => {
            // Eof inside an expression is an error: prefer a message stashed
            // earlier by a custom parser, else the generic “unexpected eof”.
            let problem = tokenizer.tokenize_state.mdx_last_parse_error.take()
                .unwrap_or_else(|| ("Unexpected end of file in expression, expected a corresponding closing brace for `{`".into(), "markdown-rs".into(), "unexpected-eof".into()));
            State::Error(message::Message {
                place: Some(Box::new(message::Place::Point(tokenizer.point.to_unist()))),
                reason: problem.0,
                rule_id: Box::new(problem.2),
                source: Box::new(problem.1),
            })
        }
        Some(b'\n') => {
            tokenizer.enter(Name::LineEnding);
            tokenizer.consume();
            tokenizer.exit(Name::LineEnding);
            State::Next(StateName::MdxExpressionEolAfter)
        }
        // `}` at brace depth 0 can close the expression.
        Some(b'}') if tokenizer.tokenize_state.size == 0 => {
            // With a custom parser configured (“gnostic” mode), the collected
            // body must be a valid expression before we accept the close;
            // `parse_expression` may instead ask for more input or error.
            let state = if let Some(ref parse) = tokenizer.parse_state.options.mdx_expression_parse
            {
                parse_expression(tokenizer, parse)
            } else {
                State::Ok
            };
            if state == State::Ok {
                tokenizer.tokenize_state.start = 0;
                tokenizer.enter(Name::MdxExpressionMarker);
                tokenizer.consume();
                tokenizer.exit(Name::MdxExpressionMarker);
                tokenizer.exit(tokenizer.tokenize_state.token_1.clone());
            }
            state
        }
        Some(_) => {
            tokenizer.enter(Name::MdxExpressionData);
            State::Retry(StateName::MdxExpressionInside)
        }
    }
}
/// In data.
///
/// Consumes expression bytes, keeping a `{`/`}` balance counter in
/// agnostic mode (no custom parser configured).
///
/// ```markdown
/// > | a {Math.PI} c
///        ^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    let closes = tokenizer.current == Some(b'}') && tokenizer.tokenize_state.size == 0;
    if closes || matches!(tokenizer.current, None | Some(b'\n')) {
        tokenizer.exit(Name::MdxExpressionData);
        return State::Retry(StateName::MdxExpressionBefore);
    }
    // Don’t count braces if gnostic (a custom parser validates instead).
    match tokenizer.current {
        Some(b'{') if tokenizer.parse_state.options.mdx_expression_parse.is_none() => {
            tokenizer.tokenize_state.size += 1;
        }
        Some(b'}') => {
            tokenizer.tokenize_state.size -= 1;
        }
        _ => {}
    }
    tokenizer.consume();
    State::Next(StateName::MdxExpressionInside)
}
/// After eol.
///
/// ```markdown
///   | a {b +
/// > | c} d
///     ^
/// ```
pub fn eol_after(tokenizer: &mut Tokenizer) -> State {
    // Lazy continuation in a flow expression (or flow tag) is a syntax error.
    if (tokenizer.tokenize_state.token_1 == Name::MdxFlowExpression
        || tokenizer.tokenize_state.token_2 == Name::MdxJsxFlowTag)
        && tokenizer.lazy
    {
        State::Error(
            message::Message {
                place: Some(Box::new(message::Place::Point(tokenizer.point.to_unist()))),
                reason: "Unexpected lazy line in expression in container, expected line to be prefixed with `>` when in a block quote, whitespace when in a list, etc".into(),
                source: Box::new("markdown-rs".into()),
                rule_id: Box::new("unexpected-lazy".into()),
            }
        )
    } else if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        // Leading whitespace on a continuation line is treated as a line
        // prefix and partially stripped in `prefix`.
        // Idea: investigate if we’d need to use more complex stripping.
        // Take this example:
        //
        // ```markdown
        // > aaa <b c={`
        // >     d
        // > `} /> eee
        // ```
        //
        // Currently, the “paragraph” starts at `> | aaa`, so for the next line
        // here we split it into `>␠|␠␠|␠␠␠d` (prefix, this indent here,
        // expression data).
        tokenizer.enter(Name::LinePrefix);
        State::Retry(StateName::MdxExpressionPrefix)
    } else {
        State::Retry(StateName::MdxExpressionBefore)
    }
}
/// In the line prefix (indentation) after an eol inside an expression.
///
/// ```markdown
///   | a {b +
/// > |   c} d
///     ^
/// ```
pub fn prefix(tokenizer: &mut Tokenizer) -> State {
    // Tab-size to eat has to be the same as what we serialize as.
    // While in some places in markdown that’s 4, in JS it’s more common as 2.
    // Which is what’s also in `mdast-util-mdx-jsx`:
    // <https://github.com/syntax-tree/mdast-util-mdx-jsx/blob/40b951b/lib/index.js#L52>
    // <https://github.com/micromark/micromark-extension-mdx-expression/blob/7c305ff/packages/micromark-factory-mdx-expression/dev/index.js#L37>
    let below_limit = tokenizer.tokenize_state.size_c < 2;
    if below_limit && matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.tokenize_state.size_c += 1;
        tokenizer.consume();
        State::Next(StateName::MdxExpressionPrefix)
    } else {
        tokenizer.exit(Name::LinePrefix);
        tokenizer.tokenize_state.size_c = 0;
        State::Retry(StateName::MdxExpressionBefore)
    }
}
/// Parse an expression with a given function.
///
/// Used in “gnostic” mode: the caller supplied a real expression parser, so
/// the collected body is validated by it before the expression is accepted.
fn parse_expression(tokenizer: &mut Tokenizer, parse: &MdxExpressionParse) -> State {
    // Collect the body of the expression and positional info for each run of it.
    let result = collect(
        &tokenizer.events,
        tokenizer.parse_state.bytes,
        tokenizer.tokenize_state.start,
        &[Name::MdxExpressionData, Name::LineEnding],
        &[],
    );
    // Turn the name of the expression into a kind.
    let kind = match tokenizer.tokenize_state.token_1 {
        Name::MdxFlowExpression | Name::MdxTextExpression => MdxExpressionKind::Expression,
        Name::MdxJsxTagAttributeExpression => MdxExpressionKind::AttributeExpression,
        Name::MdxJsxTagAttributeValueExpression => MdxExpressionKind::AttributeValueExpression,
        _ => unreachable!("cannot handle unknown expression name"),
    };
    // Parse and handle what was signaled back.
    match parse(&result.value, &kind) {
        // Valid: the caller closes the expression.
        MdxSignal::Ok => State::Ok,
        // Invalid at a known offset: map the offset (relative to the
        // collected value) back onto a point in the source and error there.
        MdxSignal::Error(reason, relative, source, rule_id) => {
            let point = tokenizer
                .parse_state
                .location
                .as_ref()
                .expect("expected location index if aware mdx is on")
                .relative_to_point(&result.stops, relative)
                .unwrap_or_else(|| tokenizer.point.to_unist());
            State::Error(message::Message {
                place: Some(Box::new(message::Place::Point(point))),
                reason,
                rule_id,
                source,
            })
        }
        // The parser needs more input: stash the message (reported only if
        // eof is actually reached — see `before`) and keep consuming.
        MdxSignal::Eof(reason, source, rule_id) => {
            tokenizer.tokenize_state.mdx_last_parse_error = Some((reason, *source, *rule_id));
            tokenizer.enter(Name::MdxExpressionData);
            tokenizer.consume();
            State::Next(StateName::MdxExpressionInside)
        }
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
// file: src/construct/partial_mdx_jsx.rs
//! MDX JSX occurs in [MDX JSX (flow)][mdx_jsx_flow] and
//! [MDX JSX (text)][mdx_jsx_text].
//!
//! ## Grammar
//!
//! MDX JSX forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; constraint: markdown whitespace (`space_or_tab | eol`) is NOT
//! ; allowed directly after `<` in order to allow `1 < 3` in markdown.
//! mdx_jsx ::=
//! '<' [closing]
//! [*whitespace name [attributes_after_identifier] [closing]]
//! *whitespace '>'
//!
//! attributes_after_identifier ::=
//! 1*whitespace (attributes_boolean | attributes_value) |
//! *whitespace attributes_expression |
//! attributes_after_value ::=
//! *whitespace (attributes_boolean | attributes_expression | attributes_value)
//! attributes_boolean ::= key [attributes_after_identifier]
//! ; Note: in gnostic mode the value of the expression must instead be a single valid ES spread
//! ; expression
//! attributes_expression ::= expression [attributes_after_value]
//! attributes_value ::= key initializer [attributes_after_value]
//!
//! closing ::= *whitespace '/'
//!
//! name ::= identifier [local | members]
//! key ::= identifier [local]
//! local ::= *whitespace ':' *whitespace identifier
//! members ::= member *member
//! member ::= *whitespace '.' *whitespace identifier
//!
//! identifier ::= identifier_start *identifier_part
//! initializer ::= *whitespace '=' *whitespace value
//! value ::= double_quoted | single_quoted | expression
//! ; Note: in gnostic mode the value must instead be a single valid ES expression
//! expression ::= '{' *(expression_text | expression) '}'
//!
//! double_quoted ::= '"' *double_quoted_text '"'
//! single_quoted ::= "'" *single_quoted_text "'"
//!
//! text ::= char - '<' - '{'
//! whitespace ::= es_whitespace
//! double_quoted_text ::= char - '"'
//! single_quoted_text ::= char - "'"
//! expression_text ::= char - '{' - '}'
//! identifier_start ::= es_identifier_start
//! identifier_part ::= es_identifier_part | '-'
//!
//! ; ECMAScript
//! ; See “Identifier_start”: <https://tc39.es/ecma262/#prod-IdentifierStart>
//! es_identifier_start ::= ?
//! ; See “Identifier_part”: <https://tc39.es/ecma262/#prod-IdentifierPart>
//! es_identifier_part ::= ?
//! ; See “Whitespace”: <https://tc39.es/ecma262/#prod-WhiteSpace>
//! es_whitespace ::= ?
//! ```
//!
//! The grammar for JSX in markdown is much stricter than that of HTML in
//! markdown.
//! The primary benefit of this is that tags are parsed into tokens, and thus
//! can be processed.
//! Another, arguable, benefit of this is that it comes with syntax errors: if
//! an author types something that is nonsensical, an error is thrown with
//! information about where it happened, what occurred, and what was expected
//! instead.
//!
//! ## Tokens
//!
//! * [`LineEnding`][Name::LineEnding]
//! * [`MdxJsxEsWhitespace`][Name::MdxJsxEsWhitespace]
//! * [`MdxJsxTagMarker`][Name::MdxJsxTagMarker]
//! * [`MdxJsxTagClosingMarker`][Name::MdxJsxTagClosingMarker]
//! * [`MdxJsxTagName`][Name::MdxJsxTagName]
//! * [`MdxJsxTagNamePrimary`][Name::MdxJsxTagNamePrimary]
//! * [`MdxJsxTagNameMemberMarker`][Name::MdxJsxTagNameMemberMarker]
//! * [`MdxJsxTagNamePrefixMarker`][Name::MdxJsxTagNamePrefixMarker]
//! * [`MdxJsxTagNameMember`][Name::MdxJsxTagNameMember]
//! * [`MdxJsxTagNameLocal`][Name::MdxJsxTagNameLocal]
//! * [`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute]
//! * [`MdxJsxTagAttributeName`][Name::MdxJsxTagAttributeName]
//! * [`MdxJsxTagAttributePrimaryName`][Name::MdxJsxTagAttributePrimaryName]
//! * [`MdxJsxTagAttributeNamePrefixMarker`][Name::MdxJsxTagAttributeNamePrefixMarker]
//! * [`MdxJsxTagAttributeNameLocal`][Name::MdxJsxTagAttributeNameLocal]
//! * [`MdxJsxTagAttributeInitializerMarker`][Name::MdxJsxTagAttributeInitializerMarker]
//! * [`MdxJsxTagAttributeValueLiteral`][Name::MdxJsxTagAttributeValueLiteral]
//! * [`MdxJsxTagAttributeValueLiteralMarker`][Name::MdxJsxTagAttributeValueLiteralMarker]
//! * [`MdxJsxTagAttributeValueLiteralValue`][Name::MdxJsxTagAttributeValueLiteralValue]
//! * [`MdxJsxTagSelfClosingMarker`][Name::MdxJsxTagSelfClosingMarker]
//!
//! ## Recommendation
//!
//! When authoring markdown with JSX, keep in mind that MDX is a whitespace
//! sensitive and line-based language, while JavaScript is insensitive to
//! whitespace.
//! This affects how markdown and JSX interleave with eachother in MDX.
//! For more info on how it works, see [§ Interleaving][interleaving] on the
//! MDX site.
//!
//! ###### Comments inside tags
//!
//! JavaScript comments in JSX are not supported.
//!
//! Incorrect:
//!
//! ```jsx
//! <hi/*comment!*//>
//! <hello// comment!
//! />
//! ```
//!
//! Correct:
//!
//! ```jsx
//! <hi/>
//! <hello
//! />
//! ```
//!
//! A PR that adds support for them would be accepted.
//!
//! ###### Element or fragment attribute values
//!
//! JSX elements or JSX fragments as attribute values are not supported.
//! The reason for this change is that it would be confusing whether markdown
//! would work.
//!
//! Incorrect:
//!
//! ```jsx
//! <welcome name=<>Venus</> />
//! <welcome name=<span>Pluto</span> />
//! ```
//!
//! Correct:
//!
//! ```jsx
//! <welcome name='Mars' />
//! <welcome name={<span>Jupiter</span>} />
//! ```
//!
//! ###### Greater than (`>`) and right curly brace (`}`)
//!
//! JSX does not allow U+003E GREATER THAN (`>`) or U+007D RIGHT CURLY BRACE
//! (`}`) literally in text, they need to be encoded as character references
//! (or expressions).
//! There is no good reason for this (some JSX parsers agree with us and don’t
//! crash either).
//! Therefore, in MDX, U+003E GREATER THAN (`>`) and U+007D RIGHT CURLY BRACE
//! (`}`) are fine literally and don’t need to be encoded.
//!
//! ## References
//!
//! * [`jsx-flow.js` in `micromark-extension-mdx-jsx`](https://github.com/micromark/micromark-extension-mdx-jsx/blob/main/dev/lib/jsx-flow.js)
//! * [`mdxjs.com`](https://mdxjs.com)
//!
//! [mdx_jsx_flow]: crate::construct::mdx_jsx_flow
//! [mdx_jsx_text]: crate::construct::mdx_jsx_text
//! [interleaving]: https://mdxjs.com/docs/what-is-mdx/#interleaving
use crate::event::Name;
use crate::message;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{
char::{
after_index as char_after_index, format_byte, format_opt as format_char_opt,
kind_after_index, Kind as CharacterKind,
},
identifier::{id_cont, id_start},
};
use alloc::{boxed::Box, format};
use core::str;
/// Start of MDX: JSX.
///
/// ```markdown
/// > | a <B /> c
///       ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    debug_assert_eq!(tokenizer.current, Some(b'<'));
    // `token_1` holds the tag token name set by the caller (flow vs text tag;
    // see the lazy-line check in `es_whitespace_eol_after`).
    tokenizer.enter(tokenizer.tokenize_state.token_1.clone());
    tokenizer.enter(Name::MdxJsxTagMarker);
    tokenizer.consume();
    tokenizer.exit(Name::MdxJsxTagMarker);
    State::Next(StateName::MdxJsxStartAfter)
}
/// After `<`.
///
/// ```markdown
/// > | a <B /> c
///        ^
/// ```
pub fn start_after(tokenizer: &mut Tokenizer) -> State {
    // Deviate from JSX, which allows arbitrary whitespace here.
    // See: <https://github.com/micromark/micromark-extension-mdx-jsx/issues/7>.
    match tokenizer.current {
        Some(b'\t' | b'\n' | b' ') => State::Nok,
        _ => {
            tokenizer.attempt(State::Next(StateName::MdxJsxNameBefore), State::Nok);
            State::Retry(StateName::MdxJsxEsWhitespaceStart)
        }
    }
}
/// Before name, self slash, or end of tag for fragments.
///
/// ```markdown
/// > | a <B> c
///        ^
/// > | a </B> c
///        ^
/// > | a <> b
///        ^
/// ```
pub fn name_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Closing tag.
        Some(b'/') => {
            tokenizer.enter(Name::MdxJsxTagClosingMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagClosingMarker);
            // Optional ES whitespace may follow the closing slash.
            tokenizer.attempt(
                State::Next(StateName::MdxJsxClosingTagNameBefore),
                State::Nok,
            );
            State::Next(StateName::MdxJsxEsWhitespaceStart)
        }
        // Fragment opening tag.
        Some(b'>') => State::Retry(StateName::MdxJsxTagEnd),
        _ => {
            // Anything else must start an identifier (letter, `$`, `_`, …).
            if id_start_opt(char_after_index(
                tokenizer.parse_state.bytes,
                tokenizer.point.index,
            )) {
                tokenizer.enter(Name::MdxJsxTagName);
                tokenizer.enter(Name::MdxJsxTagNamePrimary);
                tokenizer.consume();
                State::Next(StateName::MdxJsxPrimaryName)
            } else {
                // `<!` looks like an HTML comment; give a targeted hint.
                crash(
                    tokenizer,
                    "before name",
                    &format!(
                        "a character that can start a name, such as a letter, `$`, or `_`{}",
                        if tokenizer.current == Some(b'!') {
                            " (note: to create a comment in MDX, use `{/* text */}`)"
                        } else {
                            ""
                        }
                    ),
                )
            }
        }
    }
}
/// Before name of closing tag or end of closing fragment tag.
///
/// ```markdown
/// > | a </> b
///         ^
/// > | a </B> c
///         ^
/// ```
pub fn closing_tag_name_before(tokenizer: &mut Tokenizer) -> State {
    // Fragment closing tag.
    if let Some(b'>') = tokenizer.current {
        State::Retry(StateName::MdxJsxTagEnd)
    }
    // Start of a closing tag name.
    else if id_start_opt(char_after_index(
        tokenizer.parse_state.bytes,
        tokenizer.point.index,
    )) {
        tokenizer.enter(Name::MdxJsxTagName);
        tokenizer.enter(Name::MdxJsxTagNamePrimary);
        tokenizer.consume();
        State::Next(StateName::MdxJsxPrimaryName)
    } else {
        crash(
            tokenizer,
            "before name",
            &format!(
                "a character that can start a name, such as a letter, `$`, or `_`{}",
                // Fix: `tokenizer.current == Some(b'*' | b'/')` evaluated the
                // *bitwise or* of the two bytes (`0x2A | 0x2F == b'/'`), so the
                // hint about JS comments was never shown for `*`.
                // A `matches!` pattern checks both bytes as intended.
                if matches!(tokenizer.current, Some(b'*' | b'/')) {
                    " (note: JS comments in JSX tags are not supported in MDX)"
                } else {
                    ""
                }
            ),
        )
    }
}
/// In primary name.
///
/// ```markdown
/// > | a <Bc> d
///         ^
/// ```
pub fn primary_name(tokenizer: &mut Tokenizer) -> State {
    // End of name.
    // Whitespace, a member marker (`.`), prefix marker (`:`), self-closing
    // slash, tag end, or an attribute expression all terminate the name.
    if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
        == CharacterKind::Whitespace
        || matches!(tokenizer.current, Some(b'.' | b'/' | b':' | b'>' | b'{'))
    {
        tokenizer.exit(Name::MdxJsxTagNamePrimary);
        tokenizer.attempt(State::Next(StateName::MdxJsxPrimaryNameAfter), State::Nok);
        State::Retry(StateName::MdxJsxEsWhitespaceStart)
    }
    // Continuation of name: remain.
    // Allow continuation bytes.
    else if matches!(tokenizer.current, Some(0x80..=0xBF))
        || id_cont_opt(char_after_index(
            tokenizer.parse_state.bytes,
            tokenizer.point.index,
        ))
    {
        tokenizer.consume();
        State::Next(StateName::MdxJsxPrimaryName)
    } else {
        // `@` suggests the author typed an email/handle-like autolink.
        crash(
            tokenizer,
            "in name",
            &format!(
                "a name character such as letters, digits, `$`, or `_`; whitespace before attributes; or the end of the tag{}",
                if tokenizer.current == Some(b'@') {
                    " (note: to create a link in MDX, use `[text](url)`)"
                } else {
                    ""
                }
            ),
        )
    }
}
/// After primary name.
///
/// ```markdown
/// > | a <b.c> d
///         ^
/// > | a <b:c> d
///         ^
/// ```
pub fn primary_name_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Start of a member name.
        Some(b'.') => {
            tokenizer.enter(Name::MdxJsxTagNameMemberMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagNameMemberMarker);
            tokenizer.attempt(State::Next(StateName::MdxJsxMemberNameBefore), State::Nok);
            State::Next(StateName::MdxJsxEsWhitespaceStart)
        }
        // Start of a local name.
        Some(b':') => {
            tokenizer.enter(Name::MdxJsxTagNamePrefixMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagNamePrefixMarker);
            tokenizer.attempt(State::Next(StateName::MdxJsxLocalNameBefore), State::Nok);
            State::Next(StateName::MdxJsxEsWhitespaceStart)
        }
        // End of name.
        _ => {
            // Either the tag continues (`/`, `>`, `{`) or an attribute name
            // starts; anything else is a syntax error.
            if matches!(tokenizer.current, Some(b'/' | b'>' | b'{'))
                || id_start_opt(char_after_index(
                    tokenizer.parse_state.bytes,
                    tokenizer.point.index,
                ))
            {
                tokenizer.exit(Name::MdxJsxTagName);
                State::Retry(StateName::MdxJsxAttributeBefore)
            } else {
                crash(
                    tokenizer,
                    "after name",
                    "a character that can start an attribute name, such as a letter, `$`, or `_`; whitespace before attributes; or the end of the tag"
                )
            }
        }
    }
}
/// Before member name.
///
/// ```markdown
/// > | a <b.c> d
///          ^
/// ```
pub fn member_name_before(tokenizer: &mut Tokenizer) -> State {
    // Start of a member name.
    // A member segment must begin with an identifier-start character.
    if id_start_opt(char_after_index(
        tokenizer.parse_state.bytes,
        tokenizer.point.index,
    )) {
        tokenizer.enter(Name::MdxJsxTagNameMember);
        tokenizer.consume();
        State::Next(StateName::MdxJsxMemberName)
    } else {
        crash(
            tokenizer,
            "before member name",
            "a character that can start an attribute name, such as a letter, `$`, or `_`; whitespace before attributes; or the end of the tag"
        )
    }
}
/// In member name.
///
/// ```markdown
/// > | a <b.cd> e
///           ^
/// ```
pub fn member_name(tokenizer: &mut Tokenizer) -> State {
    // End of name.
    // Note: no `:` allowed here.
    if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
        == CharacterKind::Whitespace
        || matches!(tokenizer.current, Some(b'.' | b'/' | b'>' | b'{'))
    {
        tokenizer.exit(Name::MdxJsxTagNameMember);
        tokenizer.attempt(State::Next(StateName::MdxJsxMemberNameAfter), State::Nok);
        State::Retry(StateName::MdxJsxEsWhitespaceStart)
    }
    // Continuation of name: remain.
    // Allow continuation bytes.
    else if matches!(tokenizer.current, Some(0x80..=0xBF))
        || id_cont_opt(char_after_index(
            tokenizer.parse_state.bytes,
            tokenizer.point.index,
        ))
    {
        tokenizer.consume();
        State::Next(StateName::MdxJsxMemberName)
    } else {
        crash(
            tokenizer,
            "in member name",
            &format!(
                "a name character such as letters, digits, `$`, or `_`; whitespace before attributes; or the end of the tag{}",
                if tokenizer.current == Some(b'@') {
                    " (note: to create a link in MDX, use `[text](url)`)"
                } else {
                    ""
                }
            ),
        )
    }
}
/// After member name.
///
/// ```markdown
/// > | a <b.c> d
///           ^
/// > | a <b.c.d> e
///           ^
/// ```
pub fn member_name_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Start of another member name.
        // Members can nest arbitrarily deep: `<a.b.c.d>`.
        Some(b'.') => {
            tokenizer.enter(Name::MdxJsxTagNameMemberMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagNameMemberMarker);
            tokenizer.attempt(State::Next(StateName::MdxJsxMemberNameBefore), State::Nok);
            State::Next(StateName::MdxJsxEsWhitespaceStart)
        }
        // End of name.
        _ => {
            if matches!(tokenizer.current, Some(b'/' | b'>' | b'{'))
                || id_start_opt(char_after_index(
                    tokenizer.parse_state.bytes,
                    tokenizer.point.index,
                ))
            {
                tokenizer.exit(Name::MdxJsxTagName);
                State::Retry(StateName::MdxJsxAttributeBefore)
            } else {
                crash(
                    tokenizer,
                    "after member name",
                    "a character that can start an attribute name, such as a letter, `$`, or `_`; whitespace before attributes; or the end of the tag"
                )
            }
        }
    }
}
/// Before local name.
///
/// ```markdown
/// > | a <b:c> d
///          ^
/// ```
pub fn local_name_before(tokenizer: &mut Tokenizer) -> State {
    // Start of a local name.
    if id_start_opt(char_after_index(
        tokenizer.parse_state.bytes,
        tokenizer.point.index,
    )) {
        tokenizer.enter(Name::MdxJsxTagNameLocal);
        tokenizer.consume();
        State::Next(StateName::MdxJsxLocalName)
    } else {
        // `+` or `/`..`9` right after `:` suggests a protocol-relative or
        // scheme-like URL (e.g. `<https://…>`); hint at link syntax.
        crash(
            tokenizer,
            "before local name",
            &format!(
                "a character that can start a name, such as a letter, `$`, or `_`{}",
                if matches!(tokenizer.current, Some(b'+' | b'/'..=b'9')) {
                    " (note: to create a link in MDX, use `[text](url)`)"
                } else {
                    ""
                }
            ),
        )
    }
}
/// In local name.
///
/// ```markdown
/// > | a <b:cd> e
///           ^
/// ```
pub fn local_name(tokenizer: &mut Tokenizer) -> State {
    // End of local name (note that we don’t expect another colon, or a member).
    if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
        == CharacterKind::Whitespace
        || matches!(tokenizer.current, Some(b'/' | b'>' | b'{'))
    {
        tokenizer.exit(Name::MdxJsxTagNameLocal);
        tokenizer.attempt(State::Next(StateName::MdxJsxLocalNameAfter), State::Nok);
        State::Retry(StateName::MdxJsxEsWhitespaceStart)
    }
    // Continuation of name: remain.
    // Allow continuation bytes.
    else if matches!(tokenizer.current, Some(0x80..=0xBF))
        || id_cont_opt(char_after_index(
            tokenizer.parse_state.bytes,
            tokenizer.point.index,
        ))
    {
        tokenizer.consume();
        State::Next(StateName::MdxJsxLocalName)
    } else {
        crash(
            tokenizer,
            "in local name",
            "a name character such as letters, digits, `$`, or `_`; whitespace before attributes; or the end of the tag"
        )
    }
}
/// After local name.
///
/// This is similar to `primary_name_after`, but we don’t expect colons or
/// periods.
///
/// ```markdown
/// > | a <b:c> d
///           ^
/// > | a <b:c:d> e
///           ^
/// ```
pub fn local_name_after(tokenizer: &mut Tokenizer) -> State {
    // End of name.
    // Either the tag continues (`/`, `>`, `{`) or an attribute name starts.
    if matches!(tokenizer.current, Some(b'/' | b'>' | b'{'))
        || id_start_opt(char_after_index(
            tokenizer.parse_state.bytes,
            tokenizer.point.index,
        ))
    {
        tokenizer.exit(Name::MdxJsxTagName);
        State::Retry(StateName::MdxJsxAttributeBefore)
    } else {
        crash(
            tokenizer,
            "after local name",
            "a character that can start an attribute name, such as a letter, `$`, or `_`; whitespace before attributes; or the end of the tag"
        )
    }
}
/// Before attribute.
///
/// ```markdown
/// > | a <b /> c
///          ^
/// > | a <b > c
///          ^
/// > | a <b {...c}> d
///          ^
/// > | a <b c> d
///          ^
/// ```
pub fn attribute_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Self-closing.
        Some(b'/') => {
            tokenizer.enter(Name::MdxJsxTagSelfClosingMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagSelfClosingMarker);
            tokenizer.attempt(State::Next(StateName::MdxJsxSelfClosing), State::Nok);
            State::Next(StateName::MdxJsxEsWhitespaceStart)
        }
        // End of tag.
        Some(b'>') => State::Retry(StateName::MdxJsxTagEnd),
        // Attribute expression.
        Some(b'{') => {
            // Stash the tag token in `token_2` so the expression machinery can
            // label events as an attribute expression; restored afterwards by
            // `attribute_expression_after`.
            tokenizer.tokenize_state.token_2 = tokenizer.tokenize_state.token_1.clone();
            tokenizer.tokenize_state.token_1 = Name::MdxJsxTagAttributeExpression;
            tokenizer.attempt(
                State::Next(StateName::MdxJsxAttributeExpressionAfter),
                State::Nok,
            );
            State::Retry(StateName::MdxExpressionStart)
        }
        _ => {
            // Start of an attribute name.
            if id_start_opt(char_after_index(
                tokenizer.parse_state.bytes,
                tokenizer.point.index,
            )) {
                tokenizer.enter(Name::MdxJsxTagAttribute);
                tokenizer.enter(Name::MdxJsxTagAttributeName);
                tokenizer.enter(Name::MdxJsxTagAttributePrimaryName);
                tokenizer.consume();
                State::Next(StateName::MdxJsxAttributePrimaryName)
            } else {
                crash(
                    tokenizer,
                    "before attribute name",
                    "a character that can start an attribute name, such as a letter, `$`, or `_`; whitespace before attributes; or the end of the tag"
                )
            }
        }
    }
}
/// After attribute expression.
///
/// ```markdown
/// > | a <b {c} d/> e
///             ^
/// ```
pub fn attribute_expression_after(tokenizer: &mut Tokenizer) -> State {
    // Restore the tag token that was stashed in `token_2` before the
    // expression was tokenized.
    tokenizer.tokenize_state.token_1 = tokenizer.tokenize_state.token_2.clone();
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.attempt(State::Next(StateName::MdxJsxAttributeBefore), State::Nok);
    State::Retry(StateName::MdxJsxEsWhitespaceStart)
}
/// In primary attribute name.
///
/// ```markdown
/// > | a <b cd/> e
///           ^
/// > | a <b c:d> e
///           ^
/// > | a <b c=d> e
///           ^
/// ```
pub fn attribute_primary_name(tokenizer: &mut Tokenizer) -> State {
    // End of attribute name or tag.
    // Whitespace, local-name prefix (`:`), initializer (`=`), or the end of
    // the tag (`/`, `>`, `{`) all terminate the primary attribute name.
    if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
        == CharacterKind::Whitespace
        || matches!(tokenizer.current, Some(b'/' | b':' | b'=' | b'>' | b'{'))
    {
        tokenizer.exit(Name::MdxJsxTagAttributePrimaryName);
        tokenizer.attempt(
            State::Next(StateName::MdxJsxAttributePrimaryNameAfter),
            State::Nok,
        );
        State::Retry(StateName::MdxJsxEsWhitespaceStart)
    }
    // Continuation of name: remain.
    // Allow continuation bytes.
    else if matches!(tokenizer.current, Some(0x80..=0xBF))
        || id_cont_opt(char_after_index(
            tokenizer.parse_state.bytes,
            tokenizer.point.index,
        ))
    {
        tokenizer.consume();
        State::Next(StateName::MdxJsxAttributePrimaryName)
    } else {
        crash(
            tokenizer,
            "in attribute name",
            "an attribute name character such as letters, digits, `$`, or `_`; `=` to initialize a value; whitespace before attributes; or the end of the tag"
        )
    }
}
/// After primary attribute name.
///
/// ```markdown
/// > | a <b c/> d
///           ^
/// > | a <b c:d> e
///           ^
/// > | a <b c=d> e
///           ^
/// ```
pub fn attribute_primary_name_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Start of a local name.
        Some(b':') => {
            tokenizer.enter(Name::MdxJsxTagAttributeNamePrefixMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagAttributeNamePrefixMarker);
            tokenizer.attempt(
                State::Next(StateName::MdxJsxAttributeLocalNameBefore),
                State::Nok,
            );
            State::Next(StateName::MdxJsxEsWhitespaceStart)
        }
        // Initializer: start of an attribute value.
        Some(b'=') => {
            tokenizer.exit(Name::MdxJsxTagAttributeName);
            tokenizer.enter(Name::MdxJsxTagAttributeInitializerMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagAttributeInitializerMarker);
            tokenizer.attempt(
                State::Next(StateName::MdxJsxAttributeValueBefore),
                State::Nok,
            );
            State::Retry(StateName::MdxJsxEsWhitespaceStart)
        }
        _ => {
            // End of tag / new attribute.
            // A boolean (valueless) attribute ends here; close both the name
            // and the attribute before looking for the next one.
            if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
                == CharacterKind::Whitespace
                || matches!(tokenizer.current, Some(b'/' | b'>' | b'{'))
                || id_start_opt(char_after_index(
                    tokenizer.parse_state.bytes,
                    tokenizer.point.index,
                ))
            {
                tokenizer.exit(Name::MdxJsxTagAttributeName);
                tokenizer.exit(Name::MdxJsxTagAttribute);
                tokenizer.attempt(State::Next(StateName::MdxJsxAttributeBefore), State::Nok);
                State::Retry(StateName::MdxJsxEsWhitespaceStart)
            } else {
                crash(
                    tokenizer,
                    "after attribute name",
                    "a character that can start an attribute name, such as a letter, `$`, or `_`; `=` to initialize a value; or the end of the tag"
                )
            }
        }
    }
}
/// Before local attribute name.
///
/// ```markdown
/// > | a <b c:d/> e
///            ^
/// ```
pub fn attribute_local_name_before(tokenizer: &mut Tokenizer) -> State {
    // Start of a local name.
    // The segment after the `:` prefix must begin with an identifier start.
    if id_start_opt(char_after_index(
        tokenizer.parse_state.bytes,
        tokenizer.point.index,
    )) {
        tokenizer.enter(Name::MdxJsxTagAttributeNameLocal);
        tokenizer.consume();
        State::Next(StateName::MdxJsxAttributeLocalName)
    } else {
        crash(
            tokenizer,
            "before local attribute name",
            "a character that can start an attribute name, such as a letter, `$`, or `_`; `=` to initialize a value; or the end of the tag"
        )
    }
}
/// In local attribute name.
///
/// ```markdown
/// > | a <b c:de/> f
///             ^
/// > | a <b c:d=e/> f
///             ^
/// ```
pub fn attribute_local_name(tokenizer: &mut Tokenizer) -> State {
    // End of local name (note that we don’t expect another colon).
    if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
        == CharacterKind::Whitespace
        || matches!(tokenizer.current, Some(b'/' | b'=' | b'>' | b'{'))
    {
        // Both the local part and the whole attribute name end here.
        tokenizer.exit(Name::MdxJsxTagAttributeNameLocal);
        tokenizer.exit(Name::MdxJsxTagAttributeName);
        tokenizer.attempt(
            State::Next(StateName::MdxJsxAttributeLocalNameAfter),
            State::Nok,
        );
        State::Retry(StateName::MdxJsxEsWhitespaceStart)
    }
    // Continuation of name: remain.
    // Allow continuation bytes.
    else if matches!(tokenizer.current, Some(0x80..=0xBF))
        || id_cont_opt(char_after_index(
            tokenizer.parse_state.bytes,
            tokenizer.point.index,
        ))
    {
        tokenizer.consume();
        State::Next(StateName::MdxJsxAttributeLocalName)
    } else {
        crash(
            tokenizer,
            "in local attribute name",
            "an attribute name character such as letters, digits, `$`, or `_`; `=` to initialize a value; whitespace before attributes; or the end of the tag"
        )
    }
}
/// After local attribute name.
///
/// ```markdown
/// > | a <b c:d/> f
///             ^
/// > | a <b c:d=e/> f
///             ^
/// ```
pub fn attribute_local_name_after(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Start of an attribute value.
        Some(b'=') => {
            tokenizer.enter(Name::MdxJsxTagAttributeInitializerMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagAttributeInitializerMarker);
            tokenizer.attempt(
                State::Next(StateName::MdxJsxAttributeValueBefore),
                State::Nok,
            );
            State::Next(StateName::MdxJsxEsWhitespaceStart)
        }
        _ => {
            // End of name.
            // A valueless attribute ends here; continue to the next attribute
            // or the end of the tag.
            if matches!(tokenizer.current, Some(b'/' | b'>' | b'{'))
                || id_start_opt(char_after_index(
                    tokenizer.parse_state.bytes,
                    tokenizer.point.index,
                ))
            {
                tokenizer.exit(Name::MdxJsxTagAttribute);
                State::Retry(StateName::MdxJsxAttributeBefore)
            } else {
                crash(
                    tokenizer,
                    "after local attribute name",
                    "a character that can start an attribute name, such as a letter, `$`, or `_`; `=` to initialize a value; or the end of the tag"
                )
            }
        }
    }
}
/// After `=`, before value.
///
/// ```markdown
/// > | a <b c="d"/> e
///            ^
/// > | a <b c={d}/> e
///            ^
/// ```
pub fn attribute_value_before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Start of double- or single quoted value.
        Some(b'"' | b'\'') => {
            // Remember which quote opened the value so the closing quote must
            // match it (checked in `attribute_value_quoted_start`).
            tokenizer.tokenize_state.marker = tokenizer.current.unwrap();
            tokenizer.enter(Name::MdxJsxTagAttributeValueLiteral);
            tokenizer.enter(Name::MdxJsxTagAttributeValueLiteralMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagAttributeValueLiteralMarker);
            State::Next(StateName::MdxJsxAttributeValueQuotedStart)
        }
        // Attribute value expression.
        Some(b'{') => {
            // Stash the tag token while the expression is tokenized; restored
            // by `attribute_value_expression_after`.
            tokenizer.tokenize_state.token_2 = tokenizer.tokenize_state.token_1.clone();
            tokenizer.tokenize_state.token_1 = Name::MdxJsxTagAttributeValueExpression;
            tokenizer.attempt(
                State::Next(StateName::MdxJsxAttributeValueExpressionAfter),
                State::Nok,
            );
            State::Retry(StateName::MdxExpressionStart)
        }
        _ => crash(
            tokenizer,
            "before attribute value",
            &format!(
                "a character that can start an attribute value, such as `\"`, `'`, or `{{`{}",
                if tokenizer.current == Some(b'<') {
                    " (note: to use an element or fragment as a prop value in MDX, use `{<element />}`)"
                } else {
                    ""
                }
            ),
        ),
    }
}
/// After attribute value expression.
///
/// ```markdown
/// > | a <b c={d} e/> f
///               ^
/// ```
pub fn attribute_value_expression_after(tokenizer: &mut Tokenizer) -> State {
    // Restore the stashed tag token and close the attribute.
    tokenizer.tokenize_state.token_1 = tokenizer.tokenize_state.token_2.clone();
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.exit(Name::MdxJsxTagAttribute);
    tokenizer.attempt(State::Next(StateName::MdxJsxAttributeBefore), State::Nok);
    State::Retry(StateName::MdxJsxEsWhitespaceStart)
}
/// Before quoted literal attribute value.
///
/// ```markdown
/// > | a <b c="d"/> e
///             ^
/// ```
pub fn attribute_value_quoted_start(tokenizer: &mut Tokenizer) -> State {
    if let Some(byte) = tokenizer.current {
        // `marker` holds the opening quote (`"` or `'`); the same byte closes
        // the value.
        if byte == tokenizer.tokenize_state.marker {
            tokenizer.tokenize_state.marker = 0;
            tokenizer.enter(Name::MdxJsxTagAttributeValueLiteralMarker);
            tokenizer.consume();
            tokenizer.exit(Name::MdxJsxTagAttributeValueLiteralMarker);
            tokenizer.exit(Name::MdxJsxTagAttributeValueLiteral);
            tokenizer.exit(Name::MdxJsxTagAttribute);
            tokenizer.attempt(State::Next(StateName::MdxJsxAttributeBefore), State::Nok);
            State::Next(StateName::MdxJsxEsWhitespaceStart)
        } else if byte == b'\n' {
            // A quoted value may span lines; treat the eol as ES whitespace
            // and come back here.
            tokenizer.attempt(
                State::Next(StateName::MdxJsxAttributeValueQuotedStart),
                State::Nok,
            );
            State::Retry(StateName::MdxJsxEsWhitespaceStart)
        } else {
            tokenizer.enter(Name::MdxJsxTagAttributeValueLiteralValue);
            State::Retry(StateName::MdxJsxAttributeValueQuoted)
        }
    } else {
        // EOF inside a quoted value: unbalanced quote.
        crash(
            tokenizer,
            "in attribute value",
            &format!(
                "a corresponding closing quote {}",
                format_byte(tokenizer.tokenize_state.marker)
            ),
        )
    }
}
/// In quoted literal attribute value.
///
/// ```markdown
/// > | a <b c="d"/> e
///             ^
/// ```
pub fn attribute_value_quoted(tokenizer: &mut Tokenizer) -> State {
    // Closing quote, eol, and EOF all end the value chunk; the start state
    // decides what to do next (close, continue on next line, or crash).
    if tokenizer.current == Some(tokenizer.tokenize_state.marker)
        || matches!(tokenizer.current, None | Some(b'\n'))
    {
        tokenizer.exit(Name::MdxJsxTagAttributeValueLiteralValue);
        State::Retry(StateName::MdxJsxAttributeValueQuotedStart)
    } else {
        tokenizer.consume();
        State::Next(StateName::MdxJsxAttributeValueQuoted)
    }
}
/// After self-closing slash.
///
/// ```markdown
/// > | a <b/> c
///          ^
/// ```
pub fn self_closing(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'>') => State::Retry(StateName::MdxJsxTagEnd),
        _ => crash(
            tokenizer,
            "after self-closing slash",
            &format!(
                "`>` to end the tag{}",
                // Fix: `tokenizer.current == Some(b'*' | b'/')` evaluated the
                // *bitwise or* of the two bytes (`0x2A | 0x2F == b'/'`), so the
                // hint about JS comments was never shown for `*`.
                // A `matches!` pattern checks both bytes as intended.
                if matches!(tokenizer.current, Some(b'*' | b'/')) {
                    " (note: JS comments in JSX tags are not supported in MDX)"
                } else {
                    ""
                }
            ),
        ),
    }
}
/// At final `>`.
///
/// Consume the closing marker and finish the whole tag.
///
/// ```markdown
/// > | a <b> c
///         ^
/// ```
pub fn tag_end(tokenizer: &mut Tokenizer) -> State {
    // Callers only retry into this state when the current byte is `>`.
    if tokenizer.current == Some(b'>') {
        tokenizer.enter(Name::MdxJsxTagMarker);
        tokenizer.consume();
        tokenizer.exit(Name::MdxJsxTagMarker);
        tokenizer.exit(tokenizer.tokenize_state.token_1.clone());
        State::Ok
    } else {
        unreachable!("expected `>`")
    }
}
/// Before optional ECMAScript whitespace.
///
/// ```markdown
/// > | a <a b> c
///         ^
/// ```
pub fn es_whitespace_start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'\n') => {
            tokenizer.enter(Name::LineEnding);
            tokenizer.consume();
            tokenizer.exit(Name::LineEnding);
            State::Next(StateName::MdxJsxEsWhitespaceEolAfter)
        }
        _ => {
            if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
                == CharacterKind::Whitespace
            {
                tokenizer.enter(Name::MdxJsxEsWhitespace);
                State::Retry(StateName::MdxJsxEsWhitespaceInside)
            } else {
                // No whitespace at all is fine: this construct is optional.
                State::Ok
            }
        }
    }
}
/// In ECMAScript whitespace.
///
/// ```markdown
/// > | a <a b> c
///         ^
/// ```
pub fn es_whitespace_inside(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'\n') => {
            // Line endings are tokenized separately (see
            // `es_whitespace_start`), so close the whitespace token first.
            tokenizer.exit(Name::MdxJsxEsWhitespace);
            State::Retry(StateName::MdxJsxEsWhitespaceStart)
        }
        // Allow continuation bytes.
        Some(0x80..=0xBF) => {
            tokenizer.consume();
            State::Next(StateName::MdxJsxEsWhitespaceInside)
        }
        Some(_)
            if kind_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
                == CharacterKind::Whitespace =>
        {
            tokenizer.consume();
            State::Next(StateName::MdxJsxEsWhitespaceInside)
        }
        Some(_) => {
            tokenizer.exit(Name::MdxJsxEsWhitespace);
            State::Ok
        }
        // Handle EOF.
        None => State::Nok,
    }
}
/// After eol in whitespace.
///
/// ```markdown
/// > | a <a\nb> c
///          ^
/// ```
pub fn es_whitespace_eol_after(tokenizer: &mut Tokenizer) -> State {
    // Lazy continuation in a flow tag is a syntax error.
    if tokenizer.tokenize_state.token_1 == Name::MdxJsxFlowTag && tokenizer.lazy {
        State::Error(
            message::Message {
                place: Some(Box::new(message::Place::Point(tokenizer.point.to_unist()))),
                reason: "Unexpected lazy line in jsx in container, expected line to be prefixed with `>` when in a block quote, whitespace when in a list, etc".into(),
                rule_id: Box::new("unexpected-lazy".into()),
                source: Box::new("markdown-rs".into()),
            }
        )
    } else {
        State::Retry(StateName::MdxJsxEsWhitespaceStart)
    }
}
/// Check if a character can start a JSX identifier.
fn id_start_opt(code: Option<char>) -> bool {
code.map_or(false, id_start)
}
/// Check if a character can continue a JSX identifier.
///
/// `None` (end of input) never continues an identifier.
fn id_cont_opt(code: Option<char>) -> bool {
    match code {
        Some(char) => id_cont(char, true),
        None => false,
    }
}
/// Crash because something happened `at`, with info on what was `expect`ed
/// instead.
///
/// Builds a `State::Error` pointing at the current position, with a rule id
/// of `unexpected-eof` or `unexpected-character`.
fn crash(tokenizer: &Tokenizer, at: &str, expect: &str) -> State {
    State::Error(message::Message {
        place: Some(Box::new(message::Place::Point(tokenizer.point.to_unist()))),
        reason: format!(
            "Unexpected {} {}, expected {}",
            // Format the offending character, or report end of file.
            format_char_opt(if tokenizer.current.is_none() {
                None
            } else {
                char_after_index(tokenizer.parse_state.bytes, tokenizer.point.index)
            }),
            at,
            expect
        ),
        rule_id: Box::new(format!(
            "unexpected-{}",
            if tokenizer.current.is_none() {
                "eof"
            } else {
                "character"
            }
        )),
        source: Box::new("markdown-rs".into()),
    })
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/partial_non_lazy_continuation.rs | Rust | //! Non-lazy continuation.
//!
//! This is a tiny helper that [flow][] constructs can use to make sure that
//! the following line is not lazy.
//! For example, [html (flow)][html_flow] and ([raw (flow)][raw_flow],
//! [indented][code_indented]), stop when the next line is lazy.
//!
//! [flow]: crate::construct::flow
//! [raw_flow]: crate::construct::raw_flow
//! [code_indented]: crate::construct::code_indented
//! [html_flow]: crate::construct::html_flow
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// At eol, before continuation.
///
/// Consumes a single line ending; anything else is not a continuation.
///
/// ```markdown
/// > | * ```js
///            ^
///   | b
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(b'\n') {
        tokenizer.enter(Name::LineEnding);
        tokenizer.consume();
        tokenizer.exit(Name::LineEnding);
        State::Next(StateName::NonLazyContinuationAfter)
    } else {
        State::Nok
    }
}
/// A continuation.
///
/// Succeeds only when the new line is not lazy.
///
/// ```markdown
///   | * ```js
/// > | b
///     ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.lazy {
        return State::Nok;
    }
    State::Ok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/partial_space_or_tab.rs | Rust | //! Space or tab occurs in tons of places.
//!
//! ## Grammar
//!
//! Space or tab forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! space_or_tab ::= 1*('\t' | ' ')
//! ```
//!
//! ## References
//!
//! * [`micromark-factory-space/index.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-factory-space/dev/index.js)
use crate::event::{Content, Link, Name};
use crate::state::{Name as StateName, State};
use crate::subtokenize::link;
use crate::tokenizer::Tokenizer;
/// Configuration.
#[derive(Debug)]
pub struct Options {
    /// Minimum allowed bytes (inclusive).
    ///
    /// Fewer than this many space/tab bytes makes the construct fail.
    pub min: usize,
    /// Maximum allowed bytes (inclusive).
    ///
    /// Consumption stops once this many bytes have been taken.
    pub max: usize,
    /// Name to use for events.
    pub kind: Name,
    /// Connect this event to the previous.
    pub connect: bool,
    /// Embedded content type to use.
    pub content: Option<Content>,
}
/// One or more `space_or_tab`.
///
/// ```bnf
/// space_or_tab ::= 1*( ' ' '\t' )
/// ```
pub fn space_or_tab(tokenizer: &mut Tokenizer) -> StateName {
space_or_tab_min_max(tokenizer, 1, usize::MAX)
}
/// Between `x` and `y` `space_or_tab`.
///
/// ```bnf
/// space_or_tab_min_max ::= x*y( ' ' '\t' )
/// ```
pub fn space_or_tab_min_max(tokenizer: &mut Tokenizer, min: usize, max: usize) -> StateName {
    // Plain `SpaceOrTab` events, no content, not connected.
    let options = Options {
        kind: Name::SpaceOrTab,
        min,
        max,
        content: None,
        connect: false,
    };
    space_or_tab_with_options(tokenizer, options)
}
/// `space_or_tab`, with the given options.
pub fn space_or_tab_with_options(tokenizer: &mut Tokenizer, options: Options) -> StateName {
tokenizer.tokenize_state.space_or_tab_connect = options.connect;
tokenizer.tokenize_state.space_or_tab_content = options.content;
tokenizer.tokenize_state.space_or_tab_min = options.min;
tokenizer.tokenize_state.space_or_tab_max = options.max;
tokenizer.tokenize_state.space_or_tab_token = options.kind;
StateName::SpaceOrTabStart
}
/// Start of `space_or_tab`.
///
/// ```markdown
/// > | a␠␠b
///      ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.tokenize_state.space_or_tab_max > 0
        && matches!(tokenizer.current, Some(b'\t' | b' '))
    {
        // With an embedded content type, emit a linked (void) event so the
        // whitespace can be subtokenized later; otherwise a plain event.
        if let Some(ref content) = tokenizer.tokenize_state.space_or_tab_content {
            tokenizer.enter_link(
                tokenizer.tokenize_state.space_or_tab_token.clone(),
                Link {
                    previous: None,
                    next: None,
                    content: content.clone(),
                },
            );
        } else {
            tokenizer.enter(tokenizer.tokenize_state.space_or_tab_token.clone());
        }
        // Connect only from the second event on; the first one sets the flag.
        if tokenizer.tokenize_state.space_or_tab_connect {
            let index = tokenizer.events.len() - 1;
            link(&mut tokenizer.events, index);
        } else {
            tokenizer.tokenize_state.space_or_tab_connect = true;
        }
        State::Retry(StateName::SpaceOrTabInside)
    } else {
        State::Retry(StateName::SpaceOrTabAfter)
    }
}
/// In `space_or_tab`.
///
/// Consumes space/tab bytes until the maximum is reached or something else
/// shows up.
///
/// ```markdown
/// > | a␠␠b
///      ^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    let below_max = tokenizer.tokenize_state.space_or_tab_size
        < tokenizer.tokenize_state.space_or_tab_max;
    if below_max && matches!(tokenizer.current, Some(b'\t' | b' ')) {
        tokenizer.consume();
        tokenizer.tokenize_state.space_or_tab_size += 1;
        State::Next(StateName::SpaceOrTabInside)
    } else {
        tokenizer.exit(tokenizer.tokenize_state.space_or_tab_token.clone());
        State::Retry(StateName::SpaceOrTabAfter)
    }
}
/// After `space_or_tab`.
///
/// Succeeds when at least the minimum amount was seen, then resets all
/// `space_or_tab_*` fields on the tokenize state.
///
/// ```markdown
/// > | a␠␠b
///        ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    // Decide the outcome before clearing the counters.
    let enough = tokenizer.tokenize_state.space_or_tab_size
        >= tokenizer.tokenize_state.space_or_tab_min;
    tokenizer.tokenize_state.space_or_tab_connect = false;
    tokenizer.tokenize_state.space_or_tab_content = None;
    tokenizer.tokenize_state.space_or_tab_size = 0;
    tokenizer.tokenize_state.space_or_tab_max = 0;
    tokenizer.tokenize_state.space_or_tab_min = 0;
    tokenizer.tokenize_state.space_or_tab_token = Name::SpaceOrTab;
    if enough {
        State::Ok
    } else {
        State::Nok
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/partial_space_or_tab_eol.rs | Rust | //! Space or tab (eol) occurs in [destination][], [label][], and [title][].
//!
//! ## Grammar
//!
//! Space or tab (eol) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! space_or_tab_eol ::= 1*space_or_tab | *space_or_tab eol *space_or_tab
//! ```
//!
//! Importantly, this allows one line ending, but not blank lines.
//!
//! ## References
//!
//! * [`micromark-factory-space/index.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-factory-space/dev/index.js)
//!
//! [destination]: crate::construct::partial_destination
//! [label]: crate::construct::partial_label
//! [title]: crate::construct::partial_title
use crate::construct::partial_space_or_tab::{
space_or_tab_with_options, Options as SpaceOrTabOptions,
};
use crate::event::{Content, Link, Name};
use crate::state::{Name as StateName, State};
use crate::subtokenize::link;
use crate::tokenizer::Tokenizer;
/// Configuration.
#[derive(Debug)]
pub struct Options {
    /// Connect this whitespace to the previous.
    ///
    /// When `true`, events generated here are linked to earlier events of
    /// the same embedded content, so they are tokenized together later.
    pub connect: bool,
    /// Embedded content type to use.
    ///
    /// When set, the whitespace (and eol) is marked as belonging to this
    /// content type so it is parsed again in a later pass.
    pub content: Option<Content>,
}
/// `space_or_tab_eol`
///
/// Shortcut for [`space_or_tab_eol_with_options`] using the defaults:
/// no embedded content type and not connected to earlier events.
pub fn space_or_tab_eol(tokenizer: &mut Tokenizer) -> StateName {
    let default_options = Options {
        connect: false,
        content: None,
    };
    space_or_tab_eol_with_options(tokenizer, default_options)
}
/// `space_or_tab_eol`, with the given options.
pub fn space_or_tab_eol_with_options(tokenizer: &mut Tokenizer, options: Options) -> StateName {
tokenizer.tokenize_state.space_or_tab_eol_content = options.content;
tokenizer.tokenize_state.space_or_tab_eol_connect = options.connect;
StateName::SpaceOrTabEolStart
}
/// Start of whitespace with at most one eol.
///
/// ```markdown
/// > | a␠␠b
///      ^
/// > | a␠␠␊
///      ^
///   | ␠␠b
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, Some(b'\t' | b' ')) {
        // Leading whitespace: try to eat it; either way we end up looking
        // for the (optional) line ending next.
        tokenizer.attempt(
            State::Next(StateName::SpaceOrTabEolAfterFirst),
            State::Next(StateName::SpaceOrTabEolAtEol),
        );
        State::Retry(space_or_tab_with_options(
            tokenizer,
            SpaceOrTabOptions {
                kind: Name::SpaceOrTab,
                min: 1,
                max: usize::MAX,
                content: tokenizer.tokenize_state.space_or_tab_eol_content.clone(),
                connect: tokenizer.tokenize_state.space_or_tab_eol_connect,
            },
        ))
    } else {
        // No whitespace here: go straight to the eol check.
        State::Retry(StateName::SpaceOrTabEolAtEol)
    }
}
/// After initial whitespace, at optional eol.
///
/// Whitespace alone is already enough, so the construct is marked ok before
/// checking for a line ending.
///
/// ```markdown
/// > | a␠␠b
///      ^
/// > | a␠␠␊
///      ^
///   | ␠␠b
/// ```
pub fn after_first(tokenizer: &mut Tokenizer) -> State {
    tokenizer.tokenize_state.space_or_tab_eol_ok = true;

    debug_assert!(
        tokenizer.tokenize_state.space_or_tab_eol_content.is_none(),
        "expected no content"
    );
    // If the above ever errors, set
    // `tokenizer.tokenize_state.space_or_tab_eol_connect: true` in that case.

    State::Retry(StateName::SpaceOrTabEolAtEol)
}
/// After optional whitespace, at eol.
///
/// Consumes at most one line ending; a second eol would mean a blank line,
/// which is not allowed here.
///
/// ```markdown
/// > | a␠␠b
///      ^
/// > | a␠␠␊
///      ^
///   | ␠␠b
/// > | a␊
///     ^
///   | ␠␠b
/// ```
pub fn at_eol(tokenizer: &mut Tokenizer) -> State {
    if let Some(b'\n') = tokenizer.current {
        // When an embedded content type is configured, mark the line ending
        // as belonging to it so it is tokenized again later.
        if let Some(ref content) = tokenizer.tokenize_state.space_or_tab_eol_content {
            tokenizer.enter_link(
                Name::LineEnding,
                Link {
                    previous: None,
                    next: None,
                    content: content.clone(),
                },
            );
        } else {
            tokenizer.enter(Name::LineEnding);
        }
        // Either link this event to the previous one of the same content,
        // or remember to start linking from the next event onwards.
        if tokenizer.tokenize_state.space_or_tab_eol_connect {
            let index = tokenizer.events.len() - 1;
            link(&mut tokenizer.events, index);
        } else if tokenizer.tokenize_state.space_or_tab_eol_content.is_some() {
            tokenizer.tokenize_state.space_or_tab_eol_connect = true;
        }
        tokenizer.consume();
        tokenizer.exit(Name::LineEnding);
        State::Next(StateName::SpaceOrTabEolAfterEol)
    } else {
        // No eol: succeed only if whitespace was already seen (`ok` was set
        // in `after_first`), and reset the shared state either way.
        let ok = tokenizer.tokenize_state.space_or_tab_eol_ok;
        tokenizer.tokenize_state.space_or_tab_eol_content = None;
        tokenizer.tokenize_state.space_or_tab_eol_connect = false;
        tokenizer.tokenize_state.space_or_tab_eol_ok = false;
        if ok {
            State::Ok
        } else {
            State::Nok
        }
    }
}
/// After eol.
///
/// Optional trailing whitespace may follow the single line ending.
///
/// ```markdown
///   | a␠␠␊
/// > | ␠␠b
///     ^
///   | a␊
/// > | ␠␠b
///     ^
/// ```
pub fn after_eol(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        Some(b'\t' | b' ') => {
            // Trailing whitespace after the eol: it must parse, otherwise
            // the whole construct fails.
            tokenizer.attempt(State::Next(StateName::SpaceOrTabEolAfterMore), State::Nok);
            State::Retry(space_or_tab_with_options(
                tokenizer,
                SpaceOrTabOptions {
                    kind: Name::SpaceOrTab,
                    min: 1,
                    max: usize::MAX,
                    content: tokenizer.tokenize_state.space_or_tab_eol_content.clone(),
                    connect: tokenizer.tokenize_state.space_or_tab_eol_connect,
                },
            ))
        }
        _ => State::Retry(StateName::SpaceOrTabEolAfterMore),
    }
}
/// After optional final whitespace.
///
/// Wraps up: resets the shared state and reports success.
///
/// ```markdown
///   | a␠␠␊
/// > | ␠␠b
///       ^
///   | a␊
/// > | ␠␠b
///       ^
/// ```
pub fn after_more(tokenizer: &mut Tokenizer) -> State {
    debug_assert!(
        !matches!(tokenizer.current, None | Some(b'\n')),
        "did not expect blank line"
    );
    // If the above ever starts erroring, gracefully `State::Nok` on it.
    // Currently it doesn’t happen, as we only use this in content, which does
    // not allow blank lines.

    // Clear shared state for the next use of this construct.
    let state = &mut tokenizer.tokenize_state;
    state.space_or_tab_eol_content = None;
    state.space_or_tab_eol_connect = false;
    state.space_or_tab_eol_ok = false;
    State::Ok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/partial_title.rs | Rust | //! Title occurs in [definition][] and [label end][label_end].
//!
//! ## Grammar
//!
//! Title forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: no blank lines.
//! ; Restriction: markers must match (in case of `(` with `)`).
//! title ::= marker *(title_byte | title_escape) marker
//! title_byte ::= code - '\\' - marker
//! title_escape ::= '\\' ['\\' | marker]
//! marker ::= '"' | '\'' | '('
//! ```
//!
//! Titles can be double quoted (`"a"`), single quoted (`'a'`), or
//! parenthesized (`(a)`).
//!
//! Titles can contain line endings and whitespace, but they are not allowed to
//! contain blank lines.
//! They are allowed to be blank themselves.
//!
//! The title is interpreted as the [string][] content type.
//! That means that [character escapes][character_escape] and
//! [character references][character_reference] are allowed.
//!
//! ## References
//!
//! * [`micromark-factory-title/index.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-factory-title/dev/index.js)
//!
//! [definition]: crate::construct::definition
//! [string]: crate::construct::string
//! [character_escape]: crate::construct::character_escape
//! [character_reference]: crate::construct::character_reference
//! [label_end]: crate::construct::label_end
use crate::construct::partial_space_or_tab_eol::{space_or_tab_eol_with_options, Options};
use crate::event::{Content, Link, Name};
use crate::state::{Name as StateName, State};
use crate::subtokenize::link;
use crate::tokenizer::Tokenizer;
/// Start of title.
///
/// ```markdown
/// > | "a"
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    if let Some(byte) = tokenizer.current {
        if matches!(byte, b'"' | b'\'' | b'(') {
            // Remember what closes this title: `(` pairs with `)`, quotes
            // pair with themselves.
            tokenizer.tokenize_state.marker = if byte == b'(' { b')' } else { byte };
            tokenizer.enter(tokenizer.tokenize_state.token_1.clone());
            tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
            tokenizer.consume();
            tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
            return State::Next(StateName::TitleBegin);
        }
    }
    State::Nok
}
/// After opening marker.
///
/// This is also used at the closing marker.
///
/// ```markdown
/// > | "a"
///      ^
/// ```
pub fn begin(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current != Some(tokenizer.tokenize_state.marker) {
        // Not the closing marker yet: open the text part of the title.
        tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
        State::Retry(StateName::TitleAtBreak)
    } else {
        // Closing marker: consume it, close the title, and reset state.
        tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
        tokenizer.consume();
        tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
        tokenizer.exit(tokenizer.tokenize_state.token_1.clone());
        tokenizer.tokenize_state.marker = 0;
        tokenizer.tokenize_state.connect = false;
        State::Ok
    }
}
/// At something, before something else.
///
/// ```markdown
/// > | "a"
///      ^
/// ```
pub fn at_break(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Eof: titles must be closed, so this can’t be one.
        None => State::Retry(StateName::TitleNok),
        // Closing marker: close the text part, handle the marker next.
        // Note: the marker is never `\n`, so this arm must stay first.
        Some(byte) if byte == tokenizer.tokenize_state.marker => {
            tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
            State::Retry(StateName::TitleBegin)
        }
        // Eol: allow whitespace with at most one line ending (no blank lines).
        Some(b'\n') => {
            tokenizer.attempt(
                State::Next(StateName::TitleAfterEol),
                State::Next(StateName::TitleNok),
            );
            State::Retry(space_or_tab_eol_with_options(
                tokenizer,
                Options {
                    content: Some(Content::String),
                    connect: tokenizer.tokenize_state.connect,
                },
            ))
        }
        // Anything else: data, tokenized later as string content.
        Some(_) => {
            tokenizer.enter_link(
                Name::Data,
                Link {
                    previous: None,
                    next: None,
                    content: Content::String,
                },
            );
            if tokenizer.tokenize_state.connect {
                let index = tokenizer.events.len() - 1;
                link(&mut tokenizer.events, index);
            } else {
                tokenizer.tokenize_state.connect = true;
            }
            State::Retry(StateName::TitleInside)
        }
    }
}
/// In title, after whitespace.
///
/// ```markdown
///   | "a␊
/// > | b"
///     ^
/// ```
pub fn after_eol(tokenizer: &mut Tokenizer) -> State {
    // A line ending was consumed inside the title: from now on, string
    // content events must be linked to the earlier ones.
    tokenizer.tokenize_state.connect = true;
    State::Retry(StateName::TitleAtBreak)
}
/// In title, at something that isn’t allowed.
///
/// ```markdown
/// > | "a
///       ^
/// ```
pub fn nok(tokenizer: &mut Tokenizer) -> State {
    // Reset shared state before failing, so a later construct starts clean.
    tokenizer.tokenize_state.marker = 0;
    tokenizer.tokenize_state.connect = false;
    State::Nok
}
/// In text.
///
/// ```markdown
/// > | "a"
///      ^
/// ```
pub fn inside(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Closing marker: hand back to `at_break` (marker is never `\n`).
        Some(byte) if byte == tokenizer.tokenize_state.marker => {
            tokenizer.exit(Name::Data);
            State::Retry(StateName::TitleAtBreak)
        }
        // Eol or eof also end this run of data.
        None | Some(b'\n') => {
            tokenizer.exit(Name::Data);
            State::Retry(StateName::TitleAtBreak)
        }
        // Backslash may start a character escape.
        Some(b'\\') => {
            tokenizer.consume();
            State::Next(StateName::TitleEscape)
        }
        // Plain data.
        Some(_) => {
            tokenizer.consume();
            State::Next(StateName::TitleInside)
        }
    }
}
/// After `\`, at a special character.
///
/// ```markdown
/// > | "a\*b"
/// ^
/// ```
pub fn escape(tokenizer: &mut Tokenizer) -> State {
match tokenizer.current {
Some(b'"' | b'\'' | b')' | b'\\') => {
tokenizer.consume();
State::Next(StateName::TitleInside)
}
_ => State::Retry(StateName::TitleInside),
}
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/partial_whitespace.rs | Rust | //! Trailing whitespace occurs in [string][] and [text][].
//!
//! ## Grammar
//!
//! Trailing whitespace forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: the start and end here count as an eol in the case of `text`.
//! whitespace ::= *space_or_tab eol *space_or_tab
//! ```
//!
//! It occurs around line endings and, in the case of text content, it also
//! occurs at the start or end of the whole.
//!
//! Normally this whitespace is ignored.
//! In the case of text content, whitespace before a line ending that
//! consistents solely of spaces, at least 2, forms a hard break (trailing).
//!
//! The minimum number of those spaces is defined in
//! [`HARD_BREAK_PREFIX_SIZE_MIN`][].
//!
//! It is also possible to create a hard break with a similar construct: a
//! [hard break (escape)][hard_break_escape] is a backslash followed
//! by a line ending.
//! That construct is recommended because it is similar to a
//! [character escape][character_escape] and similar to how line endings can be
//! “escaped” in other languages.
//! Trailing spaces are typically invisible in editors, or even automatically
//! removed, making hard break (trailing) hard to use.
//!
//! ## HTML
//!
//! Hard breaks in markdown relate to the HTML element `<br>`.
//! See [*§ 4.5.27 The `br` element* in the HTML spec][html] for more info.
//!
//! ## Recommendation
//!
//! Do not use trailing whitespace.
//! It is never needed when using [hard break (escape)][hard_break_escape]
//! to create hard breaks.
//!
//! ## Tokens
//!
//! * [`HardBreakTrailing`][Name::HardBreakTrailing]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`initialize/text.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark/dev/lib/initialize/text.js)
//! * [*§ 6.7 Hard line breaks* in `CommonMark`](https://spec.commonmark.org/0.31/#hard-line-breaks)
//!
//! [string]: crate::construct::string
//! [text]: crate::construct::text
//! [hard_break_escape]: crate::construct::hard_break_escape
//! [character_escape]: crate::construct::character_escape
//! [hard_break_prefix_size_min]: crate::util::constant::HARD_BREAK_PREFIX_SIZE_MIN
//! [html]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-br-element
use crate::event::{Event, Kind, Name};
use crate::tokenizer::Tokenizer;
use crate::util::{
constant::HARD_BREAK_PREFIX_SIZE_MIN,
slice::{Position, Slice},
};
use alloc::vec;
/// Resolve whitespace.
///
/// Walks all events looking for `Data` exits and trims whitespace at line
/// boundaries (and, when `trim_whole`, at the edges of the whole text).
/// When `hard_break` is on, trailing spaces may become a hard break.
pub fn resolve_whitespace(tokenizer: &mut Tokenizer, hard_break: bool, trim_whole: bool) {
    let mut index = 0;

    // The event list length is stable during the loop: `trim_data` only
    // renames/moves events and queues splices on the map.
    while index < tokenizer.events.len() {
        let event = &tokenizer.events[index];
        let is_data_exit = event.kind == Kind::Exit && event.name == Name::Data;

        if is_data_exit {
            let total = tokenizer.events.len();
            // Trim the start when the data opens the whole text, or
            // directly follows a line ending.
            let at_start = (trim_whole && index == 1)
                || (index > 1 && tokenizer.events[index - 2].name == Name::LineEnding);
            // Trim the end when the data closes the whole text, or a line
            // ending comes right after it.
            let at_end = (trim_whole && index == total - 1)
                || (index + 1 < total && tokenizer.events[index + 1].name == Name::LineEnding);
            trim_data(tokenizer, index, at_start, at_end, hard_break);
        }

        index += 1;
    }

    // Apply the queued splices.
    tokenizer.map.consume(&mut tokenizer.events);
}
/// Trim a [`Data`][Name::Data] event.
///
/// Splits leading/trailing whitespace off the data at `exit_index` into
/// separate [`SpaceOrTab`][Name::SpaceOrTab] (or
/// [`HardBreakTrailing`][Name::HardBreakTrailing]) events.
/// New events are queued on the edit map; the caller applies them later.
fn trim_data(
    tokenizer: &mut Tokenizer,
    exit_index: usize,
    trim_start: bool,
    trim_end: bool,
    hard_break: bool,
) {
    let mut slice = Slice::from_position(
        tokenizer.parse_state.bytes,
        &Position::from_exit_event(&tokenizer.events, exit_index),
    );

    if trim_end {
        // Scan backwards over trailing spaces/tabs; track whether the run is
        // spaces only (tabs cannot form a hard break).
        let mut index = slice.bytes.len();
        let mut spaces_only = slice.after == 0;
        while index > 0 {
            match slice.bytes[index - 1] {
                b' ' => {}
                b'\t' => spaces_only = false,
                _ => break,
            }

            index -= 1;
        }

        // A hard break needs only spaces, enough of them, and something
        // (a line ending) after this event.
        let diff = slice.bytes.len() - index;
        let name = if hard_break
            && spaces_only
            && diff >= HARD_BREAK_PREFIX_SIZE_MIN
            && exit_index + 1 < tokenizer.events.len()
        {
            Name::HardBreakTrailing
        } else {
            Name::SpaceOrTab
        };

        // The whole data is whitespace.
        // We can be very fast: we only change the event names.
        if index == 0 {
            tokenizer.events[exit_index - 1].name = name.clone();
            tokenizer.events[exit_index].name = name;
            return;
        }

        if diff > 0 || slice.after > 0 {
            // Split: shrink the data event to end before the whitespace and
            // queue a new whitespace event pair right after it.
            let exit_point = tokenizer.events[exit_index].point.clone();
            let mut enter_point = exit_point.clone();
            enter_point.index -= diff;
            enter_point.column -= diff;
            enter_point.vs = 0;

            tokenizer.map.add(
                exit_index + 1,
                0,
                vec![
                    Event {
                        kind: Kind::Enter,
                        name: name.clone(),
                        point: enter_point.clone(),
                        link: None,
                    },
                    Event {
                        kind: Kind::Exit,
                        name,
                        point: exit_point,
                        link: None,
                    },
                ],
            );

            tokenizer.events[exit_index].point = enter_point;
            // Also shrink the slice so the start-trim below sees only the
            // remaining bytes.
            slice.bytes = &slice.bytes[..index];
        }
    }

    if trim_start {
        // Scan forwards over leading spaces/tabs.
        let mut index = 0;
        while index < slice.bytes.len() {
            match slice.bytes[index] {
                b' ' | b'\t' => index += 1,
                _ => break,
            }
        }

        // The whole data is whitespace.
        // We can be very fast: we only change the event names.
        if index == slice.bytes.len() {
            tokenizer.events[exit_index - 1].name = Name::SpaceOrTab;
            tokenizer.events[exit_index].name = Name::SpaceOrTab;
            return;
        }

        if index > 0 || slice.before > 0 {
            // Split: queue a whitespace event pair before the data event and
            // move the data’s enter point past the whitespace.
            let enter_point = tokenizer.events[exit_index - 1].point.clone();
            let mut exit_point = enter_point.clone();
            exit_point.index += index;
            exit_point.column += index;
            exit_point.vs = 0;

            tokenizer.map.add(
                exit_index - 1,
                0,
                vec![
                    Event {
                        kind: Kind::Enter,
                        name: Name::SpaceOrTab,
                        point: enter_point,
                        link: None,
                    },
                    Event {
                        kind: Kind::Exit,
                        name: Name::SpaceOrTab,
                        point: exit_point.clone(),
                        link: None,
                    },
                ],
            );

            tokenizer.events[exit_index - 1].point = exit_point;
        }
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/raw_flow.rs | Rust | //! Raw (flow) occurs in the [flow][] content type.
//! It forms code (fenced) and math (flow).
//!
//! ## Grammar
//!
//! Code (fenced) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! raw_flow ::= fence_open *( eol *byte ) [ eol fence_close ]
//!
//! ; Restriction: math (flow) does not support the `info` part.
//! fence_open ::= sequence [*space_or_tab info [1*space_or_tab meta]] *space_or_tab
//! ; Restriction: the number of markers in the closing fence sequence must be
//! ; equal to or greater than the number of markers in the opening fence
//! ; sequence.
//! ; Restriction: the marker in the closing fence sequence must match the
//! ; marker in the opening fence sequence
//! fence_close ::= sequence *space_or_tab
//! sequence ::= 3*'`' | 3*'~' | 2*'$'
//! ; Restriction: the marker cannot occur in `info` if it is the `$` or `` ` `` character.
//! info ::= 1*text
//! ; Restriction: the marker cannot occur in `meta` if it is the `$` or `` ` `` character.
//! meta ::= 1*text *(*space_or_tab 1*text)
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! The above grammar does not show how indentation (with `space_or_tab`) of
//! each line is handled.
//! To parse raw (flow), let `x` be the number of `space_or_tab` characters
//! before the opening fence sequence.
//! Each line of text is then allowed (not required) to be indented with up
//! to `x` spaces or tabs, which are then ignored as an indent instead of being
//! considered as part of the content.
//! This indent does not affect the closing fence.
//! It can be indented up to a separate 3 spaces or tabs.
//! A bigger indent makes it part of the content instead of a fence.
//!
//! The `info` and `meta` parts are interpreted as the [string][] content type.
//! That means that [character escapes][character_escape] and
//! [character references][character_reference] are allowed.
//! Math (flow) does not support `info`.
//!
//! The optional `meta` part is ignored: it is not used when parsing or
//! rendering.
//!
//! The optional `info` part is used and is expected to specify the programming
//! language that the content is in.
//! Which value it holds depends on what your syntax highlighter supports, if
//! one is used.
//!
//! In markdown, it is also possible to use [raw (text)][raw_text] in the
//! [text][] content type.
//! It is also possible to create code with the
//! [code (indented)][code_indented] construct.
//!
//! ## HTML
//!
//! Code (fenced) relates to both the `<pre>` and the `<code>` elements in
//! HTML.
//! See [*§ 4.4.3 The `pre` element*][html_pre] and the [*§ 4.5.15 The `code`
//! element*][html_code] in the HTML spec for more info.
//!
//! Math (flow) does not relate to HTML elements.
//! `MathML`, which is sort of like SVG but for math, exists but it doesn’t work
//! well and isn’t widely supported.
//! Instead, it is recommended to use client side JavaScript with something like
//! `KaTeX` or `MathJax` to process the math
//! For that, the math is compiled as a `<pre>`, and a `<code>` element with two
//! classes: `language-math` and `math-display`.
//! Client side JavaScript can look for these classes to process them further.
//!
//! The `info` is, when rendering to HTML, typically exposed as a class.
//! This behavior stems from the HTML spec ([*§ 4.5.15 The `code`
//! element*][html_code]).
//! For example:
//!
//! ```markdown
//! ~~~css
//! * { color: tomato }
//! ~~~
//! ```
//!
//! Yields:
//!
//! ```html
//! <pre><code class="language-css">* { color: tomato }
//! </code></pre>
//! ```
//!
//! ## Recommendation
//!
//! It is recommended to use code (fenced) instead of code (indented).
//! Code (fenced) is more explicit, similar to code (text), and has support
//! for specifying the programming language.
//!
//! When authoring markdown with math, keep in mind that math doesn’t work in
//! most places.
//! Notably, GitHub currently has a really weird crappy client-side regex-based
//! thing.
//! But on your own (math-heavy?) site it can be great!
//! You can use code (fenced) with an info string of `math` to improve this, as
//! that works in many places.
//!
//! ## Tokens
//!
//! * [`CodeFenced`][Name::CodeFenced]
//! * [`CodeFencedFence`][Name::CodeFencedFence]
//! * [`CodeFencedFenceInfo`][Name::CodeFencedFenceInfo]
//! * [`CodeFencedFenceMeta`][Name::CodeFencedFenceMeta]
//! * [`CodeFencedFenceSequence`][Name::CodeFencedFenceSequence]
//! * [`CodeFlowChunk`][Name::CodeFlowChunk]
//! * [`LineEnding`][Name::LineEnding]
//! * [`MathFlow`][Name::MathFlow]
//! * [`MathFlowFence`][Name::MathFlowFence]
//! * [`MathFlowFenceMeta`][Name::MathFlowFenceMeta]
//! * [`MathFlowFenceSequence`][Name::MathFlowFenceSequence]
//! * [`MathFlowChunk`][Name::MathFlowChunk]
//! * [`SpaceOrTab`][Name::SpaceOrTab]
//!
//! ## References
//!
//! * [`code-fenced.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/code-fenced.js)
//! * [`micromark-extension-math`](https://github.com/micromark/micromark-extension-math)
//! * [*§ 4.5 Fenced code blocks* in `CommonMark`](https://spec.commonmark.org/0.31/#fenced-code-blocks)
//!
//! > 👉 **Note**: math is not specified anywhere.
//!
//! [flow]: crate::construct::flow
//! [string]: crate::construct::string
//! [text]: crate::construct::text
//! [character_escape]: crate::construct::character_escape
//! [character_reference]: crate::construct::character_reference
//! [code_indented]: crate::construct::code_indented
//! [raw_text]: crate::construct::raw_text
//! [html_code]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-code-element
//! [html_pre]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-pre-element
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::event::{Content, Link, Name};
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{
constant::{CODE_FENCED_SEQUENCE_SIZE_MIN, MATH_FLOW_SEQUENCE_SIZE_MIN, TAB_SIZE},
slice::{Position, Slice},
};
/// Start of raw.
///
/// ```markdown
/// > | ~~~js
///     ^
///   | console.log(1)
///   | ~~~
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    let code_fenced = tokenizer.parse_state.options.constructs.code_fenced;
    let math_flow = tokenizer.parse_state.options.constructs.math_flow;

    // Both constructs off: nothing to do.
    if !code_fenced && !math_flow {
        return State::Nok;
    }

    match tokenizer.current {
        // Optional indent before the fence.
        Some(b'\t' | b' ') => {
            tokenizer.attempt(
                State::Next(StateName::RawFlowBeforeSequenceOpen),
                State::Nok,
            );
            // When code (indented) is on, a full tab size of indent would be
            // indented code instead, so the prefix is capped just below it.
            let limit = if tokenizer.parse_state.options.constructs.code_indented {
                TAB_SIZE - 1
            } else {
                usize::MAX
            };
            State::Retry(space_or_tab_min_max(tokenizer, 0, limit))
        }
        // Possible fence marker.
        Some(b'$' | b'`' | b'~') => State::Retry(StateName::RawFlowBeforeSequenceOpen),
        _ => State::Nok,
    }
}
/// In opening fence, after prefix, at sequence.
///
/// ```markdown
/// > | ~~~js
///     ^
///   | console.log(1)
///   | ~~~
/// ```
pub fn before_sequence_open(tokenizer: &mut Tokenizer) -> State {
    // Measure the indent that was just consumed (if any): content lines may
    // later be stripped of up to this much whitespace.
    let prefix = match tokenizer.events.last() {
        Some(event) if event.name == Name::SpaceOrTab => Slice::from_position(
            tokenizer.parse_state.bytes,
            &Position::from_exit_event(&tokenizer.events, tokenizer.events.len() - 1),
        )
        .len(),
        _ => 0,
    };

    // Code (fenced).
    let code = tokenizer.parse_state.options.constructs.code_fenced
        && matches!(tokenizer.current, Some(b'`' | b'~'));
    // Math (flow).
    let math =
        tokenizer.parse_state.options.constructs.math_flow && tokenizer.current == Some(b'$');

    if !code && !math {
        return State::Nok;
    }

    tokenizer.tokenize_state.marker = tokenizer.current.unwrap();
    tokenizer.tokenize_state.size_c = prefix;

    // Configure the token names for the shared state machine.
    if math {
        tokenizer.tokenize_state.token_1 = Name::MathFlow;
        tokenizer.tokenize_state.token_2 = Name::MathFlowFence;
        tokenizer.tokenize_state.token_3 = Name::MathFlowFenceSequence;
        // Math (flow) does not support an `info` part: everything after the
        // opening sequence is the `meta` part.
        tokenizer.tokenize_state.token_5 = Name::MathFlowFenceMeta;
        tokenizer.tokenize_state.token_6 = Name::MathFlowChunk;
    } else {
        tokenizer.tokenize_state.token_1 = Name::CodeFenced;
        tokenizer.tokenize_state.token_2 = Name::CodeFencedFence;
        tokenizer.tokenize_state.token_3 = Name::CodeFencedFenceSequence;
        tokenizer.tokenize_state.token_4 = Name::CodeFencedFenceInfo;
        tokenizer.tokenize_state.token_5 = Name::CodeFencedFenceMeta;
        tokenizer.tokenize_state.token_6 = Name::CodeFlowChunk;
    }

    tokenizer.enter(tokenizer.tokenize_state.token_1.clone());
    tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
    tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
    State::Retry(StateName::RawFlowSequenceOpen)
}
/// In opening fence sequence.
///
/// Counts markers; the sequence must reach the minimum length for its kind
/// (2 for `$`, 3 for `` ` ``/`~`).
///
/// ```markdown
/// > | ~~~js
///     ^
///   | console.log(1)
///   | ~~~
/// ```
pub fn sequence_open(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        // Another marker: count and continue.
        tokenizer.tokenize_state.size += 1;
        tokenizer.consume();
        State::Next(StateName::RawFlowSequenceOpen)
    } else if tokenizer.tokenize_state.size
        < (if tokenizer.tokenize_state.marker == b'$' {
            MATH_FLOW_SEQUENCE_SIZE_MIN
        } else {
            CODE_FENCED_SEQUENCE_SIZE_MIN
        })
    {
        // Sequence too short: reset all shared state and fail.
        tokenizer.tokenize_state.marker = 0;
        tokenizer.tokenize_state.size_c = 0;
        tokenizer.tokenize_state.size = 0;
        tokenizer.tokenize_state.token_1 = Name::Data;
        tokenizer.tokenize_state.token_2 = Name::Data;
        tokenizer.tokenize_state.token_3 = Name::Data;
        tokenizer.tokenize_state.token_4 = Name::Data;
        tokenizer.tokenize_state.token_5 = Name::Data;
        tokenizer.tokenize_state.token_6 = Name::Data;
        State::Nok
    } else {
        // Math (flow) does not support an `info` part: everything after the
        // opening sequence is the `meta` part.
        let next = if tokenizer.tokenize_state.marker == b'$' {
            StateName::RawFlowMetaBefore
        } else {
            StateName::RawFlowInfoBefore
        };

        if matches!(tokenizer.current, Some(b'\t' | b' ')) {
            tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
            tokenizer.attempt(State::Next(next), State::Nok);
            State::Retry(space_or_tab(tokenizer))
        } else {
            tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
            State::Retry(next)
        }
    }
}
/// In opening fence, after the sequence (and optional whitespace), before info.
///
/// ```markdown
/// > | ~~~js
///        ^
///   | console.log(1)
///   | ~~~
/// ```
pub fn info_before(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // No info: the opening fence is done.
        tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
        // Do not form containers.
        tokenizer.concrete = true;
        tokenizer.check(
            State::Next(StateName::RawFlowAtNonLazyBreak),
            State::Next(StateName::RawFlowAfter),
        );
        State::Retry(StateName::NonLazyContinuationStart)
    } else {
        // Info follows, tokenized later as string content.
        tokenizer.enter(tokenizer.tokenize_state.token_4.clone());
        tokenizer.enter_link(
            Name::Data,
            Link {
                previous: None,
                next: None,
                content: Content::String,
            },
        );
        State::Retry(StateName::RawFlowInfo)
    }
}
/// In info.
///
/// ```markdown
/// > | ~~~js
///        ^
///   | console.log(1)
///   | ~~~
/// ```
pub fn info(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        None | Some(b'\n') => {
            // Eol/eof ends the info; `info_before` wraps up the fence.
            tokenizer.exit(Name::Data);
            tokenizer.exit(tokenizer.tokenize_state.token_4.clone());
            State::Retry(StateName::RawFlowInfoBefore)
        }
        Some(b'\t' | b' ') => {
            // Whitespace separates info from meta.
            tokenizer.exit(Name::Data);
            tokenizer.exit(tokenizer.tokenize_state.token_4.clone());
            tokenizer.attempt(State::Next(StateName::RawFlowMetaBefore), State::Nok);
            State::Retry(space_or_tab(tokenizer))
        }
        Some(byte) => {
            // This looks like code (text) / math (text).
            // Note: no reason to check for `~`, because 3 of them can‘t be
            // used as strikethrough in text.
            if tokenizer.tokenize_state.marker == byte && matches!(byte, b'$' | b'`') {
                // The marker in the info invalidates the fence: reset all
                // shared state (including `concrete`) and fail.
                tokenizer.concrete = false;
                tokenizer.tokenize_state.marker = 0;
                tokenizer.tokenize_state.size_c = 0;
                tokenizer.tokenize_state.size = 0;
                tokenizer.tokenize_state.token_1 = Name::Data;
                tokenizer.tokenize_state.token_2 = Name::Data;
                tokenizer.tokenize_state.token_3 = Name::Data;
                tokenizer.tokenize_state.token_4 = Name::Data;
                tokenizer.tokenize_state.token_5 = Name::Data;
                tokenizer.tokenize_state.token_6 = Name::Data;
                State::Nok
            } else {
                tokenizer.consume();
                State::Next(StateName::RawFlowInfo)
            }
        }
    }
}
/// In opening fence, after info and whitespace, before meta.
///
/// ```markdown
/// > | ~~~js eval
///           ^
///   | console.log(1)
///   | ~~~
/// ```
pub fn meta_before(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // No meta: wrap up the opening fence.
        State::Retry(StateName::RawFlowInfoBefore)
    } else {
        // Meta follows, tokenized later as string content.
        tokenizer.enter(tokenizer.tokenize_state.token_5.clone());
        tokenizer.enter_link(
            Name::Data,
            Link {
                previous: None,
                next: None,
                content: Content::String,
            },
        );
        State::Retry(StateName::RawFlowMeta)
    }
}
/// In meta.
///
/// ```markdown
/// > | ~~~js eval
///           ^
///   | console.log(1)
///   | ~~~
/// ```
pub fn meta(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        None | Some(b'\n') => {
            // Eol/eof ends the meta; `info_before` wraps up the fence.
            tokenizer.exit(Name::Data);
            tokenizer.exit(tokenizer.tokenize_state.token_5.clone());
            State::Retry(StateName::RawFlowInfoBefore)
        }
        Some(byte) => {
            // This looks like code (text) / math (text).
            // Note: no reason to check for `~`, because 3 of them can‘t be
            // used as strikethrough in text.
            if tokenizer.tokenize_state.marker == byte && matches!(byte, b'$' | b'`') {
                // The marker in the meta invalidates the fence: reset all
                // shared state (including `concrete`) and fail.
                tokenizer.concrete = false;
                tokenizer.tokenize_state.marker = 0;
                tokenizer.tokenize_state.size_c = 0;
                tokenizer.tokenize_state.size = 0;
                tokenizer.tokenize_state.token_1 = Name::Data;
                tokenizer.tokenize_state.token_2 = Name::Data;
                tokenizer.tokenize_state.token_3 = Name::Data;
                tokenizer.tokenize_state.token_4 = Name::Data;
                tokenizer.tokenize_state.token_5 = Name::Data;
                tokenizer.tokenize_state.token_6 = Name::Data;
                State::Nok
            } else {
                tokenizer.consume();
                State::Next(StateName::RawFlowMeta)
            }
        }
    }
}
/// At eol/eof in raw, before a non-lazy closing fence or content.
///
/// ```markdown
/// > | ~~~js
///          ^
/// > | console.log(1)
///                   ^
///   | ~~~
/// ```
pub fn at_non_lazy_break(tokenizer: &mut Tokenizer) -> State {
    // Try to parse a closing fence; on failure the line is content instead.
    tokenizer.attempt(
        State::Next(StateName::RawFlowAfter),
        State::Next(StateName::RawFlowContentBefore),
    );
    // Consume the eol itself before attempting the fence.
    tokenizer.enter(Name::LineEnding);
    tokenizer.consume();
    tokenizer.exit(Name::LineEnding);
    State::Next(StateName::RawFlowCloseStart)
}
/// Before closing fence, at optional whitespace.
///
/// ```markdown
///   | ~~~js
///   | console.log(1)
/// > | ~~~
///     ^
/// ```
pub fn close_start(tokenizer: &mut Tokenizer) -> State {
    tokenizer.enter(tokenizer.tokenize_state.token_2.clone());

    match tokenizer.current {
        Some(b'\t' | b' ') => {
            tokenizer.attempt(
                State::Next(StateName::RawFlowBeforeSequenceClose),
                State::Nok,
            );
            // Like the opening fence, the closing fence may be indented up to
            // just below a tab size (or arbitrarily when code (indented) is
            // off).
            let limit = if tokenizer.parse_state.options.constructs.code_indented {
                TAB_SIZE - 1
            } else {
                usize::MAX
            };
            State::Retry(space_or_tab_min_max(tokenizer, 0, limit))
        }
        _ => State::Retry(StateName::RawFlowBeforeSequenceClose),
    }
}
/// In closing fence, after optional whitespace, at sequence.
///
/// ```markdown
///   | ~~~js
///   | console.log(1)
/// > | ~~~
///     ^
/// ```
pub fn before_sequence_close(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Same marker as the opening fence: a closing sequence may start.
        Some(byte) if byte == tokenizer.tokenize_state.marker => {
            tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
            State::Retry(StateName::RawFlowSequenceClose)
        }
        _ => State::Nok,
    }
}
/// In closing fence sequence.
///
/// The closing sequence must have at least as many markers as the opening
/// one (`size_b` counts the close, `size` the open).
///
/// ```markdown
///   | ~~~js
///   | console.log(1)
/// > | ~~~
///     ^
/// ```
pub fn sequence_close(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        // Another marker: count and continue.
        tokenizer.tokenize_state.size_b += 1;
        tokenizer.consume();
        State::Next(StateName::RawFlowSequenceClose)
    } else if tokenizer.tokenize_state.size_b >= tokenizer.tokenize_state.size {
        // Long enough: close the sequence; trailing whitespace is allowed.
        tokenizer.tokenize_state.size_b = 0;
        tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
        if matches!(tokenizer.current, Some(b'\t' | b' ')) {
            tokenizer.attempt(
                State::Next(StateName::RawFlowAfterSequenceClose),
                State::Nok,
            );
            State::Retry(space_or_tab(tokenizer))
        } else {
            State::Retry(StateName::RawFlowAfterSequenceClose)
        }
    } else {
        // Too short to close the fence: reset the counter and fail.
        tokenizer.tokenize_state.size_b = 0;
        State::Nok
    }
}
/// After closing fence sequence, after optional whitespace.
///
/// ```markdown
///   | ~~~js
///   | console.log(1)
/// > | ~~~
///        ^
/// ```
pub fn sequence_close_after(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // Eol/eof after the closing fence: done.
        tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
        State::Ok
    } else {
        // Anything else means this was not a closing fence after all.
        State::Nok
    }
}
/// Before raw content, not a closing fence, at eol.
///
/// Consumes the line ending that precedes a content line.
///
/// ```markdown
///   | ~~~js
/// > | console.log(1)
///                   ^
///   | ~~~
/// ```
pub fn content_before(tokenizer: &mut Tokenizer) -> State {
    tokenizer.enter(Name::LineEnding);
    tokenizer.consume();
    tokenizer.exit(Name::LineEnding);
    State::Next(StateName::RawFlowContentStart)
}
/// Before raw content, not a closing fence.
///
/// ```markdown
///   | ~~~js
/// > | console.log(1)
///     ^
///   | ~~~
/// ```
pub fn content_start(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Strip at most `size_c` columns of whitespace prefix from the
        // content line.
        Some(b'\t' | b' ') => {
            tokenizer.attempt(
                State::Next(StateName::RawFlowBeforeContentChunk),
                State::Nok,
            );
            let prefix = tokenizer.tokenize_state.size_c;
            State::Retry(space_or_tab_min_max(tokenizer, 0, prefix))
        }
        _ => State::Retry(StateName::RawFlowBeforeContentChunk),
    }
}
/// Before raw content, after optional prefix.
///
/// ```markdown
///   | ~~~js
/// > | console.log(1)
///     ^
///   | ~~~
/// ```
pub fn before_content_chunk(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // At eol/eof: if a non-lazy continuation follows, keep going,
        // otherwise the raw construct is done.
        tokenizer.check(
            State::Next(StateName::RawFlowAtNonLazyBreak),
            State::Next(StateName::RawFlowAfter),
        );
        State::Retry(StateName::NonLazyContinuationStart)
    } else {
        // Content on this line.
        tokenizer.enter(tokenizer.tokenize_state.token_6.clone());
        State::Retry(StateName::RawFlowContentChunk)
    }
}
/// In raw content.
///
/// ```markdown
///   | ~~~js
/// > | console.log(1)
///     ^^^^^^^^^^^^^^
///   | ~~~
/// ```
pub fn content_chunk(tokenizer: &mut Tokenizer) -> State {
    if matches!(tokenizer.current, None | Some(b'\n')) {
        // Content line done.
        tokenizer.exit(tokenizer.tokenize_state.token_6.clone());
        State::Retry(StateName::RawFlowBeforeContentChunk)
    } else {
        // Eat content bytes.
        tokenizer.consume();
        State::Next(StateName::RawFlowContentChunk)
    }
}
/// After raw.
///
/// Closes the construct and resets all shared tokenize state so that
/// other constructs can use the same slots.
///
/// ```markdown
///   | ~~~js
///   | console.log(1)
/// > | ~~~
///        ^
/// ```
pub fn after(tokenizer: &mut Tokenizer) -> State {
    tokenizer.exit(tokenizer.tokenize_state.token_1.clone());
    // Reset the counters used while tokenizing this construct.
    tokenizer.tokenize_state.marker = 0;
    tokenizer.tokenize_state.size_c = 0;
    tokenizer.tokenize_state.size = 0;
    // Restore the token slots to their neutral value.
    tokenizer.tokenize_state.token_1 = Name::Data;
    tokenizer.tokenize_state.token_2 = Name::Data;
    tokenizer.tokenize_state.token_3 = Name::Data;
    tokenizer.tokenize_state.token_4 = Name::Data;
    tokenizer.tokenize_state.token_5 = Name::Data;
    tokenizer.tokenize_state.token_6 = Name::Data;
    // Feel free to interrupt.
    tokenizer.interrupt = false;
    // No longer concrete.
    tokenizer.concrete = false;
    State::Ok
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/raw_text.rs | Rust | //! Raw (text) occurs in the [text][] content type.
//! It forms code (text) and math (text).
//!
//! ## Grammar
//!
//! Raw (text) forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: the number of markers in the closing sequence must be equal
//! ; to the number of markers in the opening sequence.
//! raw_text ::= sequence 1*byte sequence
//!
//! ; Restriction: not preceded or followed by the same marker.
//! sequence ::= 1*'`' | 1*'$'
//! ```
//!
//! The above grammar shows that it is not possible to create empty raw (text).
//! It is possible to include the sequence marker (grave accent for code,
//! dollar for math) in raw (text), by wrapping it in bigger or smaller
//! sequences:
//!
//! ```markdown
//! Include more: `a``b` or include less: ``a`b``.
//! ```
//!
//! It is also possible to include just one marker:
//!
//! ```markdown
//! Include just one: `` ` ``.
//! ```
//!
//! Sequences are “greedy”, in that they cannot be preceded or followed by
//! more markers.
//! To illustrate:
//!
//! ```markdown
//! Not code: ``x`.
//!
//! Not code: `x``.
//!
//! Escapes work, this is code: \``x`.
//!
//! Escapes work, this is code: `x`\`.
//! ```
//!
//! Yields:
//!
//! ```html
//! <p>Not code: ``x`.</p>
//! <p>Not code: `x``.</p>
//! <p>Escapes work, this is code: `<code>x</code>.</p>
//! <p>Escapes work, this is code: <code>x</code>`.</p>
//! ```
//!
//! That is because, when turning markdown into HTML, the first and last space,
//! if both exist and there is also a non-space in the code, are removed.
//! Line endings, at that stage, are considered as spaces.
//!
//! In markdown, it is possible to create code or math with the
//! [raw (flow)][raw_flow] (or [code (indented)][code_indented]) constructs
//! in the [flow][] content type.
//!
//! ## HTML
//!
//! Code (text) relates to the `<code>` element in HTML.
//! See [*§ 4.5.15 The `code` element*][html_code] in the HTML spec for more
//! info.
//!
//! Math (text) does not relate to HTML elements.
//! `MathML`, which is sort of like SVG but for math, exists but it doesn’t work
//! well and isn’t widely supported.
//! Instead, it is recommended to use client side JavaScript with something like
//! `KaTeX` or `MathJax` to process the math.
//! For that, the math is compiled as a `<code>` element with two classes:
//! `language-math` and `math-inline`.
//! Client side JavaScript can look for these classes to process them further.
//!
//! When turning markdown into HTML, each line ending in raw (text) is turned
//! into a space.
//!
//! ## Recommendations
//!
//! When authoring markdown with math, keep in mind that math doesn’t work in
//! most places.
//! Notably, GitHub currently has a really weird crappy client-side regex-based
//! thing.
//! But on your own (math-heavy?) site it can be great!
//! You can set [`parse_options.math_text_single_dollar: false`][parse_options]
//! to improve this, as it prevents single dollars from being seen as math, and
//! thus prevents normal dollars in text from being seen as math.
//!
//! ## Tokens
//!
//! * [`CodeText`][Name::CodeText]
//! * [`CodeTextData`][Name::CodeTextData]
//! * [`CodeTextSequence`][Name::CodeTextSequence]
//! * [`MathText`][Name::MathText]
//! * [`MathTextData`][Name::MathTextData]
//! * [`MathTextSequence`][Name::MathTextSequence]
//! * [`LineEnding`][Name::LineEnding]
//!
//! ## References
//!
//! * [`code-text.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/code-text.js)
//! * [`micromark-extension-math`](https://github.com/micromark/micromark-extension-math)
//! * [*§ 6.1 Code spans* in `CommonMark`](https://spec.commonmark.org/0.31/#code-spans)
//!
//! > 👉 **Note**: math is not specified anywhere.
//!
//! [flow]: crate::construct::flow
//! [text]: crate::construct::text
//! [code_indented]: crate::construct::code_indented
//! [raw_flow]: crate::construct::raw_flow
//! [html_code]: https://html.spec.whatwg.org/multipage/text-level-semantics.html#the-code-element
//! [parse_options]: crate::ParseOptions
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
/// Start of raw (text).
///
/// Fires for a backtick (code) or dollar (math) when the matching
/// construct is enabled, and only when the previous byte is not the same
/// marker — unless that previous marker was itself part of a character
/// escape (checked via the last emitted event).
///
/// ```markdown
/// > | `a`
///     ^
/// > | \`a`
///      ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Code (text):
    if ((tokenizer.parse_state.options.constructs.code_text && tokenizer.current == Some(b'`'))
        // Math (text):
        || (tokenizer.parse_state.options.constructs.math_text && tokenizer.current == Some(b'$')))
        // Not the same marker (except when escaped).
        && (tokenizer.previous != tokenizer.current
            || (!tokenizer.events.is_empty()
                && tokenizer.events[tokenizer.events.len() - 1].name == Name::CharacterEscape))
    {
        let marker = tokenizer.current.unwrap();

        // Select the token names for the construct matching the marker.
        if marker == b'`' {
            tokenizer.tokenize_state.token_1 = Name::CodeText;
            tokenizer.tokenize_state.token_2 = Name::CodeTextSequence;
            tokenizer.tokenize_state.token_3 = Name::CodeTextData;
        } else {
            tokenizer.tokenize_state.token_1 = Name::MathText;
            tokenizer.tokenize_state.token_2 = Name::MathTextSequence;
            tokenizer.tokenize_state.token_3 = Name::MathTextData;
        }

        tokenizer.tokenize_state.marker = marker;
        tokenizer.enter(tokenizer.tokenize_state.token_1.clone());
        tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
        State::Retry(StateName::RawTextSequenceOpen)
    } else {
        State::Nok
    }
}
/// In opening sequence.
///
/// ```markdown
/// > | `a`
///     ^
/// ```
pub fn sequence_open(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        // Another marker: grow the opening sequence.
        tokenizer.tokenize_state.size += 1;
        tokenizer.consume();
        State::Next(StateName::RawTextSequenceOpen)
    } else {
        // A single dollar cannot open math (text) when that option is off.
        let forbidden_single_dollar = tokenizer.tokenize_state.marker == b'$'
            && tokenizer.tokenize_state.size == 1
            && !tokenizer.parse_state.options.math_text_single_dollar;

        if forbidden_single_dollar {
            tokenizer.tokenize_state.marker = 0;
            tokenizer.tokenize_state.size = 0;
            tokenizer.tokenize_state.token_1 = Name::Data;
            tokenizer.tokenize_state.token_2 = Name::Data;
            tokenizer.tokenize_state.token_3 = Name::Data;
            State::Nok
        } else {
            tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
            State::Retry(StateName::RawTextBetween)
        }
    }
}
/// Between something and something else.
///
/// ```markdown
/// > | `a`
///      ^^
/// ```
pub fn between(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Eof before a closing sequence: not raw (text) after all.
        None => {
            tokenizer.tokenize_state.marker = 0;
            tokenizer.tokenize_state.size = 0;
            tokenizer.tokenize_state.token_1 = Name::Data;
            tokenizer.tokenize_state.token_2 = Name::Data;
            tokenizer.tokenize_state.token_3 = Name::Data;
            State::Nok
        }
        // Line ending: consume and keep looking.
        Some(b'\n') => {
            tokenizer.enter(Name::LineEnding);
            tokenizer.consume();
            tokenizer.exit(Name::LineEnding);
            State::Next(StateName::RawTextBetween)
        }
        // The marker: possibly a closing sequence.
        Some(byte) if byte == tokenizer.tokenize_state.marker => {
            tokenizer.enter(tokenizer.tokenize_state.token_2.clone());
            State::Retry(StateName::RawTextSequenceClose)
        }
        // Anything else: data.
        _ => {
            tokenizer.enter(tokenizer.tokenize_state.token_3.clone());
            State::Retry(StateName::RawTextData)
        }
    }
}
/// In data.
///
/// ```markdown
/// > | `a`
///      ^
/// ```
pub fn data(tokenizer: &mut Tokenizer) -> State {
    // Data ends at eof, eol, or the sequence marker.
    let at_break = matches!(tokenizer.current, None | Some(b'\n'))
        || tokenizer.current == Some(tokenizer.tokenize_state.marker);

    if at_break {
        tokenizer.exit(tokenizer.tokenize_state.token_3.clone());
        State::Retry(StateName::RawTextBetween)
    } else {
        tokenizer.consume();
        State::Next(StateName::RawTextData)
    }
}
/// In closing sequence.
///
/// The closing sequence must contain exactly as many markers as the
/// opening one (`size`); the closing length is counted in `size_b`.
///
/// ```markdown
/// > | `a`
///       ^
/// ```
pub fn sequence_close(tokenizer: &mut Tokenizer) -> State {
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        // Another marker: keep counting.
        tokenizer.tokenize_state.size_b += 1;
        tokenizer.consume();
        State::Next(StateName::RawTextSequenceClose)
    } else {
        tokenizer.exit(tokenizer.tokenize_state.token_2.clone());
        if tokenizer.tokenize_state.size == tokenizer.tokenize_state.size_b {
            // Exactly matching sequence: done, reset all state.
            tokenizer.exit(tokenizer.tokenize_state.token_1.clone());
            tokenizer.tokenize_state.marker = 0;
            tokenizer.tokenize_state.size = 0;
            tokenizer.tokenize_state.size_b = 0;
            tokenizer.tokenize_state.token_1 = Name::Data;
            tokenizer.tokenize_state.token_2 = Name::Data;
            tokenizer.tokenize_state.token_3 = Name::Data;
            State::Ok
        } else {
            // More or less accents: mark as data.
            // Relabel the enter/exit pair just emitted for this sequence
            // (the last two events) as data, then continue scanning.
            let len = tokenizer.events.len();
            tokenizer.events[len - 2].name = tokenizer.tokenize_state.token_3.clone();
            tokenizer.events[len - 1].name = tokenizer.tokenize_state.token_3.clone();
            tokenizer.tokenize_state.size_b = 0;
            State::Retry(StateName::RawTextBetween)
        }
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/string.rs | Rust | //! The string content type.
//!
//! **String** is a limited [text][] like content type which only allows
//! character escapes and character references.
//! It exists in things such as identifiers (media references, definitions),
//! titles, URLs, code (fenced) info and meta parts.
//!
//! The constructs found in string are:
//!
//! * [Character escape][crate::construct::character_escape]
//! * [Character reference][crate::construct::character_reference]
//!
//! [text]: crate::construct::text
use crate::construct::partial_whitespace::resolve_whitespace;
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
/// Characters that can start something in string.
///
/// `&` starts a character reference, `\` a character escape.
const MARKERS: [u8; 2] = [b'&', b'\\'];
/// Start of string.
///
/// ````markdown
/// > | ```js
///        ^
/// ````
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Tell the data construct which bytes can end a data run here.
    tokenizer.tokenize_state.markers = &MARKERS;
    State::Retry(StateName::StringBefore)
}
/// Before string.
///
/// ````markdown
/// > | ```js
///        ^
/// ````
pub fn before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Done: register the resolvers that run over the finished events.
        None => {
            tokenizer.register_resolver(ResolveName::Data);
            tokenizer.register_resolver(ResolveName::String);
            State::Ok
        }
        // Character reference (`&`) or character escape (`\`); on failure,
        // fall back to data.
        Some(b'&') | Some(b'\\') => {
            tokenizer.attempt(
                State::Next(StateName::StringBefore),
                State::Next(StateName::StringBeforeData),
            );
            if tokenizer.current == Some(b'&') {
                State::Retry(StateName::CharacterReferenceStart)
            } else {
                State::Retry(StateName::CharacterEscapeStart)
            }
        }
        _ => State::Retry(StateName::StringBeforeData),
    }
}
/// At data.
///
/// ````markdown
/// > | ```js
///        ^
/// ````
pub fn before_data(tokenizer: &mut Tokenizer) -> State {
    // Data must succeed here; otherwise the whole string fails.
    tokenizer.attempt(State::Next(StateName::StringBefore), State::Nok);
    State::Retry(StateName::DataStart)
}
/// Resolve whitespace in string.
///
/// Both whitespace flags are off here, unlike in `text::resolve` which
/// passes the hard break (trailing) option and trims.
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    resolve_whitespace(tokenizer, false, false);
    None
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/text.rs | Rust | //! The text content type.
//!
//! **Text** contains phrasing content such as
//! [attention][crate::construct::attention] (emphasis, gfm strikethrough, strong),
//! [raw (text)][crate::construct::raw_text] (code (text), math (text)), and actual text.
//!
//! The constructs found in text are:
//!
//! * [Attention][crate::construct::attention] (emphasis, gfm strikethrough, strong)
//! * [Autolink][crate::construct::autolink]
//! * [Character escape][crate::construct::character_escape]
//! * [Character reference][crate::construct::character_reference]
//! * [Raw (text)][crate::construct::raw_text] (code (text), math (text))
//! * [GFM: Label start (footnote)][crate::construct::gfm_label_start_footnote]
//! * [GFM: Task list item check][crate::construct::gfm_task_list_item_check]
//! * [Hard break (escape)][crate::construct::hard_break_escape]
//! * [HTML (text)][crate::construct::html_text]
//! * [Label start (image)][crate::construct::label_start_image]
//! * [Label start (link)][crate::construct::label_start_link]
//! * [Label end][crate::construct::label_end]
//! * [MDX: expression (text)][crate::construct::mdx_expression_text]
//! * [MDX: JSX (text)][crate::construct::mdx_jsx_text]
//!
//! > 👉 **Note**: for performance reasons, hard break (trailing) is formed by
//! > [whitespace][crate::construct::partial_whitespace].
use crate::construct::gfm_autolink_literal::resolve as resolve_gfm_autolink_literal;
use crate::construct::partial_whitespace::resolve_whitespace;
use crate::resolve::Name as ResolveName;
use crate::state::{Name as StateName, State};
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
/// Characters that can start something in text.
///
/// Each byte is annotated with the construct(s) it may start; the list
/// tells the data construct which bytes can end a data run.
const MARKERS: [u8; 16] = [
    b'!', // `label_start_image`
    b'$', // `raw_text` (math (text))
    b'&', // `character_reference`
    b'*', // `attention` (emphasis, strong)
    b'<', // `autolink`, `html_text`, `mdx_jsx_text`
    b'H', // `gfm_autolink_literal` (`protocol` kind)
    b'W', // `gfm_autolink_literal` (`www.` kind)
    b'[', // `label_start_link`
    b'\\', // `character_escape`, `hard_break_escape`
    b']', // `label_end`, `gfm_label_start_footnote`
    b'_', // `attention` (emphasis, strong)
    b'`', // `raw_text` (code (text))
    b'h', // `gfm_autolink_literal` (`protocol` kind)
    b'w', // `gfm_autolink_literal` (`www.` kind)
    b'{', // `mdx_expression_text`
    b'~', // `attention` (gfm strikethrough)
];
/// Start of text.
///
/// There is a slightly weird case where task list items have their check at
/// the start of the first paragraph.
/// So we start by checking for that.
///
/// ```markdown
/// > | abc
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Tell the data construct which bytes can end a data run here.
    tokenizer.tokenize_state.markers = &MARKERS;
    // Whether or not the check matches, continue with regular text.
    tokenizer.attempt(
        State::Next(StateName::TextBefore),
        State::Next(StateName::TextBefore),
    );
    State::Retry(StateName::GfmTaskListItemCheckStart)
}
/// Before text.
///
/// Dispatches on the current byte to the construct that may start there;
/// on failure most arms fall back to plain data, while `<`, `[`, and `\`
/// fall back through more specific chains.
///
/// ```markdown
/// > | abc
///     ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Done: register the resolvers that run over the finished events.
        None => {
            tokenizer.register_resolver(ResolveName::Data);
            tokenizer.register_resolver(ResolveName::Text);
            State::Ok
        }
        // label start (image)
        Some(b'!') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeData),
            );
            State::Retry(StateName::LabelStartImageStart)
        }
        // raw (text) (code (text), math (text))
        Some(b'$' | b'`') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeData),
            );
            State::Retry(StateName::RawTextStart)
        }
        // character reference
        Some(b'&') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeData),
            );
            State::Retry(StateName::CharacterReferenceStart)
        }
        // attention (emphasis, gfm strikethrough, strong)
        Some(b'*' | b'_' | b'~') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeData),
            );
            State::Retry(StateName::AttentionStart)
        }
        // `autolink`, `html_text` (order does not matter), `mdx_jsx_text` (order matters).
        Some(b'<') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeHtml),
            );
            State::Retry(StateName::AutolinkStart)
        }
        // gfm autolink literal (`protocol` kind)
        Some(b'H' | b'h') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeData),
            );
            State::Retry(StateName::GfmAutolinkLiteralProtocolStart)
        }
        // gfm autolink literal (`www.` kind)
        Some(b'W' | b'w') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeData),
            );
            State::Retry(StateName::GfmAutolinkLiteralWwwStart)
        }
        // gfm label start (footnote), falling back to label start (link)
        Some(b'[') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeLabelStartLink),
            );
            State::Retry(StateName::GfmLabelStartFootnoteStart)
        }
        // character escape, falling back to hard break (escape)
        Some(b'\\') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeHardBreakEscape),
            );
            State::Retry(StateName::CharacterEscapeStart)
        }
        // label end
        Some(b']') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeData),
            );
            State::Retry(StateName::LabelEndStart)
        }
        // mdx expression (text)
        Some(b'{') => {
            tokenizer.attempt(
                State::Next(StateName::TextBefore),
                State::Next(StateName::TextBeforeData),
            );
            State::Retry(StateName::MdxExpressionTextStart)
        }
        _ => State::Retry(StateName::TextBeforeData),
    }
}
/// Before html (text).
///
/// At `<`, which wasn’t an autolink; mdx jsx (text) is tried next on
/// failure.
///
/// ```markdown
/// > | a <b>
///       ^
/// ```
pub fn before_html(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::TextBefore),
        State::Next(StateName::TextBeforeMdxJsx),
    );
    State::Retry(StateName::HtmlTextStart)
}
/// Before mdx jsx (text).
///
/// At `<`, which wasn’t an autolink or html; data is the final fallback.
///
/// ```markdown
/// > | a <b>
///       ^
/// ```
pub fn before_mdx_jsx(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::TextBefore),
        State::Next(StateName::TextBeforeData),
    );
    State::Retry(StateName::MdxJsxTextStart)
}
/// Before hard break escape.
///
/// At `\`, which wasn’t a character escape; data is the final fallback.
///
/// ```markdown
/// > | a \␊
///       ^
/// ```
pub fn before_hard_break_escape(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::TextBefore),
        State::Next(StateName::TextBeforeData),
    );
    State::Retry(StateName::HardBreakEscapeStart)
}
/// Before label start (link).
///
/// At `[`, which wasn’t a GFM label start (footnote); data is the final
/// fallback.
///
/// ```markdown
/// > | [a](b)
///     ^
/// ```
pub fn before_label_start_link(tokenizer: &mut Tokenizer) -> State {
    tokenizer.attempt(
        State::Next(StateName::TextBefore),
        State::Next(StateName::TextBeforeData),
    );
    State::Retry(StateName::LabelStartLinkStart)
}
/// Before data.
///
/// ```markdown
/// > | a
///     ^
/// ```
pub fn before_data(tokenizer: &mut Tokenizer) -> State {
    // Data must succeed here; otherwise the whole text fails.
    tokenizer.attempt(State::Next(StateName::TextBefore), State::Nok);
    State::Retry(StateName::DataStart)
}
/// Resolve whitespace.
///
/// Handles trailing/leading whitespace (including hard break (trailing)
/// when enabled), then optionally runs the gfm autolink literal resolver,
/// and finally applies the pending event map.
pub fn resolve(tokenizer: &mut Tokenizer) -> Option<Subresult> {
    // Read the config flags up front (plain bools, never mutated).
    let hard_break_trailing = tokenizer.parse_state.options.constructs.hard_break_trailing;
    let autolink_literal = tokenizer
        .parse_state
        .options
        .constructs
        .gfm_autolink_literal;

    resolve_whitespace(tokenizer, hard_break_trailing, true);

    if autolink_literal {
        resolve_gfm_autolink_literal(tokenizer);
    }

    tokenizer.map.consume(&mut tokenizer.events);

    None
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/construct/thematic_break.rs | Rust | //! Thematic break occurs in the [flow][] content type.
//!
//! ## Grammar
//!
//! Thematic break forms with the following BNF
//! (<small>see [construct][crate::construct] for character groups</small>):
//!
//! ```bnf
//! ; Restriction: all markers must be identical.
//! ; Restriction: at least 3 markers must be used.
//! thematic_break ::= *space_or_tab 1*(1*marker *space_or_tab)
//!
//! marker ::= '*' | '-' | '_'
//! ```
//!
//! As this construct occurs in flow, like all flow constructs, it must be
//! followed by an eol (line ending) or eof (end of file).
//!
//! ## HTML
//!
//! Thematic breaks in markdown typically relate to the HTML element `<hr>`.
//! See [*§ 4.4.2 The `hr` element* in the HTML spec][html] for more info.
//!
//! ## Recommendation
//!
//! It is recommended to use exactly three asterisks without whitespace when
//! writing markdown.
//! As using more than three markers has no effect other than wasting space,
//! it is recommended to use exactly three markers.
//! Thematic breaks formed with asterisks or dashes can interfere with
//! [list][list-item]s if there is whitespace between them: `* * *` and `- - -`.
//! For these reasons, it is recommended to not use spaces or tabs between the
//! markers.
//! Thematic breaks formed with dashes (without whitespace) can also form
//! [heading (setext)][heading_setext].
//! As dashes and underscores frequently occur in natural language and URLs, it
//! is recommended to use asterisks for thematic breaks to distinguish from
//! such use.
//! Because asterisks can be used to form the most markdown constructs, using
//! them has the added benefit of making it easier to gloss over markdown: you
//! can look for asterisks to find syntax while not worrying about other
//! characters.
//!
//! ## Tokens
//!
//! * [`ThematicBreak`][Name::ThematicBreak]
//! * [`ThematicBreakSequence`][Name::ThematicBreakSequence]
//!
//! ## References
//!
//! * [`thematic-break.js` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-core-commonmark/dev/lib/thematic-break.js)
//! * [*§ 4.1 Thematic breaks* in `CommonMark`](https://spec.commonmark.org/0.31/#thematic-breaks)
//!
//! [flow]: crate::construct::flow
//! [heading_setext]: crate::construct::heading_setext
//! [list-item]: crate::construct::list_item
//! [html]: https://html.spec.whatwg.org/multipage/grouping-content.html#the-hr-element
use crate::construct::partial_space_or_tab::{space_or_tab, space_or_tab_min_max};
use crate::event::Name;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::constant::{TAB_SIZE, THEMATIC_BREAK_MARKER_COUNT_MIN};
/// Start of thematic break.
///
/// ```markdown
/// > | ***
///     ^
/// ```
pub fn start(tokenizer: &mut Tokenizer) -> State {
    // Construct may be turned off.
    if !tokenizer.parse_state.options.constructs.thematic_break {
        return State::Nok;
    }

    tokenizer.enter(Name::ThematicBreak);

    match tokenizer.current {
        // Optional leading whitespace.
        Some(b'\t' | b' ') => {
            // With indented code on, at most `TAB_SIZE - 1` columns are
            // allowed; otherwise any amount.
            let limit = if tokenizer.parse_state.options.constructs.code_indented {
                TAB_SIZE - 1
            } else {
                usize::MAX
            };
            tokenizer.attempt(State::Next(StateName::ThematicBreakBefore), State::Nok);
            State::Retry(space_or_tab_min_max(tokenizer, 0, limit))
        }
        _ => State::Retry(StateName::ThematicBreakBefore),
    }
}
/// After optional whitespace, at marker.
///
/// ```markdown
/// > | ***
///     ^
/// ```
pub fn before(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // Remember which marker is used; all later markers must match it.
        Some(byte) if matches!(byte, b'*' | b'-' | b'_') => {
            tokenizer.tokenize_state.marker = byte;
            State::Retry(StateName::ThematicBreakAtBreak)
        }
        _ => State::Nok,
    }
}
/// After something, before something else.
///
/// ```markdown
/// > | ***
///     ^
/// ```
pub fn at_break(tokenizer: &mut Tokenizer) -> State {
    // The marker again: another sequence follows.
    if tokenizer.current == Some(tokenizer.tokenize_state.marker) {
        tokenizer.enter(Name::ThematicBreakSequence);
        return State::Retry(StateName::ThematicBreakSequence);
    }

    let enough = tokenizer.tokenize_state.size >= THEMATIC_BREAK_MARKER_COUNT_MIN;
    let at_eol_or_eof = matches!(tokenizer.current, None | Some(b'\n'));

    // Whatever happens next, the shared state is no longer needed.
    tokenizer.tokenize_state.marker = 0;
    tokenizer.tokenize_state.size = 0;

    if enough && at_eol_or_eof {
        tokenizer.exit(Name::ThematicBreak);
        // Feel free to interrupt.
        tokenizer.interrupt = false;
        State::Ok
    } else {
        State::Nok
    }
}
/// In sequence.
///
/// ```markdown
/// > | ***
///     ^
/// ```
pub fn sequence(tokenizer: &mut Tokenizer) -> State {
    match tokenizer.current {
        // The marker again: grow the sequence.
        Some(byte) if byte == tokenizer.tokenize_state.marker => {
            tokenizer.tokenize_state.size += 1;
            tokenizer.consume();
            State::Next(StateName::ThematicBreakSequence)
        }
        // Whitespace between sequences is allowed.
        Some(b'\t' | b' ') => {
            tokenizer.exit(Name::ThematicBreakSequence);
            tokenizer.attempt(State::Next(StateName::ThematicBreakAtBreak), State::Nok);
            State::Retry(space_or_tab(tokenizer))
        }
        _ => {
            tokenizer.exit(Name::ThematicBreakSequence);
            State::Retry(StateName::ThematicBreakAtBreak)
        }
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/event.rs | Rust | //! Semantic labels of things happening.
use crate::unist;
use crate::util::constant::TAB_SIZE;
/// Semantic label of a span.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum Name {
/// Attention sequence.
///
/// > 👉 **Note**: this is used while parsing but compiled away.
AttentionSequence,
/// Whole autolink.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`AutolinkEmail`][Name::AutolinkEmail],
/// [`AutolinkMarker`][Name::AutolinkMarker],
/// [`AutolinkProtocol`][Name::AutolinkProtocol]
/// * **Construct**:
/// [`autolink`][crate::construct::autolink]
///
/// ## Example
///
/// ```markdown
/// > | <https://example.com> and <admin@example.com>
/// ^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^
/// ```
Autolink,
/// Email autolink w/o markers.
///
/// ## Info
///
/// * **Context**:
/// [`Autolink`][Name::Autolink]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`autolink`][crate::construct::autolink]
///
/// ## Example
///
/// ```markdown
/// > | <admin@example.com>
/// ^^^^^^^^^^^^^^^^^
/// ```
AutolinkEmail,
/// Marker of an autolink.
///
/// ## Info
///
/// * **Context**:
/// [`Autolink`][Name::Autolink]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`autolink`][crate::construct::autolink]
///
/// ## Example
///
/// ```markdown
/// > | <https://example.com>
/// ^ ^
/// ```
AutolinkMarker,
/// Protocol autolink w/o markers.
///
/// ## Info
///
/// * **Context**:
/// [`Autolink`][Name::Autolink]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`autolink`][crate::construct::autolink]
///
/// ## Example
///
/// ```markdown
/// > | <https://example.com>
/// ^^^^^^^^^^^^^^^^^^^
/// ```
AutolinkProtocol,
/// Line ending preceded only by whitespace or nothing at all.
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`blank_line`][crate::construct::blank_line]
///
/// ## Example
///
/// ```markdown
/// > | ␠␠␊
/// ^
/// ```
BlankLineEnding,
/// Whole block quote.
///
/// ## Info
///
/// * **Context**:
/// [document content][crate::construct::document]
/// * **Content model**:
/// [`BlockQuotePrefix`][Name::BlockQuotePrefix],
/// [flow content][crate::construct::flow]
/// * **Construct**:
/// [`block_quote`][crate::construct::block_quote]
///
/// ## Example
///
/// ```markdown
/// > | > a
/// ^^^
/// > | b
/// ^
/// ```
BlockQuote,
/// Block quote marker.
///
/// ## Info
///
/// * **Context**:
/// [`BlockQuotePrefix`][Name::BlockQuotePrefix]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`block_quote`][crate::construct::block_quote]
///
/// ## Example
///
/// ```markdown
/// > | > a
/// ^
/// | b
/// ```
BlockQuoteMarker,
/// Block quote prefix.
///
/// ## Info
///
/// * **Context**:
/// [`BlockQuote`][Name::BlockQuote]
/// * **Content model**:
/// [`BlockQuoteMarker`][Name::BlockQuoteMarker],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`block_quote`][crate::construct::block_quote]
///
/// ## Example
///
/// ```markdown
/// > | > a
/// ^^
/// | b
/// ```
BlockQuotePrefix,
/// Byte order mark.
///
/// ## Info
///
/// * **Context**:
/// optional first event
/// * **Content model**:
/// void
/// * **Construct**:
/// [`document`][crate::construct::document]
ByteOrderMark,
/// Whole character escape.
///
/// ## Info
///
/// * **Context**:
/// [string content][crate::construct::string] or
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`CharacterEscapeMarker`][Name::CharacterEscapeMarker],
/// [`CharacterEscapeValue`][Name::CharacterEscapeValue]
/// * **Construct**:
/// [`character_escape`][crate::construct::character_escape]
///
/// ## Example
///
/// ```markdown
/// > | a \- b
/// ^^
/// ```
CharacterEscape,
/// Character escape marker.
///
/// ## Info
///
/// * **Context**:
/// [`CharacterEscape`][Name::CharacterEscape]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`character_escape`][crate::construct::character_escape]
///
/// ## Example
///
/// ```markdown
/// > | a \- b
/// ^
/// ```
CharacterEscapeMarker,
/// Character escape value.
///
/// ## Info
///
/// * **Context**:
/// [`CharacterEscape`][Name::CharacterEscape]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`character_escape`][crate::construct::character_escape]
///
/// ## Example
///
/// ```markdown
/// > | a \- b
/// ^
/// ```
CharacterEscapeValue,
/// Whole character reference.
///
/// ## Info
///
/// * **Context**:
/// [string content][crate::construct::string] or
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`CharacterReferenceMarker`][Name::CharacterReferenceMarker],
/// [`CharacterReferenceMarkerHexadecimal`][Name::CharacterReferenceMarkerHexadecimal],
/// [`CharacterReferenceMarkerNumeric`][Name::CharacterReferenceMarkerNumeric],
/// [`CharacterReferenceMarkerSemi`][Name::CharacterReferenceMarkerSemi],
/// [`CharacterReferenceValue`][Name::CharacterReferenceValue]
/// * **Construct**:
/// [`character_reference`][crate::construct::character_reference]
///
/// ## Example
///
/// ```markdown
/// > | a & b ≠ c 𝌆 d
/// ^^^^^ ^^^^^^^ ^^^^^^^^^
/// ```
CharacterReference,
/// Character reference opening marker.
///
/// ## Info
///
/// * **Context**:
/// [`CharacterReference`][Name::CharacterReference]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`character_reference`][crate::construct::character_reference]
///
/// ## Example
///
/// ```markdown
/// > | a & b ≠ c 𝌆 d
/// ^ ^ ^
/// ```
CharacterReferenceMarker,
/// Character reference hexadecimal numeric marker.
///
/// ## Info
///
/// * **Context**:
/// [`CharacterReference`][Name::CharacterReference]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`character_reference`][crate::construct::character_reference]
///
/// ## Example
///
/// ```markdown
/// > | a & b ≠ c 𝌆 d
/// ^
/// ```
CharacterReferenceMarkerHexadecimal,
/// Character reference numeric marker.
///
/// ## Info
///
/// * **Context**:
/// [`CharacterReference`][Name::CharacterReference]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`character_reference`][crate::construct::character_reference]
///
/// ## Example
///
/// ```markdown
/// > | a & b ≠ c 𝌆 d
/// ^ ^
/// ```
CharacterReferenceMarkerNumeric,
/// Character reference closing marker.
///
/// ## Info
///
/// * **Context**:
/// [`CharacterReference`][Name::CharacterReference]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`character_reference`][crate::construct::character_reference]
///
/// ## Example
///
/// ```markdown
/// > | a & b ≠ c 𝌆 d
/// ^ ^ ^
/// ```
CharacterReferenceMarkerSemi,
/// Character reference value.
///
/// ## Info
///
/// * **Context**:
/// [`CharacterReference`][Name::CharacterReference]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`character_reference`][crate::construct::character_reference]
///
/// ## Example
///
/// ```markdown
/// > | a & b ≠ c 𝌆 d
/// ^^^ ^^^^ ^^^^^
/// ```
CharacterReferenceValue,
/// Whole code (fenced).
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`CodeFencedFence`][Name::CodeFencedFence],
/// [`CodeFlowChunk`][Name::CodeFlowChunk],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ````markdown
/// > | ```js
/// ^^^^^
/// > | console.log(1)
/// ^^^^^^^^^^^^^^
/// > | ```
/// ^^^
/// ````
CodeFenced,
/// A code (fenced) fence.
///
/// ## Info
///
/// * **Context**:
/// [`CodeFenced`][Name::CodeFenced]
/// * **Content model**:
/// [`CodeFencedFenceInfo`][Name::CodeFencedFenceInfo],
/// [`CodeFencedFenceMeta`][Name::CodeFencedFenceMeta],
/// [`CodeFencedFenceSequence`][Name::CodeFencedFenceSequence],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ````markdown
/// > | ```js
/// ^^^^^
/// | console.log(1)
/// > | ```
/// ^^^
/// ````
CodeFencedFence,
/// A code (fenced) fence info word.
///
/// ## Info
///
/// * **Context**:
/// [`CodeFencedFence`][Name::CodeFencedFence]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ````markdown
/// > | ```js
/// ^^
/// | console.log(1)
/// | ```
/// ````
CodeFencedFenceInfo,
/// A code (fenced) fence meta string.
///
/// ## Info
///
/// * **Context**:
/// [`CodeFencedFence`][Name::CodeFencedFence]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ````markdown
/// > | ```js highlight="1"
/// ^^^^^^^^^^^^^
/// | console.log(1)
/// | ```
/// ````
CodeFencedFenceMeta,
/// A code (fenced) fence sequence.
///
/// ## Info
///
/// * **Context**:
    ///   [`CodeFencedFence`][Name::CodeFencedFence]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ````markdown
/// > | ```js
/// ^^^
/// | console.log(1)
/// > | ```
/// ^^^
/// ````
CodeFencedFenceSequence,
/// A code (fenced, indented) chunk.
///
/// ## Info
///
/// * **Context**:
/// [`CodeFenced`][Name::CodeFenced],
/// [`CodeIndented`][Name::CodeIndented]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow],
/// [`code_indented`][crate::construct::code_indented]
///
/// ## Example
///
/// ````markdown
/// | ```js
/// > | console.log(1)
/// ^^^^^^^^^^^^^^
/// | ```
/// ````
///
/// ```markdown
/// > | ␠␠␠␠console.log(1)
/// ^^^^^^^^^^^^^^
/// ```
CodeFlowChunk,
/// Whole code (indented).
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`CodeFlowChunk`][Name::CodeFlowChunk],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ```markdown
/// ␠␠␠␠console.log(1)
/// ^^^^^^^^^^^^^^^^^^
/// ```
CodeIndented,
/// Whole code (text).
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`CodeTextData`][Name::CodeTextData],
/// [`CodeTextSequence`][Name::CodeTextSequence],
/// [`LineEnding`][Name::LineEnding]
/// * **Construct**:
/// [`raw_text`][crate::construct::raw_text]
///
/// ## Example
///
/// ```markdown
/// > | a `b` c
/// ^^^
/// ```
CodeText,
/// Code (text) data.
///
/// ## Info
///
/// * **Context**:
/// [`CodeText`][Name::CodeText]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`raw_text`][crate::construct::raw_text]
///
/// ## Example
///
/// ```markdown
/// > | a `b` c
/// ^
/// ```
CodeTextData,
/// Code (text) sequence.
///
/// ## Info
///
/// * **Context**:
/// [`CodeText`][Name::CodeText]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`raw_text`][crate::construct::raw_text]
///
/// ## Example
///
/// ```markdown
/// > | a `b` c
/// ^ ^
/// ```
CodeTextSequence,
/// Content.
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [content][crate::construct::content]
/// * **Construct**:
/// [`content`][crate::construct::content]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b
/// ^^^^^^
/// > | c.
/// ^^
/// ```
Content,
/// Data.
///
/// ## Info
///
/// * **Context**:
/// [string content][crate::construct::string],
/// [text content][crate::construct::text]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`data`][crate::construct::partial_data]
///
/// ## Example
///
/// ```markdown
/// > | aa *bb* cc
/// ^^^ ^^ ^^^
/// ```
Data,
/// Whole definition.
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`DefinitionMarker`][Name::DefinitionMarker],
/// [`DefinitionLabel`][Name::DefinitionLabel],
/// [`DefinitionDestination`][Name::DefinitionDestination],
/// [`DefinitionTitle`][Name::DefinitionTitle],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`definition`][crate::construct::definition]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^^^^^^^^^^
/// ```
Definition,
/// Whole definition destination.
///
/// ## Info
///
/// * **Context**:
/// [`Definition`][Name::Definition]
/// * **Content model**:
/// [`DefinitionDestinationLiteral`][Name::DefinitionDestinationLiteral],
/// [`DefinitionDestinationRaw`][Name::DefinitionDestinationRaw]
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^
/// > | [a]: <b> "c"
/// ^^^
/// ```
DefinitionDestination,
/// Definition destination literal.
///
/// ## Info
///
/// * **Context**:
/// [`DefinitionDestination`][Name::DefinitionDestination]
/// * **Content model**:
/// [`DefinitionDestinationLiteralMarker`][Name::DefinitionDestinationLiteralMarker],
/// [`DefinitionDestinationString`][Name::DefinitionDestinationString]
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | [a]: <b> "c"
/// ^^^
/// ```
DefinitionDestinationLiteral,
/// Definition destination literal marker.
///
/// ## Info
///
/// * **Context**:
/// [`DefinitionDestinationLiteral`][Name::DefinitionDestinationLiteral]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | [a]: <b> "c"
/// ^ ^
/// ```
DefinitionDestinationLiteralMarker,
/// Definition destination raw.
///
/// ## Info
///
/// * **Context**:
/// [`DefinitionDestination`][Name::DefinitionDestination]
/// * **Content model**:
/// [`DefinitionDestinationString`][Name::DefinitionDestinationString]
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^
/// ```
DefinitionDestinationRaw,
/// Definition destination data.
///
/// ## Info
///
/// * **Context**:
/// [`DefinitionDestinationLiteral`][Name::DefinitionDestinationLiteral],
/// [`DefinitionDestinationRaw`][Name::DefinitionDestinationRaw]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^
/// > | [a]: <b> "c"
/// ^
/// ```
DefinitionDestinationString,
/// Whole definition label.
///
/// ## Info
///
/// * **Context**:
/// [`Definition`][Name::Definition]
/// * **Content model**:
/// [`DefinitionLabelMarker`][Name::DefinitionLabelMarker],
/// [`DefinitionLabelString`][Name::DefinitionLabelString],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`label`][crate::construct::partial_label]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^^^
/// ```
DefinitionLabel,
/// Definition label marker.
///
/// ## Info
///
/// * **Context**:
/// [`DefinitionLabel`][Name::DefinitionLabel]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`label`][crate::construct::partial_label]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^ ^
/// ```
DefinitionLabelMarker,
/// Definition label data.
///
/// ## Info
///
/// * **Context**:
/// [`DefinitionLabel`][Name::DefinitionLabel]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`label`][crate::construct::partial_label]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^
/// ```
DefinitionLabelString,
/// Definition marker.
///
/// ## Info
///
/// * **Context**:
/// [`Definition`][Name::Definition],
/// [`GfmFootnoteDefinition`][Name::GfmFootnoteDefinition]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`definition`][crate::construct::definition]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^
/// ```
DefinitionMarker,
/// Whole definition title.
///
/// ## Info
///
/// * **Context**:
/// [`Definition`][Name::Definition]
/// * **Content model**:
/// [`DefinitionTitleMarker`][Name::DefinitionTitleMarker],
/// [`DefinitionTitleString`][Name::DefinitionTitleString],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`title`][crate::construct::partial_title]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^^^
/// ```
DefinitionTitle,
/// Definition title marker.
///
/// ## Info
///
/// * **Context**:
/// [`DefinitionTitle`][Name::DefinitionTitle]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`title`][crate::construct::partial_title]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^ ^
/// ```
DefinitionTitleMarker,
/// Definition title data.
///
/// ## Info
///
/// * **Context**:
/// [`DefinitionTitle`][Name::DefinitionTitle]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`title`][crate::construct::partial_title]
///
/// ## Example
///
/// ```markdown
/// > | [a]: b "c"
/// ^
/// ```
DefinitionTitleString,
/// Emphasis.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`EmphasisSequence`][Name::EmphasisSequence],
/// [`EmphasisText`][Name::EmphasisText]
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | *a*
/// ^^^
/// ```
Emphasis,
/// Emphasis sequence.
///
/// ## Info
///
/// * **Context**:
/// [`Emphasis`][Name::Emphasis]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | *a*
/// ^ ^
/// ```
EmphasisSequence,
/// Emphasis text.
///
/// ## Info
///
/// * **Context**:
/// [`Emphasis`][Name::Emphasis]
/// * **Content model**:
/// [text content][crate::construct::text]
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | *a*
/// ^
/// ```
EmphasisText,
/// Whole frontmatter.
///
/// ## Info
///
/// * **Context**:
/// [document content][crate::construct::document]
/// * **Content model**:
/// [`FrontmatterFence`][Name::FrontmatterFence],
/// [`FrontmatterChunk`][Name::FrontmatterChunk],
/// [`LineEnding`][Name::LineEnding]
/// * **Construct**:
/// [`frontmatter`][crate::construct::frontmatter]
///
/// ## Example
///
/// ```markdown
/// > | ---
/// ^^^
/// > | title: Neptune
/// ^^^^^^^^^^^^^^
/// > | ---
/// ^^^
/// ```
Frontmatter,
/// Frontmatter chunk.
///
/// ## Info
///
/// * **Context**:
/// [`Frontmatter`][Name::Frontmatter]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`frontmatter`][crate::construct::frontmatter]
///
/// ## Example
///
/// ```markdown
/// | ---
/// > | title: Neptune
/// ^^^^^^^^^^^^^^
/// | ---
/// ```
FrontmatterChunk,
/// Frontmatter fence.
///
/// ## Info
///
/// * **Context**:
/// [`Frontmatter`][Name::Frontmatter]
/// * **Content model**:
/// [`FrontmatterSequence`][Name::FrontmatterSequence],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`frontmatter`][crate::construct::frontmatter]
///
/// ## Example
///
/// ```markdown
/// > | ---
/// ^^^
/// | title: Neptune
/// > | ---
/// ^^^
/// ```
FrontmatterFence,
/// Frontmatter sequence.
///
/// ## Info
///
/// * **Context**:
/// [`FrontmatterFence`][Name::FrontmatterFence]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`frontmatter`][crate::construct::frontmatter]
///
/// ## Example
///
/// ```markdown
/// > | ---
/// ^^^
/// | title: Neptune
/// > | ---
/// ^^^
/// ```
FrontmatterSequence,
/// GFM extension: email autolink.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// void.
/// * **Construct**:
/// [`gfm_autolink_literal`][crate::construct::gfm_autolink_literal]
///
/// ## Example
///
/// ```markdown
/// > | context@example.com
/// ^^^^^^^^^^^^^^^^^^^
/// ```
GfmAutolinkLiteralEmail,
/// GFM extension: email autolink w/ explicit `mailto`.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// void.
/// * **Construct**:
/// [`gfm_autolink_literal`][crate::construct::gfm_autolink_literal]
///
/// ## Example
///
/// ```markdown
/// > | mailto:context@example.com
/// ^^^^^^^^^^^^^^^^^^^^^^^^^^
/// ```
GfmAutolinkLiteralMailto,
/// GFM extension: autolink w/ protocol.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// void.
/// * **Construct**:
/// [`gfm_autolink_literal`][crate::construct::gfm_autolink_literal]
///
/// ## Example
///
/// ```markdown
/// > | https://example.com
/// ^^^^^^^^^^^^^^^^^^^
/// ```
GfmAutolinkLiteralProtocol,
/// GFM extension: autolink w/ www.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// void.
/// * **Construct**:
/// [`gfm_autolink_literal`][crate::construct::gfm_autolink_literal]
///
/// ## Example
///
/// ```markdown
/// > | www.example.com
/// ^^^^^^^^^^^^^^^
/// ```
GfmAutolinkLiteralWww,
/// GFM extension: email autolink w/ explicit `xmpp`.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// void.
/// * **Construct**:
/// [`gfm_autolink_literal`][crate::construct::gfm_autolink_literal]
///
/// ## Example
///
/// ```markdown
    /// > | xmpp:a@b.c/d
    ///     ^^^^^^^^^^^^
/// ```
GfmAutolinkLiteralXmpp,
/// GFM extension: whole footnote call.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`Label`][Name::Label]
/// * **Construct**:
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a [^b] c
/// ^^^^
/// ```
GfmFootnoteCall,
/// GFM extension: label start (footnote).
///
/// ## Info
///
/// * **Context**:
/// [`Label`][Name::Label]
/// * **Content model**:
/// [`GfmFootnoteCallMarker`][Name::GfmFootnoteCallMarker],
/// [`LabelMarker`][Name::LabelMarker]
/// * **Construct**:
/// [`gfm_label_start_footnote`][crate::construct::gfm_label_start_footnote]
///
/// ## Example
///
/// ```markdown
/// > | a [^b] c
/// ^^
/// ```
GfmFootnoteCallLabel,
/// GFM extension: label start (footnote) marker.
///
/// ## Info
///
/// * **Context**:
/// [`GfmFootnoteCallLabel`][Name::GfmFootnoteCallLabel]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_label_start_footnote`][crate::construct::gfm_label_start_footnote]
///
/// ## Example
///
/// ```markdown
/// > | a [^b] c
/// ^
/// ```
GfmFootnoteCallMarker,
/// GFM extension: whole footnote definition.
///
/// ## Info
///
/// * **Context**:
/// [document content][crate::construct::document]
/// * **Content model**:
/// [`GfmFootnoteDefinitionPrefix`][Name::GfmFootnoteDefinitionPrefix],
    ///   [flow content][crate::construct::flow]
/// * **Construct**:
/// [`gfm_footnote_definition`][crate::construct::gfm_footnote_definition]
///
/// ## Example
///
/// ```markdown
/// > | [^a]: b
/// ^^^^^^^
/// ```
GfmFootnoteDefinition,
/// GFM extension: footnote definition prefix.
///
/// ## Info
///
/// * **Context**:
/// [`GfmFootnoteDefinition`][Name::GfmFootnoteDefinition]
/// * **Content model**:
/// [`DefinitionMarker`][Name::DefinitionMarker],
/// [`GfmFootnoteDefinitionLabel`][Name::GfmFootnoteDefinitionLabel],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`gfm_footnote_definition`][crate::construct::gfm_footnote_definition]
///
/// ## Example
///
/// ```markdown
/// > | [^a]: b
/// ^^^^^^
/// ```
GfmFootnoteDefinitionPrefix,
/// GFM extension: footnote definition label.
///
/// ## Info
///
/// * **Context**:
/// [`GfmFootnoteDefinitionPrefix`][Name::GfmFootnoteDefinitionPrefix]
/// * **Content model**:
/// [`GfmFootnoteDefinitionLabelMarker`][Name::GfmFootnoteDefinitionLabelMarker],
/// [`GfmFootnoteDefinitionLabelString`][Name::GfmFootnoteDefinitionLabelString],
/// [`GfmFootnoteDefinitionMarker`][Name::GfmFootnoteDefinitionMarker]
/// * **Construct**:
/// [`gfm_footnote_definition`][crate::construct::gfm_footnote_definition]
///
/// ## Example
///
/// ```markdown
/// > | [^a]: b
/// ^^^^
/// ```
GfmFootnoteDefinitionLabel,
/// GFM extension: footnote definition label marker.
///
/// ## Info
///
/// * **Context**:
/// [`GfmFootnoteDefinitionLabel`][Name::GfmFootnoteDefinitionLabel]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_footnote_definition`][crate::construct::gfm_footnote_definition]
///
/// ## Example
///
/// ```markdown
/// > | [^a]: b
/// ^ ^
    /// ```
    GfmFootnoteDefinitionLabelMarker,
/// GFM extension: footnote definition label string.
///
/// ## Info
///
/// * **Context**:
/// [`GfmFootnoteDefinitionLabel`][Name::GfmFootnoteDefinitionLabel]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`gfm_footnote_definition`][crate::construct::gfm_footnote_definition]
///
/// ## Example
///
/// ```markdown
/// > | [^a]: b
/// ^
    /// ```
    GfmFootnoteDefinitionLabelString,
/// GFM extension: footnote definition marker.
///
/// ## Info
///
/// * **Context**:
/// [`GfmFootnoteDefinitionLabel`][Name::GfmFootnoteDefinitionLabel]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_footnote_definition`][crate::construct::gfm_footnote_definition]
///
/// ## Example
///
/// ```markdown
/// > | [^a]: b
/// ^
    /// ```
    GfmFootnoteDefinitionMarker,
/// GFM extension: Strikethrough.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`GfmStrikethroughSequence`][Name::GfmStrikethroughSequence],
/// [`GfmStrikethroughText`][Name::GfmStrikethroughText]
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | ~a~
/// ^^^
/// ```
GfmStrikethrough,
/// GFM extension: Strikethrough sequence.
///
/// ## Info
///
/// * **Context**:
/// [`GfmStrikethrough`][Name::GfmStrikethrough]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | ~a~
/// ^ ^
/// ```
GfmStrikethroughSequence,
/// GFM extension: Strikethrough text.
///
/// ## Info
///
/// * **Context**:
/// [`GfmStrikethrough`][Name::GfmStrikethrough]
/// * **Content model**:
/// [text content][crate::construct::text]
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | ~a~
/// ^
/// ```
GfmStrikethroughText,
/// GFM extension: Table.
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`GfmTableBody`][Name::GfmTableBody],
/// [`GfmTableHead`][Name::GfmTableHead],
/// [`LineEnding`][Name::LineEnding]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// > | | a |
/// ^^^^^
/// > | | - |
/// ^^^^^
/// > | | b |
/// ^^^^^
/// ```
GfmTable,
/// GFM extension: Table body.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTable`][Name::GfmTable]
/// * **Content model**:
/// [`GfmTableRow`][Name::GfmTableRow],
/// [`LineEnding`][Name::LineEnding]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// | | a |
/// | | - |
/// > | | b |
/// ^^^^^
/// ```
GfmTableBody,
/// GFM extension: Table cell.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableRow`][Name::GfmTableRow]
/// * **Content model**:
/// [`GfmTableCellDivider`][Name::GfmTableCellDivider],
/// [`GfmTableCellText`][Name::GfmTableCellText],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// > | | a |
/// ^^^^^
/// | | - |
/// > | | b |
/// ^^^^^
/// ```
GfmTableCell,
/// GFM extension: Table cell text.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableCell`][Name::GfmTableCell]
/// * **Content model**:
/// [text content][crate::construct::text]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// > | | a |
/// ^
/// | | - |
/// > | | b |
/// ^
/// ```
GfmTableCellText,
/// GFM extension: Table cell divider.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableCell`][Name::GfmTableCell]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// > | | a |
/// ^ ^
/// > | | - |
/// ^ ^
/// > | | b |
/// ^ ^
/// ```
GfmTableCellDivider,
/// GFM extension: Table delimiter row.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableHead`][Name::GfmTableHead]
/// * **Content model**:
/// [`GfmTableDelimiterCell`][Name::GfmTableDelimiterCell]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// | | a |
/// > | | - |
/// ^^^^^
/// | | b |
/// ```
GfmTableDelimiterRow,
/// GFM extension: Table delimiter alignment marker.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableDelimiterCellValue`][Name::GfmTableDelimiterCellValue]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// | | a |
/// > | | :- |
/// ^
/// | | b |
/// ```
GfmTableDelimiterMarker,
/// GFM extension: Table delimiter cell.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableDelimiterRow`][Name::GfmTableDelimiterRow]
/// * **Content model**:
/// [`GfmTableCellDivider`][Name::GfmTableCellDivider],
/// [`GfmTableDelimiterCellValue`][Name::GfmTableDelimiterCellValue],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// | | a |
/// > | | - |
/// ^^^^^
/// | | b |
/// ```
GfmTableDelimiterCell,
/// GFM extension: Table delimiter cell alignment.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableDelimiterCell`][Name::GfmTableDelimiterCell]
/// * **Content model**:
/// [`GfmTableDelimiterMarker`][Name::GfmTableDelimiterMarker],
/// [`GfmTableDelimiterFiller`][Name::GfmTableDelimiterFiller]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// | | a |
/// > | | - |
/// ^
/// | | b |
/// ```
GfmTableDelimiterCellValue,
/// GFM extension: Table delimiter filler.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableDelimiterCellValue`][Name::GfmTableDelimiterCellValue]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// | | a |
/// > | | - |
/// ^
/// | | b |
/// ```
GfmTableDelimiterFiller,
/// GFM extension: Table head.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTable`][Name::GfmTable]
/// * **Content model**:
/// [`GfmTableRow`][Name::GfmTableRow],
/// [`GfmTableDelimiterRow`][Name::GfmTableDelimiterRow],
/// [`LineEnding`][Name::LineEnding]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// > | | a |
/// ^^^^^
/// > | | - |
/// ^^^^^
/// | | b |
/// ```
GfmTableHead,
/// GFM extension: Table row.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTableBody`][Name::GfmTableBody],
/// [`GfmTableHead`][Name::GfmTableHead]
/// * **Content model**:
/// [`GfmTableCell`][Name::GfmTableCell]
/// * **Construct**:
/// [`gfm_table`][crate::construct::gfm_table]
///
/// ## Example
///
/// ```markdown
/// > | | a |
/// ^^^^^
/// | | - |
/// > | | b |
/// ^^^^^
/// ```
GfmTableRow,
/// GFM extension: task list item check.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`GfmTaskListItemMarker`][Name::GfmTaskListItemMarker],
/// [`GfmTaskListItemValueChecked`][Name::GfmTaskListItemValueChecked],
/// [`GfmTaskListItemValueUnchecked`][Name::GfmTaskListItemValueUnchecked]
/// * **Construct**:
/// [`gfm_task_list_item_check`][crate::construct::gfm_task_list_item_check]
///
/// ## Example
///
/// ```markdown
/// > | * [x] y.
/// ^^^
/// ```
GfmTaskListItemCheck,
/// GFM extension: task list item check marker.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTaskListItemCheck`][Name::GfmTaskListItemCheck]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_task_list_item_check`][crate::construct::gfm_task_list_item_check]
///
/// ## Example
///
/// ```markdown
/// > | * [x] y.
/// ^ ^
/// ```
GfmTaskListItemMarker,
/// GFM extension: task list item value: checked.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTaskListItemCheck`][Name::GfmTaskListItemCheck]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_task_list_item_check`][crate::construct::gfm_task_list_item_check]
///
/// ## Example
///
/// ```markdown
/// > | * [x] y.
/// ^
/// ```
GfmTaskListItemValueChecked,
/// GFM extension: task list item value: unchecked.
///
/// ## Info
///
/// * **Context**:
/// [`GfmTaskListItemCheck`][Name::GfmTaskListItemCheck]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`gfm_task_list_item_check`][crate::construct::gfm_task_list_item_check]
///
/// ## Example
///
/// ```markdown
/// > | * [ ] z.
/// ^
/// ```
GfmTaskListItemValueUnchecked,
/// Whole hard break (escape).
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`hard_break_escape`][crate::construct::hard_break_escape]
///
/// ## Example
///
/// ```markdown
/// > | a\␊
/// ^
/// > | b
/// ```
HardBreakEscape,
/// Whole hard break (trailing).
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`whitespace`][crate::construct::partial_whitespace]
///
/// ## Example
///
/// ```markdown
/// > | a␠␠␊
/// ^^
/// > | b
/// ```
HardBreakTrailing,
/// Whole heading (atx).
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`HeadingAtxSequence`][Name::HeadingAtxSequence],
/// [`HeadingAtxText`][Name::HeadingAtxText],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`heading_atx`][crate::construct::heading_atx]
///
/// ## Example
///
/// ```markdown
/// > | # alpha
/// ^^^^^^^
/// ```
HeadingAtx,
/// Heading (atx) sequence.
///
/// ## Info
///
/// * **Context**:
/// [`HeadingAtx`][Name::HeadingAtx]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`heading_atx`][crate::construct::heading_atx]
///
/// ## Example
///
/// ```markdown
/// > | # alpha
/// ^
/// ```
HeadingAtxSequence,
/// Heading (atx) data.
///
/// ## Info
///
/// * **Context**:
/// [`HeadingAtx`][Name::HeadingAtx]
/// * **Content model**:
/// [text content][crate::construct::text]
/// * **Construct**:
/// [`heading_atx`][crate::construct::heading_atx]
///
/// ## Example
///
/// ```markdown
/// > | # alpha
/// ^^^^^
/// ```
HeadingAtxText,
/// Whole heading (setext).
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`HeadingSetextText`][Name::HeadingSetextText],
/// [`HeadingSetextUnderline`][Name::HeadingSetextUnderline],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`heading_setext`][crate::construct::heading_setext]
///
/// ## Example
///
/// ```markdown
/// > | alpha
/// ^^^^^
/// > | =====
/// ^^^^^
/// ```
HeadingSetext,
/// Heading (setext) data.
///
/// ## Info
///
/// * **Context**:
/// [`HeadingSetext`][Name::HeadingSetext]
/// * **Content model**:
/// [text content][crate::construct::text]
/// * **Construct**:
/// [`heading_setext`][crate::construct::heading_setext]
///
/// ## Example
///
/// ```markdown
/// > | alpha
/// ^^^^^
/// | =====
/// ```
HeadingSetextText,
/// Heading (setext) underline.
///
/// ## Info
///
/// * **Context**:
/// [`HeadingSetext`][Name::HeadingSetext]
/// * **Content model**:
/// [`HeadingSetextUnderlineSequence`][Name::HeadingSetextUnderlineSequence],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`heading_setext`][crate::construct::heading_setext]
///
/// ## Example
///
/// ```markdown
/// | alpha
/// > | =====
/// ^^^^^
/// ```
HeadingSetextUnderline,
/// Heading (setext) underline sequence.
///
/// ## Info
///
/// * **Context**:
/// [`HeadingSetext`][Name::HeadingSetext]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`heading_setext`][crate::construct::heading_setext]
///
/// ## Example
///
/// ```markdown
/// | alpha
/// > | =====
/// ^^^^^
/// ```
HeadingSetextUnderlineSequence,
/// Whole html (flow).
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`HtmlFlowData`][Name::HtmlFlowData],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`html_flow`][crate::construct::html_flow]
///
/// ## Example
///
/// ```markdown
/// > | <div>
/// ^^^^^
/// ```
HtmlFlow,
/// HTML (flow) data.
///
/// ## Info
///
/// * **Context**:
/// [`HtmlFlow`][Name::HtmlFlow]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`html_flow`][crate::construct::html_flow]
///
/// ## Example
///
/// ```markdown
/// > | <div>
/// ^^^^^
/// ```
HtmlFlowData,
/// Whole html (text).
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`HtmlTextData`][Name::HtmlTextData],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`html_text`][crate::construct::html_text]
///
/// ## Example
///
/// ```markdown
/// > | a <b> c
/// ^^^
/// ```
HtmlText,
/// HTML (text) data.
///
/// ## Info
///
/// * **Context**:
/// [`HtmlText`][Name::HtmlText]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`html_text`][crate::construct::html_text]
///
/// ## Example
///
/// ```markdown
/// > | a <b> c
/// ^^^
/// ```
HtmlTextData,
/// Image.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`Label`][Name::Label],
/// [`Resource`][Name::Resource],
/// [`Reference`][Name::Reference]
/// * **Construct**:
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a ![b] c
/// ^^^^
/// > | a ![b][c] d
/// ^^^^^^^
/// > | a  d
/// ^^^^^^^
/// ```
Image,
/// Label.
///
/// ## Info
///
/// * **Context**:
/// [`Image`][Name::Image],
/// [`Link`][Name::Link]
/// * **Content model**:
/// [`LabelImage`][Name::LabelImage],
/// [`LabelLink`][Name::LabelLink],
/// [`LabelEnd`][Name::LabelEnd],
/// [`LabelText`][Name::LabelText]
/// * **Construct**:
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a [b] c
/// ^^^
/// > | a ![b][c] d
/// ^^^^
/// > | a [b](c) d
/// ^^^
/// ```
Label,
/// Label end.
///
/// ## Info
///
/// * **Context**:
/// [`Label`][Name::Label]
/// * **Content model**:
/// [`LabelMarker`][Name::LabelMarker]
/// * **Construct**:
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a  d
/// ^
/// > | a [b](c) d
/// ^
/// ```
LabelEnd,
/// Label start (image).
///
/// ## Info
///
/// * **Context**:
/// [`Label`][Name::Label]
/// * **Content model**:
/// [`LabelImageMarker`][Name::LabelImageMarker],
/// [`LabelMarker`][Name::LabelMarker]
/// * **Construct**:
/// [`label_start_image`][crate::construct::label_start_image]
///
/// ## Example
///
/// ```markdown
/// > | a  d
/// ^^
/// ```
LabelImage,
/// Label start (image) marker.
///
/// ## Info
///
/// * **Context**:
/// [`LabelImage`][Name::LabelImage]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`label_start_image`][crate::construct::label_start_image]
///
/// ## Example
///
/// ```markdown
/// > | a  d
/// ^
/// ```
LabelImageMarker,
/// Label start (link).
///
/// ## Info
///
/// * **Context**:
/// [`Label`][Name::Label]
/// * **Content model**:
/// [`LabelMarker`][Name::LabelMarker]
/// * **Construct**:
/// [`label_start_link`][crate::construct::label_start_link]
///
/// ## Example
///
/// ```markdown
/// > | a [b](c) d
/// ^
/// ```
LabelLink,
/// Label marker.
///
/// ## Info
///
/// * **Context**:
/// [`LabelImage`][Name::LabelImage],
/// [`LabelLink`][Name::LabelLink],
/// [`LabelEnd`][Name::LabelEnd]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`label_start_image`][crate::construct::label_start_image],
/// [`label_start_link`][crate::construct::label_start_link],
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a  d
/// ^ ^
/// > | a [b](c) d
/// ^ ^
/// ```
LabelMarker,
/// Label text.
///
/// ## Info
///
/// * **Context**:
/// [`Label`][Name::Label]
/// * **Content model**:
/// [text content][crate::construct::text]
/// * **Construct**:
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a [b] c
/// ^
/// > | a ![b][c] d
/// ^
/// > | a [b](c) d
/// ^
/// ```
LabelText,
/// Line ending.
///
/// ## Info
///
/// * **Context**:
/// basically everywhere
/// * **Content model**:
/// void
/// * **Construct**:
/// n/a
///
/// ## Example
///
/// ```markdown
/// > | a␊
/// ^
/// | b
/// ```
LineEnding,
/// Link.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`Label`][Name::Label],
/// [`Resource`][Name::Resource],
/// [`Reference`][Name::Reference]
/// * **Construct**:
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a [b] c
/// ^^^
/// > | a [b][c] d
/// ^^^^^^
/// > | a [b](c) d
/// ^^^^^^
/// ```
Link,
/// List item.
///
/// ## Info
///
/// * **Context**:
/// [`ListOrdered`][Name::ListOrdered],
/// [`ListUnordered`][Name::ListUnordered]
/// * **Content model**:
/// [`ListItemPrefix`][Name::ListItemPrefix],
/// [flow content][crate::construct::flow]
/// * **Construct**:
/// [`list item`][crate::construct::list_item]
///
/// ## Example
///
/// ```markdown
/// > | * a
/// ^^^
/// > | 1. b
/// ^^^^
/// ```
ListItem,
/// List item (marker).
///
/// ## Info
///
/// * **Context**:
/// [`ListItemPrefix`][Name::ListItemPrefix]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`list item`][crate::construct::list_item]
///
/// ## Example
///
/// ```markdown
/// > | * a
/// ^
/// > | 1. b
/// ^
/// ```
ListItemMarker,
/// List item (prefix).
///
/// ## Info
///
/// * **Context**:
/// [`ListItem`][Name::ListItem]
/// * **Content model**:
/// [`ListItemMarker`][Name::ListItemMarker],
/// [`ListItemValue`][Name::ListItemValue],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`list item`][crate::construct::list_item]
///
/// ## Example
///
/// ```markdown
/// > | * a
/// ^^
/// > | b
/// ^^
/// ```
ListItemPrefix,
/// List item (value).
///
/// ## Info
///
/// * **Context**:
/// [`ListItemPrefix`][Name::ListItemPrefix]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`list item`][crate::construct::list_item]
///
/// ## Example
///
/// ```markdown
/// > | 1. b
/// ^
/// ```
ListItemValue,
/// List (ordered).
///
/// ## Info
///
/// * **Context**:
/// [document content][crate::construct::document]
/// * **Content model**:
/// [`BlankLineEnding`][Name::BlankLineEnding],
/// [`BlockQuotePrefix`][Name::BlockQuotePrefix],
/// [`ListItem`][Name::ListItem],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`list item`][crate::construct::list_item]
///
/// ## Example
///
/// ```markdown
/// > | 1. a
/// ^^^^
/// > | 2. b
/// ^^^^
/// ```
ListOrdered,
/// List (unordered).
///
/// ## Info
///
/// * **Context**:
/// [document content][crate::construct::document]
/// * **Content model**:
/// [`BlankLineEnding`][Name::BlankLineEnding],
/// [`BlockQuotePrefix`][Name::BlockQuotePrefix],
/// [`ListItem`][Name::ListItem],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`list item`][crate::construct::list_item]
///
/// ## Example
///
/// ```markdown
/// > | * a
/// ^^^
/// > | * b
/// ^^^
/// ```
ListUnordered,
/// Whole math (flow).
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`MathFlowFence`][Name::MathFlowFence],
/// [`MathFlowChunk`][Name::MathFlowChunk],
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ```markdown
/// > | $$
/// ^^
/// > | \frac{1}{2}
/// ^^^^^^^^^^^
/// > | $$
/// ^^
/// ```
MathFlow,
/// A math (flow) fence.
///
/// ## Info
///
/// * **Context**:
/// [`MathFlow`][Name::MathFlow]
/// * **Content model**:
/// [`MathFlowFenceMeta`][Name::MathFlowFenceMeta],
/// [`MathFlowFenceSequence`][Name::MathFlowFenceSequence],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ```markdown
/// > | $$
/// ^^
/// | \frac{1}{2}
/// > | $$
/// ^^
/// ```
MathFlowFence,
/// A math (flow) fence meta string.
///
/// ## Info
///
/// * **Context**:
/// [`MathFlowFence`][Name::MathFlowFence]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ```markdown
/// > | $$alpha bravo
/// ^^^^^^^^^^^
/// | \frac{1}{2}
/// | $$
/// ```
MathFlowFenceMeta,
/// A math (flow) fence sequence.
///
/// ## Info
///
/// * **Context**:
///     [`MathFlowFence`][Name::MathFlowFence]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ```markdown
/// > | $$
/// ^^
/// | \frac{1}{2}
/// > | $$
/// ^^
/// ```
MathFlowFenceSequence,
/// A math (flow) chunk.
///
/// ## Info
///
/// * **Context**:
/// [`MathFlow`][Name::MathFlow]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`raw_flow`][crate::construct::raw_flow]
///
/// ## Example
///
/// ```markdown
/// | $$
/// > | \frac{1}{2}
/// ^^^^^^^^^^^
/// | $$
/// ```
MathFlowChunk,
/// Whole math (text).
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`MathTextData`][Name::MathTextData],
/// [`MathTextSequence`][Name::MathTextSequence],
/// [`LineEnding`][Name::LineEnding]
/// * **Construct**:
/// [`raw_text`][crate::construct::raw_text]
///
/// ## Example
///
/// ```markdown
/// > | a $b$ c
/// ^^^
/// ```
MathText,
/// Math (text) data.
///
/// ## Info
///
/// * **Context**:
/// [`MathText`][Name::MathText]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`raw_text`][crate::construct::raw_text]
///
/// ## Example
///
/// ```markdown
/// > | a $b$ c
/// ^
/// ```
MathTextData,
/// Math (text) sequence.
///
/// ## Info
///
/// * **Context**:
/// [`MathText`][Name::MathText]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`raw_text`][crate::construct::raw_text]
///
/// ## Example
///
/// ```markdown
/// > | a $b$ c
/// ^ ^
/// ```
MathTextSequence,
/// MDX extension: ESM.
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
///     [`MdxEsmData`][Name::MdxEsmData],
///     [`SpaceOrTab`][Name::SpaceOrTab],
///     [`LineEnding`][Name::LineEnding]
/// * **Construct**:
/// [`mdx_esm`][crate::construct::mdx_esm]
///
/// ## Example
///
/// ```markdown
/// > | import a from 'b'
/// ^^^^^^^^^^^^^^^^^
/// ```
MdxEsm,
/// MDX extension: ESM data.
///
/// ## Info
///
/// * **Context**:
/// [`MdxEsm`][Name::MdxEsm]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`mdx_esm`][crate::construct::mdx_esm]
///
/// ## Example
///
/// ```markdown
/// > | import a from 'b'
/// ^^^^^^^^^^^^^^^^^
/// ```
MdxEsmData,
/// MDX extension: expression marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxFlowExpression`][Name::MdxFlowExpression],
/// [`MdxTextExpression`][Name::MdxTextExpression],
/// [`MdxJsxTagAttributeExpression`][Name::MdxJsxTagAttributeExpression],
/// [`MdxJsxTagAttributeValueExpression`][Name::MdxJsxTagAttributeValueExpression]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_expression`][crate::construct::partial_mdx_expression]
///
/// ## Example
///
/// ```markdown
/// > | {Math.PI}
/// ^ ^
/// ```
MdxExpressionMarker,
/// MDX extension: expression data.
///
/// ## Info
///
/// * **Context**:
/// [`MdxFlowExpression`][Name::MdxFlowExpression],
/// [`MdxTextExpression`][Name::MdxTextExpression],
/// [`MdxJsxTagAttributeExpression`][Name::MdxJsxTagAttributeExpression],
/// [`MdxJsxTagAttributeValueExpression`][Name::MdxJsxTagAttributeValueExpression]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_expression`][crate::construct::partial_mdx_expression]
///
/// ## Example
///
/// ```markdown
/// > | {Math.PI}
/// ^^^^^^^
/// ```
MdxExpressionData,
/// MDX extension: expression (flow).
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab],
/// [`MdxExpressionMarker`][Name::MdxExpressionMarker],
/// [`MdxExpressionData`][Name::MdxExpressionData]
/// * **Construct**:
/// [`mdx_expression_flow`][crate::construct::mdx_expression_flow]
///
/// ## Example
///
/// ```markdown
/// > | {Math.PI}
/// ^^^^^^^^^
/// ```
MdxFlowExpression,
/// MDX extension: expression (text).
///
/// ## Info
///
/// * **Context**:
///     [text content][crate::construct::text]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab],
/// [`MdxExpressionMarker`][Name::MdxExpressionMarker],
/// [`MdxExpressionData`][Name::MdxExpressionData]
/// * **Construct**:
/// [`mdx_expression_text`][crate::construct::mdx_expression_text]
///
/// ## Example
///
/// ```markdown
/// > | a {Math.PI} b
/// ^^^^^^^^^
/// ```
MdxTextExpression,
/// MDX extension: JSX (flow).
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`MdxJsxEsWhitespace`][Name::MdxJsxEsWhitespace],
/// [`MdxJsxTagMarker`][Name::MdxJsxTagMarker],
/// [`MdxJsxTagClosingMarker`][Name::MdxJsxTagClosingMarker],
/// [`MdxJsxTagName`][Name::MdxJsxTagName],
/// [`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute],
/// [`MdxJsxTagAttributeExpression`][Name::MdxJsxTagAttributeExpression],
/// [`MdxJsxTagSelfClosingMarker`][Name::MdxJsxTagSelfClosingMarker]
/// * **Construct**:
/// [`mdx_jsx_flow`][crate::construct::mdx_jsx_flow]
///
/// ## Example
///
/// ```markdown
/// > | <B />
/// ^^^^^
/// ```
MdxJsxFlowTag,
/// MDX extension: JSX (text).
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`MdxJsxEsWhitespace`][Name::MdxJsxEsWhitespace],
/// [`MdxJsxTagMarker`][Name::MdxJsxTagMarker],
/// [`MdxJsxTagClosingMarker`][Name::MdxJsxTagClosingMarker],
/// [`MdxJsxTagName`][Name::MdxJsxTagName],
/// [`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute],
/// [`MdxJsxTagAttributeExpression`][Name::MdxJsxTagAttributeExpression],
/// [`MdxJsxTagSelfClosingMarker`][Name::MdxJsxTagSelfClosingMarker]
/// * **Construct**:
/// [`mdx_jsx_text`][crate::construct::mdx_jsx_text]
///
/// ## Example
///
/// ```markdown
/// > | a <B /> c
/// ^^^^^
/// ```
MdxJsxTextTag,
/// MDX extension: JSX: ECMAScript whitespace.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxFlowTag`][Name::MdxJsxFlowTag],
/// [`MdxJsxTextTag`][Name::MdxJsxTextTag],
/// [`MdxJsxTagName`][Name::MdxJsxTagName],
/// [`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute],
/// [`MdxJsxTagAttributeName`][Name::MdxJsxTagAttributeName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <B /> c
/// ^
/// ```
MdxJsxEsWhitespace,
/// MDX extension: JSX: tag marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxFlowTag`][Name::MdxJsxFlowTag],
/// [`MdxJsxTextTag`][Name::MdxJsxTextTag]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <B /> c
/// ^ ^
/// ```
MdxJsxTagMarker,
/// MDX extension: JSX: closing tag marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxFlowTag`][Name::MdxJsxFlowTag],
/// [`MdxJsxTextTag`][Name::MdxJsxTextTag]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a </B> c
/// ^
/// ```
MdxJsxTagClosingMarker,
/// MDX extension: JSX: tag name.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxFlowTag`][Name::MdxJsxFlowTag],
/// [`MdxJsxTextTag`][Name::MdxJsxTextTag]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`MdxJsxEsWhitespace`][Name::MdxJsxEsWhitespace],
/// [`MdxJsxTagNamePrimary`][Name::MdxJsxTagNamePrimary],
/// [`MdxJsxTagNameMember`][Name::MdxJsxTagNameMember],
/// [`MdxJsxTagNameMemberMarker`][Name::MdxJsxTagNameMemberMarker],
/// [`MdxJsxTagNamePrefixMarker`][Name::MdxJsxTagNamePrefixMarker],
/// [`MdxJsxTagNameLocal`][Name::MdxJsxTagNameLocal]
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b> c
/// ^
/// > | a <b:c> d
/// ^^^
/// > | a <b.c> d
/// ^^^
/// ```
#[allow(clippy::enum_variant_names)]
MdxJsxTagName,
/// MDX extension: JSX: primary tag name.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagName`][Name::MdxJsxTagName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b> c
/// ^
/// > | a <b:c> d
/// ^
/// > | a <b.c> d
/// ^
/// ```
MdxJsxTagNamePrimary,
/// MDX extension: JSX: tag name member marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagName`][Name::MdxJsxTagName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b.c> d
/// ^
/// ```
MdxJsxTagNameMemberMarker,
/// MDX extension: JSX: tag name prefix marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagName`][Name::MdxJsxTagName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b:c> d
/// ^
/// ```
MdxJsxTagNamePrefixMarker,
/// MDX extension: JSX: tag name member.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagName`][Name::MdxJsxTagName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b.c> d
/// ^
/// ```
MdxJsxTagNameMember,
/// MDX extension: JSX: tag name local.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagName`][Name::MdxJsxTagName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b:c> d
/// ^
/// ```
MdxJsxTagNameLocal,
/// MDX extension: JSX: attribute.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxFlowTag`][Name::MdxJsxFlowTag],
/// [`MdxJsxTextTag`][Name::MdxJsxTextTag]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`MdxJsxEsWhitespace`][Name::MdxJsxEsWhitespace],
/// [`MdxJsxTagAttributeName`][Name::MdxJsxTagAttributeName],
/// [`MdxJsxTagAttributeInitializerMarker`][Name::MdxJsxTagAttributeInitializerMarker],
/// [`MdxJsxTagAttributeValueLiteral`][Name::MdxJsxTagAttributeValueLiteral],
/// [`MdxJsxTagAttributeValueExpression`][Name::MdxJsxTagAttributeValueExpression]
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c> d
/// ^
/// > | a <b c="d"> e
/// ^^^^^
/// > | a <b c={d}> e
/// ^^^^^
/// ```
MdxJsxTagAttribute,
/// MDX extension: JSX tag attribute expression.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxFlowTag`][Name::MdxJsxFlowTag],
/// [`MdxJsxTextTag`][Name::MdxJsxTextTag]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab],
/// [`MdxExpressionMarker`][Name::MdxExpressionMarker],
/// [`MdxExpressionData`][Name::MdxExpressionData]
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b {Math.PI} /> c
/// ^^^^^^^^^
/// ```
MdxJsxTagAttributeExpression,
/// MDX extension: JSX: attribute name.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`MdxJsxEsWhitespace`][Name::MdxJsxEsWhitespace],
/// [`MdxJsxTagAttributePrimaryName`][Name::MdxJsxTagAttributePrimaryName],
/// [`MdxJsxTagAttributeNamePrefixMarker`][Name::MdxJsxTagAttributeNamePrefixMarker],
/// [`MdxJsxTagAttributeNameLocal`][Name::MdxJsxTagAttributeNameLocal]
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c> d
/// ^
/// > | a <b c:d="e"> f
/// ^^^
/// ```
#[allow(clippy::enum_variant_names)]
MdxJsxTagAttributeName,
/// MDX extension: JSX: primary attribute name.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagAttributeName`][Name::MdxJsxTagAttributeName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c> d
/// ^
/// > | a <b c:d="e"> f
/// ^
/// ```
#[allow(clippy::enum_variant_names)]
MdxJsxTagAttributePrimaryName,
/// MDX extension: JSX: attribute name prefix marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagAttributeName`][Name::MdxJsxTagAttributeName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c:d="e"> f
/// ^
/// ```
MdxJsxTagAttributeNamePrefixMarker,
/// MDX extension: JSX: local attribute name.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagAttributeName`][Name::MdxJsxTagAttributeName]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c:d="e"> f
/// ^
/// ```
MdxJsxTagAttributeNameLocal,
/// MDX extension: JSX: attribute initializer marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c="d"> e
/// ^
/// ```
MdxJsxTagAttributeInitializerMarker,
/// MDX extension: JSX tag attribute value expression.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxFlowTag`][Name::MdxJsxFlowTag],
/// [`MdxJsxTextTag`][Name::MdxJsxTextTag]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`SpaceOrTab`][Name::SpaceOrTab],
/// [`MdxExpressionMarker`][Name::MdxExpressionMarker],
/// [`MdxExpressionData`][Name::MdxExpressionData]
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c={Math.PI} /> d
/// ^^^^^^^^^
/// ```
MdxJsxTagAttributeValueExpression,
/// MDX extension: JSX: attribute value literal.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute]
/// * **Content model**:
/// [`LineEnding`][Name::LineEnding],
/// [`MdxJsxEsWhitespace`][Name::MdxJsxEsWhitespace],
/// [`MdxJsxTagAttributeValueLiteralMarker`][Name::MdxJsxTagAttributeValueLiteralMarker],
/// [`MdxJsxTagAttributeValueLiteralValue`][Name::MdxJsxTagAttributeValueLiteralValue]
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c="d"> e
/// ^^^
/// ```
MdxJsxTagAttributeValueLiteral,
/// MDX extension: JSX: attribute value literal marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagAttributeValueLiteral`][Name::MdxJsxTagAttributeValueLiteral]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c="d"> e
/// ^ ^
/// ```
MdxJsxTagAttributeValueLiteralMarker,
/// MDX extension: JSX: attribute value literal value.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxTagAttributeValueLiteral`][Name::MdxJsxTagAttributeValueLiteral]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b c="d"> e
/// ^
/// ```
MdxJsxTagAttributeValueLiteralValue,
/// MDX extension: JSX: self-closing tag marker.
///
/// ## Info
///
/// * **Context**:
/// [`MdxJsxFlowTag`][Name::MdxJsxFlowTag],
/// [`MdxJsxTextTag`][Name::MdxJsxTextTag]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`partial_mdx_jsx`][crate::construct::partial_mdx_jsx]
///
/// ## Example
///
/// ```markdown
/// > | a <b /> c
/// ^
/// ```
MdxJsxTagSelfClosingMarker,
/// Paragraph.
///
/// ## Info
///
/// * **Context**:
/// [content][crate::construct::content]
/// * **Content model**:
/// [text content][crate::construct::text]
/// * **Construct**:
/// [`paragraph`][crate::construct::paragraph]
///
/// ## Example
///
/// ```markdown
/// > | a b
/// ^^^
/// > | c.
/// ^^
/// ```
Paragraph,
/// Reference.
///
/// ## Info
///
/// * **Context**:
/// [`Image`][Name::Image],
/// [`Link`][Name::Link]
/// * **Content model**:
/// [`ReferenceMarker`][Name::ReferenceMarker],
/// [`ReferenceString`][Name::ReferenceString]
/// * **Construct**:
/// [`label`][crate::construct::partial_label]
///
/// ## Example
///
/// ```markdown
/// > | a ![b][c] d
/// ^^^
/// ```
Reference,
/// Reference marker.
///
/// ## Info
///
/// * **Context**:
/// [`Reference`][Name::Reference]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`label`][crate::construct::partial_label]
///
/// ## Example
///
/// ```markdown
/// > | a ![b][c] d
/// ^ ^
/// ```
ReferenceMarker,
/// Reference string.
///
/// ## Info
///
/// * **Context**:
/// [`Reference`][Name::Reference]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`label`][crate::construct::partial_label]
///
/// ## Example
///
/// ```markdown
/// > | a ![b][c] d
/// ^
/// ```
ReferenceString,
/// Resource.
///
/// ## Info
///
/// * **Context**:
/// [`Image`][Name::Image],
/// [`Link`][Name::Link]
/// * **Content model**:
/// [`ResourceMarker`][Name::ResourceMarker],
/// [`ResourceDestination`][Name::ResourceDestination],
/// [`ResourceTitle`][Name::ResourceTitle],
/// [`SpaceOrTab`][Name::SpaceOrTab],
/// [`LineEnding`][Name::LineEnding]
/// * **Construct**:
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^^^^^^^
/// > | a [b](c) d
/// ^^^
/// ```
Resource,
/// Resource destination.
///
/// ## Info
///
/// * **Context**:
/// [`Resource`][Name::Resource]
/// * **Content model**:
/// [`ResourceDestinationLiteral`][Name::ResourceDestinationLiteral],
/// [`ResourceDestinationRaw`][Name::ResourceDestinationRaw]
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^
/// ```
ResourceDestination,
/// Resource destination literal.
///
/// ## Info
///
/// * **Context**:
/// [`ResourceDestination`][Name::ResourceDestination]
/// * **Content model**:
/// [`ResourceDestinationLiteralMarker`][Name::ResourceDestinationLiteralMarker],
/// [`ResourceDestinationString`][Name::ResourceDestinationString]
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^^^
/// ```
ResourceDestinationLiteral,
/// Resource destination literal marker.
///
/// ## Info
///
/// * **Context**:
/// [`ResourceDestinationLiteral`][Name::ResourceDestinationLiteral]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^ ^
/// ```
ResourceDestinationLiteralMarker,
/// Resource destination raw.
///
/// ## Info
///
/// * **Context**:
/// [`ResourceDestination`][Name::ResourceDestination]
/// * **Content model**:
/// [`ResourceDestinationString`][Name::ResourceDestinationString]
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^
/// ```
ResourceDestinationRaw,
/// Resource destination string.
///
/// ## Info
///
/// * **Context**:
/// [`ResourceDestinationLiteral`][Name::ResourceDestinationLiteral],
/// [`ResourceDestinationRaw`][Name::ResourceDestinationRaw]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`destination`][crate::construct::partial_destination]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^
/// > | a  e
/// ^
/// ```
ResourceDestinationString,
/// Resource marker.
///
/// ## Info
///
/// * **Context**:
/// [`Resource`][Name::Resource]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`label_end`][crate::construct::label_end]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^ ^
/// ```
ResourceMarker,
/// Resource title.
///
/// ## Info
///
/// * **Context**:
/// [`Resource`][Name::Resource]
/// * **Content model**:
/// [`ResourceTitleMarker`][Name::ResourceTitleMarker],
/// [`ResourceTitleString`][Name::ResourceTitleString]
/// * **Construct**:
/// [`title`][crate::construct::partial_title]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^^^
/// ```
ResourceTitle,
/// Resource title marker.
///
/// ## Info
///
/// * **Context**:
/// [`ResourceTitle`][Name::ResourceTitle]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`title`][crate::construct::partial_title]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^ ^
/// ```
ResourceTitleMarker,
/// Resource title string.
///
/// ## Info
///
/// * **Context**:
/// [`ResourceTitle`][Name::ResourceTitle]
/// * **Content model**:
/// [string content][crate::construct::string]
/// * **Construct**:
/// [`title`][crate::construct::partial_title]
///
/// ## Example
///
/// ```markdown
/// > | a  e
/// ^
/// ```
ResourceTitleString,
/// Space or tab.
///
/// ## Info
///
/// * **Context**:
/// basically everywhere
/// * **Content model**:
/// void
/// * **Construct**:
/// n/a
///
/// ## Example
///
/// ```markdown
/// > | ␠* * *␠
/// ^ ^ ^ ^
/// ```
SpaceOrTab,
/// Strong.
///
/// ## Info
///
/// * **Context**:
/// [text content][crate::construct::text]
/// * **Content model**:
/// [`StrongSequence`][Name::StrongSequence],
/// [`StrongText`][Name::StrongText]
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | **a**
/// ^^^^^
/// ```
Strong,
/// Strong sequence.
///
/// ## Info
///
/// * **Context**:
/// [`Strong`][Name::Strong]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | **a**
/// ^^ ^^
/// ```
StrongSequence,
/// Strong text.
///
/// ## Info
///
/// * **Context**:
/// [`Strong`][Name::Strong]
/// * **Content model**:
/// [text content][crate::construct::text]
/// * **Construct**:
/// [`attention`][crate::construct::attention]
///
/// ## Example
///
/// ```markdown
/// > | **a**
/// ^
/// ```
StrongText,
/// Whole thematic break.
///
/// ## Info
///
/// * **Context**:
/// [flow content][crate::construct::flow]
/// * **Content model**:
/// [`ThematicBreakSequence`][Name::ThematicBreakSequence],
/// [`SpaceOrTab`][Name::SpaceOrTab]
/// * **Construct**:
/// [`thematic_break`][crate::construct::thematic_break]
///
/// ## Example
///
/// ```markdown
/// > | * * *
/// ^^^^^
/// ```
ThematicBreak,
/// Thematic break sequence.
///
/// ## Info
///
/// * **Context**:
/// [`ThematicBreak`][Name::ThematicBreak]
/// * **Content model**:
/// void
/// * **Construct**:
/// [`thematic_break`][crate::construct::thematic_break]
///
/// ## Example
///
/// ```markdown
/// > | * * *
/// ^ ^ ^
/// ```
ThematicBreakSequence,
LinePrefix,
}
/// List of void events, used to make sure everything is working well.
///
/// A *void* event name is one that spans no nested events: its enter and
/// exit always appear as an adjacent pair.
/// Presumably consumed by debug assertions that check this invariant —
/// confirm at call sites.
// NOTE(review): the array length must match the number of entries (76);
// update both together when adding a variant.
pub const VOID_EVENTS: [Name; 76] = [
    Name::AttentionSequence,
    Name::AutolinkEmail,
    Name::AutolinkMarker,
    Name::AutolinkProtocol,
    Name::BlankLineEnding,
    Name::BlockQuoteMarker,
    Name::ByteOrderMark,
    Name::CharacterEscapeMarker,
    Name::CharacterEscapeValue,
    Name::CharacterReferenceMarker,
    Name::CharacterReferenceMarkerHexadecimal,
    Name::CharacterReferenceMarkerNumeric,
    Name::CharacterReferenceMarkerSemi,
    Name::CharacterReferenceValue,
    Name::CodeFencedFenceSequence,
    Name::CodeFlowChunk,
    Name::CodeTextData,
    Name::CodeTextSequence,
    Name::Data,
    Name::DefinitionDestinationLiteralMarker,
    Name::DefinitionLabelMarker,
    Name::DefinitionMarker,
    Name::DefinitionTitleMarker,
    Name::EmphasisSequence,
    Name::FrontmatterChunk,
    Name::GfmAutolinkLiteralEmail,
    Name::GfmAutolinkLiteralProtocol,
    Name::GfmAutolinkLiteralWww,
    Name::GfmFootnoteCallMarker,
    Name::GfmFootnoteDefinitionLabelMarker,
    Name::GfmFootnoteDefinitionMarker,
    Name::GfmStrikethroughSequence,
    Name::GfmTableCellDivider,
    Name::GfmTableDelimiterMarker,
    Name::GfmTableDelimiterFiller,
    Name::GfmTaskListItemMarker,
    Name::GfmTaskListItemValueChecked,
    Name::GfmTaskListItemValueUnchecked,
    Name::FrontmatterSequence,
    Name::HardBreakEscape,
    Name::HardBreakTrailing,
    Name::HeadingAtxSequence,
    Name::HeadingSetextUnderlineSequence,
    Name::HtmlFlowData,
    Name::HtmlTextData,
    Name::LabelImageMarker,
    Name::LabelMarker,
    Name::LineEnding,
    Name::ListItemMarker,
    Name::ListItemValue,
    Name::MathFlowFenceSequence,
    Name::MathFlowChunk,
    Name::MathTextData,
    Name::MathTextSequence,
    Name::MdxEsmData,
    Name::MdxExpressionMarker,
    Name::MdxExpressionData,
    Name::MdxJsxTagMarker,
    Name::MdxJsxTagClosingMarker,
    Name::MdxJsxTagNamePrimary,
    Name::MdxJsxTagNameMemberMarker,
    Name::MdxJsxTagNamePrefixMarker,
    Name::MdxJsxTagNameMember,
    Name::MdxJsxTagNameLocal,
    Name::MdxJsxTagSelfClosingMarker,
    Name::MdxJsxTagAttributeNamePrefixMarker,
    Name::MdxJsxTagAttributeInitializerMarker,
    Name::MdxJsxTagAttributeNameLocal,
    Name::MdxJsxTagAttributeValueLiteralMarker,
    Name::MdxJsxEsWhitespace,
    Name::ReferenceMarker,
    Name::ResourceMarker,
    Name::ResourceTitleMarker,
    Name::SpaceOrTab,
    Name::StrongSequence,
    Name::ThematicBreakSequence,
];
/// Embedded content type.
///
/// Stored on a [`Link`][] (see its `content` field) to describe how the
/// linked span of events should be parsed.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Content {
    /// Represents [flow content][crate::construct::flow].
    Flow,
    /// Represents [content][crate::construct::content].
    #[allow(clippy::enum_variant_names)]
    Content,
    /// Represents [string content][crate::construct::string].
    String,
    /// Represents [text content][crate::construct::text].
    Text,
}
/// Link to another event.
///
/// Links tie events of the same embedded content together into a chain;
/// `previous`/`next` are presumably indices into the event list — confirm
/// against the subtokenizer.
#[derive(Clone, Debug)]
pub struct Link {
    /// Previous event.
    pub previous: Option<usize>,
    /// Next event.
    pub next: Option<usize>,
    /// Content type.
    pub content: Content,
}
/// Place in the document.
///
/// The interface for the location in the document comes from unist
/// [`Point`](https://github.com/syntax-tree/unist#point).
#[derive(Clone, Debug)]
pub struct Point {
    /// 1-indexed line number.
    pub line: usize,
    /// 1-indexed column number.
    ///
    /// This is increased up to a tab stop for tabs.
    /// Some editors count tabs as 1 character, so this position is not the
    /// same as editors.
    pub column: usize,
    /// 0-indexed position in the document.
    ///
    /// Also an `index` into `bytes`.
    pub index: usize,
    /// Virtual step on the same `index`.
    ///
    /// Tabs occupy a single `index` but several columns (see `shift_to`), so
    /// positions inside a tab expansion are tracked with this field.
    pub vs: usize,
}
impl Point {
    /// Turn this point into a unist [`Point`](unist::Point).
    pub fn to_unist(&self) -> unist::Point {
        unist::Point {
            line: self.line,
            column: self.column,
            offset: self.index,
        }
    }

    /// Create a copy of this point, moved forward through `bytes` to `index`.
    ///
    /// Line endings must not occur in the shifted-over range; tabs advance the
    /// column to the next tab stop.
    pub fn shift_to(&self, bytes: &[u8], index: usize) -> Point {
        let mut point = self.clone();
        debug_assert!(index > point.index, "expected to shift forward");
        while point.index < index {
            match bytes[point.index] {
                b'\n' | b'\r' => unreachable!("cannot move past line endings"),
                byte => {
                    // A tab adds extra "virtual" columns up to the next tab
                    // stop; every other byte advances one column.
                    let vs = if byte == b'\t' {
                        (TAB_SIZE - point.column % TAB_SIZE) % TAB_SIZE
                    } else {
                        0
                    };
                    point.index += 1;
                    point.column += 1 + vs;
                }
            }
        }
        point
    }
}
/// Event kinds.
///
/// Each [`Event`][] is either the start or the end of a construct.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Kind {
    /// The start of something.
    Enter,
    /// The end of something.
    Exit,
}
/// Something semantic happening somewhere.
#[derive(Clone, Debug)]
pub struct Event {
    /// Kind of event (enter or exit).
    pub kind: Kind,
    /// Name of event.
    pub name: Name,
    /// Place where this happens.
    pub point: Point,
    /// Link to another event (for embedded content).
    pub link: Option<Link>,
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/lib.rs | Rust | //! Public API of `markdown-rs`.
//!
//! This module exposes primarily [`to_html()`][].
//! It also exposes [`to_html_with_options()`][] and [`to_mdast()`][].
//!
//! * [`to_html()`][]
//! — safe way to transform (untrusted?) markdown into HTML
//! * [`to_html_with_options()`][]
//! — like `to_html` but lets you configure how markdown is turned into
//! HTML, such as allowing dangerous HTML or turning on/off different
//! constructs (GFM, MDX, and the like)
//! * [`to_mdast()`][]
//! — turn markdown into a syntax tree
//!
//! ## Features
//!
//! * **`default`**
//! — nothing is enabled by default
//! * **`log`**
//! — enable logging (includes `dep:log`);
//! you can show logs with `RUST_LOG=debug`
//! * **`serde`**
//! — enable serde to serialize ASTs and configuration (includes `dep:serde`)
#![no_std]
#![deny(clippy::pedantic)]
#![allow(clippy::doc_link_with_quotes)]
#![allow(clippy::missing_panics_doc)]
#![allow(clippy::must_use_candidate)]
#![allow(clippy::too_many_lines)]
#![allow(clippy::result_large_err)]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/wooorm/markdown-rs/8924580/media/logo-monochromatic.svg?sanitize=true"
)]
extern crate alloc;
mod configuration;
mod construct;
mod event;
mod parser;
mod resolve;
mod state;
mod subtokenize;
mod to_html;
mod to_mdast;
mod tokenizer;
mod util;
pub mod mdast; // To do: externalize?
pub mod message; // To do: externalize.
pub mod unist; // To do: externalize.
#[doc(hidden)]
pub use util::character_reference::{decode_named, decode_numeric};
#[doc(hidden)]
pub use util::identifier::{id_cont, id_start};
#[doc(hidden)]
pub use util::sanitize_uri::sanitize;
#[doc(hidden)]
pub use util::location::Location;
pub use util::line_ending::LineEnding;
pub use util::mdx::{
EsmParse as MdxEsmParse, ExpressionKind as MdxExpressionKind,
ExpressionParse as MdxExpressionParse, Signal as MdxSignal,
};
pub use configuration::{CompileOptions, Constructs, Options, ParseOptions};
use alloc::string::String;
/// Turn markdown into HTML.
///
/// Compiles markdown to HTML according to `CommonMark`, with default
/// (safe) options.
/// Use [`to_html_with_options()`][] to configure how markdown is turned into
/// HTML.
///
/// ## Examples
///
/// ```
/// use markdown::to_html;
///
/// assert_eq!(to_html("# Hi Mercury!"), "<h1>Hi Mercury!</h1>");
/// ```
pub fn to_html(value: &str) -> String {
    let options = Options::default();
    // Plain markdown has no syntax errors, so this cannot fail.
    to_html_with_options(value, &options).unwrap()
}
/// Turn markdown into HTML, with configuration.
///
/// ## Errors
///
/// Normal markdown has no syntax errors, so for it this never fails and can
/// be unwrapped freely.
/// MDX is different: when it is turned on, several errors can occur with how
/// expressions, ESM, and JSX are written.
///
/// ## Examples
///
/// ```
/// use markdown::{to_html_with_options, CompileOptions, Options};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// // Use GFM:
/// let result = to_html_with_options("~Venus~Mars!", &Options::gfm())?;
///
/// assert_eq!(result, "<p><del>Venus</del>Mars!</p>");
///
/// // Live dangerously / trust the author:
/// let result = to_html_with_options("<div>\n\n# Hi Jupiter!\n\n</div>", &Options {
///     compile: CompileOptions {
///         allow_dangerous_html: true,
///         allow_dangerous_protocol: true,
///         ..CompileOptions::default()
///     },
///     ..Options::default()
/// })?;
///
/// assert_eq!(result, "<div>\n<h1>Hi Jupiter!</h1>\n</div>");
/// # Ok(())
/// # }
/// ```
pub fn to_html_with_options(value: &str, options: &Options) -> Result<String, message::Message> {
    // Parse into events, then compile those events into HTML.
    parser::parse(value, &options.parse)
        .map(|(events, parse_state)| to_html::compile(&events, parse_state.bytes, &options.compile))
}
/// Turn markdown into a syntax tree.
///
/// ## Errors
///
/// Normal markdown has no syntax errors, so for it this never fails and can
/// be unwrapped freely.
/// MDX is different: when it is turned on, several errors can occur with how
/// JSX, expressions, or ESM are written.
///
/// ## Examples
///
/// ```
/// use markdown::{to_mdast, ParseOptions};
/// # fn main() -> Result<(), markdown::message::Message> {
///
/// let tree = to_mdast("# Hi *Earth*!", &ParseOptions::default())?;
///
/// println!("{:?}", tree);
/// // => Root { children: [Heading { children: [Text { value: "Hi ", position: Some(1:3-1:6 (2-5)) }, Emphasis { children: [Text { value: "Earth", position: Some(1:7-1:12 (6-11)) }], position: Some(1:6-1:13 (5-12)) }, Text { value: "!", position: Some(1:13-1:14 (12-13)) }], position: Some(1:1-1:14 (0-13)), depth: 1 }], position: Some(1:1-1:14 (0-13)) }
/// # Ok(())
/// # }
/// ```
pub fn to_mdast(value: &str, options: &ParseOptions) -> Result<mdast::Node, message::Message> {
    // Parse into events, then compile those events into a tree.
    let (events, parse_state) = parser::parse(value, options)?;
    to_mdast::compile(&events, parse_state.bytes)
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/mdast.rs | Rust | //! markdown syntax tree: [mdast][].
//!
//! [mdast]: https://github.com/syntax-tree/mdast
use crate::unist::Position;
use alloc::{
fmt,
string::{String, ToString},
vec::Vec,
};
/// MDX: relative byte index into a string, to an absolute byte index into the
/// whole document.
///
/// NOTE(review): presumably `(relative, absolute)` in that order — confirm
/// against the code that builds these pairs.
pub type Stop = (usize, usize);
/// Explicitness of a reference.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(rename_all = "lowercase")
)]
pub enum ReferenceKind {
    /// The reference is implicit, its identifier inferred from its content.
    ///
    /// ```markdown
    /// > | [a]
    /// ```
    Shortcut,
    /// The reference is explicit, its identifier inferred from its content.
    ///
    /// ```markdown
    /// > | [a][]
    /// ```
    Collapsed,
    /// The reference is explicit, its identifier explicitly set.
    ///
    /// ```markdown
    /// > | [a][b]
    /// ```
    Full,
}
/// GFM: alignment of phrasing content.
///
/// Used to align the contents of table cells within a table.
/// Serialized as `"left"` / `"right"` / `"center"` / null, matching mdast's
/// `alignType` (see the serde impls below).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AlignKind {
    /// Left alignment.
    ///
    /// See the `left` value of the `text-align` CSS property.
    ///
    /// ```markdown
    ///   | | aaa |
    /// > | | :-- |
    ///       ^^^
    /// ```
    Left,
    /// Right alignment.
    ///
    /// See the `right` value of the `text-align` CSS property.
    ///
    /// ```markdown
    ///   | | aaa |
    /// > | | --: |
    ///       ^^^
    /// ```
    Right,
    /// Center alignment.
    ///
    /// See the `center` value of the `text-align` CSS property.
    ///
    /// ```markdown
    ///   | | aaa |
    /// > | | :-: |
    ///       ^^^
    /// ```
    Center,
    /// No alignment.
    ///
    /// Phrasing content is aligned as defined by the host environment.
    ///
    /// ```markdown
    ///   | | aaa |
    /// > | | --- |
    ///       ^^^
    /// ```
    None,
}
/// Implement serde according to <https://github.com/syntax-tree/mdast#aligntype>
#[cfg(feature = "serde")]
impl serde::ser::Serialize for AlignKind {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // `None` serializes as null; the rest as their lowercase names.
        let (index, name) = match self {
            AlignKind::Left => (0, "left"),
            AlignKind::Right => (1, "right"),
            AlignKind::Center => (2, "center"),
            AlignKind::None => return serializer.serialize_none(),
        };
        serializer.serialize_unit_variant("AlignKind", index, name)
    }
}
/// Visitor that deserializes an [`AlignKind`] from a string, bytes, or null.
#[cfg(feature = "serde")]
struct AlignKindVisitor;
#[cfg(feature = "serde")]
impl serde::de::Visitor<'_> for AlignKindVisitor {
type Value = AlignKind;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("'left', 'right', 'center' or null")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
match v {
"left" => Ok(AlignKind::Left),
"right" => Ok(AlignKind::Right),
"center" => Ok(AlignKind::Center),
&_ => Err(serde::de::Error::invalid_type(
serde::de::Unexpected::Str(v),
&self,
)),
}
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
match v {
b"left" => Ok(AlignKind::Left),
b"right" => Ok(AlignKind::Right),
b"center" => Ok(AlignKind::Center),
&_ => Err(serde::de::Error::invalid_type(
serde::de::Unexpected::Bytes(v),
&self,
)),
}
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(AlignKind::None)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(AlignKind::None)
}
}
#[cfg(feature = "serde")]
impl<'de> serde::de::Deserialize<'de> for AlignKind {
    /// Deserialize through [`AlignKindVisitor`]: accepts a string, bytes, or
    /// null/unit (which becomes [`AlignKind::None`]).
    fn deserialize<D>(deserializer: D) -> Result<AlignKind, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        deserializer.deserialize_any(AlignKindVisitor)
    }
}
/// Nodes.
#[derive(Clone, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(tag = "type", rename_all = "camelCase")
)]
pub enum Node {
    // Document:
    /// Root.
    Root(Root),
    // Container:
    /// Block quote.
    Blockquote(Blockquote),
    /// Footnote definition.
    FootnoteDefinition(FootnoteDefinition),
    /// MDX: JSX element (container).
    MdxJsxFlowElement(MdxJsxFlowElement),
    /// List.
    List(List),
    // Frontmatter:
    /// MDX.js ESM.
    MdxjsEsm(MdxjsEsm),
    /// Toml.
    Toml(Toml),
    /// Yaml.
    Yaml(Yaml),
    // Phrasing:
    /// Break.
    Break(Break),
    /// Code (phrasing).
    InlineCode(InlineCode),
    /// Math (phrasing).
    InlineMath(InlineMath),
    /// Delete.
    Delete(Delete),
    /// Emphasis.
    Emphasis(Emphasis),
    /// MDX: expression (text).
    MdxTextExpression(MdxTextExpression),
    /// Footnote reference.
    FootnoteReference(FootnoteReference),
    /// Html (phrasing).
    Html(Html),
    /// Image.
    Image(Image),
    /// Image reference.
    ImageReference(ImageReference),
    /// MDX: JSX element (text).
    MdxJsxTextElement(MdxJsxTextElement),
    /// Link.
    Link(Link),
    /// Link reference.
    LinkReference(LinkReference),
    /// Strong.
    Strong(Strong),
    /// Text.
    Text(Text),
    // Flow:
    /// Code (flow).
    Code(Code),
    /// Math (flow).
    Math(Math),
    /// MDX: expression (flow).
    MdxFlowExpression(MdxFlowExpression),
    /// Heading.
    Heading(Heading),
    // Html (flow) shares the `Html` variant/struct with phrasing:
    // Html(Html),
    /// Table.
    Table(Table),
    /// Thematic break.
    ThematicBreak(ThematicBreak),
    // Table content.
    /// Table row.
    TableRow(TableRow),
    // Row content.
    /// Table cell.
    TableCell(TableCell),
    // List content.
    /// List item.
    ListItem(ListItem),
    // Content.
    /// Definition.
    Definition(Definition),
    /// Paragraph.
    Paragraph(Paragraph),
}
impl fmt::Debug for Node {
    // Debug the wrapped struct: every variant simply forwards to the
    // `Debug` impl of the node struct it contains.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Node::Root(x) => x.fmt(f),
            Node::Blockquote(x) => x.fmt(f),
            Node::FootnoteDefinition(x) => x.fmt(f),
            Node::MdxJsxFlowElement(x) => x.fmt(f),
            Node::List(x) => x.fmt(f),
            Node::MdxjsEsm(x) => x.fmt(f),
            Node::Toml(x) => x.fmt(f),
            Node::Yaml(x) => x.fmt(f),
            Node::Break(x) => x.fmt(f),
            Node::InlineCode(x) => x.fmt(f),
            Node::InlineMath(x) => x.fmt(f),
            Node::Delete(x) => x.fmt(f),
            Node::Emphasis(x) => x.fmt(f),
            Node::MdxTextExpression(x) => x.fmt(f),
            Node::FootnoteReference(x) => x.fmt(f),
            Node::Html(x) => x.fmt(f),
            Node::Image(x) => x.fmt(f),
            Node::ImageReference(x) => x.fmt(f),
            Node::MdxJsxTextElement(x) => x.fmt(f),
            Node::Link(x) => x.fmt(f),
            Node::LinkReference(x) => x.fmt(f),
            Node::Strong(x) => x.fmt(f),
            Node::Text(x) => x.fmt(f),
            Node::Code(x) => x.fmt(f),
            Node::Math(x) => x.fmt(f),
            Node::MdxFlowExpression(x) => x.fmt(f),
            Node::Heading(x) => x.fmt(f),
            Node::Table(x) => x.fmt(f),
            Node::ThematicBreak(x) => x.fmt(f),
            Node::TableRow(x) => x.fmt(f),
            Node::TableCell(x) => x.fmt(f),
            Node::ListItem(x) => x.fmt(f),
            Node::Definition(x) => x.fmt(f),
            Node::Paragraph(x) => x.fmt(f),
        }
    }
}
/// Concatenate the stringified form of every child into one string.
fn children_to_string(children: &[Node]) -> String {
    let mut result = String::new();
    for child in children {
        result.push_str(&child.to_string());
    }
    result
}
// To do: clippy may be right but that’s a breaking change.
#[allow(clippy::to_string_trait_impl)]
impl ToString for Node {
    /// Get the textual content of a node: parents concatenate their
    /// children, literals yield their `value`, and voids yield an empty
    /// string.
    fn to_string(&self) -> String {
        match self {
            // Parents.
            Node::Root(x) => children_to_string(&x.children),
            Node::Blockquote(x) => children_to_string(&x.children),
            Node::FootnoteDefinition(x) => children_to_string(&x.children),
            Node::MdxJsxFlowElement(x) => children_to_string(&x.children),
            Node::List(x) => children_to_string(&x.children),
            Node::Delete(x) => children_to_string(&x.children),
            Node::Emphasis(x) => children_to_string(&x.children),
            Node::MdxJsxTextElement(x) => children_to_string(&x.children),
            Node::Link(x) => children_to_string(&x.children),
            Node::LinkReference(x) => children_to_string(&x.children),
            Node::Strong(x) => children_to_string(&x.children),
            Node::Heading(x) => children_to_string(&x.children),
            Node::Table(x) => children_to_string(&x.children),
            Node::TableRow(x) => children_to_string(&x.children),
            Node::TableCell(x) => children_to_string(&x.children),
            Node::ListItem(x) => children_to_string(&x.children),
            Node::Paragraph(x) => children_to_string(&x.children),
            // Literals.
            Node::MdxjsEsm(x) => x.value.clone(),
            Node::Toml(x) => x.value.clone(),
            Node::Yaml(x) => x.value.clone(),
            Node::InlineCode(x) => x.value.clone(),
            Node::InlineMath(x) => x.value.clone(),
            Node::MdxTextExpression(x) => x.value.clone(),
            Node::Html(x) => x.value.clone(),
            Node::Text(x) => x.value.clone(),
            Node::Code(x) => x.value.clone(),
            Node::Math(x) => x.value.clone(),
            Node::MdxFlowExpression(x) => x.value.clone(),
            // Voids.
            Node::Break(_)
            | Node::FootnoteReference(_)
            | Node::Image(_)
            | Node::ImageReference(_)
            | Node::ThematicBreak(_)
            | Node::Definition(_) => String::new(),
        }
    }
}
impl Node {
    /// Get this node’s children, if it is a parent (`None` otherwise).
    #[must_use]
    pub fn children(&self) -> Option<&Vec<Node>> {
        match self {
            // Parent.
            Node::Root(x) => Some(&x.children),
            Node::Paragraph(x) => Some(&x.children),
            Node::Heading(x) => Some(&x.children),
            Node::Blockquote(x) => Some(&x.children),
            Node::List(x) => Some(&x.children),
            Node::ListItem(x) => Some(&x.children),
            Node::Emphasis(x) => Some(&x.children),
            Node::Strong(x) => Some(&x.children),
            Node::Link(x) => Some(&x.children),
            Node::LinkReference(x) => Some(&x.children),
            Node::FootnoteDefinition(x) => Some(&x.children),
            Node::Table(x) => Some(&x.children),
            Node::TableRow(x) => Some(&x.children),
            Node::TableCell(x) => Some(&x.children),
            Node::Delete(x) => Some(&x.children),
            Node::MdxJsxFlowElement(x) => Some(&x.children),
            Node::MdxJsxTextElement(x) => Some(&x.children),
            // Non-parent.
            _ => None,
        }
    }

    /// Get this node’s children mutably, if it is a parent.
    pub fn children_mut(&mut self) -> Option<&mut Vec<Node>> {
        match self {
            // Parent.
            Node::Root(x) => Some(&mut x.children),
            Node::Paragraph(x) => Some(&mut x.children),
            Node::Heading(x) => Some(&mut x.children),
            Node::Blockquote(x) => Some(&mut x.children),
            Node::List(x) => Some(&mut x.children),
            Node::ListItem(x) => Some(&mut x.children),
            Node::Emphasis(x) => Some(&mut x.children),
            Node::Strong(x) => Some(&mut x.children),
            Node::Link(x) => Some(&mut x.children),
            Node::LinkReference(x) => Some(&mut x.children),
            Node::FootnoteDefinition(x) => Some(&mut x.children),
            Node::Table(x) => Some(&mut x.children),
            Node::TableRow(x) => Some(&mut x.children),
            Node::TableCell(x) => Some(&mut x.children),
            Node::Delete(x) => Some(&mut x.children),
            Node::MdxJsxFlowElement(x) => Some(&mut x.children),
            Node::MdxJsxTextElement(x) => Some(&mut x.children),
            // Non-parent.
            _ => None,
        }
    }

    /// Get this node’s positional info, if any.
    #[must_use]
    pub fn position(&self) -> Option<&Position> {
        match self {
            Node::Root(x) => x.position.as_ref(),
            Node::Blockquote(x) => x.position.as_ref(),
            Node::FootnoteDefinition(x) => x.position.as_ref(),
            Node::MdxJsxFlowElement(x) => x.position.as_ref(),
            Node::List(x) => x.position.as_ref(),
            Node::MdxjsEsm(x) => x.position.as_ref(),
            Node::Toml(x) => x.position.as_ref(),
            Node::Yaml(x) => x.position.as_ref(),
            Node::Break(x) => x.position.as_ref(),
            Node::InlineCode(x) => x.position.as_ref(),
            Node::InlineMath(x) => x.position.as_ref(),
            Node::Delete(x) => x.position.as_ref(),
            Node::Emphasis(x) => x.position.as_ref(),
            Node::MdxTextExpression(x) => x.position.as_ref(),
            Node::FootnoteReference(x) => x.position.as_ref(),
            Node::Html(x) => x.position.as_ref(),
            Node::Image(x) => x.position.as_ref(),
            Node::ImageReference(x) => x.position.as_ref(),
            Node::MdxJsxTextElement(x) => x.position.as_ref(),
            Node::Link(x) => x.position.as_ref(),
            Node::LinkReference(x) => x.position.as_ref(),
            Node::Strong(x) => x.position.as_ref(),
            Node::Text(x) => x.position.as_ref(),
            Node::Code(x) => x.position.as_ref(),
            Node::Math(x) => x.position.as_ref(),
            Node::MdxFlowExpression(x) => x.position.as_ref(),
            Node::Heading(x) => x.position.as_ref(),
            Node::Table(x) => x.position.as_ref(),
            Node::ThematicBreak(x) => x.position.as_ref(),
            Node::TableRow(x) => x.position.as_ref(),
            Node::TableCell(x) => x.position.as_ref(),
            Node::ListItem(x) => x.position.as_ref(),
            Node::Definition(x) => x.position.as_ref(),
            Node::Paragraph(x) => x.position.as_ref(),
        }
    }

    /// Get this node’s positional info mutably, if any.
    pub fn position_mut(&mut self) -> Option<&mut Position> {
        match self {
            Node::Root(x) => x.position.as_mut(),
            Node::Blockquote(x) => x.position.as_mut(),
            Node::FootnoteDefinition(x) => x.position.as_mut(),
            Node::MdxJsxFlowElement(x) => x.position.as_mut(),
            Node::List(x) => x.position.as_mut(),
            Node::MdxjsEsm(x) => x.position.as_mut(),
            Node::Toml(x) => x.position.as_mut(),
            Node::Yaml(x) => x.position.as_mut(),
            Node::Break(x) => x.position.as_mut(),
            Node::InlineCode(x) => x.position.as_mut(),
            Node::InlineMath(x) => x.position.as_mut(),
            Node::Delete(x) => x.position.as_mut(),
            Node::Emphasis(x) => x.position.as_mut(),
            Node::MdxTextExpression(x) => x.position.as_mut(),
            Node::FootnoteReference(x) => x.position.as_mut(),
            Node::Html(x) => x.position.as_mut(),
            Node::Image(x) => x.position.as_mut(),
            Node::ImageReference(x) => x.position.as_mut(),
            Node::MdxJsxTextElement(x) => x.position.as_mut(),
            Node::Link(x) => x.position.as_mut(),
            Node::LinkReference(x) => x.position.as_mut(),
            Node::Strong(x) => x.position.as_mut(),
            Node::Text(x) => x.position.as_mut(),
            Node::Code(x) => x.position.as_mut(),
            Node::Math(x) => x.position.as_mut(),
            Node::MdxFlowExpression(x) => x.position.as_mut(),
            Node::Heading(x) => x.position.as_mut(),
            Node::Table(x) => x.position.as_mut(),
            Node::ThematicBreak(x) => x.position.as_mut(),
            Node::TableRow(x) => x.position.as_mut(),
            Node::TableCell(x) => x.position.as_mut(),
            Node::ListItem(x) => x.position.as_mut(),
            Node::Definition(x) => x.position.as_mut(),
            Node::Paragraph(x) => x.position.as_mut(),
        }
    }

    /// Set (or clear, with `None`) this node’s positional info.
    pub fn position_set(&mut self, position: Option<Position>) {
        match self {
            Node::Root(x) => x.position = position,
            Node::Blockquote(x) => x.position = position,
            Node::FootnoteDefinition(x) => x.position = position,
            Node::MdxJsxFlowElement(x) => x.position = position,
            Node::List(x) => x.position = position,
            Node::MdxjsEsm(x) => x.position = position,
            Node::Toml(x) => x.position = position,
            Node::Yaml(x) => x.position = position,
            Node::Break(x) => x.position = position,
            Node::InlineCode(x) => x.position = position,
            Node::InlineMath(x) => x.position = position,
            Node::Delete(x) => x.position = position,
            Node::Emphasis(x) => x.position = position,
            Node::MdxTextExpression(x) => x.position = position,
            Node::FootnoteReference(x) => x.position = position,
            Node::Html(x) => x.position = position,
            Node::Image(x) => x.position = position,
            Node::ImageReference(x) => x.position = position,
            Node::MdxJsxTextElement(x) => x.position = position,
            Node::Link(x) => x.position = position,
            Node::LinkReference(x) => x.position = position,
            Node::Strong(x) => x.position = position,
            Node::Text(x) => x.position = position,
            Node::Code(x) => x.position = position,
            Node::Math(x) => x.position = position,
            Node::MdxFlowExpression(x) => x.position = position,
            Node::Heading(x) => x.position = position,
            Node::Table(x) => x.position = position,
            Node::ThematicBreak(x) => x.position = position,
            Node::TableRow(x) => x.position = position,
            Node::TableCell(x) => x.position = position,
            Node::ListItem(x) => x.position = position,
            Node::Definition(x) => x.position = position,
            Node::Paragraph(x) => x.position = position,
        }
    }
}
/// MDX: attribute content.
///
/// Serialized untagged: the two shapes are distinguished structurally.
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(untagged)
)]
pub enum AttributeContent {
    /// JSX expression.
    ///
    /// ```markdown
    /// > | <a {...b} />
    ///        ^^^^^^
    /// ```
    Expression(MdxJsxExpressionAttribute),
    /// JSX property.
    ///
    /// ```markdown
    /// > | <a b />
    ///        ^
    /// ```
    Property(MdxJsxAttribute),
}
/// MDX: JSX attribute value set to an expression.
///
/// ```markdown
/// > | <a b={c} />
///          ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(tag = "type", rename = "mdxJsxAttributeValueExpression")
)]
pub struct AttributeValueExpression {
    /// Source of the expression.
    pub value: String,
    /// Mapping of relative byte indices in `value` to absolute byte indices
    /// in the whole document (see [`Stop`]).
    #[cfg_attr(feature = "serde", serde(rename = "_markdownRsStops"))]
    pub stops: Vec<Stop>,
}
/// MDX: attribute value.
///
/// Serialized untagged: the two shapes are distinguished structurally.
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(untagged)
)]
pub enum AttributeValue {
    /// Expression value.
    ///
    /// ```markdown
    /// > | <a b={c} />
    ///          ^^^
    /// ```
    Expression(AttributeValueExpression),
    /// Static value.
    ///
    /// ```markdown
    /// > | <a b="c" />
    ///          ^^^
    /// ```
    Literal(String),
}
/// Document.
///
/// ```markdown
/// > | a
///     ^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Root {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Paragraph.
///
/// ```markdown
/// > | a
///     ^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Paragraph {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Heading.
///
/// ```markdown
/// > | # a
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Heading {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Extra.
    /// Rank (between `1` and `6`, both including).
    pub depth: u8,
}
/// Thematic break.
///
/// ```markdown
/// > | ***
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ThematicBreak {
    // Void.
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Block quote.
///
/// ```markdown
/// > | > a
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Blockquote {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// List.
///
/// ```markdown
/// > | * a
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct List {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Extra.
    /// Ordered (`true`) or unordered (`false`).
    pub ordered: bool,
    /// Starting number of the list.
    /// `None` when unordered.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub start: Option<u32>,
    /// One or more of its children are separated with a blank line from its
    /// siblings (when `true`), or not (when `false`).
    pub spread: bool,
}
/// List item.
///
/// ```markdown
/// > | * a
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ListItem {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Extra.
    /// The item contains two or more children separated by a blank line
    /// (when `true`), or not (when `false`).
    pub spread: bool,
    /// GFM: whether the item is done (when `true`), not done (when `false`),
    /// or indeterminate or not applicable (`None`).
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub checked: Option<bool>,
}
/// Html (flow or phrasing).
///
/// ```markdown
/// > | <a>
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Html {
    // Text.
    /// Content model: the raw HTML source.
    pub value: String,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Code (flow).
///
/// ```markdown
/// > | ~~~
///     ^^^
/// > | a
///     ^
/// > | ~~~
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Code {
    // Text.
    /// Content model: the code itself, without fences or info string.
    pub value: String,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Extra.
    /// The language of computer code being marked up.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub lang: Option<String>,
    /// Custom info relating to the node.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub meta: Option<String>,
}
/// Math (flow).
///
/// ```markdown
/// > | $$
///     ^^
/// > | a
///     ^
/// > | $$
///     ^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Math {
    // Text.
    /// Content model: the math itself, without fences.
    pub value: String,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Extra.
    /// Custom info relating to the node.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub meta: Option<String>,
}
/// Definition.
///
/// ```markdown
/// > | [a]: b
///     ^^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Definition {
    // Void.
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Resource.
    /// URL to the referenced resource.
    pub url: String,
    /// Advisory info for the resource, such as something that would be
    /// appropriate for a tooltip.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub title: Option<String>,
    // Association.
    /// Value that can match another node.
    /// `identifier` is a source value: character escapes and character references
    /// are *not* parsed.
    /// Its value must be normalized.
    pub identifier: String,
    /// `label` is a string value: it works just like `title` on a link or a
    /// `lang` on code: character escapes and character references are parsed.
    ///
    /// To normalize a value, collapse markdown whitespace (`[\t\n\r ]+`) to a
    /// space, trim the optional initial and/or final space, and perform
    /// case-folding.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub label: Option<String>,
}
/// Text.
///
/// ```markdown
/// > | a
///     ^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Text {
    // Text.
    /// Content model.
    pub value: String,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Emphasis.
///
/// ```markdown
/// > | *a*
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Emphasis {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Strong.
///
/// ```markdown
/// > | **a**
///     ^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Strong {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Code (phrasing).
///
/// ```markdown
/// > | `a`
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct InlineCode {
    // Text.
    /// Content model: the code itself, without backticks.
    pub value: String,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Math (phrasing).
///
/// ```markdown
/// > | $a$
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct InlineMath {
    // Text.
    /// Content model: the math itself, without dollars.
    pub value: String,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Break (hard line break).
///
/// ```markdown
/// > | a\
///      ^
///   | b
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Break {
    // Void.
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Link.
///
/// ```markdown
/// > | [a](b)
///     ^^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Link {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    /// Skipped during serialization when `None`.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Resource.
    /// URL to the referenced resource.
    pub url: String,
    /// Advisory info for the resource, such as something that would be
    /// appropriate for a tooltip.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub title: Option<String>,
}
/// Image.
///
/// ```markdown
/// > | ![a](b)
///     ^^^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Image {
    // Void: images have no children; their text lives in `alt`.
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Alternative.
    /// Equivalent content for environments that cannot represent the node as
    /// intended.
    pub alt: String,
    // Resource.
    /// URL to the referenced resource.
    pub url: String,
    /// Advisory info for the resource, such as something that would be
    /// appropriate for a tooltip.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub title: Option<String>,
}

/// Link reference.
///
/// ```markdown
/// > | [a]
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct LinkReference {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Reference.
    /// Explicitness of a reference.
    // Serialized as `referenceType` to match the mdast JSON shape.
    #[cfg_attr(feature = "serde", serde(rename = "referenceType"))]
    pub reference_kind: ReferenceKind,
    // Association.
    /// Value that can match another node.
    /// `identifier` is a source value: character escapes and character references
    /// are *not* parsed.
    /// Its value must be normalized.
    pub identifier: String,
    /// `label` is a string value: it works just like `title` on a link or a
    /// `lang` on code: character escapes and character references are parsed.
    ///
    /// To normalize a value, collapse markdown whitespace (`[\t\n\r ]+`) to a
    /// space, trim the optional initial and/or final space, and perform
    /// case-folding.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub label: Option<String>,
}

/// Image reference.
///
/// ```markdown
/// > | ![a]
///     ^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ImageReference {
    // Void: like `Image`, text lives in `alt` rather than in children.
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Alternative.
    /// Equivalent content for environments that cannot represent the node as
    /// intended.
    pub alt: String,
    // Reference.
    /// Explicitness of a reference.
    // Serialized as `referenceType` to match the mdast JSON shape.
    #[cfg_attr(feature = "serde", serde(rename = "referenceType"))]
    pub reference_kind: ReferenceKind,
    // Association.
    /// Value that can match another node.
    /// `identifier` is a source value: character escapes and character references
    /// are *not* parsed.
    /// Its value must be normalized.
    pub identifier: String,
    /// `label` is a string value: it works just like `title` on a link or a
    /// `lang` on code: character escapes and character references are parsed.
    ///
    /// To normalize a value, collapse markdown whitespace (`[\t\n\r ]+`) to a
    /// space, trim the optional initial and/or final space, and perform
    /// case-folding.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub label: Option<String>,
}
/// GFM: footnote definition.
///
/// ```markdown
/// > | [^a]: b
///     ^^^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct FootnoteDefinition {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Association.
    /// Value that can match another node.
    /// `identifier` is a source value: character escapes and character references
    /// are *not* parsed.
    /// Its value must be normalized.
    pub identifier: String,
    /// `label` is a string value: it works just like `title` on a link or a
    /// `lang` on code: character escapes and character references are parsed.
    ///
    /// To normalize a value, collapse markdown whitespace (`[\t\n\r ]+`) to a
    /// space, trim the optional initial and/or final space, and perform
    /// case-folding.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub label: Option<String>,
}

/// GFM: footnote reference.
///
/// ```markdown
/// > | [^a]
///     ^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct FootnoteReference {
    // Void.
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Association.
    /// Value that can match another node.
    /// `identifier` is a source value: character escapes and character references
    /// are *not* parsed.
    /// Its value must be normalized.
    pub identifier: String,
    /// `label` is a string value: it works just like `title` on a link or a
    /// `lang` on code: character escapes and character references are parsed.
    ///
    /// To normalize a value, collapse markdown whitespace (`[\t\n\r ]+`) to a
    /// space, trim the optional initial and/or final space, and perform
    /// case-folding.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub label: Option<String>,
}

/// GFM: table.
///
/// ```markdown
/// > | | a |
///     ^^^^^
/// > | | - |
///     ^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Table {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Extra.
    /// Represents how cells in columns are aligned.
    pub align: Vec<AlignKind>,
}

/// GFM: table row.
///
/// ```markdown
/// > | | a |
///     ^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct TableRow {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}

/// GFM: table cell.
///
/// ```markdown
/// > | | a |
///     ^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct TableCell {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}

/// GFM: delete (strikethrough).
///
/// ```markdown
/// > | ~~a~~
///     ^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Delete {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// Frontmatter: yaml.
///
/// ```markdown
/// > | ---
///     ^^^
/// > | a: b
///     ^^^^
/// > | ---
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Yaml {
    // Void: the raw frontmatter text is kept as-is, not parsed into children.
    /// Content model.
    pub value: String,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}

/// Frontmatter: toml.
///
/// ```markdown
/// > | +++
///     ^^^
/// > | a: b
///     ^^^^
/// > | +++
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Toml {
    // Void: the raw frontmatter text is kept as-is, not parsed into children.
    /// Content model.
    pub value: String,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
}
/// MDX: ESM.
///
/// ```markdown
/// > | import a from 'b'
///     ^^^^^^^^^^^^^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MdxjsEsm {
    // Literal.
    /// Content model.
    pub value: String,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Custom data on where each slice of `value` came from.
    // Serialized under a crate-private key so round-tripping keeps the info.
    #[cfg_attr(feature = "serde", serde(rename = "_markdownRsStops"))]
    pub stops: Vec<Stop>,
}

/// MDX: expression (flow).
///
/// ```markdown
/// > | {a}
///     ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MdxFlowExpression {
    // Literal.
    /// Content model.
    pub value: String,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Custom data on where each slice of `value` came from.
    // Serialized under a crate-private key so round-tripping keeps the info.
    #[cfg_attr(feature = "serde", serde(rename = "_markdownRsStops"))]
    pub stops: Vec<Stop>,
}

/// MDX: expression (text).
///
/// ```markdown
/// > | a {b}
///       ^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MdxTextExpression {
    // Literal.
    /// Content model.
    pub value: String,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // Custom data on where each slice of `value` came from.
    // Serialized under a crate-private key so round-tripping keeps the info.
    #[cfg_attr(feature = "serde", serde(rename = "_markdownRsStops"))]
    pub stops: Vec<Stop>,
}
/// MDX: JSX element (container).
///
/// ```markdown
/// > | <a />
///     ^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(rename_all = "camelCase")
)]
pub struct MdxJsxFlowElement {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // JSX element.
    /// Name.
    ///
    /// Fragments have no name.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub name: Option<String>,
    /// Attributes.
    pub attributes: Vec<AttributeContent>,
}

/// MDX: JSX element (text).
///
/// ```markdown
/// > | <a />.
///     ^^^^^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(rename_all = "camelCase")
)]
pub struct MdxJsxTextElement {
    // Parent.
    /// Content model.
    pub children: Vec<Node>,
    /// Positional info.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub position: Option<Position>,
    // JSX element.
    /// Name.
    ///
    /// Fragments have no name.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub name: Option<String>,
    /// Attributes.
    pub attributes: Vec<AttributeContent>,
}
/// MDX: JSX attribute.
///
/// ```markdown
/// > | <a b />
///        ^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(tag = "type", rename = "mdxJsxAttribute")
)]
pub struct MdxJsxAttribute {
    // Void.
    // Positional info is currently not tracked for attributes; the field is
    // kept here (disabled) for parity with the other node types.
    //
    // The comment below was previously a `///` doc comment, which — because a
    // plain `//` line does not break doc attachment — documented `name` as
    // "Positional info." in rustdoc. Demoted to a regular comment.
    // pub position: Option<Position>,
    /// Key.
    pub name: String,
    /// Value.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub value: Option<AttributeValue>,
}
/// MDX: JSX expression attribute.
///
/// ```markdown
/// > | <a {...b} />
///        ^
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "serde",
    derive(serde::Serialize, serde::Deserialize),
    serde(tag = "type", rename = "mdxJsxExpressionAttribute")
)]
pub struct MdxJsxExpressionAttribute {
    /// Value.
    pub value: String,
    /// Stops: custom data on where each slice of `value` came from.
    #[cfg_attr(feature = "serde", serde(rename = "_markdownRsStops"))]
    pub stops: Vec<Stop>,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::unist::Position;
use alloc::{format, string::ToString, vec};
// Literals.
//
// Each test builds a node, then checks `Debug`, `ToString`, the
// `children`/`children_mut` and `position`/`position_mut` accessors, and
// finally `position_set`.
//
// Note: the `position_mut` assertion messages previously said
// "should support `position`" (copy-paste); fixed to name the right method.
#[test]
fn text() {
    let mut node = Node::Text(Text {
        value: "a".into(),
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "Text { value: \"a\", position: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Text { value: \"a\", position: Some(1:1-1:2 (0-1)) }",
        "should support `position_set`"
    );
}
#[test]
fn inline_code() {
    let mut node = Node::InlineCode(InlineCode {
        value: "a".into(),
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "InlineCode { value: \"a\", position: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "InlineCode { value: \"a\", position: Some(1:1-1:2 (0-1)) }",
        "should support `position_set`"
    );
}
#[test]
fn code() {
    let mut node = Node::Code(Code {
        value: "a".into(),
        position: None,
        lang: None,
        meta: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "Code { value: \"a\", position: None, lang: None, meta: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Code { value: \"a\", position: Some(1:1-1:2 (0-1)), lang: None, meta: None }",
        "should support `position_set`"
    );
}
#[test]
fn inline_math() {
    let mut node = Node::InlineMath(InlineMath {
        value: "a".into(),
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "InlineMath { value: \"a\", position: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "InlineMath { value: \"a\", position: Some(1:1-1:2 (0-1)) }",
        "should support `position_set`"
    );
}
#[test]
fn math() {
    let mut node = Node::Math(Math {
        value: "a".into(),
        position: None,
        meta: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "Math { value: \"a\", position: None, meta: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Math { value: \"a\", position: Some(1:1-1:2 (0-1)), meta: None }",
        "should support `position_set`"
    );
}
#[test]
fn html() {
    let mut node = Node::Html(Html {
        value: "a".into(),
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "Html { value: \"a\", position: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Html { value: \"a\", position: Some(1:1-1:2 (0-1)) }",
        "should support `position_set`"
    );
}
#[test]
fn mdx_text_expression() {
    let mut node = Node::MdxTextExpression(MdxTextExpression {
        value: "a".into(),
        stops: vec![],
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "MdxTextExpression { value: \"a\", position: None, stops: [] }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "MdxTextExpression { value: \"a\", position: Some(1:1-1:2 (0-1)), stops: [] }",
        "should support `position_set`"
    );
}
#[test]
fn mdx_flow_expression() {
    let mut node = Node::MdxFlowExpression(MdxFlowExpression {
        value: "a".into(),
        stops: vec![],
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "MdxFlowExpression { value: \"a\", position: None, stops: [] }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "MdxFlowExpression { value: \"a\", position: Some(1:1-1:2 (0-1)), stops: [] }",
        "should support `position_set`"
    );
}
#[test]
fn mdxjs_esm() {
    let mut node = Node::MdxjsEsm(MdxjsEsm {
        value: "a".into(),
        stops: vec![],
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "MdxjsEsm { value: \"a\", position: None, stops: [] }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "MdxjsEsm { value: \"a\", position: Some(1:1-1:2 (0-1)), stops: [] }",
        "should support `position_set`"
    );
}
#[test]
fn toml() {
    let mut node = Node::Toml(Toml {
        value: "a".into(),
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "Toml { value: \"a\", position: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Toml { value: \"a\", position: Some(1:1-1:2 (0-1)) }",
        "should support `position_set`"
    );
}
#[test]
fn yaml() {
    let mut node = Node::Yaml(Yaml {
        value: "a".into(),
        position: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "Yaml { value: \"a\", position: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "a", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Yaml { value: \"a\", position: Some(1:1-1:2 (0-1)) }",
        "should support `position_set`"
    );
}
// Voids.
//
// Void nodes have no children and an empty `ToString`; the tests check the
// same accessor surface as the literal tests above.
//
// Note: the `position_mut` assertion messages previously said
// "should support `position`" (copy-paste); fixed to name the right method.
#[test]
fn break_node() {
    let mut node = Node::Break(Break { position: None });
    assert_eq!(
        format!("{:?}", node),
        "Break { position: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Break { position: Some(1:1-1:2 (0-1)) }",
        "should support `position_set`"
    );
}
#[test]
fn thematic_break() {
    let mut node = Node::ThematicBreak(ThematicBreak { position: None });
    assert_eq!(
        format!("{:?}", node),
        "ThematicBreak { position: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "ThematicBreak { position: Some(1:1-1:2 (0-1)) }",
        "should support `position_set`"
    );
}
#[test]
fn footnote_reference() {
    let mut node = Node::FootnoteReference(FootnoteReference {
        position: None,
        identifier: "a".into(),
        label: Some("b".into()),
    });
    assert_eq!(
        format!("{:?}", node),
        "FootnoteReference { position: None, identifier: \"a\", label: Some(\"b\") }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "FootnoteReference { position: Some(1:1-1:2 (0-1)), identifier: \"a\", label: Some(\"b\") }",
        "should support `position_set`"
    );
}
#[test]
fn image_reference() {
    let mut node = Node::ImageReference(ImageReference {
        position: None,
        alt: "a".into(),
        identifier: "b".into(),
        label: Some("c".into()),
        reference_kind: ReferenceKind::Full,
    });
    assert_eq!(
        format!("{:?}", node),
        "ImageReference { position: None, alt: \"a\", reference_kind: Full, identifier: \"b\", label: Some(\"c\") }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "ImageReference { position: Some(1:1-1:2 (0-1)), alt: \"a\", reference_kind: Full, identifier: \"b\", label: Some(\"c\") }",
        "should support `position_set`"
    );
}
#[test]
fn image() {
    let mut node = Node::Image(Image {
        position: None,
        alt: "a".into(),
        url: "b".into(),
        title: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "Image { position: None, alt: \"a\", url: \"b\", title: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Image { position: Some(1:1-1:2 (0-1)), alt: \"a\", url: \"b\", title: None }",
        "should support `position_set`"
    );
}
#[test]
fn definition() {
    let mut node = Node::Definition(Definition {
        position: None,
        identifier: "a".into(),
        label: None,
        url: "b".into(),
        title: None,
    });
    assert_eq!(
        format!("{:?}", node),
        "Definition { position: None, url: \"b\", title: None, identifier: \"a\", label: None }",
        "should support `Debug`"
    );
    assert_eq!(node.to_string(), "", "should support `ToString`");
    assert_eq!(node.children_mut(), None, "should support `children_mut`");
    assert_eq!(node.children(), None, "should support `children`");
    assert_eq!(node.position(), None, "should support `position`");
    assert_eq!(node.position_mut(), None, "should support `position_mut`");
    node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", node),
        "Definition { position: Some(1:1-1:2 (0-1)), url: \"b\", title: None, identifier: \"a\", label: None }",
        "should support `position_set`"
    );
}
// Parents.
#[test]
fn root() {
let mut node = Node::Root(Root {
position: None,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"Root { children: [], position: None }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"Root { children: [], position: Some(1:1-1:2 (0-1)) }",
"should support `position_set`"
);
}
#[test]
fn block_quote() {
let mut node = Node::Blockquote(Blockquote {
position: None,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"Blockquote { children: [], position: None }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"Blockquote { children: [], position: Some(1:1-1:2 (0-1)) }",
"should support `position_set`"
);
}
#[test]
fn delete() {
let mut node = Node::Delete(Delete {
position: None,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"Delete { children: [], position: None }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"Delete { children: [], position: Some(1:1-1:2 (0-1)) }",
"should support `position_set`"
);
}
#[test]
fn emphasis() {
let mut node = Node::Emphasis(Emphasis {
position: None,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"Emphasis { children: [], position: None }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"Emphasis { children: [], position: Some(1:1-1:2 (0-1)) }",
"should support `position_set`"
);
}
#[test]
fn strong() {
let mut node = Node::Strong(Strong {
position: None,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"Strong { children: [], position: None }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"Strong { children: [], position: Some(1:1-1:2 (0-1)) }",
"should support `position_set`"
);
}
#[test]
fn paragraph() {
let mut node = Node::Paragraph(Paragraph {
position: None,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"Paragraph { children: [], position: None }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"Paragraph { children: [], position: Some(1:1-1:2 (0-1)) }",
"should support `position_set`"
);
}
#[test]
fn table_row() {
let mut node = Node::TableRow(TableRow {
position: None,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"TableRow { children: [], position: None }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"TableRow { children: [], position: Some(1:1-1:2 (0-1)) }",
"should support `position_set`"
);
}
#[test]
fn table_cell() {
let mut node = Node::TableCell(TableCell {
position: None,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"TableCell { children: [], position: None }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"TableCell { children: [], position: Some(1:1-1:2 (0-1)) }",
"should support `position_set`"
);
}
#[test]
fn heading() {
let mut node = Node::Heading(Heading {
position: None,
depth: 1,
children: vec![],
});
assert_eq!(
format!("{:?}", node),
"Heading { children: [], position: None, depth: 1 }",
"should support `Debug`"
);
assert_eq!(node.to_string(), "", "should support `ToString`");
assert_eq!(
node.children_mut(),
Some(&mut vec![]),
"should support `children_mut`"
);
assert_eq!(node.children(), Some(&vec![]), "should support `children`");
assert_eq!(node.position(), None, "should support `position`");
assert_eq!(node.position_mut(), None, "should support `position`");
node.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
assert_eq!(
format!("{:?}", node),
"Heading { children: [], position: Some(1:1-1:2 (0-1)), depth: 1 }",
"should support `position_set`"
);
}
#[test]
fn table() {
    // A table with no alignment, children, or position.
    let mut subject = Node::Table(Table {
        position: None,
        align: vec![],
        children: vec![],
    });
    assert_eq!(
        format!("{:?}", subject),
        "Table { children: [], position: None, align: [] }",
        "should support `Debug`"
    );
    assert_eq!(subject.to_string(), "", "should support `ToString`");
    // Position accessors report nothing before a position is attached.
    assert_eq!(subject.position(), None, "should support `position`");
    assert_eq!(subject.position_mut(), None, "should support `position`");
    // Both child accessors expose the (empty) child list.
    assert_eq!(subject.children(), Some(&vec![]), "should support `children`");
    assert_eq!(
        subject.children_mut(),
        Some(&mut vec![]),
        "should support `children_mut`"
    );
    // Attaching a position shows up in the debug representation.
    subject.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", subject),
        "Table { children: [], position: Some(1:1-1:2 (0-1)), align: [] }",
        "should support `position_set`"
    );
}
#[test]
fn list_item() {
    // A tight, uncheckable list item with no children or position.
    let mut subject = Node::ListItem(ListItem {
        position: None,
        spread: false,
        checked: None,
        children: vec![],
    });
    assert_eq!(
        format!("{:?}", subject),
        "ListItem { children: [], position: None, spread: false, checked: None }",
        "should support `Debug`"
    );
    assert_eq!(subject.to_string(), "", "should support `ToString`");
    // Position accessors report nothing before a position is attached.
    assert_eq!(subject.position(), None, "should support `position`");
    assert_eq!(subject.position_mut(), None, "should support `position`");
    // Both child accessors expose the (empty) child list.
    assert_eq!(subject.children(), Some(&vec![]), "should support `children`");
    assert_eq!(
        subject.children_mut(),
        Some(&mut vec![]),
        "should support `children_mut`"
    );
    // Attaching a position shows up in the debug representation.
    subject.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", subject),
        "ListItem { children: [], position: Some(1:1-1:2 (0-1)), spread: false, checked: None }",
        "should support `position_set`"
    );
}
#[test]
fn list() {
    // A tight, unordered list with no children or position.
    let mut subject = Node::List(List {
        position: None,
        spread: false,
        ordered: false,
        start: None,
        children: vec![],
    });
    assert_eq!(
        format!("{:?}", subject),
        "List { children: [], position: None, ordered: false, start: None, spread: false }",
        "should support `Debug`"
    );
    assert_eq!(subject.to_string(), "", "should support `ToString`");
    // Position accessors report nothing before a position is attached.
    assert_eq!(subject.position(), None, "should support `position`");
    assert_eq!(subject.position_mut(), None, "should support `position`");
    // Both child accessors expose the (empty) child list.
    assert_eq!(subject.children(), Some(&vec![]), "should support `children`");
    assert_eq!(
        subject.children_mut(),
        Some(&mut vec![]),
        "should support `children_mut`"
    );
    // Attaching a position shows up in the debug representation.
    subject.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", subject),
        "List { children: [], position: Some(1:1-1:2 (0-1)), ordered: false, start: None, spread: false }",
        "should support `position_set`"
    );
}
#[test]
fn link_reference() {
    // A full link reference with identifier `a` and no label.
    let mut subject = Node::LinkReference(LinkReference {
        position: None,
        identifier: "a".into(),
        label: None,
        reference_kind: ReferenceKind::Full,
        children: vec![],
    });
    assert_eq!(
        format!("{:?}", subject),
        "LinkReference { children: [], position: None, reference_kind: Full, identifier: \"a\", label: None }",
        "should support `Debug`"
    );
    assert_eq!(subject.to_string(), "", "should support `ToString`");
    // Position accessors report nothing before a position is attached.
    assert_eq!(subject.position(), None, "should support `position`");
    assert_eq!(subject.position_mut(), None, "should support `position`");
    // Both child accessors expose the (empty) child list.
    assert_eq!(subject.children(), Some(&vec![]), "should support `children`");
    assert_eq!(
        subject.children_mut(),
        Some(&mut vec![]),
        "should support `children_mut`"
    );
    // Attaching a position shows up in the debug representation.
    subject.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", subject),
        "LinkReference { children: [], position: Some(1:1-1:2 (0-1)), reference_kind: Full, identifier: \"a\", label: None }",
        "should support `position_set`"
    );
}
#[test]
fn link() {
    // A link to `a` with no title, children, or position.
    let mut subject = Node::Link(Link {
        position: None,
        url: "a".into(),
        title: None,
        children: vec![],
    });
    assert_eq!(
        format!("{:?}", subject),
        "Link { children: [], position: None, url: \"a\", title: None }",
        "should support `Debug`"
    );
    assert_eq!(subject.to_string(), "", "should support `ToString`");
    // Position accessors report nothing before a position is attached.
    assert_eq!(subject.position(), None, "should support `position`");
    assert_eq!(subject.position_mut(), None, "should support `position`");
    // Both child accessors expose the (empty) child list.
    assert_eq!(subject.children(), Some(&vec![]), "should support `children`");
    assert_eq!(
        subject.children_mut(),
        Some(&mut vec![]),
        "should support `children_mut`"
    );
    // Attaching a position shows up in the debug representation.
    subject.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", subject),
        "Link { children: [], position: Some(1:1-1:2 (0-1)), url: \"a\", title: None }",
        "should support `position_set`"
    );
}
#[test]
fn footnote_definition() {
    // A footnote definition with identifier `a` and no label.
    let mut subject = Node::FootnoteDefinition(FootnoteDefinition {
        position: None,
        identifier: "a".into(),
        label: None,
        children: vec![],
    });
    assert_eq!(
        format!("{:?}", subject),
        "FootnoteDefinition { children: [], position: None, identifier: \"a\", label: None }",
        "should support `Debug`"
    );
    assert_eq!(subject.to_string(), "", "should support `ToString`");
    // Position accessors report nothing before a position is attached.
    assert_eq!(subject.position(), None, "should support `position`");
    assert_eq!(subject.position_mut(), None, "should support `position`");
    // Both child accessors expose the (empty) child list.
    assert_eq!(subject.children(), Some(&vec![]), "should support `children`");
    assert_eq!(
        subject.children_mut(),
        Some(&mut vec![]),
        "should support `children_mut`"
    );
    // Attaching a position shows up in the debug representation.
    subject.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", subject),
        "FootnoteDefinition { children: [], position: Some(1:1-1:2 (0-1)), identifier: \"a\", label: None }",
        "should support `position_set`"
    );
}
#[test]
fn mdx_jsx_flow_element() {
    // A nameless JSX flow element (fragment) with no attributes.
    let mut subject = Node::MdxJsxFlowElement(MdxJsxFlowElement {
        position: None,
        name: None,
        attributes: vec![],
        children: vec![],
    });
    assert_eq!(
        format!("{:?}", subject),
        "MdxJsxFlowElement { children: [], position: None, name: None, attributes: [] }",
        "should support `Debug`"
    );
    assert_eq!(subject.to_string(), "", "should support `ToString`");
    // Position accessors report nothing before a position is attached.
    assert_eq!(subject.position(), None, "should support `position`");
    assert_eq!(subject.position_mut(), None, "should support `position`");
    // Both child accessors expose the (empty) child list.
    assert_eq!(subject.children(), Some(&vec![]), "should support `children`");
    assert_eq!(
        subject.children_mut(),
        Some(&mut vec![]),
        "should support `children_mut`"
    );
    // Attaching a position shows up in the debug representation.
    subject.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", subject),
        "MdxJsxFlowElement { children: [], position: Some(1:1-1:2 (0-1)), name: None, attributes: [] }",
        "should support `position_set`"
    );
}
#[test]
fn mdx_jsx_text_element() {
    // A nameless JSX text element (fragment) with no attributes.
    let mut subject = Node::MdxJsxTextElement(MdxJsxTextElement {
        position: None,
        name: None,
        attributes: vec![],
        children: vec![],
    });
    assert_eq!(
        format!("{:?}", subject),
        "MdxJsxTextElement { children: [], position: None, name: None, attributes: [] }",
        "should support `Debug`"
    );
    assert_eq!(subject.to_string(), "", "should support `ToString`");
    // Position accessors report nothing before a position is attached.
    assert_eq!(subject.position(), None, "should support `position`");
    assert_eq!(subject.position_mut(), None, "should support `position`");
    // Both child accessors expose the (empty) child list.
    assert_eq!(subject.children(), Some(&vec![]), "should support `children`");
    assert_eq!(
        subject.children_mut(),
        Some(&mut vec![]),
        "should support `children_mut`"
    );
    // Attaching a position shows up in the debug representation.
    subject.position_set(Some(Position::new(1, 1, 0, 1, 2, 1)));
    assert_eq!(
        format!("{:?}", subject),
        "MdxJsxTextElement { children: [], position: Some(1:1-1:2 (0-1)), name: None, attributes: [] }",
        "should support `position_set`"
    );
}
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/message.rs | Rust | use crate::unist::{Point, Position};
use alloc::{boxed::Box, fmt, string::String};
/// A message (warning or error) produced while handling markdown.
#[derive(Clone, Debug, PartialEq)]
pub struct Message {
    /// Place of message.
    ///
    /// `None` when the message is not tied to a particular point or span.
    pub place: Option<Box<Place>>,
    /// Reason for message (should use markdown).
    pub reason: String,
    /// Category of message.
    ///
    /// Rendered together with `source` as a `source:rule_id` suffix by the
    /// `Display` implementation.
    pub rule_id: Box<String>,
    /// Namespace of message.
    pub source: Box<String>,
}
impl fmt::Display for Message {
    /// Format as `place: reason (source:rule_id)`; the `place: ` prefix is
    /// omitted when no place is known.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.place {
            Some(place) => write!(
                formatter,
                "{}: {} ({}:{})",
                place, self.reason, self.source, self.rule_id
            ),
            None => write!(
                formatter,
                "{} ({}:{})",
                self.reason, self.source, self.rule_id
            ),
        }
    }
}
/// Somewhere.
///
/// Either a span between two points, or a single point.
#[derive(Clone, Debug, PartialEq)]
pub enum Place {
    /// Between two points.
    Position(Position),
    /// At a point.
    Point(Point),
}
impl fmt::Display for Place {
    /// Format a point as `line:column` and a position as
    /// `line:column-line:column` (1-indexed, matching `unist`).
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Place::Point(point) => {
                write!(formatter, "{}:{}", point.line, point.column)
            }
            Place::Position(position) => {
                let start = &position.start;
                let end = &position.end;
                write!(
                    formatter,
                    "{}:{}-{}:{}",
                    start.line, start.column, end.line, end.column
                )
            }
        }
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/parser.rs | Rust | //! Turn bytes of markdown into events.
use crate::event::{Event, Point};
use crate::message;
use crate::state::{Name as StateName, State};
use crate::subtokenize::subtokenize;
use crate::tokenizer::Tokenizer;
use crate::util::location::Location;
use crate::ParseOptions;
use alloc::{string::String, vec, vec::Vec};
/// Info needed, in all content types, when parsing markdown.
///
/// Importantly, this contains a set of known definitions.
/// It also references the input value as bytes (`u8`).
#[derive(Debug)]
pub struct ParseState<'a> {
    /// Location helper over `bytes`.
    ///
    /// Only built when MDX ESM or expression parsing is configured (see
    /// `parse`); `None` otherwise.
    pub location: Option<Location>,
    /// Configuration.
    pub options: &'a ParseOptions,
    /// The input value as bytes.
    pub bytes: &'a [u8],
    /// Set of defined definition identifiers.
    pub definitions: Vec<String>,
    /// Set of defined GFM footnote definition identifiers.
    pub gfm_footnote_definitions: Vec<String>,
}
/// Turn a string of markdown into events.
///
/// Passes the bytes back so the compiler can access the source.
pub fn parse<'a>(
    value: &'a str,
    options: &'a ParseOptions,
) -> Result<(Vec<Event>, ParseState<'a>), message::Message> {
    let bytes = value.as_bytes();
    let mut parse_state = ParseState {
        options,
        bytes,
        // A location index is only needed when MDX ESM or expressions can
        // occur, so it is only built in that case.
        location: if options.mdx_esm_parse.is_some() || options.mdx_expression_parse.is_some() {
            Some(Location::new(bytes))
        } else {
            None
        },
        definitions: vec![],
        gfm_footnote_definitions: vec![],
    };
    // Parsing starts at the very beginning of the input.
    let start = Point {
        line: 1,
        column: 1,
        index: 0,
        vs: 0,
    };
    let mut tokenizer = Tokenizer::new(start, &parse_state);
    // Feed the whole document to the tokenizer in one go.
    let state = tokenizer.push(
        (0, 0),
        (parse_state.bytes.len(), 0),
        State::Next(StateName::DocumentStart),
    );
    let mut result = tokenizer.flush(state, true)?;
    let mut events = tokenizer.events;
    // Keep running `subtokenize` until it reports being done, collecting the
    // definitions and footnote definitions found by each pass.
    loop {
        let fn_defs = &mut parse_state.gfm_footnote_definitions;
        let defs = &mut parse_state.definitions;
        fn_defs.append(&mut result.gfm_footnote_definitions);
        defs.append(&mut result.definitions);
        if result.done {
            return Ok((events, parse_state));
        }
        result = subtokenize(&mut events, &parse_state, None)?;
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/resolve.rs | Rust | //! Resolve events.
use crate::construct;
use crate::message;
use crate::subtokenize::Subresult;
use crate::tokenizer::Tokenizer;
/// Names of resolvers.
///
/// Each variant maps to one resolver function (see `call` below).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Name {
    /// Resolve labels.
    ///
    /// Labels are parsed as starts and ends, and when they match, merged
    /// together to form media (links and images), and otherwise turned into
    /// data.
    Label,
    /// Resolve attention.
    ///
    /// Attention sequences are parsed and finally matched together to form
    /// attention (emphasis and strong) based on which characters they contain,
    /// and what occurs before and after each sequence.
    /// Otherwise they are turned into data.
    Attention,
    /// Resolve GFM tables.
    ///
    /// The table head, and later each row, are all parsed separately.
    /// Resolving groups everything together, and groups cells.
    GfmTable,
    /// Resolve heading (atx).
    ///
    /// Heading (atx) contains further sequences and data.
    /// At the end, a final sequence is kept that way, while the rest is merged
    /// with the data.
    HeadingAtx,
    /// Resolve heading (setext).
    ///
    /// Heading (setext) is parsed as an underline that is preceded by content,
    /// both will form the whole construct.
    HeadingSetext,
    /// Resolve list item.
    ///
    /// List items are parsed on their own.
    /// They are wrapped into ordered or unordered lists based on whether items
    /// with the same marker occur next to each other.
    ListItem,
    /// Resolve content.
    ///
    /// Content is parsed as single lines, as what remains if other flow
    /// constructs don’t match.
    /// But, when they occur next to each other, they need to be merged.
    Content,
    /// Resolve data.
    ///
    /// Data is parsed as many small bits, due to many punctuation characters
    /// potentially starting something in particularly text content.
    /// It helps performance to merge them together if those markers did not
    /// match anything and hence they occur next to each other.
    Data,
    /// Resolve whitespace in `string`.
    String,
    /// Resolve whitespace in `text`.
    Text,
}
/// Call the resolver that corresponds to `name` on the tokenizer.
pub fn call(tokenizer: &mut Tokenizer, name: Name) -> Result<Option<Subresult>, message::Message> {
    match name {
        Name::Label => Ok(construct::label_end::resolve(tokenizer)),
        Name::Attention => Ok(construct::attention::resolve(tokenizer)),
        Name::GfmTable => Ok(construct::gfm_table::resolve(tokenizer)),
        Name::HeadingAtx => Ok(construct::heading_atx::resolve(tokenizer)),
        Name::HeadingSetext => Ok(construct::heading_setext::resolve(tokenizer)),
        Name::ListItem => Ok(construct::list_item::resolve(tokenizer)),
        // Content resolving is itself fallible; pass its result through as-is.
        Name::Content => construct::content::resolve(tokenizer),
        Name::Data => Ok(construct::partial_data::resolve(tokenizer)),
        Name::String => Ok(construct::string::resolve(tokenizer)),
        Name::Text => Ok(construct::text::resolve(tokenizer)),
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/state.rs | Rust | //! States of the state machine.
use crate::construct;
use crate::message;
use crate::tokenizer::Tokenizer;
/// Result of a state.
///
/// Tells the tokenizer what to do after running a state function.
#[derive(Clone, Debug, PartialEq)]
pub enum State {
    /// Syntax error.
    ///
    /// Only used by MDX.
    Error(message::Message),
    /// Move to [`Name`][] next.
    Next(Name),
    /// Retry in [`Name`][].
    Retry(Name),
    /// The state is successful.
    Ok,
    /// The state is not successful.
    Nok,
}
impl State {
/// Turn a final state into a result.
///
/// This doesn’t work on future states ([`State::Next`], [`State::Retry`]),
/// or on an attempt ([`State::Nok`]).
///
/// But it turns the final result into an error if crashed.
pub fn to_result(&self) -> Result<(), message::Message> {
match self {
State::Nok | State::Next(_) | State::Retry(_) => {
unreachable!("cannot turn intermediate state into result")
}
State::Ok => Ok(()),
State::Error(x) => Err(x.clone()),
}
}
}
/// Names of states to move to.
///
/// Variants are grouped by the construct (or partial) they belong to; each
/// maps to one state function via `call` below.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[allow(clippy::enum_variant_names)]
pub enum Name {
    // Attention (emphasis, strong).
    AttentionStart,
    AttentionInside,
    // Autolinks.
    AutolinkStart,
    AutolinkOpen,
    AutolinkSchemeOrEmailAtext,
    AutolinkSchemeInsideOrEmailAtext,
    AutolinkUrlInside,
    AutolinkEmailAtSignOrDot,
    AutolinkEmailAtext,
    AutolinkEmailValue,
    AutolinkEmailLabel,
    // Blank lines.
    BlankLineStart,
    BlankLineAfter,
    // Block quotes.
    BlockQuoteStart,
    BlockQuoteContStart,
    BlockQuoteContBefore,
    BlockQuoteContAfter,
    // Byte order mark (partial).
    BomStart,
    BomInside,
    // Character escapes.
    CharacterEscapeStart,
    CharacterEscapeInside,
    // Character references.
    CharacterReferenceStart,
    CharacterReferenceOpen,
    CharacterReferenceNumeric,
    CharacterReferenceValue,
    // Indented code.
    CodeIndentedStart,
    CodeIndentedAtBreak,
    CodeIndentedAfter,
    CodeIndentedFurtherStart,
    CodeIndentedInside,
    CodeIndentedFurtherBegin,
    CodeIndentedFurtherAfter,
    // Content.
    ContentChunkStart,
    ContentChunkInside,
    ContentDefinitionBefore,
    ContentDefinitionAfter,
    // Data (partial).
    DataStart,
    DataInside,
    DataAtBreak,
    // Definitions.
    DefinitionStart,
    DefinitionBefore,
    DefinitionLabelAfter,
    DefinitionLabelNok,
    DefinitionMarkerAfter,
    DefinitionDestinationBefore,
    DefinitionDestinationAfter,
    DefinitionDestinationMissing,
    DefinitionTitleBefore,
    DefinitionAfter,
    DefinitionAfterWhitespace,
    DefinitionTitleBeforeMarker,
    DefinitionTitleAfter,
    DefinitionTitleAfterOptionalWhitespace,
    // Destination (partial).
    DestinationStart,
    DestinationEnclosedBefore,
    DestinationEnclosed,
    DestinationEnclosedEscape,
    DestinationRaw,
    DestinationRawEscape,
    // Document (containers).
    DocumentStart,
    DocumentBeforeFrontmatter,
    DocumentContainerExistingBefore,
    DocumentContainerExistingAfter,
    DocumentContainerNewBefore,
    DocumentContainerNewBeforeNotBlockQuote,
    DocumentContainerNewBeforeNotList,
    DocumentContainerNewBeforeNotGfmFootnoteDefinition,
    DocumentContainerNewAfter,
    DocumentContainersAfter,
    DocumentFlowInside,
    DocumentFlowEnd,
    // Flow.
    FlowStart,
    FlowBeforeGfmTable,
    FlowBeforeCodeIndented,
    FlowBeforeRaw,
    FlowBeforeHtml,
    FlowBeforeMdxExpression,
    FlowBeforeMdxJsx,
    FlowBeforeHeadingAtx,
    FlowBeforeHeadingSetext,
    FlowBeforeThematicBreak,
    FlowAfter,
    FlowBlankLineBefore,
    FlowBlankLineAfter,
    FlowBeforeContent,
    // Frontmatter.
    FrontmatterStart,
    FrontmatterOpenSequence,
    FrontmatterOpenAfter,
    FrontmatterAfter,
    FrontmatterContentStart,
    FrontmatterContentInside,
    FrontmatterContentEnd,
    FrontmatterCloseStart,
    FrontmatterCloseSequence,
    FrontmatterCloseAfter,
    // GFM: autolink literals.
    GfmAutolinkLiteralProtocolStart,
    GfmAutolinkLiteralProtocolAfter,
    GfmAutolinkLiteralProtocolPrefixInside,
    GfmAutolinkLiteralProtocolSlashesInside,
    GfmAutolinkLiteralWwwStart,
    GfmAutolinkLiteralWwwAfter,
    GfmAutolinkLiteralWwwPrefixInside,
    GfmAutolinkLiteralWwwPrefixAfter,
    GfmAutolinkLiteralDomainInside,
    GfmAutolinkLiteralDomainAtPunctuation,
    GfmAutolinkLiteralDomainAfter,
    GfmAutolinkLiteralPathInside,
    GfmAutolinkLiteralPathAtPunctuation,
    GfmAutolinkLiteralPathAfter,
    GfmAutolinkLiteralTrail,
    GfmAutolinkLiteralTrailCharRefInside,
    GfmAutolinkLiteralTrailCharRefStart,
    GfmAutolinkLiteralTrailBracketAfter,
    // GFM: footnote definitions.
    GfmFootnoteDefinitionStart,
    GfmFootnoteDefinitionLabelBefore,
    GfmFootnoteDefinitionLabelAtMarker,
    GfmFootnoteDefinitionLabelInside,
    GfmFootnoteDefinitionLabelEscape,
    GfmFootnoteDefinitionLabelAfter,
    GfmFootnoteDefinitionWhitespaceAfter,
    GfmFootnoteDefinitionContStart,
    GfmFootnoteDefinitionContBlank,
    GfmFootnoteDefinitionContFilled,
    // GFM: footnote label starts.
    GfmLabelStartFootnoteStart,
    GfmLabelStartFootnoteOpen,
    // GFM: task list item checks.
    GfmTaskListItemCheckStart,
    GfmTaskListItemCheckInside,
    GfmTaskListItemCheckClose,
    GfmTaskListItemCheckAfter,
    GfmTaskListItemCheckAfterSpaceOrTab,
    // GFM: tables.
    GfmTableStart,
    GfmTableHeadRowBefore,
    GfmTableHeadRowStart,
    GfmTableHeadRowBreak,
    GfmTableHeadRowData,
    GfmTableHeadRowEscape,
    GfmTableHeadDelimiterStart,
    GfmTableHeadDelimiterBefore,
    GfmTableHeadDelimiterCellBefore,
    GfmTableHeadDelimiterValueBefore,
    GfmTableHeadDelimiterLeftAlignmentAfter,
    GfmTableHeadDelimiterFiller,
    GfmTableHeadDelimiterRightAlignmentAfter,
    GfmTableHeadDelimiterCellAfter,
    GfmTableHeadDelimiterNok,
    GfmTableBodyRowStart,
    GfmTableBodyRowBreak,
    GfmTableBodyRowData,
    GfmTableBodyRowEscape,
    // Hard break (escape).
    HardBreakEscapeStart,
    HardBreakEscapeAfter,
    // Headings (atx).
    HeadingAtxStart,
    HeadingAtxBefore,
    HeadingAtxSequenceOpen,
    HeadingAtxAtBreak,
    HeadingAtxSequenceFurther,
    HeadingAtxData,
    // Headings (setext).
    HeadingSetextStart,
    HeadingSetextBefore,
    HeadingSetextInside,
    HeadingSetextAfter,
    // HTML (flow).
    HtmlFlowStart,
    HtmlFlowBefore,
    HtmlFlowOpen,
    HtmlFlowDeclarationOpen,
    HtmlFlowCommentOpenInside,
    HtmlFlowCdataOpenInside,
    HtmlFlowTagCloseStart,
    HtmlFlowTagName,
    HtmlFlowBasicSelfClosing,
    HtmlFlowCompleteClosingTagAfter,
    HtmlFlowCompleteEnd,
    HtmlFlowCompleteAttributeNameBefore,
    HtmlFlowCompleteAttributeName,
    HtmlFlowCompleteAttributeNameAfter,
    HtmlFlowCompleteAttributeValueBefore,
    HtmlFlowCompleteAttributeValueQuoted,
    HtmlFlowCompleteAttributeValueQuotedAfter,
    HtmlFlowCompleteAttributeValueUnquoted,
    HtmlFlowCompleteAfter,
    HtmlFlowBlankLineBefore,
    HtmlFlowContinuation,
    HtmlFlowContinuationDeclarationInside,
    HtmlFlowContinuationAfter,
    HtmlFlowContinuationStart,
    HtmlFlowContinuationBefore,
    HtmlFlowContinuationCommentInside,
    HtmlFlowContinuationRawTagOpen,
    HtmlFlowContinuationRawEndTag,
    HtmlFlowContinuationClose,
    HtmlFlowContinuationCdataInside,
    HtmlFlowContinuationStartNonLazy,
    // HTML (text).
    HtmlTextStart,
    HtmlTextOpen,
    HtmlTextDeclarationOpen,
    HtmlTextTagCloseStart,
    HtmlTextTagClose,
    HtmlTextTagCloseBetween,
    HtmlTextTagOpen,
    HtmlTextTagOpenBetween,
    HtmlTextTagOpenAttributeName,
    HtmlTextTagOpenAttributeNameAfter,
    HtmlTextTagOpenAttributeValueBefore,
    HtmlTextTagOpenAttributeValueQuoted,
    HtmlTextTagOpenAttributeValueQuotedAfter,
    HtmlTextTagOpenAttributeValueUnquoted,
    HtmlTextCdata,
    HtmlTextCdataOpenInside,
    HtmlTextCdataClose,
    HtmlTextCdataEnd,
    HtmlTextCommentOpenInside,
    HtmlTextComment,
    HtmlTextCommentClose,
    HtmlTextCommentEnd,
    HtmlTextDeclaration,
    HtmlTextEnd,
    HtmlTextInstruction,
    HtmlTextInstructionClose,
    HtmlTextLineEndingBefore,
    HtmlTextLineEndingAfter,
    HtmlTextLineEndingAfterPrefix,
    // Label (partial).
    LabelStart,
    LabelAtBreak,
    LabelEolAfter,
    LabelEscape,
    LabelInside,
    LabelNok,
    // Label ends.
    LabelEndStart,
    LabelEndAfter,
    LabelEndResourceStart,
    LabelEndResourceBefore,
    LabelEndResourceOpen,
    LabelEndResourceDestinationAfter,
    LabelEndResourceDestinationMissing,
    LabelEndResourceBetween,
    LabelEndResourceTitleAfter,
    LabelEndResourceEnd,
    LabelEndOk,
    LabelEndNok,
    LabelEndReferenceFull,
    LabelEndReferenceFullAfter,
    LabelEndReferenceFullMissing,
    LabelEndReferenceNotFull,
    LabelEndReferenceCollapsed,
    LabelEndReferenceCollapsedOpen,
    // Label starts (image).
    LabelStartImageStart,
    LabelStartImageOpen,
    LabelStartImageAfter,
    // Label starts (link).
    LabelStartLinkStart,
    // List items.
    ListItemStart,
    ListItemBefore,
    ListItemBeforeOrdered,
    ListItemBeforeUnordered,
    ListItemValue,
    ListItemMarker,
    ListItemMarkerAfter,
    ListItemAfter,
    ListItemMarkerAfterFilled,
    ListItemWhitespace,
    ListItemPrefixOther,
    ListItemWhitespaceAfter,
    ListItemContStart,
    ListItemContBlank,
    ListItemContFilled,
    // MDX: ESM.
    MdxEsmStart,
    MdxEsmWord,
    MdxEsmInside,
    MdxEsmLineStart,
    MdxEsmBlankLineBefore,
    MdxEsmContinuationStart,
    MdxEsmAtEnd,
    // MDX: expressions.
    MdxExpressionTextStart,
    MdxExpressionTextAfter,
    MdxExpressionFlowStart,
    MdxExpressionFlowBefore,
    MdxExpressionFlowAfter,
    MdxExpressionFlowEnd,
    MdxExpressionStart,
    MdxExpressionBefore,
    MdxExpressionPrefix,
    MdxExpressionInside,
    MdxExpressionEolAfter,
    // MDX: JSX.
    MdxJsxFlowStart,
    MdxJsxFlowBefore,
    MdxJsxFlowAfter,
    MdxJsxFlowEnd,
    MdxJsxFlowNok,
    MdxJsxTextStart,
    MdxJsxTextAfter,
    MdxJsxTextNok,
    MdxJsxEsWhitespaceStart,
    MdxJsxEsWhitespaceInside,
    MdxJsxEsWhitespaceEolAfter,
    MdxJsxStart,
    MdxJsxStartAfter,
    MdxJsxNameBefore,
    MdxJsxClosingTagNameBefore,
    MdxJsxTagEnd,
    MdxJsxPrimaryName,
    MdxJsxPrimaryNameAfter,
    MdxJsxMemberNameBefore,
    MdxJsxMemberName,
    MdxJsxMemberNameAfter,
    MdxJsxLocalNameBefore,
    MdxJsxLocalName,
    MdxJsxLocalNameAfter,
    MdxJsxAttributeBefore,
    MdxJsxSelfClosing,
    MdxJsxAttributeExpressionAfter,
    MdxJsxAttributePrimaryName,
    MdxJsxAttributePrimaryNameAfter,
    MdxJsxAttributeLocalNameBefore,
    MdxJsxAttributeLocalName,
    MdxJsxAttributeLocalNameAfter,
    MdxJsxAttributeValueBefore,
    MdxJsxAttributeValueQuotedStart,
    MdxJsxAttributeValueQuoted,
    MdxJsxAttributeValueExpressionAfter,
    // Non-lazy continuation (partial).
    NonLazyContinuationStart,
    NonLazyContinuationAfter,
    // Paragraphs.
    ParagraphStart,
    ParagraphLineStart,
    ParagraphInside,
    // Raw (flow): code (fenced), math (flow).
    RawFlowStart,
    RawFlowBeforeSequenceOpen,
    RawFlowSequenceOpen,
    RawFlowInfoBefore,
    RawFlowInfo,
    RawFlowMetaBefore,
    RawFlowMeta,
    RawFlowAtNonLazyBreak,
    RawFlowCloseStart,
    RawFlowBeforeSequenceClose,
    RawFlowSequenceClose,
    RawFlowAfterSequenceClose,
    RawFlowContentBefore,
    RawFlowContentStart,
    RawFlowBeforeContentChunk,
    RawFlowContentChunk,
    RawFlowAfter,
    // Raw (text): code (text), math (text).
    RawTextStart,
    RawTextSequenceOpen,
    RawTextBetween,
    RawTextData,
    RawTextSequenceClose,
    // Space or tab (partial).
    SpaceOrTabStart,
    SpaceOrTabInside,
    SpaceOrTabAfter,
    SpaceOrTabEolStart,
    SpaceOrTabEolAfterFirst,
    SpaceOrTabEolAfterEol,
    SpaceOrTabEolAtEol,
    SpaceOrTabEolAfterMore,
    // String content type.
    StringStart,
    StringBefore,
    StringBeforeData,
    // Text content type.
    TextStart,
    TextBefore,
    TextBeforeHtml,
    TextBeforeMdxJsx,
    TextBeforeHardBreakEscape,
    TextBeforeLabelStartLink,
    TextBeforeData,
    // Thematic breaks.
    ThematicBreakStart,
    ThematicBreakBefore,
    ThematicBreakSequence,
    ThematicBreakAtBreak,
    // Title (partial).
    TitleStart,
    TitleBegin,
    TitleAfterEol,
    TitleAtBreak,
    TitleEscape,
    TitleInside,
    TitleNok,
}
#[allow(clippy::too_many_lines)]
/// Call the corresponding state for a state name.
///
/// Maps every [`Name`] variant to the state function of its construct and
/// invokes it with `tokenizer`, returning the [`State`] that function yields.
/// The match is exhaustive, so adding a `Name` variant without a handler here
/// is a compile error.
pub fn call(tokenizer: &mut Tokenizer, name: Name) -> State {
    // Resolve the state function first; every arm is a plain
    // `fn(&mut Tokenizer) -> State`, so the single call site is at the bottom.
    let func = match name {
        Name::AttentionStart => construct::attention::start,
        Name::AttentionInside => construct::attention::inside,
        Name::AutolinkStart => construct::autolink::start,
        Name::AutolinkOpen => construct::autolink::open,
        Name::AutolinkSchemeOrEmailAtext => construct::autolink::scheme_or_email_atext,
        Name::AutolinkSchemeInsideOrEmailAtext => construct::autolink::scheme_inside_or_email_atext,
        Name::AutolinkUrlInside => construct::autolink::url_inside,
        Name::AutolinkEmailAtSignOrDot => construct::autolink::email_at_sign_or_dot,
        Name::AutolinkEmailAtext => construct::autolink::email_atext,
        Name::AutolinkEmailValue => construct::autolink::email_value,
        Name::AutolinkEmailLabel => construct::autolink::email_label,
        Name::BlankLineStart => construct::blank_line::start,
        Name::BlankLineAfter => construct::blank_line::after,
        Name::BlockQuoteStart => construct::block_quote::start,
        Name::BlockQuoteContStart => construct::block_quote::cont_start,
        Name::BlockQuoteContBefore => construct::block_quote::cont_before,
        Name::BlockQuoteContAfter => construct::block_quote::cont_after,
        Name::BomStart => construct::partial_bom::start,
        Name::BomInside => construct::partial_bom::inside,
        Name::CharacterEscapeStart => construct::character_escape::start,
        Name::CharacterEscapeInside => construct::character_escape::inside,
        Name::CharacterReferenceStart => construct::character_reference::start,
        Name::CharacterReferenceOpen => construct::character_reference::open,
        Name::CharacterReferenceNumeric => construct::character_reference::numeric,
        Name::CharacterReferenceValue => construct::character_reference::value,
        Name::CodeIndentedStart => construct::code_indented::start,
        Name::CodeIndentedAtBreak => construct::code_indented::at_break,
        Name::CodeIndentedAfter => construct::code_indented::after,
        Name::CodeIndentedFurtherStart => construct::code_indented::further_start,
        Name::CodeIndentedInside => construct::code_indented::inside,
        Name::CodeIndentedFurtherBegin => construct::code_indented::further_begin,
        Name::CodeIndentedFurtherAfter => construct::code_indented::further_after,
        Name::ContentChunkStart => construct::content::chunk_start,
        Name::ContentChunkInside => construct::content::chunk_inside,
        Name::ContentDefinitionBefore => construct::content::definition_before,
        Name::ContentDefinitionAfter => construct::content::definition_after,
        Name::DataStart => construct::partial_data::start,
        Name::DataInside => construct::partial_data::inside,
        Name::DataAtBreak => construct::partial_data::at_break,
        Name::DefinitionStart => construct::definition::start,
        Name::DefinitionBefore => construct::definition::before,
        Name::DefinitionLabelAfter => construct::definition::label_after,
        Name::DefinitionLabelNok => construct::definition::label_nok,
        Name::DefinitionMarkerAfter => construct::definition::marker_after,
        Name::DefinitionDestinationBefore => construct::definition::destination_before,
        Name::DefinitionDestinationAfter => construct::definition::destination_after,
        Name::DefinitionDestinationMissing => construct::definition::destination_missing,
        Name::DefinitionTitleBefore => construct::definition::title_before,
        Name::DefinitionAfter => construct::definition::after,
        Name::DefinitionAfterWhitespace => construct::definition::after_whitespace,
        Name::DefinitionTitleBeforeMarker => construct::definition::title_before_marker,
        Name::DefinitionTitleAfter => construct::definition::title_after,
        Name::DefinitionTitleAfterOptionalWhitespace => {
            construct::definition::title_after_optional_whitespace
        }
        Name::DestinationStart => construct::partial_destination::start,
        Name::DestinationEnclosedBefore => construct::partial_destination::enclosed_before,
        Name::DestinationEnclosed => construct::partial_destination::enclosed,
        Name::DestinationEnclosedEscape => construct::partial_destination::enclosed_escape,
        Name::DestinationRaw => construct::partial_destination::raw,
        Name::DestinationRawEscape => construct::partial_destination::raw_escape,
        Name::DocumentStart => construct::document::start,
        Name::DocumentBeforeFrontmatter => construct::document::before_frontmatter,
        Name::DocumentContainerExistingBefore => construct::document::container_existing_before,
        Name::DocumentContainerExistingAfter => construct::document::container_existing_after,
        Name::DocumentContainerNewBefore => construct::document::container_new_before,
        Name::DocumentContainerNewBeforeNotBlockQuote => {
            construct::document::container_new_before_not_block_quote
        }
        Name::DocumentContainerNewBeforeNotList => {
            construct::document::container_new_before_not_list
        }
        Name::DocumentContainerNewBeforeNotGfmFootnoteDefinition => {
            construct::document::container_new_before_not_footnote_definition
        }
        Name::DocumentContainerNewAfter => construct::document::container_new_after,
        Name::DocumentContainersAfter => construct::document::containers_after,
        Name::DocumentFlowEnd => construct::document::flow_end,
        Name::DocumentFlowInside => construct::document::flow_inside,
        Name::FlowStart => construct::flow::start,
        Name::FlowBeforeGfmTable => construct::flow::before_gfm_table,
        Name::FlowBeforeCodeIndented => construct::flow::before_code_indented,
        Name::FlowBeforeRaw => construct::flow::before_raw,
        Name::FlowBeforeHtml => construct::flow::before_html,
        Name::FlowBeforeMdxExpression => construct::flow::before_mdx_expression,
        Name::FlowBeforeMdxJsx => construct::flow::before_mdx_jsx,
        Name::FlowBeforeHeadingAtx => construct::flow::before_heading_atx,
        Name::FlowBeforeHeadingSetext => construct::flow::before_heading_setext,
        Name::FlowBeforeThematicBreak => construct::flow::before_thematic_break,
        Name::FlowAfter => construct::flow::after,
        Name::FlowBlankLineBefore => construct::flow::blank_line_before,
        Name::FlowBlankLineAfter => construct::flow::blank_line_after,
        Name::FlowBeforeContent => construct::flow::before_content,
        Name::FrontmatterStart => construct::frontmatter::start,
        Name::FrontmatterOpenSequence => construct::frontmatter::open_sequence,
        Name::FrontmatterOpenAfter => construct::frontmatter::open_after,
        Name::FrontmatterAfter => construct::frontmatter::after,
        Name::FrontmatterContentStart => construct::frontmatter::content_start,
        Name::FrontmatterContentInside => construct::frontmatter::content_inside,
        Name::FrontmatterContentEnd => construct::frontmatter::content_end,
        Name::FrontmatterCloseStart => construct::frontmatter::close_start,
        Name::FrontmatterCloseSequence => construct::frontmatter::close_sequence,
        Name::FrontmatterCloseAfter => construct::frontmatter::close_after,
        Name::GfmAutolinkLiteralProtocolStart => construct::gfm_autolink_literal::protocol_start,
        Name::GfmAutolinkLiteralProtocolAfter => construct::gfm_autolink_literal::protocol_after,
        Name::GfmAutolinkLiteralProtocolPrefixInside => {
            construct::gfm_autolink_literal::protocol_prefix_inside
        }
        Name::GfmAutolinkLiteralProtocolSlashesInside => {
            construct::gfm_autolink_literal::protocol_slashes_inside
        }
        Name::GfmAutolinkLiteralWwwAfter => construct::gfm_autolink_literal::www_after,
        Name::GfmAutolinkLiteralWwwStart => construct::gfm_autolink_literal::www_start,
        Name::GfmAutolinkLiteralWwwPrefixInside => {
            construct::gfm_autolink_literal::www_prefix_inside
        }
        Name::GfmAutolinkLiteralWwwPrefixAfter => construct::gfm_autolink_literal::www_prefix_after,
        Name::GfmAutolinkLiteralDomainInside => construct::gfm_autolink_literal::domain_inside,
        Name::GfmAutolinkLiteralDomainAtPunctuation => {
            construct::gfm_autolink_literal::domain_at_punctuation
        }
        Name::GfmAutolinkLiteralDomainAfter => construct::gfm_autolink_literal::domain_after,
        Name::GfmAutolinkLiteralPathInside => construct::gfm_autolink_literal::path_inside,
        Name::GfmAutolinkLiteralPathAtPunctuation => {
            construct::gfm_autolink_literal::path_at_punctuation
        }
        Name::GfmAutolinkLiteralPathAfter => construct::gfm_autolink_literal::path_after,
        Name::GfmAutolinkLiteralTrail => construct::gfm_autolink_literal::trail,
        Name::GfmAutolinkLiteralTrailCharRefStart => {
            construct::gfm_autolink_literal::trail_char_ref_start
        }
        Name::GfmAutolinkLiteralTrailCharRefInside => {
            construct::gfm_autolink_literal::trail_char_ref_inside
        }
        Name::GfmAutolinkLiteralTrailBracketAfter => {
            construct::gfm_autolink_literal::trail_bracket_after
        }
        Name::GfmFootnoteDefinitionStart => construct::gfm_footnote_definition::start,
        Name::GfmFootnoteDefinitionLabelBefore => construct::gfm_footnote_definition::label_before,
        Name::GfmFootnoteDefinitionLabelAtMarker => {
            construct::gfm_footnote_definition::label_at_marker
        }
        Name::GfmFootnoteDefinitionLabelInside => construct::gfm_footnote_definition::label_inside,
        Name::GfmFootnoteDefinitionLabelEscape => construct::gfm_footnote_definition::label_escape,
        Name::GfmFootnoteDefinitionLabelAfter => construct::gfm_footnote_definition::label_after,
        Name::GfmFootnoteDefinitionWhitespaceAfter => {
            construct::gfm_footnote_definition::whitespace_after
        }
        Name::GfmFootnoteDefinitionContStart => construct::gfm_footnote_definition::cont_start,
        Name::GfmFootnoteDefinitionContBlank => construct::gfm_footnote_definition::cont_blank,
        Name::GfmFootnoteDefinitionContFilled => construct::gfm_footnote_definition::cont_filled,
        Name::GfmLabelStartFootnoteStart => construct::gfm_label_start_footnote::start,
        Name::GfmLabelStartFootnoteOpen => construct::gfm_label_start_footnote::open,
        Name::GfmTableStart => construct::gfm_table::start,
        Name::GfmTableHeadRowBefore => construct::gfm_table::head_row_before,
        Name::GfmTableHeadRowStart => construct::gfm_table::head_row_start,
        Name::GfmTableHeadRowBreak => construct::gfm_table::head_row_break,
        Name::GfmTableHeadRowData => construct::gfm_table::head_row_data,
        Name::GfmTableHeadRowEscape => construct::gfm_table::head_row_escape,
        Name::GfmTableHeadDelimiterStart => construct::gfm_table::head_delimiter_start,
        Name::GfmTableHeadDelimiterBefore => construct::gfm_table::head_delimiter_before,
        Name::GfmTableHeadDelimiterCellBefore => construct::gfm_table::head_delimiter_cell_before,
        Name::GfmTableHeadDelimiterValueBefore => construct::gfm_table::head_delimiter_value_before,
        Name::GfmTableHeadDelimiterLeftAlignmentAfter => {
            construct::gfm_table::head_delimiter_left_alignment_after
        }
        Name::GfmTableHeadDelimiterFiller => construct::gfm_table::head_delimiter_filler,
        Name::GfmTableHeadDelimiterRightAlignmentAfter => {
            construct::gfm_table::head_delimiter_right_alignment_after
        }
        Name::GfmTableHeadDelimiterCellAfter => construct::gfm_table::head_delimiter_cell_after,
        Name::GfmTableHeadDelimiterNok => construct::gfm_table::head_delimiter_nok,
        Name::GfmTableBodyRowStart => construct::gfm_table::body_row_start,
        Name::GfmTableBodyRowBreak => construct::gfm_table::body_row_break,
        Name::GfmTableBodyRowData => construct::gfm_table::body_row_data,
        Name::GfmTableBodyRowEscape => construct::gfm_table::body_row_escape,
        Name::GfmTaskListItemCheckStart => construct::gfm_task_list_item_check::start,
        Name::GfmTaskListItemCheckInside => construct::gfm_task_list_item_check::inside,
        Name::GfmTaskListItemCheckClose => construct::gfm_task_list_item_check::close,
        Name::GfmTaskListItemCheckAfter => construct::gfm_task_list_item_check::after,
        Name::GfmTaskListItemCheckAfterSpaceOrTab => {
            construct::gfm_task_list_item_check::after_space_or_tab
        }
        Name::HardBreakEscapeStart => construct::hard_break_escape::start,
        Name::HardBreakEscapeAfter => construct::hard_break_escape::after,
        Name::HeadingAtxStart => construct::heading_atx::start,
        Name::HeadingAtxBefore => construct::heading_atx::before,
        Name::HeadingAtxSequenceOpen => construct::heading_atx::sequence_open,
        Name::HeadingAtxAtBreak => construct::heading_atx::at_break,
        Name::HeadingAtxSequenceFurther => construct::heading_atx::sequence_further,
        Name::HeadingAtxData => construct::heading_atx::data,
        Name::HeadingSetextStart => construct::heading_setext::start,
        Name::HeadingSetextBefore => construct::heading_setext::before,
        Name::HeadingSetextInside => construct::heading_setext::inside,
        Name::HeadingSetextAfter => construct::heading_setext::after,
        Name::HtmlFlowStart => construct::html_flow::start,
        Name::HtmlFlowBefore => construct::html_flow::before,
        Name::HtmlFlowOpen => construct::html_flow::open,
        Name::HtmlFlowDeclarationOpen => construct::html_flow::declaration_open,
        Name::HtmlFlowCommentOpenInside => construct::html_flow::comment_open_inside,
        Name::HtmlFlowCdataOpenInside => construct::html_flow::cdata_open_inside,
        Name::HtmlFlowTagCloseStart => construct::html_flow::tag_close_start,
        Name::HtmlFlowTagName => construct::html_flow::tag_name,
        Name::HtmlFlowBasicSelfClosing => construct::html_flow::basic_self_closing,
        Name::HtmlFlowCompleteClosingTagAfter => construct::html_flow::complete_closing_tag_after,
        Name::HtmlFlowCompleteEnd => construct::html_flow::complete_end,
        Name::HtmlFlowCompleteAttributeNameBefore => {
            construct::html_flow::complete_attribute_name_before
        }
        Name::HtmlFlowCompleteAttributeName => construct::html_flow::complete_attribute_name,
        Name::HtmlFlowCompleteAttributeNameAfter => {
            construct::html_flow::complete_attribute_name_after
        }
        Name::HtmlFlowCompleteAttributeValueBefore => {
            construct::html_flow::complete_attribute_value_before
        }
        Name::HtmlFlowCompleteAttributeValueQuoted => {
            construct::html_flow::complete_attribute_value_quoted
        }
        Name::HtmlFlowCompleteAttributeValueQuotedAfter => {
            construct::html_flow::complete_attribute_value_quoted_after
        }
        Name::HtmlFlowCompleteAttributeValueUnquoted => {
            construct::html_flow::complete_attribute_value_unquoted
        }
        Name::HtmlFlowCompleteAfter => construct::html_flow::complete_after,
        Name::HtmlFlowBlankLineBefore => construct::html_flow::blank_line_before,
        Name::HtmlFlowContinuation => construct::html_flow::continuation,
        Name::HtmlFlowContinuationDeclarationInside => {
            construct::html_flow::continuation_declaration_inside
        }
        Name::HtmlFlowContinuationAfter => construct::html_flow::continuation_after,
        Name::HtmlFlowContinuationStart => construct::html_flow::continuation_start,
        Name::HtmlFlowContinuationBefore => construct::html_flow::continuation_before,
        Name::HtmlFlowContinuationCommentInside => {
            construct::html_flow::continuation_comment_inside
        }
        Name::HtmlFlowContinuationRawTagOpen => construct::html_flow::continuation_raw_tag_open,
        Name::HtmlFlowContinuationRawEndTag => construct::html_flow::continuation_raw_end_tag,
        Name::HtmlFlowContinuationClose => construct::html_flow::continuation_close,
        Name::HtmlFlowContinuationCdataInside => construct::html_flow::continuation_cdata_inside,
        Name::HtmlFlowContinuationStartNonLazy => construct::html_flow::continuation_start_non_lazy,
        Name::HtmlTextStart => construct::html_text::start,
        Name::HtmlTextOpen => construct::html_text::open,
        Name::HtmlTextDeclarationOpen => construct::html_text::declaration_open,
        Name::HtmlTextTagCloseStart => construct::html_text::tag_close_start,
        Name::HtmlTextTagClose => construct::html_text::tag_close,
        Name::HtmlTextTagCloseBetween => construct::html_text::tag_close_between,
        Name::HtmlTextTagOpen => construct::html_text::tag_open,
        Name::HtmlTextTagOpenBetween => construct::html_text::tag_open_between,
        Name::HtmlTextTagOpenAttributeName => construct::html_text::tag_open_attribute_name,
        Name::HtmlTextTagOpenAttributeNameAfter => {
            construct::html_text::tag_open_attribute_name_after
        }
        Name::HtmlTextTagOpenAttributeValueBefore => {
            construct::html_text::tag_open_attribute_value_before
        }
        Name::HtmlTextTagOpenAttributeValueQuoted => {
            construct::html_text::tag_open_attribute_value_quoted
        }
        Name::HtmlTextTagOpenAttributeValueQuotedAfter => {
            construct::html_text::tag_open_attribute_value_quoted_after
        }
        Name::HtmlTextTagOpenAttributeValueUnquoted => {
            construct::html_text::tag_open_attribute_value_unquoted
        }
        Name::HtmlTextCdata => construct::html_text::cdata,
        Name::HtmlTextCdataOpenInside => construct::html_text::cdata_open_inside,
        Name::HtmlTextCdataClose => construct::html_text::cdata_close,
        Name::HtmlTextCdataEnd => construct::html_text::cdata_end,
        Name::HtmlTextCommentOpenInside => construct::html_text::comment_open_inside,
        Name::HtmlTextComment => construct::html_text::comment,
        Name::HtmlTextCommentClose => construct::html_text::comment_close,
        Name::HtmlTextCommentEnd => construct::html_text::comment_end,
        Name::HtmlTextDeclaration => construct::html_text::declaration,
        Name::HtmlTextEnd => construct::html_text::end,
        Name::HtmlTextInstruction => construct::html_text::instruction,
        Name::HtmlTextInstructionClose => construct::html_text::instruction_close,
        Name::HtmlTextLineEndingBefore => construct::html_text::line_ending_before,
        Name::HtmlTextLineEndingAfter => construct::html_text::line_ending_after,
        Name::HtmlTextLineEndingAfterPrefix => construct::html_text::line_ending_after_prefix,
        Name::LabelStart => construct::partial_label::start,
        Name::LabelAtBreak => construct::partial_label::at_break,
        Name::LabelEolAfter => construct::partial_label::eol_after,
        Name::LabelEscape => construct::partial_label::escape,
        Name::LabelInside => construct::partial_label::inside,
        Name::LabelNok => construct::partial_label::nok,
        Name::LabelEndStart => construct::label_end::start,
        Name::LabelEndAfter => construct::label_end::after,
        Name::LabelEndResourceStart => construct::label_end::resource_start,
        Name::LabelEndResourceBefore => construct::label_end::resource_before,
        Name::LabelEndResourceOpen => construct::label_end::resource_open,
        Name::LabelEndResourceDestinationAfter => construct::label_end::resource_destination_after,
        Name::LabelEndResourceDestinationMissing => {
            construct::label_end::resource_destination_missing
        }
        Name::LabelEndResourceBetween => construct::label_end::resource_between,
        Name::LabelEndResourceTitleAfter => construct::label_end::resource_title_after,
        Name::LabelEndResourceEnd => construct::label_end::resource_end,
        Name::LabelEndOk => construct::label_end::ok,
        Name::LabelEndNok => construct::label_end::nok,
        Name::LabelEndReferenceFull => construct::label_end::reference_full,
        Name::LabelEndReferenceFullAfter => construct::label_end::reference_full_after,
        Name::LabelEndReferenceFullMissing => construct::label_end::reference_full_missing,
        Name::LabelEndReferenceNotFull => construct::label_end::reference_not_full,
        Name::LabelEndReferenceCollapsed => construct::label_end::reference_collapsed,
        Name::LabelEndReferenceCollapsedOpen => construct::label_end::reference_collapsed_open,
        Name::LabelStartImageStart => construct::label_start_image::start,
        Name::LabelStartImageOpen => construct::label_start_image::open,
        Name::LabelStartImageAfter => construct::label_start_image::after,
        Name::LabelStartLinkStart => construct::label_start_link::start,
        Name::ListItemStart => construct::list_item::start,
        Name::ListItemBefore => construct::list_item::before,
        Name::ListItemBeforeOrdered => construct::list_item::before_ordered,
        Name::ListItemBeforeUnordered => construct::list_item::before_unordered,
        Name::ListItemValue => construct::list_item::value,
        Name::ListItemMarker => construct::list_item::marker,
        Name::ListItemMarkerAfter => construct::list_item::marker_after,
        Name::ListItemAfter => construct::list_item::after,
        Name::ListItemMarkerAfterFilled => construct::list_item::marker_after_filled,
        Name::ListItemWhitespace => construct::list_item::whitespace,
        Name::ListItemWhitespaceAfter => construct::list_item::whitespace_after,
        Name::ListItemPrefixOther => construct::list_item::prefix_other,
        Name::ListItemContStart => construct::list_item::cont_start,
        Name::ListItemContBlank => construct::list_item::cont_blank,
        Name::ListItemContFilled => construct::list_item::cont_filled,
        Name::MdxEsmStart => construct::mdx_esm::start,
        Name::MdxEsmWord => construct::mdx_esm::word,
        Name::MdxEsmInside => construct::mdx_esm::inside,
        Name::MdxEsmLineStart => construct::mdx_esm::line_start,
        Name::MdxEsmBlankLineBefore => construct::mdx_esm::blank_line_before,
        Name::MdxEsmContinuationStart => construct::mdx_esm::continuation_start,
        Name::MdxEsmAtEnd => construct::mdx_esm::at_end,
        Name::MdxExpressionStart => construct::partial_mdx_expression::start,
        Name::MdxExpressionPrefix => construct::partial_mdx_expression::prefix,
        Name::MdxExpressionBefore => construct::partial_mdx_expression::before,
        Name::MdxExpressionInside => construct::partial_mdx_expression::inside,
        Name::MdxExpressionEolAfter => construct::partial_mdx_expression::eol_after,
        Name::MdxExpressionFlowStart => construct::mdx_expression_flow::start,
        Name::MdxExpressionFlowBefore => construct::mdx_expression_flow::before,
        Name::MdxExpressionFlowAfter => construct::mdx_expression_flow::after,
        Name::MdxExpressionFlowEnd => construct::mdx_expression_flow::end,
        Name::MdxExpressionTextStart => construct::mdx_expression_text::start,
        Name::MdxExpressionTextAfter => construct::mdx_expression_text::after,
        Name::MdxJsxFlowStart => construct::mdx_jsx_flow::start,
        Name::MdxJsxFlowBefore => construct::mdx_jsx_flow::before,
        Name::MdxJsxFlowAfter => construct::mdx_jsx_flow::after,
        Name::MdxJsxFlowEnd => construct::mdx_jsx_flow::end,
        Name::MdxJsxFlowNok => construct::mdx_jsx_flow::nok,
        Name::MdxJsxTextStart => construct::mdx_jsx_text::start,
        Name::MdxJsxTextAfter => construct::mdx_jsx_text::after,
        Name::MdxJsxTextNok => construct::mdx_jsx_text::nok,
        Name::MdxJsxStart => construct::partial_mdx_jsx::start,
        Name::MdxJsxStartAfter => construct::partial_mdx_jsx::start_after,
        Name::MdxJsxNameBefore => construct::partial_mdx_jsx::name_before,
        Name::MdxJsxClosingTagNameBefore => construct::partial_mdx_jsx::closing_tag_name_before,
        Name::MdxJsxTagEnd => construct::partial_mdx_jsx::tag_end,
        Name::MdxJsxPrimaryName => construct::partial_mdx_jsx::primary_name,
        Name::MdxJsxPrimaryNameAfter => construct::partial_mdx_jsx::primary_name_after,
        Name::MdxJsxMemberNameBefore => construct::partial_mdx_jsx::member_name_before,
        Name::MdxJsxMemberName => construct::partial_mdx_jsx::member_name,
        Name::MdxJsxMemberNameAfter => construct::partial_mdx_jsx::member_name_after,
        Name::MdxJsxLocalNameBefore => construct::partial_mdx_jsx::local_name_before,
        Name::MdxJsxLocalName => construct::partial_mdx_jsx::local_name,
        Name::MdxJsxLocalNameAfter => construct::partial_mdx_jsx::local_name_after,
        Name::MdxJsxAttributeBefore => construct::partial_mdx_jsx::attribute_before,
        Name::MdxJsxSelfClosing => construct::partial_mdx_jsx::self_closing,
        Name::MdxJsxAttributeExpressionAfter => {
            construct::partial_mdx_jsx::attribute_expression_after
        }
        Name::MdxJsxAttributePrimaryName => construct::partial_mdx_jsx::attribute_primary_name,
        Name::MdxJsxAttributePrimaryNameAfter => {
            construct::partial_mdx_jsx::attribute_primary_name_after
        }
        Name::MdxJsxAttributeLocalNameBefore => {
            construct::partial_mdx_jsx::attribute_local_name_before
        }
        Name::MdxJsxAttributeLocalName => construct::partial_mdx_jsx::attribute_local_name,
        Name::MdxJsxAttributeLocalNameAfter => {
            construct::partial_mdx_jsx::attribute_local_name_after
        }
        Name::MdxJsxAttributeValueBefore => construct::partial_mdx_jsx::attribute_value_before,
        Name::MdxJsxAttributeValueQuotedStart => {
            construct::partial_mdx_jsx::attribute_value_quoted_start
        }
        Name::MdxJsxAttributeValueQuoted => construct::partial_mdx_jsx::attribute_value_quoted,
        Name::MdxJsxAttributeValueExpressionAfter => {
            construct::partial_mdx_jsx::attribute_value_expression_after
        }
        Name::MdxJsxEsWhitespaceStart => construct::partial_mdx_jsx::es_whitespace_start,
        Name::MdxJsxEsWhitespaceInside => construct::partial_mdx_jsx::es_whitespace_inside,
        Name::MdxJsxEsWhitespaceEolAfter => construct::partial_mdx_jsx::es_whitespace_eol_after,
        Name::NonLazyContinuationStart => construct::partial_non_lazy_continuation::start,
        Name::NonLazyContinuationAfter => construct::partial_non_lazy_continuation::after,
        Name::ParagraphStart => construct::paragraph::start,
        Name::ParagraphLineStart => construct::paragraph::line_start,
        Name::ParagraphInside => construct::paragraph::inside,
        Name::RawFlowStart => construct::raw_flow::start,
        Name::RawFlowBeforeSequenceOpen => construct::raw_flow::before_sequence_open,
        Name::RawFlowSequenceOpen => construct::raw_flow::sequence_open,
        Name::RawFlowInfoBefore => construct::raw_flow::info_before,
        Name::RawFlowInfo => construct::raw_flow::info,
        Name::RawFlowMetaBefore => construct::raw_flow::meta_before,
        Name::RawFlowMeta => construct::raw_flow::meta,
        Name::RawFlowAtNonLazyBreak => construct::raw_flow::at_non_lazy_break,
        Name::RawFlowCloseStart => construct::raw_flow::close_start,
        Name::RawFlowBeforeSequenceClose => construct::raw_flow::before_sequence_close,
        Name::RawFlowSequenceClose => construct::raw_flow::sequence_close,
        Name::RawFlowAfterSequenceClose => construct::raw_flow::sequence_close_after,
        Name::RawFlowContentBefore => construct::raw_flow::content_before,
        Name::RawFlowContentStart => construct::raw_flow::content_start,
        Name::RawFlowBeforeContentChunk => construct::raw_flow::before_content_chunk,
        Name::RawFlowContentChunk => construct::raw_flow::content_chunk,
        Name::RawFlowAfter => construct::raw_flow::after,
        Name::RawTextStart => construct::raw_text::start,
        Name::RawTextSequenceOpen => construct::raw_text::sequence_open,
        Name::RawTextBetween => construct::raw_text::between,
        Name::RawTextData => construct::raw_text::data,
        Name::RawTextSequenceClose => construct::raw_text::sequence_close,
        Name::SpaceOrTabStart => construct::partial_space_or_tab::start,
        Name::SpaceOrTabInside => construct::partial_space_or_tab::inside,
        Name::SpaceOrTabAfter => construct::partial_space_or_tab::after,
        Name::SpaceOrTabEolStart => construct::partial_space_or_tab_eol::start,
        Name::SpaceOrTabEolAfterFirst => construct::partial_space_or_tab_eol::after_first,
        Name::SpaceOrTabEolAfterEol => construct::partial_space_or_tab_eol::after_eol,
        Name::SpaceOrTabEolAtEol => construct::partial_space_or_tab_eol::at_eol,
        Name::SpaceOrTabEolAfterMore => construct::partial_space_or_tab_eol::after_more,
        Name::StringStart => construct::string::start,
        Name::StringBefore => construct::string::before,
        Name::StringBeforeData => construct::string::before_data,
        Name::TextStart => construct::text::start,
        Name::TextBefore => construct::text::before,
        Name::TextBeforeHtml => construct::text::before_html,
        Name::TextBeforeMdxJsx => construct::text::before_mdx_jsx,
        Name::TextBeforeHardBreakEscape => construct::text::before_hard_break_escape,
        Name::TextBeforeLabelStartLink => construct::text::before_label_start_link,
        Name::TextBeforeData => construct::text::before_data,
        Name::ThematicBreakStart => construct::thematic_break::start,
        Name::ThematicBreakBefore => construct::thematic_break::before,
        Name::ThematicBreakSequence => construct::thematic_break::sequence,
        Name::ThematicBreakAtBreak => construct::thematic_break::at_break,
        Name::TitleStart => construct::partial_title::start,
        Name::TitleBegin => construct::partial_title::begin,
        Name::TitleAfterEol => construct::partial_title::after_eol,
        Name::TitleAtBreak => construct::partial_title::at_break,
        Name::TitleEscape => construct::partial_title::escape,
        Name::TitleInside => construct::partial_title::inside,
        Name::TitleNok => construct::partial_title::nok,
    };
    // Run the selected state function once and hand its result back.
    func(tokenizer)
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/subtokenize.rs | Rust | //! Deal with content in other content.
//!
//! To deal with content in content, *you* (a `markdown-rs` contributor) add
//! info on events.
//! Events are a flat list, but they can be connected to each other with a
//! [`Link`][crate::event::Link].
//! Links must occur on [`Enter`][Kind::Enter] events only, which are void
//! (they are followed by their corresponding [`Exit`][Kind::Exit] event).
//!
//! Links will then be passed through a tokenizer for the corresponding content
//! type by `subtokenize`.
//! The subevents they result in are split up into slots for each linked event
//! and replace those links.
//!
//! Subevents are not immediately subtokenized as markdown prevents us from
//! doing so due to definitions, which can occur after references, and thus the
//! whole document needs to be parsed up to the level of definitions, before
//! any level that can include references can be parsed.
use crate::event::{Content, Event, Kind, Name, VOID_EVENTS};
use crate::message;
use crate::parser::ParseState;
use crate::state::{Name as StateName, State};
use crate::tokenizer::Tokenizer;
use crate::util::{edit_map::EditMap, skip};
use alloc::{string::String, vec, vec::Vec};
/// Outcome of one `subtokenize` pass.
#[derive(Debug)]
pub struct Subresult {
    /// Whether nothing was left to expand (no link chain was parsed this
    /// pass).
    pub done: bool,
    /// GFM footnote definitions collected while parsing.
    pub gfm_footnote_definitions: Vec<String>,
    /// Definitions collected while parsing.
    pub definitions: Vec<String>,
}
/// Connect the event at `index` to the void event just before it.
///
/// Shorthand for [`link_to`] covering the common case where a chunk links
/// back to the enter/exit pair immediately preceding it.
pub fn link(events: &mut [Event], index: usize) {
    // The preceding void event occupies `index - 2` (enter) and `index - 1`
    // (exit); link from its enter to the enter at `index`.
    let previous = index - 2;
    link_to(events, previous, index);
}
/// Link two arbitrary [`Event`][]s together.
///
/// Both `previous` and `next` must point at `Enter` events of void events
/// (an enter immediately followed by its exit) that already carry a `link`
/// of the same content type; all of this is verified with debug assertions.
pub fn link_to(events: &mut [Event], previous: usize, next: usize) {
    debug_assert_eq!(events[previous].kind, Kind::Enter);
    debug_assert!(
        VOID_EVENTS.iter().any(|d| d == &events[previous].name),
        "expected event to be void"
    );
    debug_assert_eq!(events[previous + 1].kind, Kind::Exit);
    debug_assert_eq!(events[previous].name, events[previous + 1].name);
    debug_assert_eq!(events[next].kind, Kind::Enter);
    debug_assert!(
        VOID_EVENTS.iter().any(|d| d == &events[next].name),
        "expected event to be void"
    );
    // Note: the exit of this event may not exist, so don’t check for that.
    // Wire the chain in both directions: previous → next …
    let link_previous = events[previous]
        .link
        .as_mut()
        .expect("expected `link` on previous");
    link_previous.next = Some(next);
    // … and next → previous.
    let link_next = events[next].link.as_mut().expect("expected `link` on next");
    link_next.previous = Some(previous);
    // Chained links must all carry the same content type.
    debug_assert_eq!(
        events[previous].link.as_ref().unwrap().content,
        events[next].link.as_ref().unwrap().content,
        "expected `content` to match"
    );
}
/// Parse linked events.
///
/// Supposed to be called repeatedly: each pass finds every unparsed link
/// chain (optionally restricted to content type `filter`), runs a
/// subtokenizer over the whole chain, and splices the resulting events back
/// over the links.
/// The returned [`Subresult`] has `done == true` when no chain needed
/// expansion, and accumulates the definitions and GFM footnote definitions
/// encountered.
///
/// # Errors
///
/// Forwards any message produced when flushing a subtokenizer.
pub fn subtokenize(
    events: &mut Vec<Event>,
    parse_state: &ParseState,
    filter: Option<&Content>,
) -> Result<Subresult, message::Message> {
    let mut map = EditMap::new();
    let mut index = 0;
    let mut value = Subresult {
        done: true,
        gfm_footnote_definitions: vec![],
        definitions: vec![],
    };
    // Accumulator threaded through `divide_events`; used there to shift
    // sublink indices across successive expansions.
    let mut acc = (0, 0);
    while index < events.len() {
        let event = &events[index];
        // Find each first opening chunk.
        if let Some(ref link) = event.link {
            debug_assert_eq!(event.kind, Kind::Enter);
            // No need to enter linked events again.
            if link.previous.is_none()
                && (filter.is_none() || &link.content == *filter.as_ref().unwrap())
            {
                // Index into `events` pointing to a chunk.
                let mut link_index = Some(index);
                // Subtokenizer.
                let mut tokenizer = Tokenizer::new(event.point.clone(), parse_state);
                debug_assert!(
                    !matches!(link.content, Content::Flow),
                    "cannot use flow as subcontent yet"
                );
                // Substate: pick the entry state for this content type.
                let mut state = State::Next(match link.content {
                    Content::Content => StateName::ContentDefinitionBefore,
                    Content::String => StateName::StringStart,
                    _ => StateName::TextStart,
                });
                // Check if this is the first paragraph, after zero or more
                // definitions (or a blank line), in a list item.
                // Used for GFM task list items.
                if tokenizer.parse_state.options.constructs.gfm_task_list_item
                    && index > 2
                    && events[index - 1].kind == Kind::Enter
                    && events[index - 1].name == Name::Paragraph
                {
                    let before = skip::opt_back(
                        events,
                        index - 2,
                        &[
                            Name::BlankLineEnding,
                            Name::Definition,
                            Name::LineEnding,
                            Name::SpaceOrTab,
                        ],
                    );
                    if events[before].kind == Kind::Exit
                        && events[before].name == Name::ListItemPrefix
                    {
                        tokenizer
                            .tokenize_state
                            .document_at_first_paragraph_of_list_item = true;
                    }
                }
                // Loop through links to pass them in order to the subtokenizer.
                while let Some(index) = link_index {
                    let enter = &events[index];
                    let link_curr = enter.link.as_ref().expect("expected link");
                    debug_assert_eq!(enter.kind, Kind::Enter);
                    // Later chunks in a chain resume at a new point.
                    if link_curr.previous.is_some() {
                        tokenizer.define_skip(enter.point.clone());
                    }
                    let end = &events[index + 1].point;
                    state = tokenizer.push(
                        (enter.point.index, enter.point.vs),
                        (end.index, end.vs),
                        state,
                    );
                    link_index = link_curr.next;
                }
                // Flush the subtokenizer and fold its findings into ours.
                let mut result = tokenizer.flush(state, true)?;
                value
                    .gfm_footnote_definitions
                    .append(&mut result.gfm_footnote_definitions);
                value.definitions.append(&mut result.definitions);
                // Something was expanded, so another pass may be needed.
                value.done = false;
                acc = divide_events(&mut map, events, index, &mut tokenizer.events, acc);
            }
        }
        index += 1;
    }
    map.consume(events);
    Ok(value)
}
/// Divide `child_events` over links in `events`, the first of which is at
/// `link_index`.
///
/// Subtokenization yields one flat list of `child_events`, while the outer
/// `events` reference that content as a chain of linked chunks.  This walks
/// both lists in tandem, cutting `child_events` at each chunk boundary and
/// scheduling (via `map`) each slice to replace the corresponding
/// enter/exit pair in `events`.  Link indices inside `child_events` are
/// rewritten to account for events that shift position.
///
/// `acc_before` is `(events removed, events injected)` accumulated over
/// earlier calls with the same `map` (see the return expression); the
/// updated accumulator is returned so later calls can keep fixing indices.
pub fn divide_events(
    map: &mut EditMap,
    events: &[Event],
    mut link_index: usize,
    child_events: &mut Vec<Event>,
    acc_before: (usize, usize),
) -> (usize, usize) {
    // Loop through `child_events` to figure out which parts belong where and
    // fix deep links.
    let mut child_index = 0;
    // Pairs of `(link_index into events, slice start into child_events)`.
    let mut slices = vec![];
    let mut slice_start = 0;
    // Index (in `child_events`) of the last link whose `next` was taken.
    let mut old_prev: Option<usize> = None;
    let len = child_events.len();
    while child_index < len {
        let current = &child_events[child_index].point;
        let end = &events[link_index + 1].point;
        // Find the first event that starts after the end we’re looking
        // for.
        if current.index > end.index || (current.index == end.index && current.vs > end.vs) {
            slices.push((link_index, slice_start));
            slice_start = child_index;
            link_index = events[link_index].link.as_ref().unwrap().next.unwrap();
        }
        // Fix sublinks.
        if let Some(sublink_curr) = &child_events[child_index].link {
            if sublink_curr.previous.is_some() {
                let old_prev = old_prev.unwrap();
                let prev_event = &mut child_events[old_prev];
                // The `index` in `events` where the current link is,
                // minus one to get the previous link,
                // minus 2 events (the enter and exit) for each removed
                // link.
                let new_link = if slices.is_empty() {
                    old_prev + link_index + 2
                } else {
                    old_prev + link_index - (slices.len() - 1) * 2
                };
                prev_event.link.as_mut().unwrap().next =
                    Some(new_link + acc_before.1 - acc_before.0);
            }
        }
        // If there is a `next` link in the subevents, we have to change
        // its `previous` index to account for the shifted events.
        // If it points to a next event, we also change the next event’s
        // reference back to *this* event.
        if let Some(sublink_curr) = &child_events[child_index].link {
            if let Some(next) = sublink_curr.next {
                let sublink_next = child_events[next].link.as_mut().unwrap();
                old_prev = sublink_next.previous;
                sublink_next.previous = sublink_next
                    .previous
                    // The `index` in `events` where the current link is,
                    // minus 2 events (the enter and exit) for each removed
                    // link.
                    .map(|previous| {
                        previous + link_index - (slices.len() * 2) + acc_before.1 - acc_before.0
                    });
            }
        }
        child_index += 1;
    }
    // Register the final (or only) slice.
    if !child_events.is_empty() {
        slices.push((link_index, slice_start));
    }
    // Finally, inject the subevents.
    // Walk backwards so earlier `split_off` calls don’t invalidate later
    // slice starts.
    let mut index = slices.len();
    while index > 0 {
        index -= 1;
        debug_assert!(
            slices[index].0 < events.len(),
            "expected slice start in bounds"
        );
        map.add(slices[index].0, 2, child_events.split_off(slices[index].1));
    }
    (acc_before.0 + (slices.len() * 2), acc_before.1 + len)
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/to_html.rs | Rust | //! Turn events into a string of HTML.
use crate::event::{Event, Kind, Name};
use crate::mdast::AlignKind;
use crate::util::{
character_reference::decode as decode_character_reference,
constant::{SAFE_PROTOCOL_HREF, SAFE_PROTOCOL_SRC},
encode::encode,
gfm_tagfilter::gfm_tagfilter,
infer::{gfm_table_align, list_loose},
normalize_identifier::normalize_identifier,
sanitize_uri::{sanitize, sanitize_with_protocols},
skip,
slice::{Position, Slice},
};
use crate::{CompileOptions, LineEnding};
use alloc::{
format,
string::{String, ToString},
vec,
vec::Vec,
};
use core::str;
/// Link, image, or footnote call.
/// Resource or reference.
/// Reused for temporary definitions as well, in the first pass.
#[derive(Debug)]
struct Media {
    /// Whether this represents an image (`true`) or a link or definition
    /// (`false`).
    image: bool,
    /// The text between the brackets (`x` in `![x]()` and `[x]()`).
    ///
    /// Not interpreted.
    label_id: Option<(usize, usize)>,
    /// The result of interpreting the text between the brackets
    /// (`x` in `![x]()` and `[x]()`).
    ///
    /// When this is a link, it contains further text content and thus HTML
    /// tags.
    /// Otherwise, when an image, text content is also allowed, but resulting
    /// tags are ignored.
    label: Option<String>,
    /// The string between the explicit brackets of the reference (`y` in
    /// `[x][y]`), as content.
    ///
    /// Not interpreted.
    reference_id: Option<(usize, usize)>,
    /// The destination (url).
    ///
    /// Interpreted string content.
    destination: Option<String>,
    /// The title.
    ///
    /// Interpreted string content.
    title: Option<String>,
}
/// Representation of a definition.
///
/// Collected in a first pass over the events (see `compile`) so that
/// references occurring *before* their definition can still resolve.
#[derive(Debug)]
struct Definition {
    /// Identifier (normalized).
    id: String,
    /// The destination (url).
    ///
    /// Interpreted string content.
    destination: Option<String>,
    /// The title.
    ///
    /// Interpreted string content.
    title: Option<String>,
}
/// Context used to compile markdown.
#[allow(clippy::struct_excessive_bools)]
#[derive(Debug)]
struct CompileContext<'a> {
    // Static info.
    /// List of events.
    events: &'a [Event],
    /// List of bytes.
    bytes: &'a [u8],
    /// Configuration.
    options: &'a CompileOptions,
    // Fields used by handlers to track the things they need to track to
    // compile markdown.
    /// Rank of heading (atx).
    heading_atx_rank: Option<usize>,
    /// Buffer of heading (setext) text.
    heading_setext_buffer: Option<String>,
    /// Whether raw (flow) (code (fenced), math (flow)) or code (indented) contains data.
    raw_flow_seen_data: Option<bool>,
    /// Number of raw (flow) fences.
    raw_flow_fences_count: Option<usize>,
    /// Whether we are in code (text).
    raw_text_inside: bool,
    /// Whether we are in image text.
    image_alt_inside: bool,
    /// Marker of character reference (`&`, `#`, or `x`).
    character_reference_marker: Option<u8>,
    /// Whether we are expecting the first list item marker.
    list_expect_first_marker: Option<bool>,
    /// Stack of media (link, image).
    media_stack: Vec<Media>,
    /// Stack of containers (`true` when tight).
    tight_stack: Vec<bool>,
    /// List of definitions.
    definitions: Vec<Definition>,
    /// List of GFM footnote definitions, as `(identifier, content)` pairs
    /// — NOTE(review): pair semantics inferred from the type; confirm in
    /// `generate_footnote_section`.
    gfm_footnote_definitions: Vec<(String, String)>,
    /// GFM footnote calls, as `(identifier, count)` pairs — NOTE(review):
    /// presumably tracks reuse for backreferences; confirm against usage.
    gfm_footnote_definition_calls: Vec<(String, usize)>,
    /// Stack tracking open GFM footnote definitions (index pairs).
    gfm_footnote_definition_stack: Vec<(usize, usize)>,
    /// Whether we are in a GFM table head.
    gfm_table_in_head: bool,
    /// Current GFM table alignment.
    gfm_table_align: Option<Vec<AlignKind>>,
    /// Current GFM table column.
    gfm_table_column: usize,
    // Fields used to influence the current compilation.
    /// Ignore the next line ending.
    slurp_one_line_ending: bool,
    /// Whether to encode HTML.
    encode_html: bool,
    // Configuration
    /// Line ending to use.
    line_ending_default: LineEnding,
    // Intermediate results.
    /// Stack of buffers.
    buffers: Vec<String>,
    /// Current event index.
    index: usize,
}
impl<'a> CompileContext<'a> {
    /// Create a new compile context.
    ///
    /// Starts with one (root) buffer; handlers may push/pop nested buffers
    /// to capture intermediate output.
    fn new(
        events: &'a [Event],
        bytes: &'a [u8],
        options: &'a CompileOptions,
        line_ending: LineEnding,
    ) -> CompileContext<'a> {
        CompileContext {
            events,
            bytes,
            heading_atx_rank: None,
            heading_setext_buffer: None,
            raw_flow_seen_data: None,
            raw_flow_fences_count: None,
            raw_text_inside: false,
            character_reference_marker: None,
            list_expect_first_marker: None,
            media_stack: vec![],
            definitions: vec![],
            gfm_footnote_definitions: vec![],
            gfm_footnote_definition_calls: vec![],
            gfm_footnote_definition_stack: vec![],
            gfm_table_in_head: false,
            gfm_table_align: None,
            gfm_table_column: 0,
            tight_stack: vec![],
            slurp_one_line_ending: false,
            image_alt_inside: false,
            encode_html: true,
            line_ending_default: line_ending,
            buffers: vec![String::new()],
            index: 0,
            options,
        }
    }
    /// Push a buffer.
    fn buffer(&mut self) {
        self.buffers.push(String::new());
    }
    /// Pop a buffer, returning its value.
    ///
    /// Panics when no buffer was pushed (a bug in the handlers).
    fn resume(&mut self) -> String {
        self.buffers.pop().expect("Cannot resume w/o buffer")
    }
    /// Push a str to the last buffer.
    fn push(&mut self, value: &str) {
        let last_buf_opt = self.buffers.last_mut();
        let last_buf = last_buf_opt.expect("at least one buffer should exist");
        last_buf.push_str(value);
    }
    /// Add a line ending.
    fn line_ending(&mut self) {
        // Borrow `line_ending_default` and `buffers` as disjoint fields:
        // this avoids the per-call `String` allocation that routing through
        // `self.push` (which takes `&mut self`) would force.
        let eol = self.line_ending_default.as_str();
        self.buffers
            .last_mut()
            .expect("at least one buffer should exist")
            .push_str(eol);
    }
    /// Add a line ending if needed (as in, there’s no eol/eof already).
    fn line_ending_if_needed(&mut self) {
        let last_buf_opt = self.buffers.last();
        let last_buf = last_buf_opt.expect("at least one buffer should exist");
        let last_byte = last_buf.as_bytes().last();
        if !matches!(last_byte, None | Some(b'\n' | b'\r')) {
            self.line_ending();
        }
    }
}
/// Turn events and bytes into a string of HTML.
///
/// Runs three sweeps over `events`: infer the line ending style, compile
/// all definitions (so forward references resolve), then compile
/// everything else while jumping over the already-handled definitions.
pub fn compile(events: &[Event], bytes: &[u8], options: &CompileOptions) -> String {
    let mut index = 0;
    let mut line_ending_inferred = None;
    // First, we figure out what the used line ending style is.
    // Stop when we find a line ending.
    while index < events.len() {
        let event = &events[index];
        if event.kind == Kind::Exit
            && (event.name == Name::BlankLineEnding || event.name == Name::LineEnding)
        {
            let slice = Slice::from_position(bytes, &Position::from_exit_event(events, index));
            line_ending_inferred = Some(slice.as_str().parse().unwrap());
            break;
        }
        index += 1;
    }
    // Figure out which line ending style we’ll use.
    // Fall back to the configured default when no eol occurs in the input.
    let line_ending_default =
        line_ending_inferred.unwrap_or_else(|| options.default_line_ending.clone());
    let mut context = CompileContext::new(events, bytes, options, line_ending_default);
    // Pairs of `(enter index, exit index)` of each definition.
    let mut definition_indices = vec![];
    let mut index = 0;
    let mut definition_inside = false;
    // Handle all definitions first.
    // We must do two passes because we need to compile the events in
    // definitions which come after references already.
    //
    // To speed things up, we collect the places we can jump over for the
    // second pass.
    //
    // We don’t need to handle GFM footnote definitions like this, because
    // unlike normal definitions, what they produce is not used in calls.
    // It would also get very complex, because footnote definitions can be
    // nested.
    while index < events.len() {
        let event = &events[index];
        if definition_inside {
            handle(&mut context, index);
        }
        if event.kind == Kind::Enter {
            if event.name == Name::Definition {
                handle(&mut context, index); // Also handle start.
                definition_inside = true;
                definition_indices.push((index, index));
            }
        } else if event.name == Name::Definition {
            definition_inside = false;
            definition_indices.last_mut().unwrap().1 = index;
        }
        index += 1;
    }
    // Second pass: compile everything else, skipping the spans recorded in
    // `definition_indices`.
    let mut index = 0;
    let jump_default = (events.len(), events.len());
    let mut definition_index = 0;
    let mut jump = definition_indices
        .get(definition_index)
        .unwrap_or(&jump_default);
    while index < events.len() {
        if index == jump.0 {
            index = jump.1 + 1;
            definition_index += 1;
            jump = definition_indices
                .get(definition_index)
                .unwrap_or(&jump_default);
        } else {
            handle(&mut context, index);
            index += 1;
        }
    }
    // No section to generate.
    if !context.gfm_footnote_definition_calls.is_empty() {
        generate_footnote_section(&mut context);
    }
    debug_assert_eq!(context.buffers.len(), 1, "expected 1 final buffer");
    context
        .buffers
        .first()
        .expect("expected 1 final buffer")
        .into()
}
/// Handle the event at `index`.
///
/// Stores the index on the context, then dispatches to the enter or exit
/// handler depending on the event kind.
fn handle(context: &mut CompileContext, index: usize) {
    context.index = index;
    match context.events[index].kind {
        Kind::Enter => enter(context),
        _ => exit(context),
    }
}
/// Handle [`Enter`][Kind::Enter].
///
/// Dispatch table on the event name; events without a handler produce no
/// output on enter.
fn enter(context: &mut CompileContext) {
    match context.events[context.index].name {
        // All of these capture their content into a fresh buffer.
        Name::CodeFencedFenceInfo
        | Name::CodeFencedFenceMeta
        | Name::MathFlowFenceMeta
        | Name::DefinitionLabelString
        | Name::DefinitionTitleString
        | Name::GfmFootnoteDefinitionPrefix
        | Name::HeadingAtxText
        | Name::HeadingSetextText
        | Name::Label
        | Name::MdxEsm
        | Name::MdxFlowExpression
        | Name::MdxTextExpression
        | Name::MdxJsxFlowTag
        | Name::MdxJsxTextTag
        | Name::ReferenceString
        | Name::ResourceTitleString => on_enter_buffer(context),
        Name::BlockQuote => on_enter_block_quote(context),
        Name::CodeIndented => on_enter_code_indented(context),
        Name::CodeFenced | Name::MathFlow => on_enter_raw_flow(context),
        Name::CodeText | Name::MathText => on_enter_raw_text(context),
        Name::Definition => on_enter_definition(context),
        Name::DefinitionDestinationString => on_enter_definition_destination_string(context),
        Name::Emphasis => on_enter_emphasis(context),
        Name::Frontmatter => on_enter_frontmatter(context),
        Name::GfmFootnoteDefinition => on_enter_gfm_footnote_definition(context),
        Name::GfmFootnoteCall => on_enter_gfm_footnote_call(context),
        Name::GfmStrikethrough => on_enter_gfm_strikethrough(context),
        Name::GfmTable => on_enter_gfm_table(context),
        Name::GfmTableBody => on_enter_gfm_table_body(context),
        Name::GfmTableCell => on_enter_gfm_table_cell(context),
        Name::GfmTableHead => on_enter_gfm_table_head(context),
        Name::GfmTableRow => on_enter_gfm_table_row(context),
        Name::GfmTaskListItemCheck => on_enter_gfm_task_list_item_check(context),
        Name::HtmlFlow => on_enter_html_flow(context),
        Name::HtmlText => on_enter_html_text(context),
        Name::Image => on_enter_image(context),
        Name::Link => on_enter_link(context),
        Name::ListItemMarker => on_enter_list_item_marker(context),
        Name::ListOrdered | Name::ListUnordered => on_enter_list(context),
        Name::Paragraph => on_enter_paragraph(context),
        Name::Resource => on_enter_resource(context),
        Name::ResourceDestinationString => on_enter_resource_destination_string(context),
        Name::Strong => on_enter_strong(context),
        _ => {}
    }
}
/// Handle [`Exit`][Kind::Exit].
///
/// Dispatch table on the event name; events without a handler produce no
/// output on exit.
fn exit(context: &mut CompileContext) {
    match context.events[context.index].name {
        // All of these discard the content captured on enter.
        Name::CodeFencedFenceMeta
        | Name::MathFlowFenceMeta
        | Name::MdxJsxTextTag
        | Name::MdxTextExpression
        | Name::Resource => {
            on_exit_drop(context);
        }
        Name::MdxEsm | Name::MdxFlowExpression | Name::MdxJsxFlowTag => on_exit_drop_slurp(context),
        Name::CharacterEscapeValue | Name::CodeTextData | Name::Data | Name::MathTextData => {
            on_exit_data(context);
        }
        Name::AutolinkEmail => on_exit_autolink_email(context),
        Name::AutolinkProtocol => on_exit_autolink_protocol(context),
        Name::BlankLineEnding => on_exit_blank_line_ending(context),
        Name::BlockQuote => on_exit_block_quote(context),
        Name::CharacterReferenceMarker => on_exit_character_reference_marker(context),
        Name::CharacterReferenceMarkerNumeric => {
            on_exit_character_reference_marker_numeric(context);
        }
        Name::CharacterReferenceMarkerHexadecimal => {
            on_exit_character_reference_marker_hexadecimal(context);
        }
        Name::CharacterReferenceValue => on_exit_character_reference_value(context),
        Name::CodeFenced | Name::CodeIndented | Name::MathFlow => on_exit_raw_flow(context),
        Name::CodeFencedFence | Name::MathFlowFence => on_exit_raw_flow_fence(context),
        Name::CodeFencedFenceInfo => on_exit_raw_flow_fence_info(context),
        Name::CodeFlowChunk | Name::MathFlowChunk => on_exit_raw_flow_chunk(context),
        Name::CodeText | Name::MathText => on_exit_raw_text(context),
        Name::Definition => on_exit_definition(context),
        Name::DefinitionDestinationString => on_exit_definition_destination_string(context),
        Name::DefinitionLabelString => on_exit_definition_label_string(context),
        Name::DefinitionTitleString => on_exit_definition_title_string(context),
        Name::Emphasis => on_exit_emphasis(context),
        Name::Frontmatter => on_exit_frontmatter(context),
        Name::GfmAutolinkLiteralEmail => on_exit_gfm_autolink_literal_email(context),
        Name::GfmAutolinkLiteralMailto => on_exit_gfm_autolink_literal_mailto(context),
        Name::GfmAutolinkLiteralProtocol => on_exit_gfm_autolink_literal_protocol(context),
        Name::GfmAutolinkLiteralWww => on_exit_gfm_autolink_literal_www(context),
        Name::GfmAutolinkLiteralXmpp => on_exit_gfm_autolink_literal_xmpp(context),
        Name::GfmFootnoteCall => on_exit_gfm_footnote_call(context),
        Name::GfmFootnoteDefinitionLabelString => {
            on_exit_gfm_footnote_definition_label_string(context);
        }
        Name::GfmFootnoteDefinitionPrefix => on_exit_gfm_footnote_definition_prefix(context),
        Name::GfmFootnoteDefinition => on_exit_gfm_footnote_definition(context),
        Name::GfmStrikethrough => on_exit_gfm_strikethrough(context),
        Name::GfmTable => on_exit_gfm_table(context),
        Name::GfmTableBody => on_exit_gfm_table_body(context),
        Name::GfmTableCell => on_exit_gfm_table_cell(context),
        Name::GfmTableHead => on_exit_gfm_table_head(context),
        Name::GfmTableRow => on_exit_gfm_table_row(context),
        Name::GfmTaskListItemCheck => on_exit_gfm_task_list_item_check(context),
        Name::GfmTaskListItemValueChecked => on_exit_gfm_task_list_item_value_checked(context),
        Name::HardBreakEscape | Name::HardBreakTrailing => on_exit_break(context),
        Name::HeadingAtx => on_exit_heading_atx(context),
        Name::HeadingAtxSequence => on_exit_heading_atx_sequence(context),
        Name::HeadingAtxText => on_exit_heading_atx_text(context),
        Name::HeadingSetextText => on_exit_heading_setext_text(context),
        Name::HeadingSetextUnderlineSequence => on_exit_heading_setext_underline_sequence(context),
        Name::HtmlFlow | Name::HtmlText => on_exit_html(context),
        Name::HtmlFlowData | Name::HtmlTextData => on_exit_html_data(context),
        Name::Image | Name::Link => on_exit_media(context),
        Name::Label => on_exit_label(context),
        Name::LabelText => on_exit_label_text(context),
        Name::LineEnding => on_exit_line_ending(context),
        Name::ListOrdered | Name::ListUnordered => on_exit_list(context),
        Name::ListItem => on_exit_list_item(context),
        Name::ListItemValue => on_exit_list_item_value(context),
        Name::Paragraph => on_exit_paragraph(context),
        Name::ReferenceString => on_exit_reference_string(context),
        Name::ResourceDestinationString => on_exit_resource_destination_string(context),
        Name::ResourceTitleString => on_exit_resource_title_string(context),
        Name::Strong => on_exit_strong(context),
        Name::ThematicBreak => on_exit_thematic_break(context),
        _ => {}
    }
}
/// Handle [`Enter`][Kind::Enter]:`*`.
///
/// Buffers data.
///
/// Pushes a fresh buffer so everything generated until the matching exit
/// can be collected (and possibly dropped or post-processed) via `resume`.
fn on_enter_buffer(context: &mut CompileContext) {
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`BlockQuote`][Name::BlockQuote].
fn on_enter_block_quote(context: &mut CompileContext) {
    context.line_ending_if_needed();
    context.push("<blockquote>");
    // Content of a block quote is never rendered tight.
    context.tight_stack.push(false);
}
/// Handle [`Enter`][Kind::Enter]:[`CodeIndented`][Name::CodeIndented].
fn on_enter_code_indented(context: &mut CompileContext) {
    context.line_ending_if_needed();
    context.push("<pre><code>");
    // Track whether any data is seen, to decide on a trailing eol at exit.
    context.raw_flow_seen_data = Some(false);
}
/// Handle [`Enter`][Kind::Enter]:{[`CodeFenced`][Name::CodeFenced],[`MathFlow`][Name::MathFlow]}.
fn on_enter_raw_flow(context: &mut CompileContext) {
    context.raw_flow_seen_data = Some(false);
    context.raw_flow_fences_count = Some(0);
    context.line_ending_if_needed();
    // No `>` yet: it is added later, after the (optional) info string.
    context.push("<pre><code");
    if matches!(context.events[context.index].name, Name::MathFlow) {
        context.push(" class=\"language-math math-display\"");
    }
}
/// Handle [`Enter`][Kind::Enter]:{[`CodeText`][Name::CodeText],[`MathText`][Name::MathText]}.
fn on_enter_raw_text(context: &mut CompileContext) {
    context.raw_text_inside = true;
    // In image `alt` text, tags are suppressed.
    if !context.image_alt_inside {
        let math = matches!(context.events[context.index].name, Name::MathText);
        context.push("<code");
        if math {
            context.push(" class=\"language-math math-inline\"");
        }
        context.push(">");
    }
    // Capture the content: it is post-processed on exit.
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`Definition`][Name::Definition].
fn on_enter_definition(context: &mut CompileContext) {
    // Definitions generate no output themselves; track their parts on the
    // media stack and capture (to later drop) any generated content.
    context.media_stack.push(Media {
        image: false,
        label_id: None,
        label: None,
        reference_id: None,
        destination: None,
        title: None,
    });
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`DefinitionDestinationString`][Name::DefinitionDestinationString].
fn on_enter_definition_destination_string(context: &mut CompileContext) {
    // Encoding stays off while the raw destination is captured; it is
    // re-enabled on exit.
    context.encode_html = false;
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`Emphasis`][Name::Emphasis].
fn on_enter_emphasis(context: &mut CompileContext) {
    if context.image_alt_inside {
        // No tags in image `alt` text.
        return;
    }
    context.push("<em>");
}
/// Handle [`Enter`][Kind::Enter]:[`Frontmatter`][Name::Frontmatter].
///
/// Frontmatter produces no output: the captured content is dropped on exit.
fn on_enter_frontmatter(context: &mut CompileContext) {
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`GfmFootnoteDefinition`][Name::GfmFootnoteDefinition].
///
/// Footnote definitions are rendered loose: paragraphs inside them get
/// `<p>` tags (see `on_enter_paragraph`).
fn on_enter_gfm_footnote_definition(context: &mut CompileContext) {
    context.tight_stack.push(false);
}
/// Handle [`Enter`][Kind::Enter]:[`GfmFootnoteCall`][Name::GfmFootnoteCall].
fn on_enter_gfm_footnote_call(context: &mut CompileContext) {
    // Track the call on the media stack; its parts are filled in later.
    let media = Media {
        image: false,
        label: None,
        label_id: None,
        reference_id: None,
        destination: None,
        title: None,
    };
    context.media_stack.push(media);
}
/// Handle [`Enter`][Kind::Enter]:[`GfmStrikethrough`][Name::GfmStrikethrough].
fn on_enter_gfm_strikethrough(context: &mut CompileContext) {
    if context.image_alt_inside {
        // No tags in image `alt` text.
        return;
    }
    context.push("<del>");
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTable`][Name::GfmTable].
fn on_enter_gfm_table(context: &mut CompileContext) {
    // Resolve the per-column alignment up front; cells consult it later.
    context.gfm_table_align = Some(gfm_table_align(context.events, context.index));
    context.line_ending_if_needed();
    context.push("<table>");
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTableBody`][Name::GfmTableBody].
///
/// Rows handle their own leading line endings (see `on_enter_gfm_table_row`).
fn on_enter_gfm_table_body(context: &mut CompileContext) {
    context.push("<tbody>");
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTableCell`][Name::GfmTableCell].
fn on_enter_gfm_table_cell(context: &mut CompileContext) {
    let column = context.gfm_table_column;
    let alignment = context
        .gfm_table_align
        .as_ref()
        .unwrap()
        .get(column)
        .copied();
    if let Some(kind) = alignment {
        context.line_ending_if_needed();
        context.push(if context.gfm_table_in_head {
            "<th"
        } else {
            "<td"
        });
        match kind {
            AlignKind::Left => context.push(" align=\"left\""),
            AlignKind::Right => context.push(" align=\"right\""),
            AlignKind::Center => context.push(" align=\"center\""),
            AlignKind::None => {}
        }
        context.push(">");
    } else {
        // More cells than columns: capture this cell to ignore it.
        context.buffer();
    }
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTableHead`][Name::GfmTableHead].
fn on_enter_gfm_table_head(context: &mut CompileContext) {
    // Cells check this flag to emit `<th>` instead of `<td>`.
    context.gfm_table_in_head = true;
    context.line_ending_if_needed();
    context.push("<thead>");
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTableRow`][Name::GfmTableRow].
///
/// Each row starts on its own line.
fn on_enter_gfm_table_row(context: &mut CompileContext) {
    context.line_ending_if_needed();
    context.push("<tr>");
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTaskListItemCheck`][Name::GfmTaskListItemCheck].
fn on_enter_gfm_task_list_item_check(context: &mut CompileContext) {
    if context.image_alt_inside {
        // No tags in image `alt` text.
        return;
    }
    context.push("<input type=\"checkbox\" ");
    // Checkboxes are disabled unless explicitly configured otherwise.
    if !context.options.gfm_task_list_item_checkable {
        context.push("disabled=\"\" ");
    }
}
/// Handle [`Enter`][Kind::Enter]:[`HtmlFlow`][Name::HtmlFlow].
fn on_enter_html_flow(context: &mut CompileContext) {
    // Only pass raw HTML through when explicitly allowed.
    if context.options.allow_dangerous_html {
        context.encode_html = false;
    }
    context.line_ending_if_needed();
}
/// Handle [`Enter`][Kind::Enter]:[`HtmlText`][Name::HtmlText].
///
/// Raw HTML is only passed through when explicitly allowed; otherwise it
/// stays encoded (the default).
fn on_enter_html_text(context: &mut CompileContext) {
    if context.options.allow_dangerous_html {
        context.encode_html = false;
    }
}
/// Handle [`Enter`][Kind::Enter]:[`Image`][Name::Image].
fn on_enter_image(context: &mut CompileContext) {
    // Disallow tags while compiling the `alt` attribute.
    context.image_alt_inside = true;
    context.media_stack.push(Media {
        image: true,
        label: None,
        label_id: None,
        reference_id: None,
        destination: None,
        title: None,
    });
}
/// Handle [`Enter`][Kind::Enter]:[`Link`][Name::Link].
fn on_enter_link(context: &mut CompileContext) {
    // Track the link on the media stack; its parts are filled in later.
    let media = Media {
        image: false,
        label: None,
        label_id: None,
        reference_id: None,
        destination: None,
        title: None,
    };
    context.media_stack.push(media);
}
/// Handle [`Enter`][Kind::Enter]:{[`ListOrdered`][Name::ListOrdered],[`ListUnordered`][Name::ListUnordered]}.
fn on_enter_list(context: &mut CompileContext) {
    let ordered = context.events[context.index].name == Name::ListOrdered;
    let loose = list_loose(context.events, context.index, true);
    context.tight_stack.push(!loose);
    context.line_ending_if_needed();
    // Note: no `>` yet — it is added when the first item marker is seen.
    context.push(if ordered { "<ol" } else { "<ul" });
    context.list_expect_first_marker = Some(true);
}
/// Handle [`Enter`][Kind::Enter]:[`ListItemMarker`][Name::ListItemMarker].
fn on_enter_list_item_marker(context: &mut CompileContext) {
    let first = context.list_expect_first_marker.take().unwrap();
    // The list tag was left open (attributes could follow); close it now.
    if first {
        context.push(">");
    }
    context.line_ending_if_needed();
    context.push("<li>");
    context.list_expect_first_marker = Some(false);
}
/// Handle [`Enter`][Kind::Enter]:[`Paragraph`][Name::Paragraph].
fn on_enter_paragraph(context: &mut CompileContext) {
    // In tight containers, paragraphs are unwrapped.
    let tight = context.tight_stack.last().copied().unwrap_or(false);
    if !tight {
        context.line_ending_if_needed();
        context.push("<p>");
    }
}
/// Handle [`Enter`][Kind::Enter]:[`Resource`][Name::Resource].
fn on_enter_resource(context: &mut CompileContext) {
    context.media_stack.last_mut().unwrap().destination = Some(String::new());
    // Capture: line endings inside the resource are ignored.
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`ResourceDestinationString`][Name::ResourceDestinationString].
fn on_enter_resource_destination_string(context: &mut CompileContext) {
    // Ignore encoding the result, as we’ll first percent encode the url and
    // encode manually after.
    context.encode_html = false;
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`Strong`][Name::Strong].
fn on_enter_strong(context: &mut CompileContext) {
    if context.image_alt_inside {
        // No tags in image `alt` text.
        return;
    }
    context.push("<strong>");
}
/// Handle [`Exit`][Kind::Exit]:[`AutolinkEmail`][Name::AutolinkEmail].
fn on_exit_autolink_email(context: &mut CompileContext) {
    let position = Position::from_exit_event(context.events, context.index);
    let value = Slice::from_position(context.bytes, &position);
    // Emails always get a `mailto:` protocol in the `href`.
    generate_autolink(context, Some("mailto:"), value.as_str(), false);
}
/// Handle [`Exit`][Kind::Exit]:[`AutolinkProtocol`][Name::AutolinkProtocol].
fn on_exit_autolink_protocol(context: &mut CompileContext) {
    let position = Position::from_exit_event(context.events, context.index);
    let value = Slice::from_position(context.bytes, &position);
    // The value already contains its own protocol.
    generate_autolink(context, None, value.as_str(), false);
}
/// Handle [`Exit`][Kind::Exit]:{[`HardBreakEscape`][Name::HardBreakEscape],[`HardBreakTrailing`][Name::HardBreakTrailing]}.
fn on_exit_break(context: &mut CompileContext) {
    if context.image_alt_inside {
        // No tags in image `alt` text.
        return;
    }
    context.push("<br />");
}
/// Handle [`Exit`][Kind::Exit]:[`BlankLineEnding`][Name::BlankLineEnding].
fn on_exit_blank_line_ending(context: &mut CompileContext) {
    context.slurp_one_line_ending = false;
    // At the very end of the document, make sure output ends with an eol.
    if context.index + 1 == context.events.len() {
        context.line_ending_if_needed();
    }
}
/// Handle [`Exit`][Kind::Exit]:[`BlockQuote`][Name::BlockQuote].
fn on_exit_block_quote(context: &mut CompileContext) {
    context.tight_stack.pop();
    context.slurp_one_line_ending = false;
    context.line_ending_if_needed();
    context.push("</blockquote>");
}
/// Handle [`Exit`][Kind::Exit]:[`CharacterReferenceMarker`][Name::CharacterReferenceMarker].
///
/// `&` marks a named reference; see `on_exit_character_reference_value`.
fn on_exit_character_reference_marker(context: &mut CompileContext) {
    context.character_reference_marker = Some(b'&');
}
/// Handle [`Exit`][Kind::Exit]:[`CharacterReferenceMarkerHexadecimal`][Name::CharacterReferenceMarkerHexadecimal].
///
/// `x` marks a hexadecimal numeric reference.
fn on_exit_character_reference_marker_hexadecimal(context: &mut CompileContext) {
    context.character_reference_marker = Some(b'x');
}
/// Handle [`Exit`][Kind::Exit]:[`CharacterReferenceMarkerNumeric`][Name::CharacterReferenceMarkerNumeric].
///
/// `#` marks a decimal numeric reference.
fn on_exit_character_reference_marker_numeric(context: &mut CompileContext) {
    context.character_reference_marker = Some(b'#');
}
/// Handle [`Exit`][Kind::Exit]:[`CharacterReferenceValue`][Name::CharacterReferenceValue].
///
/// Decodes the reference using the marker set when the marker event was
/// handled (`&` named, `#` decimal, `x` hexadecimal) and pushes the
/// (possibly encoded) decoded value.
fn on_exit_character_reference_value(context: &mut CompileContext) {
    let marker = context
        .character_reference_marker
        .take()
        // Fix: the panic message referenced a nonexistent field name
        // (`character_reference_kind`).
        .expect("expected `character_reference_marker` to be set");
    let slice = Slice::from_position(
        context.bytes,
        &Position::from_exit_event(context.events, context.index),
    );
    let value = decode_character_reference(slice.as_str(), marker, true)
        .expect("expected to parse only valid named references");
    context.push(&encode(&value, context.encode_html));
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeFlowChunk`][Name::CodeFlowChunk],[`MathFlowChunk`][Name::MathFlowChunk]}.
fn on_exit_raw_flow_chunk(context: &mut CompileContext) {
    context.raw_flow_seen_data = Some(true);
    let position = Position::from_exit_event(context.events, context.index);
    // Must serialize to get virtual spaces.
    let value = Slice::from_position(context.bytes, &position).serialize();
    context.push(&encode(&value, context.encode_html));
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeFencedFence`][Name::CodeFencedFence],[`MathFlowFence`][Name::MathFlowFence]}.
fn on_exit_raw_flow_fence(context: &mut CompileContext) {
    let count = context
        .raw_flow_fences_count
        .expect("expected `raw_flow_fences_count`");
    context.raw_flow_fences_count = Some(count + 1);
    // After the opening fence: close the `<code` tag and drop the following
    // line ending.
    if count == 0 {
        context.push(">");
        context.slurp_one_line_ending = true;
    }
}
/// Handle [`Exit`][Kind::Exit]:[`CodeFencedFenceInfo`][Name::CodeFencedFenceInfo].
///
/// Note: math (flow) does not support `info`.
fn on_exit_raw_flow_fence_info(context: &mut CompileContext) {
    let info = context.resume();
    for part in [" class=\"language-", info.as_str(), "\""] {
        context.push(part);
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeFenced`][Name::CodeFenced],[`CodeIndented`][Name::CodeIndented],[`MathFlow`][Name::MathFlow]}.
///
/// Closes the `<pre><code>` opened on enter, taking care of the several
/// edge cases around trailing line endings and unclosed fences.
fn on_exit_raw_flow(context: &mut CompileContext) {
    // One special case is if we are inside a container, and the raw (flow) was
    // not closed (meaning it runs to the end).
    // In that case, the following line ending, is considered *outside* the
    // fenced code and block quote by `markdown-rs`, but CM wants to treat that
    // ending as part of the code.
    if let Some(count) = context.raw_flow_fences_count {
        // No closing fence.
        if count == 1
            // In a container.
            && !context.tight_stack.is_empty()
            // And not empty (the previous event would be the opening fence).
            && !matches!(context.events[context.index - 1].name, Name::CodeFencedFence | Name::MathFlowFence)
        {
            context.line_ending();
        }
    }
    // But in most cases, it’s simpler: when we’ve seen some data, emit an extra
    // line ending when needed.
    if context
        .raw_flow_seen_data
        .take()
        .expect("`raw_flow_seen_data` must be defined")
    {
        context.line_ending_if_needed();
    }
    context.push("</code></pre>");
    // For fenced code w/o a closing fence, make sure the closing tag is on
    // its own line.
    if let Some(count) = context.raw_flow_fences_count.take() {
        if count < 2 {
            context.line_ending_if_needed();
        }
    }
    context.slurp_one_line_ending = false;
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeText`][Name::CodeText],[`MathText`][Name::MathText]}.
///
/// Post-processes the captured content (escaped pipes in tables, the
/// code-span space-stripping rule) before emitting it.
fn on_exit_raw_text(context: &mut CompileContext) {
    let result = context.resume();
    // To do: share with `to_mdast`.
    let mut bytes = result.as_bytes().to_vec();
    // If we are in a GFM table, we need to decode escaped pipes.
    // This is a rather weird GFM feature.
    if context.gfm_table_align.is_some() {
        let mut index = 0;
        let mut len = bytes.len();
        while index < len {
            if index + 1 < len && bytes[index] == b'\\' && bytes[index + 1] == b'|' {
                // Drop the backslash; `index` then advances past the pipe,
                // so `\|` is decoded exactly once.
                bytes.remove(index);
                len -= 1;
            }
            index += 1;
        }
    }
    // When the content starts *and* ends with a space and is not only
    // spaces, strip one space from each side.
    let mut trim = false;
    let mut index = 0;
    let mut end = bytes.len();
    if end > 2 && bytes[index] == b' ' && bytes[end - 1] == b' ' {
        index += 1;
        end -= 1;
        // Only trim if there is at least one non-space byte inside.
        while index < end && !trim {
            if bytes[index] != b' ' {
                trim = true;
                break;
            }
            index += 1;
        }
    }
    if trim {
        bytes.remove(0);
        bytes.pop();
    }
    context.raw_text_inside = false;
    context.push(str::from_utf8(&bytes).unwrap());
    if !context.image_alt_inside {
        context.push("</code>");
    }
}
/// Handle [`Exit`][Kind::Exit]:*.
///
/// Resumes, and ignores what was resumed.
///
/// Used for constructs whose captured content does not appear in the
/// output (such as resources and meta strings).
fn on_exit_drop(context: &mut CompileContext) {
    context.resume();
}
/// Handle [`Exit`][Kind::Exit]:*.
///
/// Resumes, ignores what was resumed, and slurps the following line ending.
fn on_exit_drop_slurp(context: &mut CompileContext) {
    context.resume();
    context.slurp_one_line_ending = true;
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeTextData`][Name::CodeTextData],[`Data`][Name::Data],[`CharacterEscapeValue`][Name::CharacterEscapeValue]}.
fn on_exit_data(context: &mut CompileContext) {
    let position = Position::from_exit_event(context.events, context.index);
    let value = Slice::from_position(context.bytes, &position);
    context.push(&encode(value.as_str(), context.encode_html));
}
/// Handle [`Exit`][Kind::Exit]:[`Definition`][Name::Definition].
fn on_exit_definition(context: &mut CompileContext) {
    // Drop the captured content: definitions generate no output.
    context.resume();
    let media = context.media_stack.pop().unwrap();
    let (start, end) = media.reference_id.unwrap();
    let id = normalize_identifier(Slice::from_indices(context.bytes, start, end).as_str());
    context.definitions.push(Definition {
        id,
        destination: media.destination,
        title: media.title,
    });
}
/// Handle [`Exit`][Kind::Exit]:[`DefinitionDestinationString`][Name::DefinitionDestinationString].
fn on_exit_definition_destination_string(context: &mut CompileContext) {
    // Store the collected destination and re-enable encoding.
    let destination = context.resume();
    context.media_stack.last_mut().unwrap().destination = Some(destination);
    context.encode_html = true;
}
/// Handle [`Exit`][Kind::Exit]:[`DefinitionLabelString`][Name::DefinitionLabelString].
fn on_exit_definition_label_string(context: &mut CompileContext) {
    // Discard label, use the source content instead.
    context.resume();
    let indices = Position::from_exit_event(context.events, context.index).to_indices();
    context.media_stack.last_mut().unwrap().reference_id = Some(indices);
}
/// Handle [`Exit`][Kind::Exit]:[`DefinitionTitleString`][Name::DefinitionTitleString].
fn on_exit_definition_title_string(context: &mut CompileContext) {
    // Store the collected title on the current media.
    let title = context.resume();
    context.media_stack.last_mut().unwrap().title = Some(title);
}
/// Handle [`Exit`][Kind::Exit]:[`Emphasis`][Name::Emphasis].
fn on_exit_emphasis(context: &mut CompileContext) {
    // No tags are output inside an image alt.
    if context.image_alt_inside {
        return;
    }
    context.push("</em>");
}
/// Handle [`Exit`][Kind::Exit]:[`Frontmatter`][Name::Frontmatter].
fn on_exit_frontmatter(context: &mut CompileContext) {
    // Frontmatter is metadata: drop its content and the trailing line ending.
    let _ = context.resume();
    context.slurp_one_line_ending = true;
}
/// Handle [`Exit`][Kind::Exit]:[`GfmAutolinkLiteralEmail`][Name::GfmAutolinkLiteralEmail].
fn on_exit_gfm_autolink_literal_email(context: &mut CompileContext) {
    // Emails get a `mailto:` protocol prepended to the URL.
    let position = Position::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    generate_autolink(context, Some("mailto:"), slice.as_str(), true);
}
/// Handle [`Exit`][Kind::Exit]:[`GfmAutolinkLiteralMailto`][Name::GfmAutolinkLiteralMailto].
fn on_exit_gfm_autolink_literal_mailto(context: &mut CompileContext) {
    // The value already contains `mailto:`, so no protocol is prepended.
    let position = Position::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    generate_autolink(context, None, slice.as_str(), true);
}
/// Handle [`Exit`][Kind::Exit]:[`GfmAutolinkLiteralProtocol`][Name::GfmAutolinkLiteralProtocol].
fn on_exit_gfm_autolink_literal_protocol(context: &mut CompileContext) {
    // The value already starts with a protocol; use it verbatim.
    let position = Position::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    generate_autolink(context, None, slice.as_str(), true);
}
/// Handle [`Exit`][Kind::Exit]:[`GfmAutolinkLiteralWww`][Name::GfmAutolinkLiteralWww].
fn on_exit_gfm_autolink_literal_www(context: &mut CompileContext) {
    // Bare `www.` autolinks get an `http://` protocol prepended to the URL.
    let position = Position::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    generate_autolink(context, Some("http://"), slice.as_str(), true);
}
/// Handle [`Exit`][Kind::Exit]:[`GfmAutolinkLiteralXmpp`][Name::GfmAutolinkLiteralXmpp].
fn on_exit_gfm_autolink_literal_xmpp(context: &mut CompileContext) {
    // The value already contains `xmpp:`, so no protocol is prepended.
    let position = Position::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    generate_autolink(context, None, slice.as_str(), true);
}
/// Handle [`Exit`][Kind::Exit]:[`GfmFootnoteCall`][Name::GfmFootnoteCall].
///
/// Registers the call (so definitions can generate backreferences) and, when
/// not inside an image alt, outputs the `<sup><a …>n</a></sup>` reference.
fn on_exit_gfm_footnote_call(context: &mut CompileContext) {
    let indices = context.media_stack.pop().unwrap().label_id.unwrap();
    let id =
        normalize_identifier(Slice::from_indices(context.bytes, indices.0, indices.1).as_str());
    // See if this has been called before.
    let existing = context
        .gfm_footnote_definition_calls
        .iter()
        .position(|(call_id, _)| *call_id == id);
    let call_index = if let Some(index) = existing {
        index
    } else {
        // New.
        context.gfm_footnote_definition_calls.push((id, 0));
        context.gfm_footnote_definition_calls.len() - 1
    };
    // Increment.
    context.gfm_footnote_definition_calls[call_index].1 += 1;
    // No call is output in an image alt, though the definition and
    // backreferences are generated as if it was the case.
    if context.image_alt_inside {
        return;
    }
    // Sanitize lazily: the safe id is only needed when we actually render.
    let safe_id = sanitize(
        &context.gfm_footnote_definition_calls[call_index]
            .0
            .to_lowercase(),
    );
    context.push("<sup><a href=\"#");
    if let Some(ref value) = context.options.gfm_footnote_clobber_prefix {
        context.push(&encode(value, context.encode_html));
    } else {
        context.push("user-content-");
    }
    context.push("fn-");
    context.push(&safe_id);
    context.push("\" id=\"");
    if let Some(ref value) = context.options.gfm_footnote_clobber_prefix {
        context.push(&encode(value, context.encode_html));
    } else {
        context.push("user-content-");
    }
    context.push("fnref-");
    context.push(&safe_id);
    // Second and later calls to the same definition get a numeric suffix so
    // ids stay unique.
    if context.gfm_footnote_definition_calls[call_index].1 > 1 {
        context.push("-");
        context.push(
            &context.gfm_footnote_definition_calls[call_index]
                .1
                .to_string(),
        );
    }
    context.push("\" data-footnote-ref=\"\" aria-describedby=\"footnote-label\">");
    context.push(&(call_index + 1).to_string());
    context.push("</a></sup>");
}
/// Handle [`Exit`][Kind::Exit]:[`GfmFootnoteDefinitionLabelString`][Name::GfmFootnoteDefinitionLabelString].
fn on_exit_gfm_footnote_definition_label_string(context: &mut CompileContext) {
    // Remember where the label lives; it is resolved when the definition ends.
    let indices = Position::from_exit_event(context.events, context.index).to_indices();
    context.gfm_footnote_definition_stack.push(indices);
}
/// Handle [`Exit`][Kind::Exit]:[`GfmFootnoteDefinitionPrefix`][Name::GfmFootnoteDefinitionPrefix].
fn on_exit_gfm_footnote_definition_prefix(context: &mut CompileContext) {
    // Drop the prefix.
    let _ = context.resume();
    // Capture everything until end of definition.
    context.buffer();
}
/// Handle [`Exit`][Kind::Exit]:[`GfmFootnoteDefinition`][Name::GfmFootnoteDefinition].
///
/// Stores the captured content under the normalized label.
fn on_exit_gfm_footnote_definition(context: &mut CompileContext) {
    let value = context.resume();
    let (start, end) = context.gfm_footnote_definition_stack.pop().unwrap();
    context.tight_stack.pop();
    let id = normalize_identifier(Slice::from_indices(context.bytes, start, end).as_str());
    context.gfm_footnote_definitions.push((id, value));
}
/// Handle [`Exit`][Kind::Exit]:[`GfmStrikethrough`][Name::GfmStrikethrough].
fn on_exit_gfm_strikethrough(context: &mut CompileContext) {
    // No tags are output inside an image alt.
    if context.image_alt_inside {
        return;
    }
    context.push("</del>");
}
/// Handle [`Exit`][Kind::Exit]:[`GfmTable`][Name::GfmTable].
fn on_exit_gfm_table(context: &mut CompileContext) {
    // Alignments only apply within this table; clear them for what follows.
    context.gfm_table_align = None;
    context.line_ending_if_needed();
    context.push("</table>");
}
/// Handle [`Exit`][Kind::Exit]:[`GfmTableBody`][Name::GfmTableBody].
fn on_exit_gfm_table_body(context: &mut CompileContext) {
    // Close the body on its own line.
    context.line_ending_if_needed();
    context.push("</tbody>");
}
/// Handle [`Exit`][Kind::Exit]:[`GfmTableCell`][Name::GfmTableCell].
///
/// Cells within the delimiter-row width are closed with `</th>`/`</td>`;
/// extra cells were being captured (see the enter handler) and are dropped.
fn on_exit_gfm_table_cell(context: &mut CompileContext) {
    let len = context.gfm_table_align.as_ref().unwrap().len();
    if context.gfm_table_column >= len {
        // Stop capturing.
        context.resume();
    } else if context.gfm_table_in_head {
        context.push("</th>");
    } else {
        context.push("</td>");
    }
    context.gfm_table_column += 1;
}
/// Handle [`Exit`][Kind::Exit]:[`GfmTableHead`][Name::GfmTableHead].
fn on_exit_gfm_table_head(context: &mut CompileContext) {
    // Rows after the head produce `</td>` instead of `</th>`.
    context.gfm_table_in_head = false;
    context.line_ending_if_needed();
    context.push("</thead>");
}
/// Handle [`Exit`][Kind::Exit]:[`GfmTableRow`][Name::GfmTableRow].
fn on_exit_gfm_table_row(context: &mut CompileContext) {
    let len = context.gfm_table_align.as_ref().unwrap().len();
    // Add “phantom” cells, for body rows that are shorter than the delimiter
    // row (which is equal to the head row).
    for _ in context.gfm_table_column..len {
        on_enter_gfm_table_cell(context);
        on_exit_gfm_table_cell(context);
    }
    context.gfm_table_column = 0;
    context.line_ending_if_needed();
    context.push("</tr>");
}
/// Handle [`Exit`][Kind::Exit]:[`GfmTaskListItemCheck`][Name::GfmTaskListItemCheck].
fn on_exit_gfm_task_list_item_check(context: &mut CompileContext) {
    // No tags are output inside an image alt.
    if context.image_alt_inside {
        return;
    }
    context.push("/>");
}
/// Handle [`Exit`][Kind::Exit]:[`GfmTaskListItemValueChecked`][Name::GfmTaskListItemValueChecked].
fn on_exit_gfm_task_list_item_value_checked(context: &mut CompileContext) {
    // No attributes are output inside an image alt.
    if context.image_alt_inside {
        return;
    }
    context.push("checked=\"\" ");
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingAtx`][Name::HeadingAtx].
fn on_exit_heading_atx(context: &mut CompileContext) {
    // Close the heading, consuming the rank set by the sequence handler.
    let rank = context
        .heading_atx_rank
        .take()
        .expect("`heading_atx_rank` must be set in headings");
    context.push(&format!("</h{}>", rank));
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingAtxSequence`][Name::HeadingAtxSequence].
fn on_exit_heading_atx_sequence(context: &mut CompileContext) {
    // Only the first sequence opens the heading; a closing sequence (when
    // the rank is already known) is ignored.
    if context.heading_atx_rank.is_some() {
        return;
    }
    let rank = Slice::from_position(
        context.bytes,
        &Position::from_exit_event(context.events, context.index),
    )
    .len();
    context.line_ending_if_needed();
    context.heading_atx_rank = Some(rank);
    context.push(&format!("<h{}>", rank));
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingAtxText`][Name::HeadingAtxText].
fn on_exit_heading_atx_text(context: &mut CompileContext) {
    // Flush the captured heading content.
    let text = context.resume();
    context.push(&text);
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingSetextText`][Name::HeadingSetextText].
///
/// The text is buffered: the rank is only known once the underline is seen.
fn on_exit_heading_setext_text(context: &mut CompileContext) {
    let text = context.resume();
    context.heading_setext_buffer = Some(text);
    context.slurp_one_line_ending = true;
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingSetextUnderlineSequence`][Name::HeadingSetextUnderlineSequence].
///
/// Outputs the heading buffered by the text handler; the underline marker
/// decides the rank (`-` → `<h2>`, `=` → `<h1>`).
fn on_exit_heading_setext_underline_sequence(context: &mut CompileContext) {
    let text = context
        .heading_setext_buffer
        .take()
        // Fixed copy-paste: this guards `heading_setext_buffer`, not
        // `heading_atx_rank`.
        .expect("`heading_setext_buffer` must be set in setext headings");
    let position = Position::from_exit_event(context.events, context.index);
    let head = context.bytes[position.start.index];
    let rank = if head == b'-' { "2" } else { "1" };
    context.line_ending_if_needed();
    context.push("<h");
    context.push(rank);
    context.push(">");
    context.push(&text);
    context.push("</h");
    context.push(rank);
    context.push(">");
}
/// Handle [`Exit`][Kind::Exit]:{[`HtmlFlow`][Name::HtmlFlow],[`HtmlText`][Name::HtmlText]}.
fn on_exit_html(context: &mut CompileContext) {
    // Restore encoding (presumably disabled on enter to pass raw HTML
    // through — see the corresponding enter handler).
    context.encode_html = true;
}
/// Handle [`Exit`][Kind::Exit]:{[`HtmlFlowData`][Name::HtmlFlowData],[`HtmlTextData`][Name::HtmlTextData]}.
fn on_exit_html_data(context: &mut CompileContext) {
    let position = Position::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    let value = slice.as_str();
    // The tag filter only matters when dangerous HTML is allowed at all;
    // otherwise everything is encoded anyway.
    if context.options.gfm_tagfilter && context.options.allow_dangerous_html {
        context.push(&encode(&gfm_tagfilter(value), context.encode_html));
    } else {
        context.push(&encode(value, context.encode_html));
    }
}
/// Handle [`Exit`][Kind::Exit]:[`Label`][Name::Label].
fn on_exit_label(context: &mut CompileContext) {
    // Store the captured label on the current media.
    let label = context.resume();
    context.media_stack.last_mut().unwrap().label = Some(label);
}
/// Handle [`Exit`][Kind::Exit]:[`LabelText`][Name::LabelText].
fn on_exit_label_text(context: &mut CompileContext) {
    // Remember where the raw label text lives in the source.
    let indices = Position::from_exit_event(context.events, context.index).to_indices();
    context.media_stack.last_mut().unwrap().label_id = Some(indices);
}
/// Handle [`Exit`][Kind::Exit]:[`LineEnding`][Name::LineEnding].
fn on_exit_line_ending(context: &mut CompileContext) {
    if context.raw_text_inside {
        // Inside code/math text, a line ending becomes a single space.
        context.push(" ");
    } else if context.slurp_one_line_ending
        // Ignore line endings after definitions.
        || (context.index > 1
            && (context.events[context.index - 2].name == Name::Definition
                || context.events[context.index - 2].name == Name::GfmFootnoteDefinition))
    {
        context.slurp_one_line_ending = false;
    } else {
        // Otherwise, echo the line ending (encoded) as it appears in the
        // source.
        context.push(&encode(
            Slice::from_position(
                context.bytes,
                &Position::from_exit_event(context.events, context.index),
            )
            .as_str(),
            context.encode_html,
        ));
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`ListOrdered`][Name::ListOrdered],[`ListUnordered`][Name::ListUnordered]}.
fn on_exit_list(context: &mut CompileContext) {
    // Pick the closing tag from the kind of list that is exiting.
    let tag = if context.events[context.index].name == Name::ListOrdered {
        "</ol>"
    } else {
        "</ul>"
    };
    context.tight_stack.pop();
    context.line_ending();
    context.push(tag);
}
/// Handle [`Exit`][Kind::Exit]:[`ListItem`][Name::ListItem].
fn on_exit_list_item(context: &mut CompileContext) {
    let tight = context.tight_stack.last().unwrap_or(&false);
    // Find the last event that contributed content, skipping markup-only
    // events.
    let before_item = skip::opt_back(
        context.events,
        context.index - 1,
        &[
            Name::BlankLineEnding,
            Name::BlockQuotePrefix,
            Name::LineEnding,
            Name::SpaceOrTab,
            // Also ignore things that don’t contribute to the document.
            Name::Definition,
            Name::GfmFootnoteDefinition,
        ],
    );
    let previous = &context.events[before_item];
    // In a tight list the trailing paragraph is unwrapped, so `</li>` should
    // hug its content; an item ending right after its own prefix is empty.
    let tight_paragraph = *tight && previous.name == Name::Paragraph;
    let empty_item = previous.name == Name::ListItemPrefix;
    context.slurp_one_line_ending = false;
    if !tight_paragraph && !empty_item {
        context.line_ending_if_needed();
    }
    context.push("</li>");
}
/// Handle [`Exit`][Kind::Exit]:[`ListItemValue`][Name::ListItemValue].
///
/// For the first marker of an ordered list, outputs a `start` attribute when
/// the marker's numeric value is not `1`.
fn on_exit_list_item_value(context: &mut CompileContext) {
    if context.list_expect_first_marker.unwrap() {
        let slice = Slice::from_position(
            context.bytes,
            &Position::from_exit_event(context.events, context.index),
        );
        // Replaced redundant `.ok().unwrap()` with `expect` carrying the
        // invariant: the tokenizer only produces digits here.
        let value = slice
            .as_str()
            .parse::<u32>()
            .expect("expected numeric list item value");
        if value != 1 {
            context.push(" start=\"");
            context.push(&value.to_string());
            context.push("\"");
        }
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`Image`][Name::Image],[`Link`][Name::Link]}.
fn on_exit_media(context: &mut CompileContext) {
    // Whether any *outer* media on the stack is an image: inside an image
    // alt only plain text is output, never tags.
    let mut is_in_image = false;
    let mut index = 0;
    // Skip current.
    let end = context.media_stack.len() - 1;
    while index < end {
        if context.media_stack[index].image {
            is_in_image = true;
            break;
        }
        index += 1;
    }
    context.image_alt_inside = is_in_image;
    let media = context.media_stack.pop().unwrap();
    let label = media.label.unwrap();
    // Resolve the identifier for references; resources carry their own
    // destination/title and need no lookup.
    let id = media.reference_id.or(media.label_id).map(|indices| {
        normalize_identifier(Slice::from_indices(context.bytes, indices.0, indices.1).as_str())
    });
    let definition_index = if media.destination.is_none() {
        id.map(|id| {
            let mut index = 0;
            while index < context.definitions.len() && context.definitions[index].id != id {
                index += 1;
            }
            debug_assert!(
                index < context.definitions.len(),
                "expected defined definition"
            );
            index
        })
    } else {
        None
    };
    if !is_in_image {
        if media.image {
            context.push("<img src=\"");
        } else {
            context.push("<a href=\"");
        }
        // Use the definition's destination for references, the media's own
        // for resources.
        let destination = if let Some(index) = definition_index {
            context.definitions[index].destination.as_ref()
        } else {
            media.destination.as_ref()
        };
        if let Some(destination) = destination {
            // `allow_any_img_src` relaxes the protocol check for images only.
            let allow_dangerous_protocol = context.options.allow_dangerous_protocol
                || (context.options.allow_any_img_src && media.image);
            let url = if allow_dangerous_protocol {
                sanitize(destination)
            } else {
                sanitize_with_protocols(
                    destination,
                    if media.image {
                        &SAFE_PROTOCOL_SRC
                    } else {
                        &SAFE_PROTOCOL_HREF
                    },
                )
            };
            context.push(&url);
        }
        if media.image {
            context.push("\" alt=\"");
        }
    }
    // The label is the alt text of an image…
    if media.image {
        context.push(&label);
    }
    if !is_in_image {
        context.push("\"");
        let title = if let Some(index) = definition_index {
            context.definitions[index].title.clone()
        } else {
            media.title
        };
        if let Some(title) = title {
            context.push(" title=\"");
            context.push(&title);
            context.push("\"");
        }
        if media.image {
            context.push(" /");
        }
        context.push(">");
    }
    // …and the content of a link.
    if !media.image {
        context.push(&label);
        if !is_in_image {
            context.push("</a>");
        }
    }
}
/// Handle [`Exit`][Kind::Exit]:[`Paragraph`][Name::Paragraph].
fn on_exit_paragraph(context: &mut CompileContext) {
    if *context.tight_stack.last().unwrap_or(&false) {
        // In tight lists paragraphs are unwrapped: drop the closing tag and
        // swallow the following line ending.
        context.slurp_one_line_ending = true;
    } else {
        context.push("</p>");
    }
}
/// Handle [`Exit`][Kind::Exit]:[`ReferenceString`][Name::ReferenceString].
fn on_exit_reference_string(context: &mut CompileContext) {
    // Drop stuff.
    context.resume();
    let indices = Position::from_exit_event(context.events, context.index).to_indices();
    context.media_stack.last_mut().unwrap().reference_id = Some(indices);
}
/// Handle [`Exit`][Kind::Exit]:[`ResourceDestinationString`][Name::ResourceDestinationString].
fn on_exit_resource_destination_string(context: &mut CompileContext) {
    // Store the collected destination and re-enable encoding.
    let destination = context.resume();
    context.media_stack.last_mut().unwrap().destination = Some(destination);
    context.encode_html = true;
}
/// Handle [`Exit`][Kind::Exit]:[`ResourceTitleString`][Name::ResourceTitleString].
fn on_exit_resource_title_string(context: &mut CompileContext) {
    // Store the collected title on the current media.
    let title = context.resume();
    context.media_stack.last_mut().unwrap().title = Some(title);
}
/// Handle [`Exit`][Kind::Exit]:[`Strong`][Name::Strong].
fn on_exit_strong(context: &mut CompileContext) {
    // No tags are output inside an image alt.
    if context.image_alt_inside {
        return;
    }
    context.push("</strong>");
}
/// Handle [`Exit`][Kind::Exit]:[`ThematicBreak`][Name::ThematicBreak].
///
/// Outputs `<hr />` on its own line.
fn on_exit_thematic_break(context: &mut CompileContext) {
    context.line_ending_if_needed();
    context.push("<hr />");
}
/// Generate a footnote section.
///
/// Outputs `<section>` with a (configurable) label heading and an ordered
/// list containing one item per called footnote definition.
fn generate_footnote_section(context: &mut CompileContext) {
    // The label's element name is configurable; `h2` is the default.
    let tag_name: String = match context.options.gfm_footnote_label_tag_name {
        Some(ref value) => encode(value, context.encode_html),
        None => String::from("h2"),
    };
    context.line_ending_if_needed();
    context.push("<section data-footnotes=\"\" class=\"footnotes\"><");
    context.push(&tag_name);
    context.push(" id=\"footnote-label\" ");
    match context.options.gfm_footnote_label_attributes {
        Some(ref value) => context.push(value),
        None => context.push("class=\"sr-only\""),
    }
    context.push(">");
    match context.options.gfm_footnote_label {
        Some(ref value) => context.push(&encode(value, context.encode_html)),
        None => context.push("Footnotes"),
    }
    context.push("</");
    context.push(&tag_name);
    context.push(">");
    context.line_ending();
    context.push("<ol>");
    for index in 0..context.gfm_footnote_definition_calls.len() {
        generate_footnote_item(context, index);
    }
    context.line_ending();
    context.push("</ol>");
    context.line_ending();
    context.push("</section>");
    context.line_ending();
}
/// Generate a footnote item from a call.
///
/// Renders `<li id="…fn-…">` with the definition's content followed by one
/// backreference per call to this definition.
fn generate_footnote_item(context: &mut CompileContext, index: usize) {
    let id = &context.gfm_footnote_definition_calls[index].0;
    let safe_id = sanitize(&id.to_lowercase());
    // Find definition: we’ll always find it.
    let mut definition_index = 0;
    while definition_index < context.gfm_footnote_definitions.len() {
        if &context.gfm_footnote_definitions[definition_index].0 == id {
            break;
        }
        definition_index += 1;
    }
    debug_assert_ne!(
        definition_index,
        context.gfm_footnote_definitions.len(),
        "expected definition"
    );
    context.line_ending();
    // Open the item with the `fn-` id that calls point at.
    context.push("<li id=\"");
    if let Some(ref value) = context.options.gfm_footnote_clobber_prefix {
        context.push(&encode(value, context.encode_html));
    } else {
        context.push("user-content-");
    }
    context.push("fn-");
    context.push(&safe_id);
    context.push("\">");
    context.line_ending();
    // Create one or more backreferences.
    // Each points at one call's `fnref-` anchor; the second and later get a
    // `-2`, `-3`, … suffix and a superscript counter.
    let mut reference_index = 0;
    let mut backreferences = String::new();
    while reference_index < context.gfm_footnote_definition_calls[index].1 {
        if reference_index != 0 {
            backreferences.push(' ');
        }
        backreferences.push_str("<a href=\"#");
        if let Some(ref value) = context.options.gfm_footnote_clobber_prefix {
            backreferences.push_str(&encode(value, context.encode_html));
        } else {
            backreferences.push_str("user-content-");
        }
        backreferences.push_str("fnref-");
        backreferences.push_str(&safe_id);
        if reference_index != 0 {
            backreferences.push('-');
            backreferences.push_str(&(reference_index + 1).to_string());
        }
        backreferences.push_str("\" data-footnote-backref=\"\" aria-label=\"");
        if let Some(ref value) = context.options.gfm_footnote_back_label {
            backreferences.push_str(&encode(value, context.encode_html));
        } else {
            backreferences.push_str("Back to content");
        }
        backreferences.push_str("\" class=\"data-footnote-backref\">↩");
        if reference_index != 0 {
            backreferences.push_str("<sup>");
            backreferences.push_str(&(reference_index + 1).to_string());
            backreferences.push_str("</sup>");
        }
        backreferences.push_str("</a>");
        reference_index += 1;
    }
    let value = context.gfm_footnote_definitions[definition_index].1.clone();
    let bytes = value.as_bytes();
    let mut byte_index = bytes.len();
    // Move back past EOL.
    while byte_index > 0 && matches!(bytes[byte_index - 1], b'\n' | b'\r') {
        byte_index -= 1;
    }
    // Check if it ends in `</p>`.
    // This is a bit funky if someone wrote a safe paragraph by hand in
    // there.
    // But in all other cases, `<` and `>` would be encoded, so we can be
    // sure that this is generated by our compiler.
    if byte_index > 3
        && bytes[byte_index - 4] == b'<'
        && bytes[byte_index - 3] == b'/'
        && bytes[byte_index - 2] == b'p'
        && bytes[byte_index - 1] == b'>'
    {
        // Splice the backreferences inside the final paragraph.
        let (before, after) = bytes.split_at(byte_index - 4);
        let mut result = String::new();
        result.push_str(str::from_utf8(before).unwrap());
        result.push(' ');
        result.push_str(&backreferences);
        result.push_str(str::from_utf8(after).unwrap());
        context.push(&result);
    } else {
        // Otherwise, append them after the content.
        context.push(&value);
        context.line_ending_if_needed();
        context.push(&backreferences);
    }
    context.line_ending_if_needed();
    context.push("</li>");
}
/// Generate an autolink (used by unicode autolinks and GFM autolink literals).
///
/// GFM autolink literals inside an existing link are output as plain text:
/// links do not nest.
fn generate_autolink(
    context: &mut CompileContext,
    protocol: Option<&str>,
    value: &str,
    is_gfm_literal: bool,
) {
    let is_in_link = context.media_stack.iter().any(|media| !media.image);
    // Whether to wrap the value in an anchor at all.
    let wrap = !context.image_alt_inside && (!is_in_link || !is_gfm_literal);
    if wrap {
        let url = match protocol {
            Some(protocol) => format!("{}{}", protocol, value),
            None => value.into(),
        };
        let url = if context.options.allow_dangerous_protocol {
            sanitize(&url)
        } else {
            sanitize_with_protocols(&url, &SAFE_PROTOCOL_HREF)
        };
        context.push("<a href=\"");
        context.push(&url);
        context.push("\">");
    }
    context.push(&encode(value, context.encode_html));
    if wrap {
        context.push("</a>");
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/to_mdast.rs | Rust | //! Turn events into a syntax tree.
use crate::event::{Event, Kind, Name};
use crate::mdast::{
AttributeContent, AttributeValue, AttributeValueExpression, Blockquote, Break, Code,
Definition, Delete, Emphasis, FootnoteDefinition, FootnoteReference, Heading, Html, Image,
ImageReference, InlineCode, InlineMath, Link, LinkReference, List, ListItem, Math,
MdxFlowExpression, MdxJsxAttribute, MdxJsxExpressionAttribute, MdxJsxFlowElement,
MdxJsxTextElement, MdxTextExpression, MdxjsEsm, Node, Paragraph, ReferenceKind, Root, Strong,
Table, TableCell, TableRow, Text, ThematicBreak, Toml, Yaml,
};
use crate::message;
use crate::unist::{Point, Position};
use crate::util::{
character_reference::{
decode as decode_character_reference, parse as parse_character_reference,
},
infer::{gfm_table_align, list_item_loose, list_loose},
mdx_collect::{collect, Result as CollectResult},
normalize_identifier::normalize_identifier,
slice::{Position as SlicePosition, Slice},
};
use alloc::{
boxed::Box,
format,
string::{String, ToString},
vec,
vec::Vec,
};
use core::str;
/// A reference to something.
#[derive(Debug)]
struct Reference {
    /// Kind of reference.
    ///
    /// Starts out as `Some(Shortcut)`; per `Reference::new`, it is removed
    /// on a resource and changed on a reference.
    #[allow(clippy::struct_field_names)]
    reference_kind: Option<ReferenceKind>,
    /// Identifier of the reference (presumably the normalized label — TODO
    /// confirm against the handlers that fill it in).
    identifier: String,
    /// Label as it appears in the source.
    label: String,
}
/// Info on a tag.
///
/// JSX tags are parsed on their own.
/// They’re matched together here.
#[derive(Debug, Clone)]
struct JsxTag {
    /// Optional tag name.
    ///
    /// `None` means that it’s a fragment.
    name: Option<String>,
    /// List of attributes.
    attributes: Vec<AttributeContent>,
    /// Whether this is a closing tag.
    ///
    /// ```markdown
    /// > | </a>
    ///      ^
    /// ```
    close: bool,
    /// Whether this is a self-closing tag.
    ///
    /// ```markdown
    /// > | <a/>
    ///       ^
    /// ```
    self_closing: bool,
    /// Starting point of the tag in the source.
    start: Point,
    /// Ending point of the tag in the source.
    end: Point,
}
impl Reference {
    /// Create a new reference, assuming it is a shortcut (`[x]`).
    fn new() -> Reference {
        Reference {
            // Assume shortcut: removed on a resource, changed on a reference.
            reference_kind: Some(ReferenceKind::Shortcut),
            identifier: String::new(),
            label: String::new(),
        }
    }
}
/// Context used to compile markdown.
#[allow(clippy::struct_excessive_bools)]
#[derive(Debug)]
struct CompileContext<'a> {
    // Static info.
    /// List of events.
    events: &'a [Event],
    /// List of bytes.
    bytes: &'a [u8],
    // Fields used by handlers to track the things they need to track to
    // compile markdown.
    /// Marker byte of the character reference currently being compiled
    /// (`0` when none — see `new`).
    character_reference_marker: u8,
    /// Whether we are currently inside a GFM table.
    gfm_table_inside: bool,
    /// Presumably: whether a hard break was just handled — TODO confirm
    /// against the text handlers.
    hard_break_after: bool,
    /// Presumably: whether setext heading text was just handled — TODO
    /// confirm against the heading handlers.
    heading_setext_text_after: bool,
    /// Stack of open JSX tags, used to match closing tags to opening ones.
    jsx_tag_stack: Vec<JsxTag>,
    /// JSX tag currently being parsed, if any.
    jsx_tag: Option<JsxTag>,
    /// Stack of references for currently open media (images/links).
    media_reference_stack: Vec<Reference>,
    /// Presumably: whether a fence of fenced code/math was seen — TODO
    /// confirm against the raw flow handlers.
    raw_flow_fence_seen: bool,
    // Intermediate results.
    /// Primary tree and buffers.
    ///
    /// Each entry is `(tree, node-index stack, opening-event stack)`; the
    /// last entry is the buffer currently being filled (see `buffer` /
    /// `resume`).
    trees: Vec<(Node, Vec<usize>, Vec<usize>)>,
    /// Current event index.
    index: usize,
}
impl<'a> CompileContext<'a> {
    /// Create a new compile context.
    fn new(events: &'a [Event], bytes: &'a [u8]) -> CompileContext<'a> {
        // The root spans the whole document: from the first event's point to
        // the last one's (or a zero-width point when there are no events).
        let tree = Node::Root(Root {
            children: vec![],
            position: Some(Position {
                start: if events.is_empty() {
                    Point::new(1, 1, 0)
                } else {
                    events[0].point.to_unist()
                },
                end: if events.is_empty() {
                    Point::new(1, 1, 0)
                } else {
                    events[events.len() - 1].point.to_unist()
                },
            }),
        });
        CompileContext {
            events,
            bytes,
            character_reference_marker: 0,
            gfm_table_inside: false,
            hard_break_after: false,
            heading_setext_text_after: false,
            jsx_tag_stack: vec![],
            jsx_tag: None,
            media_reference_stack: vec![],
            raw_flow_fence_seen: false,
            trees: vec![(tree, vec![], vec![])],
            index: 0,
        }
    }
    /// Push a buffer.
    ///
    /// Starts a detached tree (a throwaway `Paragraph`) that collects nodes
    /// until the matching [`resume`][CompileContext::resume].
    fn buffer(&mut self) {
        self.trees.push((
            Node::Paragraph(Paragraph {
                children: vec![],
                position: None,
            }),
            vec![],
            vec![],
        ));
    }
    /// Pop a buffer, returning its value.
    ///
    /// In debug builds, asserts that no nodes or opening events are still
    /// open in the buffer.
    fn resume(&mut self) -> Node {
        if let Some((node, stack_a, stack_b)) = self.trees.pop() {
            debug_assert_eq!(
                stack_a.len(),
                0,
                "expected stack (nodes in tree) to be drained"
            );
            debug_assert_eq!(
                stack_b.len(),
                0,
                "expected stack (opening events) to be drained"
            );
            node
        } else {
            unreachable!("Cannot resume w/o buffer")
        }
    }
    /// Get the node currently being built (the tail of the current tree),
    /// mutably.
    fn tail_mut(&mut self) -> &mut Node {
        let (tree, stack, _) = self.trees.last_mut().expect("Cannot get tail w/o tree");
        delve_mut(tree, stack)
    }
    /// Get the parent of the node currently being built, mutably.
    fn tail_penultimate_mut(&mut self) -> &mut Node {
        let (tree, stack, _) = self.trees.last_mut().expect("Cannot get tail w/o tree");
        delve_mut(tree, &stack[0..(stack.len() - 1)])
    }
    /// Append `child` to the tail and descend into it.
    ///
    /// When `child` has no position yet, its position is initialized from
    /// the current event; [`tail_pop`][CompileContext::tail_pop] later sets
    /// the end.
    fn tail_push(&mut self, mut child: Node) {
        if child.position().is_none() {
            child.position_set(Some(position_from_event(&self.events[self.index])));
        }
        let (tree, stack, event_stack) = self.trees.last_mut().expect("Cannot get tail w/o tree");
        let node = delve_mut(tree, stack);
        let children = node.children_mut().expect("Cannot push to non-parent");
        let index = children.len();
        children.push(child);
        stack.push(index);
        event_stack.push(self.index);
    }
    /// Descend into the most recently added child of the tail again, without
    /// adding a new node.
    fn tail_push_again(&mut self) {
        let (tree, stack, event_stack) = self.trees.last_mut().expect("Cannot get tail w/o tree");
        let node = delve_mut(tree, stack);
        let children = node.children().expect("Cannot push to non-parent");
        stack.push(children.len() - 1);
        event_stack.push(self.index);
    }
    /// Close the tail: set its end position to the current event's point and
    /// verify this exit event matches the event that opened the node.
    fn tail_pop(&mut self) -> Result<(), message::Message> {
        let ev = &self.events[self.index];
        let end = ev.point.to_unist();
        let (tree, stack, event_stack) = self.trees.last_mut().expect("Cannot get tail w/o tree");
        let node = delve_mut(tree, stack);
        let pos = node.position_mut().expect("Cannot pop manually added node");
        pos.end = end;
        stack.pop().unwrap();
        let left_index = event_stack.pop().unwrap();
        let left = &self.events[left_index];
        if left.name != ev.name {
            on_mismatch_error(self, Some(ev), left)?;
        }
        Ok(())
    }
}
/// Turn events and bytes into a syntax tree.
pub fn compile(events: &[Event], bytes: &[u8]) -> Result<Node, message::Message> {
    let mut context = CompileContext::new(events, bytes);
    for index in 0..events.len() {
        handle(&mut context, index)?;
    }
    debug_assert_eq!(context.trees.len(), 1, "expected 1 final tree");
    let (tree, _, event_stack) = context.trees.pop().unwrap();
    // A leftover opening event means something was never closed.
    if let Some(index) = event_stack.last() {
        on_mismatch_error(&mut context, None, &events[*index])?;
    }
    Ok(tree)
}
/// Handle the event at `index`.
fn handle(context: &mut CompileContext, index: usize) -> Result<(), message::Message> {
    context.index = index;
    match context.events[index].kind {
        Kind::Enter => enter(context),
        Kind::Exit => exit(context),
    }
}
/// Handle [`Enter`][Kind::Enter].
///
/// Dispatches on the event name; grouped arms share one handler (e.g. all
/// text-ish chunks become data, all string fields start a buffer).
fn enter(context: &mut CompileContext) -> Result<(), message::Message> {
    match context.events[context.index].name {
        Name::AutolinkEmail
        | Name::AutolinkProtocol
        | Name::CharacterEscapeValue
        | Name::CharacterReference
        | Name::CodeFlowChunk
        | Name::CodeTextData
        | Name::Data
        | Name::FrontmatterChunk
        | Name::HtmlFlowData
        | Name::HtmlTextData
        | Name::MathFlowChunk
        | Name::MathTextData
        | Name::MdxJsxTagAttributeValueLiteralValue => on_enter_data(context),
        Name::CodeFencedFenceInfo
        | Name::CodeFencedFenceMeta
        | Name::DefinitionDestinationString
        | Name::DefinitionLabelString
        | Name::DefinitionTitleString
        | Name::GfmFootnoteDefinitionLabelString
        | Name::LabelText
        | Name::MathFlowFenceMeta
        | Name::MdxJsxTagAttributeValueLiteral
        | Name::ReferenceString
        | Name::ResourceDestinationString
        | Name::ResourceTitleString => on_enter_buffer(context),
        Name::Autolink => on_enter_autolink(context),
        Name::BlockQuote => on_enter_block_quote(context),
        Name::CodeFenced => on_enter_code_fenced(context),
        Name::CodeIndented => on_enter_code_indented(context),
        Name::CodeText => on_enter_code_text(context),
        Name::Definition => on_enter_definition(context),
        Name::Emphasis => on_enter_emphasis(context),
        Name::Frontmatter => on_enter_frontmatter(context),
        Name::GfmAutolinkLiteralEmail
        | Name::GfmAutolinkLiteralMailto
        | Name::GfmAutolinkLiteralProtocol
        | Name::GfmAutolinkLiteralWww
        | Name::GfmAutolinkLiteralXmpp => on_enter_gfm_autolink_literal(context),
        Name::GfmFootnoteCall => on_enter_gfm_footnote_call(context),
        Name::GfmFootnoteDefinition => on_enter_gfm_footnote_definition(context),
        Name::GfmStrikethrough => on_enter_gfm_strikethrough(context),
        Name::GfmTable => on_enter_gfm_table(context),
        Name::GfmTableRow => on_enter_gfm_table_row(context),
        Name::GfmTableCell => on_enter_gfm_table_cell(context),
        Name::HardBreakEscape | Name::HardBreakTrailing => on_enter_hard_break(context),
        Name::HeadingAtx | Name::HeadingSetext => on_enter_heading(context),
        Name::HtmlFlow | Name::HtmlText => on_enter_html(context),
        Name::Image => on_enter_image(context),
        Name::Link => on_enter_link(context),
        Name::ListItem => on_enter_list_item(context),
        Name::ListOrdered | Name::ListUnordered => on_enter_list(context),
        Name::MathFlow => on_enter_math_flow(context),
        Name::MathText => on_enter_math_text(context),
        Name::MdxEsm => on_enter_mdx_esm(context),
        Name::MdxFlowExpression => on_enter_mdx_flow_expression(context),
        Name::MdxTextExpression => on_enter_mdx_text_expression(context),
        Name::MdxJsxFlowTag | Name::MdxJsxTextTag => on_enter_mdx_jsx_tag(context),
        Name::MdxJsxTagClosingMarker => on_enter_mdx_jsx_tag_closing_marker(context)?,
        Name::MdxJsxTagAttribute => on_enter_mdx_jsx_tag_attribute(context)?,
        Name::MdxJsxTagAttributeExpression => on_enter_mdx_jsx_tag_attribute_expression(context)?,
        Name::MdxJsxTagAttributeValueExpression => {
            on_enter_mdx_jsx_tag_attribute_value_expression(context);
        }
        Name::MdxJsxTagSelfClosingMarker => on_enter_mdx_jsx_tag_self_closing_marker(context)?,
        Name::Paragraph => on_enter_paragraph(context),
        Name::Reference => on_enter_reference(context),
        Name::Resource => on_enter_resource(context),
        Name::Strong => on_enter_strong(context),
        Name::ThematicBreak => on_enter_thematic_break(context),
        // Other events need no work on enter.
        _ => {}
    }
    Ok(())
}
/// Handle [`Exit`][Kind::Exit].
///
/// Dispatches the exit event at `context.index` to the matching handler.
/// Events without a handler are intentionally ignored.
fn exit(context: &mut CompileContext) -> Result<(), message::Message> {
    match context.events[context.index].name {
        // Plain containers: nothing to compute, just pop the node off the stack.
        Name::Autolink
        | Name::BlockQuote
        | Name::CharacterReference
        | Name::Definition
        | Name::Emphasis
        | Name::GfmFootnoteDefinition
        | Name::GfmStrikethrough
        | Name::GfmTableRow
        | Name::GfmTableCell
        | Name::HeadingAtx
        | Name::ListOrdered
        | Name::ListUnordered
        | Name::Paragraph
        | Name::Strong
        | Name::ThematicBreak => {
            on_exit(context)?;
        }
        // Raw text chunks: append the covered bytes to the current text node.
        Name::CharacterEscapeValue
        | Name::CodeFlowChunk
        | Name::CodeTextData
        | Name::Data
        | Name::FrontmatterChunk
        | Name::HtmlFlowData
        | Name::HtmlTextData
        | Name::MathFlowChunk
        | Name::MathTextData
        | Name::MdxJsxTagAttributeValueLiteralValue => {
            on_exit_data(context)?;
        }
        // Buffered content whose value was already collected on enter: discard.
        Name::MdxJsxTagAttributeExpression | Name::MdxJsxTagAttributeValueExpression => {
            on_exit_drop(context);
        }
        Name::AutolinkProtocol => on_exit_autolink_protocol(context)?,
        Name::AutolinkEmail => on_exit_autolink_email(context)?,
        Name::CharacterReferenceMarker => on_exit_character_reference_marker(context),
        Name::CharacterReferenceMarkerNumeric => {
            on_exit_character_reference_marker_numeric(context);
        }
        Name::CharacterReferenceMarkerHexadecimal => {
            on_exit_character_reference_marker_hexadecimal(context);
        }
        Name::CharacterReferenceValue => on_exit_character_reference_value(context),
        Name::CodeFencedFenceInfo => on_exit_code_fenced_fence_info(context),
        Name::CodeFencedFenceMeta | Name::MathFlowFenceMeta => on_exit_raw_flow_fence_meta(context),
        Name::CodeFencedFence | Name::MathFlowFence => on_exit_raw_flow_fence(context),
        Name::CodeFenced | Name::MathFlow => on_exit_raw_flow(context)?,
        Name::CodeIndented => on_exit_code_indented(context)?,
        Name::CodeText | Name::MathText => on_exit_raw_text(context)?,
        Name::DefinitionDestinationString => on_exit_definition_destination_string(context),
        Name::DefinitionLabelString | Name::GfmFootnoteDefinitionLabelString => {
            on_exit_definition_id(context);
        }
        Name::DefinitionTitleString => on_exit_definition_title_string(context),
        Name::Frontmatter => on_exit_frontmatter(context)?,
        Name::GfmAutolinkLiteralEmail
        | Name::GfmAutolinkLiteralMailto
        | Name::GfmAutolinkLiteralProtocol
        | Name::GfmAutolinkLiteralWww
        | Name::GfmAutolinkLiteralXmpp => on_exit_gfm_autolink_literal(context)?,
        Name::GfmFootnoteCall | Name::Image | Name::Link => on_exit_media(context)?,
        Name::GfmTable => on_exit_gfm_table(context)?,
        Name::GfmTaskListItemValueUnchecked | Name::GfmTaskListItemValueChecked => {
            on_exit_gfm_task_list_item_value(context);
        }
        Name::HardBreakEscape | Name::HardBreakTrailing => on_exit_hard_break(context)?,
        Name::HeadingAtxSequence => on_exit_heading_atx_sequence(context),
        Name::HeadingSetext => on_exit_heading_setext(context)?,
        Name::HeadingSetextUnderlineSequence => on_exit_heading_setext_underline_sequence(context),
        Name::HeadingSetextText => on_exit_heading_setext_text(context),
        Name::HtmlFlow | Name::HtmlText => on_exit_html(context)?,
        Name::LabelText => on_exit_label_text(context),
        Name::LineEnding => on_exit_line_ending(context)?,
        Name::ListItem => on_exit_list_item(context)?,
        Name::ListItemValue => on_exit_list_item_value(context),
        Name::MdxEsm | Name::MdxFlowExpression | Name::MdxTextExpression => {
            on_exit_mdx_esm_or_expression(context)?;
        }
        Name::MdxJsxFlowTag | Name::MdxJsxTextTag => on_exit_mdx_jsx_tag(context)?,
        Name::MdxJsxTagClosingMarker => on_exit_mdx_jsx_tag_closing_marker(context),
        Name::MdxJsxTagNamePrimary => on_exit_mdx_jsx_tag_name_primary(context),
        Name::MdxJsxTagNameMember => on_exit_mdx_jsx_tag_name_member(context),
        Name::MdxJsxTagNameLocal => on_exit_mdx_jsx_tag_name_local(context),
        Name::MdxJsxTagAttributePrimaryName => on_exit_mdx_jsx_tag_attribute_primary_name(context),
        Name::MdxJsxTagAttributeNameLocal => on_exit_mdx_jsx_tag_attribute_name_local(context),
        Name::MdxJsxTagAttributeValueLiteral => {
            on_exit_mdx_jsx_tag_attribute_value_literal(context);
        }
        Name::MdxJsxTagSelfClosingMarker => on_exit_mdx_jsx_tag_self_closing_marker(context),
        Name::ReferenceString => on_exit_reference_string(context),
        Name::ResourceDestinationString => on_exit_resource_destination_string(context),
        Name::ResourceTitleString => on_exit_resource_title_string(context),
        _ => {}
    }
    Ok(())
}
/// Handle [`Enter`][Kind::Enter]:`*`.
fn on_enter_buffer(context: &mut CompileContext) {
    // Start capturing subsequent content in a fresh buffer; a matching exit
    // handler consumes it via `resume()`.
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`Data`][Name::Data] (and many text things).
fn on_enter_data(context: &mut CompileContext) {
    let parent = context.tail_mut();
    let children = parent.children_mut().expect("expected parent");
    // Add to stack again.
    // If the previous sibling is already a text node, reuse it so that
    // adjacent data events merge into a single `Text` node.
    if let Some(Node::Text(_)) = children.last_mut() {
        context.tail_push_again();
    } else {
        context.tail_push(Node::Text(Text {
            value: String::new(),
            position: None,
        }));
    }
}
/// Handle [`Enter`][Kind::Enter]:[`Autolink`][Name::Autolink].
///
/// An autolink becomes a plain link node; its URL is filled in on exit.
fn on_enter_autolink(context: &mut CompileContext) {
    let link = Link {
        url: String::new(),
        title: None,
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Link(link));
}
/// Handle [`Enter`][Kind::Enter]:[`BlockQuote`][Name::BlockQuote].
fn on_enter_block_quote(context: &mut CompileContext) {
    // Open an empty block quote; children accumulate until the exit event.
    let node = Blockquote {
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Blockquote(node));
}
/// Handle [`Enter`][Kind::Enter]:[`CodeFenced`][Name::CodeFenced].
///
/// Language, meta, and value are all filled in by later exit handlers.
fn on_enter_code_fenced(context: &mut CompileContext) {
    let code = Code {
        lang: None,
        meta: None,
        value: String::new(),
        position: None,
    };
    context.tail_push(Node::Code(code));
}
/// Handle [`Enter`][Kind::Enter]:[`CodeIndented`][Name::CodeIndented].
fn on_enter_code_indented(context: &mut CompileContext) {
    // Indented code yields the same `Code` node as fenced code (minus fences),
    // and starts buffering immediately — there is no fence to wait for.
    on_enter_code_fenced(context);
    on_enter_buffer(context);
}
/// Handle [`Enter`][Kind::Enter]:[`CodeText`][Name::CodeText].
fn on_enter_code_text(context: &mut CompileContext) {
    // Open an inline code node and start buffering its raw value.
    let node = InlineCode {
        value: String::new(),
        position: None,
    };
    context.tail_push(Node::InlineCode(node));
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`MathText`][Name::MathText].
fn on_enter_math_text(context: &mut CompileContext) {
    // Open an inline math node and start buffering its raw value.
    let node = InlineMath {
        value: String::new(),
        position: None,
    };
    context.tail_push(Node::InlineMath(node));
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`MdxEsm`][Name::MdxEsm].
fn on_enter_mdx_esm(context: &mut CompileContext) {
let result = collect(
context.events,
context.bytes,
context.index,
&[Name::MdxEsmData, Name::LineEnding],
&[Name::MdxEsm],
);
context.tail_push(Node::MdxjsEsm(MdxjsEsm {
value: result.value,
position: None,
stops: result.stops,
}));
context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`MdxFlowExpression`][Name::MdxFlowExpression].
fn on_enter_mdx_flow_expression(context: &mut CompileContext) {
let result = collect(
context.events,
context.bytes,
context.index,
&[Name::MdxExpressionData, Name::LineEnding],
&[Name::MdxFlowExpression],
);
context.tail_push(Node::MdxFlowExpression(MdxFlowExpression {
value: result.value,
position: None,
stops: result.stops,
}));
context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`MdxTextExpression`][Name::MdxTextExpression].
fn on_enter_mdx_text_expression(context: &mut CompileContext) {
let result = collect(
context.events,
context.bytes,
context.index,
&[Name::MdxExpressionData, Name::LineEnding],
&[Name::MdxTextExpression],
);
context.tail_push(Node::MdxTextExpression(MdxTextExpression {
value: result.value,
position: None,
stops: result.stops,
}));
context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`Definition`][Name::Definition].
///
/// URL, identifier, label, and title are filled in by later exit handlers.
fn on_enter_definition(context: &mut CompileContext) {
    let definition = Definition {
        url: String::new(),
        identifier: String::new(),
        label: None,
        title: None,
        position: None,
    };
    context.tail_push(Node::Definition(definition));
}
/// Handle [`Enter`][Kind::Enter]:[`Emphasis`][Name::Emphasis].
fn on_enter_emphasis(context: &mut CompileContext) {
    let node = Emphasis {
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Emphasis(node));
}
/// Handle [`Enter`][Kind::Enter]:{[`GfmAutolinkLiteralEmail`][Name::GfmAutolinkLiteralEmail],[`GfmAutolinkLiteralMailto`][Name::GfmAutolinkLiteralMailto],[`GfmAutolinkLiteralProtocol`][Name::GfmAutolinkLiteralProtocol],[`GfmAutolinkLiteralWww`][Name::GfmAutolinkLiteralWww],[`GfmAutolinkLiteralXmpp`][Name::GfmAutolinkLiteralXmpp]}.
fn on_enter_gfm_autolink_literal(context: &mut CompileContext) {
    // A literal autolink is a link wrapping a text node with the same content.
    on_enter_autolink(context);
    on_enter_data(context);
}
/// Handle [`Enter`][Kind::Enter]:[`GfmFootnoteCall`][Name::GfmFootnoteCall].
fn on_enter_gfm_footnote_call(context: &mut CompileContext) {
    let node = FootnoteReference {
        identifier: String::new(),
        label: None,
        position: None,
    };
    context.tail_push(Node::FootnoteReference(node));
    // Footnote calls take part in the media/reference machinery, so track one.
    context.media_reference_stack.push(Reference::new());
}
/// Handle [`Enter`][Kind::Enter]:[`GfmFootnoteDefinition`][Name::GfmFootnoteDefinition].
fn on_enter_gfm_footnote_definition(context: &mut CompileContext) {
    let node = FootnoteDefinition {
        identifier: String::new(),
        label: None,
        children: vec![],
        position: None,
    };
    context.tail_push(Node::FootnoteDefinition(node));
}
/// Handle [`Enter`][Kind::Enter]:[`GfmStrikethrough`][Name::GfmStrikethrough].
fn on_enter_gfm_strikethrough(context: &mut CompileContext) {
    let node = Delete {
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Delete(node));
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTable`][Name::GfmTable].
fn on_enter_gfm_table(context: &mut CompileContext) {
    // Column alignment is resolved up front from the delimiter-row events.
    let table = Table {
        align: gfm_table_align(context.events, context.index),
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Table(table));
    // While inside a table, escaped pipes in inline code are decoded specially.
    context.gfm_table_inside = true;
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTableRow`][Name::GfmTableRow].
fn on_enter_gfm_table_row(context: &mut CompileContext) {
    let row = TableRow {
        children: vec![],
        position: None,
    };
    context.tail_push(Node::TableRow(row));
}
/// Handle [`Enter`][Kind::Enter]:[`GfmTableCell`][Name::GfmTableCell].
fn on_enter_gfm_table_cell(context: &mut CompileContext) {
    let cell = TableCell {
        children: vec![],
        position: None,
    };
    context.tail_push(Node::TableCell(cell));
}
/// Handle [`Enter`][Kind::Enter]:[`HardBreakEscape`][Name::HardBreakEscape].
fn on_enter_hard_break(context: &mut CompileContext) {
    // A hard break is a leaf: no value, no children.
    context.tail_push(Node::Break(Break { position: None }));
}
/// Handle [`Enter`][Kind::Enter]:[`Frontmatter`][Name::Frontmatter].
fn on_enter_frontmatter(context: &mut CompileContext) {
    // Peek at the first fence byte: `+` fences are TOML, anything else YAML.
    let start = context.events[context.index].point.index;
    let node = match context.bytes[start] {
        b'+' => Node::Toml(Toml {
            value: String::new(),
            position: None,
        }),
        _ => Node::Yaml(Yaml {
            value: String::new(),
            position: None,
        }),
    };
    context.tail_push(node);
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`Reference`][Name::Reference].
fn on_enter_reference(context: &mut CompileContext) {
    let reference = context
        .media_reference_stack
        .last_mut()
        .expect("expected reference on media stack");
    // Assume collapsed.
    // If there’s a string after it, we set `Full`.
    reference.reference_kind = Some(ReferenceKind::Collapsed);
}
/// Handle [`Enter`][Kind::Enter]:[`Resource`][Name::Resource].
fn on_enter_resource(context: &mut CompileContext) {
    let reference = context
        .media_reference_stack
        .last_mut()
        .expect("expected reference on media stack");
    // It’s not a reference.
    // An inline resource `(url "title")` overrides any assumed reference kind.
    reference.reference_kind = None;
}
/// Handle [`Enter`][Kind::Enter]:[`Strong`][Name::Strong].
fn on_enter_strong(context: &mut CompileContext) {
    let node = Strong {
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Strong(node));
}
/// Handle [`Enter`][Kind::Enter]:[`ThematicBreak`][Name::ThematicBreak].
fn on_enter_thematic_break(context: &mut CompileContext) {
    // A thematic break is a leaf: no value, no children.
    context.tail_push(Node::ThematicBreak(ThematicBreak { position: None }));
}
/// Handle [`Enter`][Kind::Enter]:[`HeadingAtx`][Name::HeadingAtx].
fn on_enter_heading(context: &mut CompileContext) {
    // Depth 0 is a placeholder; the ATX sequence or setext underline exit
    // handler assigns the real depth.
    let heading = Heading {
        depth: 0,
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Heading(heading));
}
/// Handle [`Enter`][Kind::Enter]:{[`HtmlFlow`][Name::HtmlFlow],[`HtmlText`][Name::HtmlText]}.
fn on_enter_html(context: &mut CompileContext) {
    // Open an html node and start buffering its raw value.
    let node = Html {
        value: String::new(),
        position: None,
    };
    context.tail_push(Node::Html(node));
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`Image`][Name::Image].
fn on_enter_image(context: &mut CompileContext) {
    let image = Image {
        url: String::new(),
        title: None,
        alt: String::new(),
        position: None,
    };
    context.tail_push(Node::Image(image));
    // Track a reference: the label/resource exits decide whether this stays an
    // image or becomes an image reference.
    context.media_reference_stack.push(Reference::new());
}
/// Handle [`Enter`][Kind::Enter]:[`Link`][Name::Link].
fn on_enter_link(context: &mut CompileContext) {
    let link = Link {
        url: String::new(),
        title: None,
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Link(link));
    // Track a reference: the label/resource exits decide whether this stays a
    // link or becomes a link reference.
    context.media_reference_stack.push(Reference::new());
}
/// Handle [`Enter`][Kind::Enter]:{[`ListOrdered`][Name::ListOrdered],[`ListUnordered`][Name::ListUnordered]}.
fn on_enter_list(context: &mut CompileContext) {
    context.tail_push(Node::List(List {
        // Ordered vs. unordered comes straight from the event name.
        ordered: context.events[context.index].name == Name::ListOrdered,
        // Looseness (blank lines between items) is resolved from the events.
        spread: list_loose(context.events, context.index, false),
        start: None,
        children: vec![],
        position: None,
    }));
}
/// Handle [`Enter`][Kind::Enter]:[`ListItem`][Name::ListItem].
fn on_enter_list_item(context: &mut CompileContext) {
    let item = ListItem {
        // Looseness of this particular item, resolved from the events.
        spread: list_item_loose(context.events, context.index),
        checked: None,
        children: vec![],
        position: None,
    };
    context.tail_push(Node::ListItem(item));
}
/// Handle [`Enter`][Kind::Enter]:[`MathFlow`][Name::MathFlow].
fn on_enter_math_flow(context: &mut CompileContext) {
    let math = Math {
        meta: None,
        value: String::new(),
        position: None,
    };
    context.tail_push(Node::Math(math));
}
/// Handle [`Enter`][Kind::Enter]:{[`MdxJsxFlowTag`][Name::MdxJsxFlowTag],[`MdxJsxTextTag`][Name::MdxJsxTextTag]}.
fn on_enter_mdx_jsx_tag(context: &mut CompileContext) {
    let point = context.events[context.index].point.to_unist();
    // Start a fresh in-progress tag; `end` starts equal to `start` and is
    // updated when the tag exit is seen.
    context.jsx_tag = Some(JsxTag {
        name: None,
        attributes: vec![],
        start: point.clone(),
        end: point,
        close: false,
        self_closing: false,
    });
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`MdxJsxTagClosingMarker`][Name::MdxJsxTagClosingMarker].
///
/// # Errors
///
/// Fails when a `</…>` appears with no matching open tag on the stack.
fn on_enter_mdx_jsx_tag_closing_marker(
    context: &mut CompileContext,
) -> Result<(), message::Message> {
    if !context.jsx_tag_stack.is_empty() {
        return Ok(());
    }
    let event = &context.events[context.index];
    Err(message::Message {
        place: Some(Box::new(message::Place::Point(event.point.to_unist()))),
        reason: "Unexpected closing slash `/` in tag, expected an open tag first".into(),
        rule_id: Box::new("unexpected-closing-slash".into()),
        source: Box::new("markdown-rs".into()),
    })
}
/// Handle [`Enter`][Kind::Enter]:{[`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute],[`MdxJsxTagAttributeExpression`][Name::MdxJsxTagAttributeExpression]}.
///
/// # Errors
///
/// Fails when an attribute appears on a closing tag (`</a b>`).
fn on_enter_mdx_jsx_tag_any_attribute(
    context: &mut CompileContext,
) -> Result<(), message::Message> {
    if !context.jsx_tag.as_ref().expect("expected tag").close {
        return Ok(());
    }
    let event = &context.events[context.index];
    Err(message::Message {
        place: Some(Box::new(message::Place::Point(event.point.to_unist()))),
        reason: "Unexpected attribute in closing tag, expected the end of the tag".into(),
        rule_id: Box::new("unexpected-attribute".into()),
        source: Box::new("markdown-rs".into()),
    })
}
/// Handle [`Enter`][Kind::Enter]:[`MdxJsxTagAttribute`][Name::MdxJsxTagAttribute].
fn on_enter_mdx_jsx_tag_attribute(context: &mut CompileContext) -> Result<(), message::Message> {
    // Reject attributes on closing tags first.
    on_enter_mdx_jsx_tag_any_attribute(context)?;
    // Open an empty named attribute; name and value are filled in by exits.
    let attribute = AttributeContent::Property(MdxJsxAttribute {
        name: String::new(),
        value: None,
    });
    context
        .jsx_tag
        .as_mut()
        .expect("expected tag")
        .attributes
        .push(attribute);
    Ok(())
}
/// Handle [`Enter`][Kind::Enter]:[`MdxJsxTagAttributeExpression`][Name::MdxJsxTagAttributeExpression].
fn on_enter_mdx_jsx_tag_attribute_expression(
    context: &mut CompileContext,
) -> Result<(), message::Message> {
    // Reject attributes on closing tags first.
    on_enter_mdx_jsx_tag_any_attribute(context)?;
    // The expression’s raw value and positional stops are collected up front.
    let CollectResult { value, stops } = collect(
        context.events,
        context.bytes,
        context.index,
        &[Name::MdxExpressionData, Name::LineEnding],
        &[Name::MdxJsxTagAttributeExpression],
    );
    context
        .jsx_tag
        .as_mut()
        .expect("expected tag")
        .attributes
        .push(AttributeContent::Expression(MdxJsxExpressionAttribute {
            value,
            stops,
        }));
    context.buffer();
    Ok(())
}
/// Handle [`Enter`][Kind::Enter]:[`MdxJsxTagAttributeValueExpression`][Name::MdxJsxTagAttributeValueExpression].
fn on_enter_mdx_jsx_tag_attribute_value_expression(context: &mut CompileContext) {
    // The expression’s raw value and positional stops are collected up front.
    let CollectResult { value, stops } = collect(
        context.events,
        context.bytes,
        context.index,
        &[Name::MdxExpressionData, Name::LineEnding],
        &[Name::MdxJsxTagAttributeValueExpression],
    );
    // The value attaches to the most recently opened named attribute.
    if let Some(AttributeContent::Property(node)) = context
        .jsx_tag
        .as_mut()
        .expect("expected tag")
        .attributes
        .last_mut()
    {
        node.value = Some(AttributeValue::Expression(AttributeValueExpression {
            value,
            stops,
        }));
    } else {
        unreachable!("expected property")
    }
    context.buffer();
}
/// Handle [`Enter`][Kind::Enter]:[`MdxJsxTagSelfClosingMarker`][Name::MdxJsxTagSelfClosingMarker].
///
/// # Errors
///
/// Fails when a self-closing slash appears on a closing tag (`</a/>`).
fn on_enter_mdx_jsx_tag_self_closing_marker(
    context: &mut CompileContext,
) -> Result<(), message::Message> {
    if !context.jsx_tag.as_ref().expect("expected tag").close {
        return Ok(());
    }
    let event = &context.events[context.index];
    Err(message::Message {
        place: Some(Box::new(message::Place::Point(event.point.to_unist()))),
        reason: "Unexpected self-closing slash `/` in closing tag, expected the end of the tag"
            .into(),
        rule_id: Box::new("unexpected-self-closing-slash".into()),
        source: Box::new("markdown-rs".into()),
    })
}
/// Handle [`Enter`][Kind::Enter]:[`Paragraph`][Name::Paragraph].
fn on_enter_paragraph(context: &mut CompileContext) {
    let node = Paragraph {
        children: vec![],
        position: None,
    };
    context.tail_push(Node::Paragraph(node));
}
/// Handle [`Exit`][Kind::Exit]:`*`.
fn on_exit(context: &mut CompileContext) -> Result<(), message::Message> {
    // Close the current node: pop it off the stack into its parent.
    context.tail_pop()?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`AutolinkProtocol`][Name::AutolinkProtocol].
fn on_exit_autolink_protocol(context: &mut CompileContext) -> Result<(), message::Message> {
    // Finish the inner text node first, then use the same bytes as the URL.
    on_exit_data(context)?;
    let value = Slice::from_position(
        context.bytes,
        &SlicePosition::from_exit_event(context.events, context.index),
    );
    if let Node::Link(link) = context.tail_mut() {
        link.url.push_str(value.as_str());
    } else {
        unreachable!("expected link on stack");
    }
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`AutolinkEmail`][Name::AutolinkEmail].
fn on_exit_autolink_email(context: &mut CompileContext) -> Result<(), message::Message> {
    // Finish the inner text node first, then build the URL from the same
    // bytes, prefixed with the `mailto:` scheme an email autolink implies.
    on_exit_data(context)?;
    let value = Slice::from_position(
        context.bytes,
        &SlicePosition::from_exit_event(context.events, context.index),
    );
    if let Node::Link(link) = context.tail_mut() {
        link.url.push_str("mailto:");
        link.url.push_str(value.as_str());
    } else {
        unreachable!("expected link on stack");
    }
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`CharacterReferenceMarker`][Name::CharacterReferenceMarker].
fn on_exit_character_reference_marker(context: &mut CompileContext) {
    // Remember the marker kind (`&name;`) so the value exit decodes correctly.
    context.character_reference_marker = b'&';
}
/// Handle [`Exit`][Kind::Exit]:[`CharacterReferenceMarkerHexadecimal`][Name::CharacterReferenceMarkerHexadecimal].
fn on_exit_character_reference_marker_hexadecimal(context: &mut CompileContext) {
    // Remember the marker kind (`&#x…;`) so the value exit decodes correctly.
    context.character_reference_marker = b'x';
}
/// Handle [`Exit`][Kind::Exit]:[`CharacterReferenceMarkerNumeric`][Name::CharacterReferenceMarkerNumeric].
fn on_exit_character_reference_marker_numeric(context: &mut CompileContext) {
    // Remember the marker kind (`&#…;`) so the value exit decodes correctly.
    context.character_reference_marker = b'#';
}
/// Handle [`Exit`][Kind::Exit]:[`CharacterReferenceValue`][Name::CharacterReferenceValue].
fn on_exit_character_reference_value(context: &mut CompileContext) {
    let slice = Slice::from_position(
        context.bytes,
        &SlicePosition::from_exit_event(context.events, context.index),
    );
    // Decode using the marker remembered by the marker exit handler; the
    // tokenizer only emits references it could parse, so decoding must succeed.
    let value =
        decode_character_reference(slice.as_str(), context.character_reference_marker, true)
            .expect("expected to parse only valid named references");
    if let Node::Text(node) = context.tail_mut() {
        node.value.push_str(value.as_str());
    } else {
        unreachable!("expected text on stack");
    }
    // Reset the marker for the next reference.
    context.character_reference_marker = 0;
}
/// Handle [`Exit`][Kind::Exit]:[`CodeFencedFenceInfo`][Name::CodeFencedFenceInfo].
fn on_exit_code_fenced_fence_info(context: &mut CompileContext) {
    // The buffered info string becomes the code block’s language.
    let value = context.resume().to_string();
    match context.tail_mut() {
        Node::Code(node) => node.lang = Some(value),
        _ => unreachable!("expected code on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeFencedFenceMeta`][Name::CodeFencedFenceMeta],[`MathFlowFenceMeta`][Name::MathFlowFenceMeta]}.
fn on_exit_raw_flow_fence_meta(context: &mut CompileContext) {
    // The buffered meta string attaches to whichever raw-flow node is open.
    let value = context.resume().to_string();
    match context.tail_mut() {
        Node::Code(node) => node.meta = Some(value),
        Node::Math(node) => node.meta = Some(value),
        _ => {
            unreachable!("expected code or math on stack");
        }
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeFencedFence`][Name::CodeFencedFence],[`MathFlowFence`][Name::MathFlowFence]}.
fn on_exit_raw_flow_fence(context: &mut CompileContext) {
    // Only the opening fence starts the content buffer; the closing fence of
    // the same construct is a no-op.
    if !context.raw_flow_fence_seen {
        context.buffer();
        context.raw_flow_fence_seen = true;
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeFenced`][Name::CodeFenced],[`MathFlow`][Name::MathFlow]}.
fn on_exit_raw_flow(context: &mut CompileContext) -> Result<(), message::Message> {
    // The buffered content includes the line endings around the fences; strip
    // one from each side.
    let value = trim_eol(context.resume().to_string(), true, true);
    match context.tail_mut() {
        Node::Code(node) => node.value = value,
        Node::Math(node) => node.value = value,
        _ => unreachable!("expected code or math on stack for value"),
    }
    on_exit(context)?;
    // Reset the fence tracker for the next fenced construct.
    context.raw_flow_fence_seen = false;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`CodeIndented`][Name::CodeIndented].
fn on_exit_code_indented(context: &mut CompileContext) -> Result<(), message::Message> {
    let value = context.resume().to_string();
    if let Node::Code(node) = context.tail_mut() {
        // There is no opening fence, so only the trailing line ending is
        // stripped.
        node.value = trim_eol(value, false, true);
    } else {
        unreachable!("expected code on stack for value");
    }
    on_exit(context)?;
    context.raw_flow_fence_seen = false;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:{[`CodeText`][Name::CodeText],[`MathText`][Name::MathText]}.
fn on_exit_raw_text(context: &mut CompileContext) -> Result<(), message::Message> {
    let mut value = context.resume().to_string();
    // To do: share with `to_html`.
    // If we are in a GFM table, we need to decode escaped pipes.
    // This is a rather weird GFM feature.
    if context.gfm_table_inside {
        let mut bytes = value.as_bytes().to_vec();
        let mut index = 0;
        let mut len = bytes.len();
        let mut replace = false;
        while index < len {
            // Each `\|` collapses to `|`: drop the backslash in place.
            if index + 1 < len && bytes[index] == b'\\' && bytes[index + 1] == b'|' {
                replace = true;
                bytes.remove(index);
                len -= 1;
            }
            index += 1;
        }
        if replace {
            // Only removed single ASCII bytes, so this is still valid UTF-8.
            value = str::from_utf8(&bytes).unwrap().into();
        }
    }
    // CommonMark code-span stripping: remove one leading and one trailing
    // space when both are present and the span is not all spaces.
    let value_bytes = value.as_bytes();
    if value.len() > 2
        && value_bytes[0] == b' '
        && value_bytes[value.len() - 1] == b' '
        && !value_bytes.iter().all(|b| *b == b' ')
    {
        value.remove(0);
        value.pop();
    }
    match context.tail_mut() {
        Node::InlineCode(node) => node.value = value,
        Node::InlineMath(node) => node.value = value,
        _ => unreachable!("expected inline code or math on stack for value"),
    }
    on_exit(context)?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`Data`][Name::Data] (and many text things).
fn on_exit_data(context: &mut CompileContext) -> Result<(), message::Message> {
    // Append the bytes covered by this event to the open text node, then pop
    // it into its parent.
    let value = Slice::from_position(
        context.bytes,
        &SlicePosition::from_exit_event(context.events, context.index),
    );
    if let Node::Text(text) = context.tail_mut() {
        text.value.push_str(value.as_str());
    } else {
        unreachable!("expected text on stack");
    }
    on_exit(context)?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`DefinitionDestinationString`][Name::DefinitionDestinationString].
fn on_exit_definition_destination_string(context: &mut CompileContext) {
    // The buffered destination becomes the definition’s URL.
    let value = context.resume().to_string();
    match context.tail_mut() {
        Node::Definition(node) => node.url = value,
        _ => unreachable!("expected definition on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`DefinitionLabelString`][Name::DefinitionLabelString],[`GfmFootnoteDefinitionLabelString`][Name::GfmFootnoteDefinitionLabelString]}.
fn on_exit_definition_id(context: &mut CompileContext) {
    // `label` is the author-written text; `identifier` is the normalized,
    // lowercased form used for matching references to definitions.
    let label = context.resume().to_string();
    let slice = Slice::from_position(
        context.bytes,
        &SlicePosition::from_exit_event(context.events, context.index),
    );
    let identifier = normalize_identifier(slice.as_str()).to_lowercase();
    match context.tail_mut() {
        Node::Definition(node) => {
            node.label = Some(label);
            node.identifier = identifier;
        }
        Node::FootnoteDefinition(node) => {
            node.label = Some(label);
            node.identifier = identifier;
        }
        _ => unreachable!("expected definition or footnote definition on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:[`DefinitionTitleString`][Name::DefinitionTitleString].
fn on_exit_definition_title_string(context: &mut CompileContext) {
    // The buffered title becomes the definition’s title.
    let value = context.resume().to_string();
    match context.tail_mut() {
        Node::Definition(node) => node.title = Some(value),
        _ => unreachable!("expected definition on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:*, by dropping the current buffer.
fn on_exit_drop(context: &mut CompileContext) {
    // The value was already collected on enter; discard the buffered copy.
    context.resume();
}
/// Handle [`Exit`][Kind::Exit]:[`Frontmatter`][Name::Frontmatter].
fn on_exit_frontmatter(context: &mut CompileContext) -> Result<(), message::Message> {
    // Strip the line endings around the fences from the buffered content.
    let value = trim_eol(context.resume().to_string(), true, true);
    match context.tail_mut() {
        Node::Yaml(node) => node.value = value,
        Node::Toml(node) => node.value = value,
        _ => unreachable!("expected yaml/toml on stack for value"),
    }
    on_exit(context)?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:{[`GfmAutolinkLiteralEmail`][Name::GfmAutolinkLiteralEmail],[`GfmAutolinkLiteralMailto`][Name::GfmAutolinkLiteralMailto],[`GfmAutolinkLiteralProtocol`][Name::GfmAutolinkLiteralProtocol],[`GfmAutolinkLiteralWww`][Name::GfmAutolinkLiteralWww],[`GfmAutolinkLiteralXmpp`][Name::GfmAutolinkLiteralXmpp]}.
fn on_exit_gfm_autolink_literal(context: &mut CompileContext) -> Result<(), message::Message> {
    // Finish the inner text node, then build the URL from the same bytes.
    on_exit_data(context)?;
    let value = Slice::from_position(
        context.bytes,
        &SlicePosition::from_exit_event(context.events, context.index),
    );
    // Bare emails get a `mailto:` scheme, `www.` links get `http://`; the
    // other variants already carry their own scheme in the source.
    let prefix = match &context.events[context.index].name {
        Name::GfmAutolinkLiteralEmail => Some("mailto:"),
        Name::GfmAutolinkLiteralWww => Some("http://"),
        // `GfmAutolinkLiteralMailto`, `GfmAutolinkLiteralProtocol`, `GfmAutolinkLiteralXmpp`.
        _ => None,
    };
    if let Node::Link(link) = context.tail_mut() {
        if let Some(prefix) = prefix {
            link.url.push_str(prefix);
        }
        link.url.push_str(value.as_str());
    } else {
        unreachable!("expected link on stack");
    }
    on_exit(context)?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`GfmTable`][Name::GfmTable].
fn on_exit_gfm_table(context: &mut CompileContext) -> Result<(), message::Message> {
    on_exit(context)?;
    // Leaving the table: inline code stops decoding escaped pipes.
    context.gfm_table_inside = false;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:{[`GfmTaskListItemValueChecked`][Name::GfmTaskListItemValueChecked],[`GfmTaskListItemValueUnchecked`][Name::GfmTaskListItemValueUnchecked]}.
fn on_exit_gfm_task_list_item_value(context: &mut CompileContext) {
    // The checkbox belongs to the surrounding list item, one level up on the
    // stack (the current tail is the paragraph containing the checkbox).
    let checked = context.events[context.index].name == Name::GfmTaskListItemValueChecked;
    match context.tail_penultimate_mut() {
        Node::ListItem(node) => node.checked = Some(checked),
        _ => unreachable!("expected list item on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`HardBreakEscape`][Name::HardBreakEscape],[`HardBreakTrailing`][Name::HardBreakTrailing]}.
fn on_exit_hard_break(context: &mut CompileContext) -> Result<(), message::Message> {
    on_exit(context)?;
    // The upcoming line ending belongs to the break; `on_exit_line_ending`
    // extends the break’s position when it sees this flag.
    context.hard_break_after = true;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingAtxSequence`][Name::HeadingAtxSequence].
fn on_exit_heading_atx_sequence(context: &mut CompileContext) {
    let slice = Slice::from_position(
        context.bytes,
        &SlicePosition::from_exit_event(context.events, context.index),
    );
    if let Node::Heading(node) = context.tail_mut() {
        // Only the opening sequence sets the depth (`### x ###` has two
        // sequences; depth is already non-zero when the closing one arrives).
        if node.depth == 0 {
            #[allow(clippy::cast_possible_truncation)]
            let depth = slice.len() as u8;
            node.depth = depth;
        }
    } else {
        unreachable!("expected heading on stack");
    }
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingSetext`][Name::HeadingSetext].
fn on_exit_heading_setext(context: &mut CompileContext) -> Result<(), message::Message> {
    // Clear the flag that suppresses the line ending before the underline.
    context.heading_setext_text_after = false;
    on_exit(context)?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingSetextText`][Name::HeadingSetextText].
fn on_exit_heading_setext_text(context: &mut CompileContext) {
    // Signal `on_exit_line_ending` to ignore the line ending that separates
    // the heading text from its underline.
    context.heading_setext_text_after = true;
}
/// Handle [`Exit`][Kind::Exit]:[`HeadingSetextUnderlineSequence`][Name::HeadingSetextUnderlineSequence].
fn on_exit_heading_setext_underline_sequence(context: &mut CompileContext) {
    // `-` underlines mark depth 2; `=` underlines depth 1.
    let position = SlicePosition::from_exit_event(context.events, context.index);
    let depth = match context.bytes[position.start.index] {
        b'-' => 2,
        _ => 1,
    };
    match context.tail_mut() {
        Node::Heading(node) => node.depth = depth,
        _ => unreachable!("expected heading on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:[`LabelText`][Name::LabelText].
fn on_exit_label_text(context: &mut CompileContext) {
    let mut fragment = context.resume();
    // The serialized label is kept on the pending reference; the parsed
    // children are moved out of the buffered fragment for link children.
    let label = fragment.to_string();
    let children = fragment.children_mut().unwrap().split_off(0);
    let slice = Slice::from_position(
        context.bytes,
        &SlicePosition::from_exit_event(context.events, context.index),
    );
    // Normalized, lowercased identifier for matching against definitions.
    let identifier = normalize_identifier(slice.as_str()).to_lowercase();
    let reference = context
        .media_reference_stack
        .last_mut()
        .expect("expected reference on media stack");
    reference.label.clone_from(&label);
    reference.identifier = identifier;
    match context.tail_mut() {
        // Links keep the parsed phrasing children.
        Node::Link(node) => node.children = children,
        // Images flatten the label to plain-text `alt`.
        Node::Image(node) => node.alt = label,
        // Footnote references carry no label children in mdast.
        Node::FootnoteReference(_) => {}
        // Fix: panic message previously misspelled “reference”.
        _ => unreachable!("expected footnote reference, image, or link on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:[`LineEnding`][Name::LineEnding].
fn on_exit_line_ending(context: &mut CompileContext) -> Result<(), message::Message> {
    if context.heading_setext_text_after {
        // Ignore.
        // The line ending between setext text and its underline is markup.
    }
    // Line ending position after hard break is part of it.
    else if context.hard_break_after {
        let end = context.events[context.index].point.to_unist();
        let node = context.tail_mut();
        let tail = node
            .children_mut()
            .expect("expected parent")
            .last_mut()
            .expect("expected tail (break)");
        tail.position_mut().unwrap().end = end;
        context.hard_break_after = false;
    }
    // Line ending is a part of nodes that accept phrasing.
    else if matches!(
        context.tail_mut(),
        Node::Emphasis(_)
            | Node::Heading(_)
            | Node::Paragraph(_)
            | Node::Strong(_)
            | Node::Delete(_)
    ) {
        // Replay this event as a data enter/exit pair so the line ending
        // bytes land in a text node: rewind to fake the enter, then restore
        // the index for the exit.
        context.index -= 1;
        on_enter_data(context);
        context.index += 1;
        on_exit_data(context)?;
    }
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:{[`HtmlFlow`][Name::HtmlFlow],[`HtmlText`][Name::HtmlText]}.
fn on_exit_html(context: &mut CompileContext) -> Result<(), message::Message> {
    // The buffered raw markup becomes the html node’s value; then pop it.
    let value = context.resume().to_string();
    if let Node::Html(node) = context.tail_mut() {
        node.value = value;
    } else {
        unreachable!("expected html on stack for value");
    }
    on_exit(context)?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:{[`GfmFootnoteCall`][Name::GfmFootnoteCall],[`Image`][Name::Image],[`Link`][Name::Link]}.
fn on_exit_media(context: &mut CompileContext) -> Result<(), message::Message> {
    let reference = context
        .media_reference_stack
        .pop()
        .expect("expected reference on media stack");
    // Pop the media node into its parent first; it is then the parent’s last
    // child and can be rewritten in place below.
    on_exit(context)?;
    // It’s a reference.
    if let Some(kind) = reference.reference_kind {
        let parent = context.tail_mut();
        let siblings = parent.children_mut().unwrap();
        match siblings.last_mut().unwrap() {
            Node::FootnoteReference(node) => {
                // Footnote references keep their node type; just fill in ids.
                node.identifier = reference.identifier;
                node.label = Some(reference.label);
            }
            Node::Image(_) => {
                // Need to swap it with a reference version of the node.
                if let Some(Node::Image(node)) = siblings.pop() {
                    siblings.push(Node::ImageReference(ImageReference {
                        reference_kind: kind,
                        identifier: reference.identifier,
                        label: Some(reference.label),
                        alt: node.alt,
                        position: node.position,
                    }));
                } else {
                    unreachable!("impossible: it’s an image")
                }
            }
            Node::Link(_) => {
                // Need to swap it with a reference version of the node.
                if let Some(Node::Link(node)) = siblings.pop() {
                    siblings.push(Node::LinkReference(LinkReference {
                        reference_kind: kind,
                        identifier: reference.identifier,
                        label: Some(reference.label),
                        children: node.children,
                        position: node.position,
                    }));
                } else {
                    unreachable!("impossible: it’s a link")
                }
            }
            _ => unreachable!("expected footnote reference, image, or link on stack"),
        }
    }
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`ListItem`][Name::ListItem].
///
/// For GFM task list items (`checked` is set), strip the single whitespace
/// (or eol) character that follows the checkbox from the start of the first
/// text of the first paragraph, and fix up the affected positions.
fn on_exit_list_item(context: &mut CompileContext) -> Result<(), message::Message> {
    if let Node::ListItem(item) = context.tail_mut() {
        if item.checked.is_some() {
            if let Some(Node::Paragraph(paragraph)) = item.children.first_mut() {
                if let Some(Node::Text(text)) = paragraph.children.first_mut() {
                    let mut point = text.position.as_ref().unwrap().start.clone();
                    let bytes = text.value.as_bytes();
                    let mut start = 0;
                    // Move past a single space/tab…
                    if matches!(bytes[0], b'\t' | b' ') {
                        point.offset += 1;
                        point.column += 1;
                        start += 1;
                    }
                    // …or past an eol.
                    else if matches!(bytes[0], b'\r' | b'\n') {
                        point.line += 1;
                        point.column = 1;
                        point.offset += 1;
                        start += 1;
                        // Move past the LF of CRLF.
                        if bytes.len() > 1 && bytes[0] == b'\r' && bytes[1] == b'\n' {
                            point.offset += 1;
                            start += 1;
                        }
                    }
                    // The whole text was that whitespace: drop the text.
                    if start == bytes.len() {
                        paragraph.children.remove(0);
                    }
                    // Otherwise: shrink the text and its start position.
                    else {
                        text.value = str::from_utf8(&bytes[start..]).unwrap().into();
                        text.position.as_mut().unwrap().start = point.clone();
                    }
                    paragraph.position.as_mut().unwrap().start = point;
                }
            }
        }
    }
    on_exit(context)?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`ListItemValue`][Name::ListItemValue].
///
/// Parses the ordered list marker’s number from the source bytes and, if the
/// list does not yet have a `start`, records it.
fn on_exit_list_item_value(context: &mut CompileContext) {
    let position = SlicePosition::from_exit_event(context.events, context.index);
    let start = Slice::from_position(context.bytes, &position)
        .as_str()
        .parse()
        .expect("expected list value up to u8");
    match context.tail_penultimate_mut() {
        Node::List(node) => {
            debug_assert!(node.ordered, "expected list to be ordered");
            if node.start.is_none() {
                node.start = Some(start);
            }
        }
        _ => unreachable!("expected list on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:{[`MdxJsxFlowTag`][Name::MdxJsxFlowTag],[`MdxJsxTextTag`][Name::MdxJsxTextTag]}.
fn on_exit_mdx_jsx_tag(context: &mut CompileContext) -> Result<(), message::Message> {
    let mut tag = context.jsx_tag.as_ref().expect("expected tag").clone();
    // End of a tag, so drop the buffer.
    context.resume();
    // Set end point.
    tag.end = context.events[context.index].point.to_unist();
    let stack = &context.jsx_tag_stack;
    let tail = stack.last();
    if tag.close {
        // Unwrap: we crashed earlier if there’s nothing on the stack.
        let tail = tail.unwrap();
        // A closing tag must match the tag that is currently open.
        if tail.name != tag.name {
            let label = serialize_abbreviated_tag(&tag);
            return Err(
                message::Message {
                    place: Some(Box::new(message::Place::Position(Position {
                        start: tag.start,
                        end: tag.end,
                    }))),
                    reason: format!(
                        "Unexpected closing tag `{}`, expected corresponding closing tag for `{}` ({}:{})",
                        label,
                        serialize_abbreviated_tag(tail),
                        tail.start.line,
                        tail.start.column,
                    ),
                    rule_id: Box::new("end-tag-mismatch".into()),
                    source: Box::new("markdown-rs".into()),
                },
            );
        }
        // Remove from our custom stack.
        // Note that this does not exit the node.
        context.jsx_tag_stack.pop();
    } else {
        // Opening tag: push the element node onto the tree; the event name
        // decides whether it is a flow or a text element.
        let node = if context.events[context.index].name == Name::MdxJsxFlowTag {
            Node::MdxJsxFlowElement(MdxJsxFlowElement {
                name: tag.name.clone(),
                attributes: tag.attributes.clone(),
                children: vec![],
                position: Some(Position {
                    start: tag.start.clone(),
                    end: tag.end.clone(),
                }),
            })
        } else {
            Node::MdxJsxTextElement(MdxJsxTextElement {
                name: tag.name.clone(),
                attributes: tag.attributes.clone(),
                children: vec![],
                position: Some(Position {
                    start: tag.start.clone(),
                    end: tag.end.clone(),
                }),
            })
        };
        context.tail_push(node);
    }
    // Self-closing and closing tags finish the element now; otherwise track
    // the tag on the custom stack until its closing tag arrives.
    if tag.self_closing || tag.close {
        context.tail_pop()?;
    } else {
        context.jsx_tag_stack.push(tag);
    }
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`MdxJsxTagClosingMarker`][Name::MdxJsxTagClosingMarker].
fn on_exit_mdx_jsx_tag_closing_marker(context: &mut CompileContext) {
    // Mark the current tag as a closing tag (`</…>`).
    let tag = context.jsx_tag.as_mut().expect("expected tag");
    tag.close = true;
}
/// Handle [`Exit`][Kind::Exit]:[`MdxJsxTagNamePrimary`][Name::MdxJsxTagNamePrimary].
fn on_exit_mdx_jsx_tag_name_primary(context: &mut CompileContext) {
    // Take the primary name straight from the source bytes.
    let position = SlicePosition::from_exit_event(context.events, context.index);
    let value = Slice::from_position(context.bytes, &position).serialize();
    context.jsx_tag.as_mut().expect("expected tag").name = Some(value);
}
/// Handle [`Exit`][Kind::Exit]:[`MdxJsxTagNameMember`][Name::MdxJsxTagNameMember].
fn on_exit_mdx_jsx_tag_name_member(context: &mut CompileContext) {
    let position = SlicePosition::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    // Append `.member` to the name collected so far.
    let tag = context.jsx_tag.as_mut().expect("expected tag");
    let name = tag.name.as_mut().expect("expected primary before member");
    name.push('.');
    name.push_str(slice.as_str());
}
/// Handle [`Exit`][Kind::Exit]:[`MdxJsxTagNameLocal`][Name::MdxJsxTagNameLocal].
fn on_exit_mdx_jsx_tag_name_local(context: &mut CompileContext) {
    let position = SlicePosition::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    // Append `:local` to the name collected so far.
    let tag = context.jsx_tag.as_mut().expect("expected tag");
    let name = tag.name.as_mut().expect("expected primary before local");
    name.push(':');
    name.push_str(slice.as_str());
}
/// Handle [`Exit`][Kind::Exit]:{[`MdxEsm`][Name::MdxEsm],[`MdxFlowExpression`][Name::MdxFlowExpression],[`MdxTextExpression`][Name::MdxTextExpression]}.
fn on_exit_mdx_esm_or_expression(context: &mut CompileContext) -> Result<(), message::Message> {
    // Handle/drop the buffered content (see `on_exit_drop`), then close the node.
    on_exit_drop(context);
    context.tail_pop()?;
    Ok(())
}
/// Handle [`Exit`][Kind::Exit]:[`MdxJsxTagAttributePrimaryName`][Name::MdxJsxTagAttributePrimaryName].
fn on_exit_mdx_jsx_tag_attribute_primary_name(context: &mut CompileContext) {
    // The attribute name is taken straight from the source bytes.
    let position = SlicePosition::from_exit_event(context.events, context.index);
    let value = Slice::from_position(context.bytes, &position).serialize();
    let tag = context.jsx_tag.as_mut().expect("expected tag");
    match tag.attributes.last_mut() {
        Some(AttributeContent::Property(attribute)) => attribute.name = value,
        _ => unreachable!("expected property"),
    }
}
/// Handle [`Exit`][Kind::Exit]:[`MdxJsxTagAttributeNameLocal`][Name::MdxJsxTagAttributeNameLocal].
fn on_exit_mdx_jsx_tag_attribute_name_local(context: &mut CompileContext) {
    let position = SlicePosition::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    // Append `:local` to the attribute name collected so far.
    let tag = context.jsx_tag.as_mut().expect("expected tag");
    match tag.attributes.last_mut() {
        Some(AttributeContent::Property(attribute)) => {
            attribute.name.push(':');
            attribute.name.push_str(slice.as_str());
        }
        _ => unreachable!("expected property"),
    }
}
/// Handle [`Exit`][Kind::Exit]:[`MdxJsxTagAttributeValueLiteral`][Name::MdxJsxTagAttributeValueLiteral].
fn on_exit_mdx_jsx_tag_attribute_value_literal(context: &mut CompileContext) {
    let value = context.resume();
    let tag = context.jsx_tag.as_mut().expect("expected tag");
    match tag.attributes.last_mut() {
        Some(AttributeContent::Property(node)) => {
            // Character references in the literal are decoded here.
            let literal = parse_character_reference(&value.to_string());
            node.value = Some(AttributeValue::Literal(literal));
        }
        _ => unreachable!("expected property"),
    }
}
/// Handle [`Exit`][Kind::Exit]:[`MdxJsxTagSelfClosingMarker`][Name::MdxJsxTagSelfClosingMarker].
fn on_exit_mdx_jsx_tag_self_closing_marker(context: &mut CompileContext) {
    // Mark the current tag as self-closing (`<…/>`).
    let tag = context.jsx_tag.as_mut().expect("expected tag");
    tag.self_closing = true;
}
/// Handle [`Exit`][Kind::Exit]:[`ReferenceString`][Name::ReferenceString].
fn on_exit_reference_string(context: &mut CompileContext) {
    // The buffered content is the label; the raw bytes yield the identifier.
    let label = context.resume().to_string();
    let position = SlicePosition::from_exit_event(context.events, context.index);
    let slice = Slice::from_position(context.bytes, &position);
    let identifier = normalize_identifier(slice.as_str()).to_lowercase();
    let reference = context
        .media_reference_stack
        .last_mut()
        .expect("expected reference on media stack");
    reference.reference_kind = Some(ReferenceKind::Full);
    reference.identifier = identifier;
    reference.label = label;
}
/// Handle [`Exit`][Kind::Exit]:[`ResourceDestinationString`][Name::ResourceDestinationString].
fn on_exit_resource_destination_string(context: &mut CompileContext) {
    // The buffered content becomes the url of the link/image.
    let url = context.resume().to_string();
    match context.tail_mut() {
        Node::Image(node) => node.url = url,
        Node::Link(node) => node.url = url,
        _ => unreachable!("expected link, image on stack"),
    }
}
/// Handle [`Exit`][Kind::Exit]:[`ResourceTitleString`][Name::ResourceTitleString].
fn on_exit_resource_title_string(context: &mut CompileContext) {
    // The buffered content becomes the title of the link/image.
    let title = Some(context.resume().to_string());
    match context.tail_mut() {
        Node::Image(node) => node.title = title,
        Node::Link(node) => node.title = title,
        _ => unreachable!("expected link, image on stack"),
    }
}
/// Create a (zero-width) position from an event.
///
/// Start and end are both the event’s point.
fn position_from_event(event: &Event) -> Position {
    let place = Point::new(event.point.line, event.point.column, event.point.index);
    Position {
        end: place.clone(),
        start: place,
    }
}
/// Resolve the current stack on the tree.
///
/// Walks down from `node`, taking the child at each successive index in
/// `stack`, and returns the node reached.
fn delve_mut<'tree>(mut node: &'tree mut Node, stack: &'tree [usize]) -> &'tree mut Node {
    for &index in stack {
        let children = node.children_mut().expect("Cannot delve into non-parent");
        node = &mut children[index];
    }
    node
}
/// Remove initial/final EOLs.
///
/// Strips at most one line ending (`\n`, `\r`, or `\r\n`) from the start
/// (when `at_start`) and/or end (when `at_end`) of `value`.
fn trim_eol(value: String, at_start: bool, at_end: bool) -> String {
    let bytes = value.as_bytes();
    let mut head = 0;
    let mut tail = bytes.len();

    // One leading eol.
    if at_start && !bytes.is_empty() {
        match bytes[0] {
            b'\n' => head = 1,
            // A CR followed by an LF is one (CRLF) line ending.
            b'\r' => head = if bytes.get(1) == Some(&b'\n') { 2 } else { 1 },
            _ => {}
        }
    }

    // One trailing eol.
    if at_end && tail > head {
        match bytes[tail - 1] {
            b'\n' => {
                tail -= 1;
                // The CR of a trailing CRLF.
                if tail > head && bytes[tail - 1] == b'\r' {
                    tail -= 1;
                }
            }
            b'\r' => tail -= 1,
            _ => {}
        }
    }

    if head == 0 && tail == bytes.len() {
        value
    } else {
        // Only ASCII bytes were stripped, so these are char boundaries.
        value[head..tail].into()
    }
}
/// Handle a mismatch.
///
/// Mismatches can occur with MDX JSX tags.
///
/// `left` is the still-open event (if any), `right` the event that does not
/// match it.
fn on_mismatch_error(
    context: &mut CompileContext,
    left: Option<&Event>,
    right: &Event,
) -> Result<(), message::Message> {
    // Case: a JSX tag is left open when something else ends.
    if right.name == Name::MdxJsxFlowTag || right.name == Name::MdxJsxTextTag {
        let stack = &context.jsx_tag_stack;
        let tag = stack.last().unwrap();
        // Point at the thing that ended, or at the very last event.
        let point = if let Some(left) = left {
            &left.point
        } else {
            &context.events[context.events.len() - 1].point
        };
        return Err(message::Message {
            place: Some(Box::new(message::Place::Point(point.to_unist()))),
            reason: format!(
                "Expected a closing tag for `{}` ({}:{}){}",
                serialize_abbreviated_tag(tag),
                tag.start.line,
                tag.start.column,
                if let Some(left) = left {
                    format!(" before the end of `{:?}`", left.name)
                } else {
                    String::new()
                }
            ),
            rule_id: Box::new("end-tag-mismatch".into()),
            source: Box::new("markdown-rs".into()),
        });
    }
    if let Some(left) = left {
        // Case: something else is left open when a JSX tag ends.
        if left.name == Name::MdxJsxFlowTag || left.name == Name::MdxJsxTextTag {
            let tag = context.jsx_tag.as_ref().unwrap();
            return Err(
                message::Message {
                    place: Some(Box::new(message::Place::Point(tag.start.clone()))),
                    reason: format!(
                        "Expected the closing tag `{}` either before the start of `{:?}` ({}:{}), or another opening tag after that start",
                        serialize_abbreviated_tag(tag),
                        &right.name,
                        &right.point.line,
                        &right.point.column,
                    ),
                    rule_id: Box::new("end-tag-mismatch".into()),
                    source: Box::new("markdown-rs".into()),
                }
            );
        }
        // Mismatches that do not involve JSX are bugs.
        unreachable!("mismatched (non-jsx): {:?} / {:?}", left.name, right.name);
    } else {
        unreachable!("mismatched (non-jsx): document / {:?}", right.name);
    }
}
/// Format a JSX tag, ignoring its attributes.
///
/// Yields `<name>`, `</name>`, `<>`, or `</>`.
fn serialize_abbreviated_tag(tag: &JsxTag) -> String {
    let slash = if tag.close { "/" } else { "" };
    let name = tag.name.as_deref().unwrap_or("");
    format!("<{}{}>", slash, name)
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
//! A tokenizer glues states from the state machine together.
//!
//! It facilitates everything needed to turn bytes into events with a state
//! machine.
//! It also enables the logic needed for parsing markdown, such as an
//! [`attempt`][] to try and parse something, which can succeed or, when
//! unsuccessful, revert the attempt.
//!
//! [`attempt`]: Tokenizer::attempt
use crate::event::{Content, Event, Kind, Link, Name, Point, VOID_EVENTS};
use crate::message;
use crate::parser::ParseState;
use crate::resolve::{call as call_resolve, Name as ResolveName};
use crate::state::{call, State};
use crate::subtokenize::Subresult;
#[cfg(feature = "log")]
use crate::util::char::format_byte_opt;
use crate::util::{constant::TAB_SIZE, edit_map::EditMap};
use alloc::{boxed::Box, string::String, vec, vec::Vec};
/// Containers.
///
/// Containers are found when tokenizing
/// [document content][crate::construct::document].
/// They parse a portion at the start of one or more lines.
/// The rest of those lines is a different content type (specifically, flow),
/// which they “contain”.
///
/// Also see [`ContainerState`], which tracks bookkeeping per open container.
#[derive(Debug, Eq, PartialEq)]
pub enum Container {
    /// [Block quote][crate::construct::block_quote].
    BlockQuote,
    /// [List item][crate::construct::list_item].
    ListItem,
    /// [GFM: Footnote definition][crate::construct::gfm_footnote_definition].
    GfmFootnoteDefinition,
}
/// Info used to tokenize a container.
///
/// Practically, these fields are only used for list items.
#[derive(Debug)]
pub struct ContainerState {
    /// Kind.
    pub kind: Container,
    /// Whether the first line was blank.
    pub blank_initial: bool,
    /// Size.
    ///
    /// NOTE(review): presumably the width of the container prefix (marker
    /// plus padding) — confirm against the list item construct.
    pub size: usize,
}
/// How to handle a byte.
///
/// Produced by [`byte_action`].
#[derive(Debug, PartialEq)]
enum ByteAction {
    /// This is a normal byte.
    ///
    /// Includes replaced bytes (a lone CR is replaced by an LF).
    Normal(u8),
    /// This byte must be ignored.
    ///
    /// Happens for the CR of a CRLF pair.
    Ignore,
    /// This is a new byte.
    ///
    /// Happens for the virtual spaces of a tab expansion.
    Insert(u8),
}
/// Label start kind.
///
/// Used by [`LabelStart`] and [`Label`].
#[derive(Debug, PartialEq, Eq)]
pub enum LabelKind {
    /// Label (image) start.
    ///
    /// ```markdown
    /// > | a ![b] c
    ///       ^^
    /// ```
    ///
    /// Construct: [Label start (image)][crate::construct::label_start_image].
    Image,
    /// Label (image) link.
    ///
    /// ```markdown
    /// > | a [b] c
    ///       ^
    /// ```
    ///
    /// Construct: [Label start (link)][crate::construct::label_start_link].
    Link,
    /// GFM: Label (footnote) link.
    ///
    /// ```markdown
    /// > | a [^b] c
    ///       ^^
    /// ```
    ///
    /// Construct: [GFM: Label start (footnote)][crate::construct::gfm_label_start_footnote].
    GfmFootnote,
    /// GFM: Label (footnote) link, not matching a footnote definition, so
    /// handled as a label (link) start.
    ///
    /// ```markdown
    /// > | a [^b](c) d
    ///       ^^
    /// ```
    ///
    /// Construct: [Label end][crate::construct::label_end].
    GfmUndefinedFootnote,
}
/// Label start, looking for an end.
///
/// Also see [`Label`], which represents a matched start/end pair.
#[derive(Debug)]
pub struct LabelStart {
    /// Kind of start.
    pub kind: LabelKind,
    /// Indices of where the label starts and ends in `events`.
    pub start: (usize, usize),
    /// A boolean used internally to figure out if a (link) label start can’t
    /// be used anymore (because it would contain another link).
    /// That link start is still looking for a balanced closing bracket though,
    /// so we can’t remove it just yet.
    pub inactive: bool,
}
/// Valid label.
#[derive(Debug)]
pub struct Label {
    /// Kind of label.
    pub kind: LabelKind,
    /// Indices of label start.
    pub start: (usize, usize),
    /// Indices of label end.
    pub end: (usize, usize),
}
/// Different kinds of attempts.
#[derive(Debug, PartialEq)]
enum AttemptKind {
    /// Discard what was tokenized when unsuccessful.
    ///
    /// Used by [`Tokenizer::attempt`].
    Attempt,
    /// Discard always.
    ///
    /// Used by [`Tokenizer::check`].
    Check,
}
/// How to handle [`State::Ok`][] or [`State::Nok`][].
///
/// Attempts are stacked on [`Tokenizer::attempts`].
#[derive(Debug)]
struct Attempt {
    /// Where to go to when successful.
    ok: State,
    /// Where to go to when unsuccessful.
    nok: State,
    /// Kind of attempt.
    kind: AttemptKind,
    /// If needed, the progress to revert to.
    ///
    /// It is not needed to discard an [`AttemptKind::Attempt`] that has a
    /// `nok` of [`State::Nok`][], because that means it is used in *another*
    /// attempt, which will receive that `Nok`, and has to handle it.
    progress: Option<Progress>,
}
/// The internal state of a tokenizer.
///
/// Not to be confused with states from the state machine, this instead is all
/// the information on where we currently are and what’s going on.
///
/// Captured by [`Tokenizer::capture`], restored by [`Tokenizer::free`].
#[derive(Clone, Debug)]
struct Progress {
    /// Length of `events`.
    ///
    /// It’s not allowed to remove events, so reverting will just pop stuff off.
    events_len: usize,
    /// Length of the stack.
    ///
    /// It’s not allowed to decrease the stack in an attempt.
    stack_len: usize,
    /// Previous code.
    previous: Option<u8>,
    /// Current code.
    current: Option<u8>,
    /// Current place in the file.
    point: Point,
}
/// A lot of shared fields used to tokenize things.
#[allow(clippy::struct_excessive_bools)]
#[derive(Debug)]
pub struct TokenizeState<'a> {
    // Couple complex fields used to tokenize the document.
    /// Tokenizer, used to tokenize flow in document.
    pub document_child: Option<Box<Tokenizer<'a>>>,
    /// State, used to tokenize containers.
    pub document_child_state: Option<State>,
    /// Stack of currently active containers.
    pub document_container_stack: Vec<ContainerState>,
    /// How many active containers continued.
    pub document_continued: usize,
    /// Index of last `data`.
    pub document_data_index: Option<usize>,
    /// Container exits by line number.
    pub document_exits: Vec<Option<Vec<Event>>>,
    /// Whether the previous flow was a paragraph or a definition.
    pub document_lazy_accepting_before: bool,
    /// Whether this is the first paragraph (potentially after definitions) in
    /// a list item.
    /// Used for GFM task list items.
    pub document_at_first_paragraph_of_list_item: bool,
    // Couple of very frequent settings for parsing whitespace.
    // NOTE(review): these are scratch fields for the `space_or_tab` (and
    // `space_or_tab_eol`) constructs; the short docs below follow the field
    // names — confirm semantics against those constructs.
    /// Content type for whitespace around an eol.
    pub space_or_tab_eol_content: Option<Content>,
    /// Whether to connect events around an eol.
    pub space_or_tab_eol_connect: bool,
    /// Whether whitespace around an eol is ok.
    pub space_or_tab_eol_ok: bool,
    /// Whether to connect whitespace events.
    pub space_or_tab_connect: bool,
    /// Content type for whitespace.
    pub space_or_tab_content: Option<Content>,
    /// Minimum amount of whitespace.
    pub space_or_tab_min: usize,
    /// Maximum amount of whitespace.
    pub space_or_tab_max: usize,
    /// Current amount of whitespace.
    pub space_or_tab_size: usize,
    /// Event name to use for whitespace.
    pub space_or_tab_token: Name,
    // Couple of media related fields.
    /// List of usable label starts.
    ///
    /// Used when tokenizing [text content][crate::construct::text].
    pub label_starts: Vec<LabelStart>,
    /// List of unusable label starts.
    ///
    /// Used when tokenizing [text content][crate::construct::text].
    pub label_starts_loose: Vec<LabelStart>,
    /// Stack of images and links.
    ///
    /// Used when tokenizing [text content][crate::construct::text].
    pub labels: Vec<Label>,
    /// List of defined definition identifiers.
    pub definitions: Vec<String>,
    /// List of defined GFM footnote definition identifiers.
    pub gfm_footnote_definitions: Vec<String>,
    /// Last error message provided at an EOF of an expression.
    pub mdx_last_parse_error: Option<(String, String, String)>,
    // Generic scratch fields, (re)used by individual constructs.
    /// Whether to connect events.
    pub connect: bool,
    /// Marker.
    pub marker: u8,
    /// Secondary marker.
    pub marker_b: u8,
    /// Several markers.
    pub markers: &'static [u8],
    /// Whether something was seen.
    pub seen: bool,
    /// Size.
    pub size: usize,
    /// Secondary size.
    pub size_b: usize,
    /// Tertiary size.
    pub size_c: usize,
    /// Index (start).
    pub start: usize,
    /// Index (end).
    pub end: usize,
    /// Slot for an event name.
    pub token_1: Name,
    /// Slot for an event name.
    pub token_2: Name,
    /// Slot for an event name.
    pub token_3: Name,
    /// Slot for an event name.
    pub token_4: Name,
    /// Slot for an event name.
    pub token_5: Name,
    /// Slot for an event name.
    pub token_6: Name,
}
/// A tokenizer itself.
#[allow(clippy::struct_excessive_bools)]
#[derive(Debug)]
pub struct Tokenizer<'a> {
    /// Jump between line endings.
    column_start: Vec<(usize, usize)>,
    /// First line where this tokenizer starts.
    first_line: usize,
    /// Current point after the last line ending (excluding jump).
    line_start: Point,
    /// Track whether the current byte is already consumed (`true`) or expected
    /// to be consumed (`false`).
    ///
    /// Tracked to make sure everything’s valid.
    consumed: bool,
    /// Stack of how to handle attempts.
    attempts: Vec<Attempt>,
    /// Current byte.
    pub current: Option<u8>,
    /// Previous byte.
    pub previous: Option<u8>,
    /// Current relative and absolute place in the file.
    pub point: Point,
    /// Semantic labels.
    pub events: Vec<Event>,
    /// Hierarchy of semantic labels.
    ///
    /// Tracked to make sure everything’s valid.
    pub stack: Vec<Name>,
    /// Edit map, to batch changes.
    pub map: EditMap,
    /// List of resolvers.
    pub resolvers: Vec<ResolveName>,
    /// Shared parsing state across tokenizers.
    pub parse_state: &'a ParseState<'a>,
    /// A lot of shared fields used to tokenize things.
    pub tokenize_state: TokenizeState<'a>,
    /// Whether we would be interrupting something.
    ///
    /// Used when tokenizing [flow content][crate::construct::flow].
    pub interrupt: bool,
    /// Whether containers cannot “pierce” into the current construct.
    ///
    /// Used when tokenizing [document content][crate::construct::document].
    pub concrete: bool,
    /// Whether this row is piercing into the current construct with more
    /// containers.
    ///
    /// Used when tokenizing [document content][crate::construct::document].
    pub pierce: bool,
    /// Whether this line is lazy: there are less containers than before.
    pub lazy: bool,
}
impl<'a> Tokenizer<'a> {
/// Create a new tokenizer.
///
/// `point` is where it starts; `parse_state` holds the input bytes and is
/// shared between tokenizers.
pub fn new(point: Point, parse_state: &'a ParseState) -> Tokenizer<'a> {
    Tokenizer {
        previous: None,
        current: None,
        // To do: reserve size when feeding?
        column_start: vec![],
        first_line: point.line,
        line_start: point.clone(),
        consumed: true,
        attempts: vec![],
        point,
        stack: vec![],
        events: vec![],
        parse_state,
        // All scratch state starts out zeroed/empty.
        tokenize_state: TokenizeState {
            connect: false,
            document_container_stack: vec![],
            document_exits: vec![],
            document_continued: 0,
            document_lazy_accepting_before: false,
            document_data_index: None,
            document_child_state: None,
            document_child: None,
            document_at_first_paragraph_of_list_item: false,
            definitions: vec![],
            gfm_footnote_definitions: vec![],
            mdx_last_parse_error: None,
            end: 0,
            label_starts: vec![],
            label_starts_loose: vec![],
            marker: 0,
            marker_b: 0,
            markers: &[],
            labels: vec![],
            seen: false,
            size: 0,
            size_b: 0,
            size_c: 0,
            space_or_tab_eol_content: None,
            space_or_tab_eol_connect: false,
            space_or_tab_eol_ok: false,
            space_or_tab_connect: false,
            space_or_tab_content: None,
            space_or_tab_min: 0,
            space_or_tab_max: 0,
            space_or_tab_size: 0,
            space_or_tab_token: Name::SpaceOrTab,
            start: 0,
            token_1: Name::Data,
            token_2: Name::Data,
            token_3: Name::Data,
            token_4: Name::Data,
            token_5: Name::Data,
            token_6: Name::Data,
        },
        map: EditMap::new(),
        interrupt: false,
        pierce: false,
        concrete: false,
        lazy: false,
        resolvers: vec![],
    }
}
/// Register a resolver.
///
/// Does nothing if the resolver is already registered.
pub fn register_resolver(&mut self, name: ResolveName) {
    let known = self.resolvers.contains(&name);
    if !known {
        self.resolvers.push(name);
    }
}
/// Register a resolver, before others.
///
/// Does nothing if the resolver is already registered.
pub fn register_resolver_before(&mut self, name: ResolveName) {
    let known = self.resolvers.contains(&name);
    if !known {
        self.resolvers.insert(0, name);
    }
}
/// Define a jump between two places.
///
/// This defines to which future index we move after a line ending.
pub fn define_skip(&mut self, mut point: Point) {
    // Step back over ignored bytes (the CR of CRLF).
    move_point_back(self, &mut point);
    let info = (point.index, point.vs);
    #[cfg(feature = "log")]
    log::trace!("position: define skip: {:?} -> ({:?})", point.line, info);
    // `column_start` is indexed by line, relative to `first_line`.
    let at = point.line - self.first_line;
    if at >= self.column_start.len() {
        self.column_start.push(info);
    } else {
        self.column_start[at] = info;
    }
    self.account_for_potential_skip();
}
/// Increment the current positional info if we’re right after a line
/// ending, which has a skip defined.
fn account_for_potential_skip(&mut self) {
    let at = self.point.line - self.first_line;
    // Only at the start of a line, and only when a skip exists for it.
    if self.point.column == 1 && at != self.column_start.len() {
        self.move_to(self.column_start[at]);
    }
}
/// Prepare for a next byte to get consumed.
///
/// `None` is passed at the end of input (see `push_impl`).
fn expect(&mut self, byte: Option<u8>) {
    debug_assert!(self.consumed, "expected previous byte to be consumed");
    self.consumed = false;
    self.current = byte;
}
/// Consume the current byte.
/// Each state function is expected to call this to signal that this code is
/// used, or call a next function.
pub fn consume(&mut self) {
    debug_assert!(!self.consumed, "expected code to *not* have been consumed: this might be because `State::Retry(x)` instead of `State::Next(x)` was returned");
    // Advance positional info first, then shift `current` into `previous`.
    self.move_one();
    self.previous = self.current;
    // While we’re not at eof, it is at least better to not have the
    // same current code as `previous` *and* `current`.
    self.current = None;
    // Mark as consumed.
    self.consumed = true;
}
/// Move to the next (virtual) byte.
fn move_one(&mut self) {
    match byte_action(self.parse_state.bytes, &self.point) {
        // Ignored bytes (the CR of CRLF) only advance the index.
        ByteAction::Ignore => {
            self.point.index += 1;
        }
        // Virtual bytes (tab expansion) advance column/vs, not the index.
        ByteAction::Insert(byte) => {
            self.previous = Some(byte);
            self.point.column += 1;
            self.point.vs += 1;
        }
        ByteAction::Normal(byte) => {
            self.previous = Some(byte);
            self.point.vs = 0;
            self.point.index += 1;
            if byte == b'\n' {
                self.point.line += 1;
                self.point.column = 1;
                // Make sure a jump slot exists for the new line.
                if self.point.line - self.first_line + 1 > self.column_start.len() {
                    self.column_start.push((self.point.index, self.point.vs));
                }
                self.line_start = self.point.clone();
                self.account_for_potential_skip();
                #[cfg(feature = "log")]
                log::trace!("position: after eol: `{:?}`", self.point);
            } else {
                self.point.column += 1;
            }
        }
    }
}
/// Move (virtual) bytes, one at a time, until the given index/virtual-space
/// pair is reached.
fn move_to(&mut self, to: (usize, usize)) {
    let (to_index, to_vs) = to;
    loop {
        let before_index = self.point.index < to_index;
        let before_vs = self.point.index == to_index && self.point.vs < to_vs;
        if !before_index && !before_vs {
            break;
        }
        self.move_one();
    }
}
/// Mark the start of a semantic label.
///
/// Must be balanced with a matching [`Tokenizer::exit`].
pub fn enter(&mut self, name: Name) {
    enter_impl(self, name, None);
}
/// Enter with a link.
///
/// Like [`Tokenizer::enter`], but attaches a [`Link`] to the event.
pub fn enter_link(&mut self, name: Name, link: Link) {
    enter_impl(self, name, Some(link));
}
/// Mark the end of a semantic label.
///
/// The name must match the most recent `enter` (checked in debug builds).
pub fn exit(&mut self, name: Name) {
    let current = self.stack.pop().expect("cannot close w/o open tokens");
    debug_assert_eq!(current, name, "expected exit event to match current event");
    let previous = self.events.last().expect("cannot close w/o open event");
    let mut point = self.point.clone();
    debug_assert!(
        current != previous.name
            || previous.point.index != point.index
            || previous.point.vs != point.vs,
        "expected non-empty event"
    );
    if VOID_EVENTS.iter().any(|d| d == &name) {
        debug_assert!(
            current == previous.name,
            "expected event to be void, instead of including something"
        );
    }
    // A bit weird, but if we exit right after a line ending, we *don’t* want to consider
    // potential skips.
    if matches!(self.previous, Some(b'\n')) {
        point = self.line_start.clone();
    } else {
        move_point_back(self, &mut point);
    }
    #[cfg(feature = "log")]
    log::debug!("exit: `{:?}`", name);
    let event = Event {
        kind: Kind::Exit,
        name,
        point,
        link: None,
    };
    self.events.push(event);
}
/// Capture the tokenizer progress, so an attempt can later revert to it
/// (see [`Tokenizer::free`]).
fn capture(&mut self) -> Progress {
    Progress {
        events_len: self.events.len(),
        stack_len: self.stack.len(),
        previous: self.previous,
        current: self.current,
        point: self.point.clone(),
    }
}
/// Apply tokenizer progress.
///
/// Reverts to a state previously captured with [`Tokenizer::capture`].
fn free(&mut self, previous: Progress) {
    self.previous = previous.previous;
    self.current = previous.current;
    self.point = previous.point;
    debug_assert!(
        self.events.len() >= previous.events_len,
        "expected to restore less events than before"
    );
    self.events.truncate(previous.events_len);
    debug_assert!(
        self.stack.len() >= previous.stack_len,
        "expected to restore less stack items than before"
    );
    self.stack.truncate(previous.stack_len);
}
/// Stack an attempt, moving to `ok` on [`State::Ok`][] and `nok` on
/// [`State::Nok`][], reverting in both cases.
pub fn check(&mut self, ok: State, nok: State) {
    // A check never keeps what it tokenized, so always capture progress
    // to restore regardless of the outcome.
    let progress = Some(self.capture());
    let attempt = Attempt {
        kind: AttemptKind::Check,
        progress,
        ok,
        nok,
    };
    self.attempts.push(attempt);
}
/// Stack an attempt, moving to `ok` on [`State::Ok`][] and `nok` on
/// [`State::Nok`][], reverting in the latter case.
pub fn attempt(&mut self, ok: State, nok: State) {
    // No need to capture (and restore) when `nok` is `State::Nok`, because the
    // parent attempt will do it.
    let progress = if nok == State::Nok {
        None
    } else {
        Some(self.capture())
    };
    let attempt = Attempt {
        kind: AttemptKind::Attempt,
        progress,
        ok,
        nok,
    };
    self.attempts.push(attempt);
}
/// Tokenize.
///
/// Feeds the bytes in `from..to` (each an index/virtual-space pair) into
/// the state machine, starting at `state`.
pub fn push(&mut self, from: (usize, usize), to: (usize, usize), state: State) -> State {
    push_impl(self, from, to, state, false)
}
/// Flush.
///
/// Drives the state machine to completion at the current point, collects
/// the (footnote) definitions seen, and runs resolvers when `resolve` is set.
pub fn flush(&mut self, state: State, resolve: bool) -> Result<Subresult, message::Message> {
    let to = (self.point.index, self.point.vs);
    let state = push_impl(self, to, to, state, true);
    state.to_result()?;
    let mut value = Subresult {
        done: false,
        gfm_footnote_definitions: self.tokenize_state.gfm_footnote_definitions.split_off(0),
        definitions: self.tokenize_state.definitions.split_off(0),
    };
    if resolve {
        let resolvers = self.resolvers.split_off(0);
        let mut index = 0;
        let defs = &mut value.definitions;
        let fn_defs = &mut value.gfm_footnote_definitions;
        while index < resolvers.len() {
            // Resolvers may themselves yield definitions; collect those too.
            if let Some(mut result) = call_resolve(self, resolvers[index])? {
                fn_defs.append(&mut result.gfm_footnote_definitions);
                defs.append(&mut result.definitions);
            }
            index += 1;
        }
        self.map.consume(&mut self.events);
    }
    Ok(value)
}
}
/// Move back past ignored bytes (the CR of CRLF).
fn move_point_back(tokenizer: &mut Tokenizer, point: &mut Point) {
    while point.index > 0 {
        point.index -= 1;
        // Stop as soon as the byte before is not ignored, restoring the index.
        if byte_action(tokenizer.parse_state.bytes, point) != ByteAction::Ignore {
            point.index += 1;
            break;
        }
    }
}
/// Enter.
///
/// Shared implementation of [`Tokenizer::enter`] and [`Tokenizer::enter_link`].
fn enter_impl(tokenizer: &mut Tokenizer, name: Name, link: Option<Link>) {
    let mut point = tokenizer.point.clone();
    move_point_back(tokenizer, &mut point);
    #[cfg(feature = "log")]
    log::debug!("enter: `{:?}`", name);
    let event = Event {
        kind: Kind::Enter,
        name: name.clone(),
        point,
        link,
    };
    tokenizer.stack.push(name);
    tokenizer.events.push(event);
}
/// Run the tokenizer.
///
/// Feeds bytes in `from..to` into the state machine starting at `state`;
/// when `flush` is set, also feeds `None` (eof) at the end.
fn push_impl(
    tokenizer: &mut Tokenizer,
    from: (usize, usize),
    to: (usize, usize),
    mut state: State,
    flush: bool,
) -> State {
    debug_assert!(
        from.0 > tokenizer.point.index
            || (from.0 == tokenizer.point.index && from.1 >= tokenizer.point.vs),
        "cannot move backwards"
    );
    tokenizer.move_to(from);
    loop {
        match state {
            State::Error(_) => break,
            State::Ok | State::Nok => {
                // An attempt finished: restore progress if needed, and
                // continue with its `ok`/`nok` state.
                // Completely done when no attempt is left.
                if let Some(attempt) = tokenizer.attempts.pop() {
                    if attempt.kind == AttemptKind::Check || state == State::Nok {
                        if let Some(progress) = attempt.progress {
                            tokenizer.free(progress);
                        }
                    }
                    tokenizer.consumed = true;
                    let next = if state == State::Ok {
                        attempt.ok
                    } else {
                        attempt.nok
                    };
                    #[cfg(feature = "log")]
                    log::trace!("attempt: `{:?}` -> `{:?}`", state, next);
                    state = next;
                } else {
                    break;
                }
            }
            State::Next(name) => {
                // Figure out what to feed next: a byte while in range,
                // `None` (eof) when flushing, otherwise we’re done for now.
                let action = if tokenizer.point.index < to.0
                    || (tokenizer.point.index == to.0 && tokenizer.point.vs < to.1)
                {
                    Some(byte_action(tokenizer.parse_state.bytes, &tokenizer.point))
                } else if flush {
                    None
                } else {
                    break;
                };
                if let Some(ByteAction::Ignore) = action {
                    tokenizer.move_one();
                } else {
                    let byte =
                        if let Some(ByteAction::Insert(byte) | ByteAction::Normal(byte)) = action {
                            Some(byte)
                        } else {
                            None
                        };
                    #[cfg(feature = "log")]
                    log::trace!("feed: {} to {:?}", format_byte_opt(byte), name);
                    tokenizer.expect(byte);
                    state = call(tokenizer, name);
                }
            }
            State::Retry(name) => {
                // Run the named state again without feeding a new byte.
                #[cfg(feature = "log")]
                log::trace!("retry: `{:?}`", name);
                state = call(tokenizer, name);
            }
        }
    }
    tokenizer.consumed = true;
    if flush {
        debug_assert!(matches!(state, State::Ok | State::Error(_)), "must be ok");
    } else {
        debug_assert!(
            matches!(state, State::Next(_) | State::Error(_)),
            "must have a next state"
        );
    }
    state
}
/// Figure out how to handle a byte.
fn byte_action(bytes: &[u8], point: &Point) -> ByteAction {
    if point.index < bytes.len() {
        let byte = bytes[point.index];
        if byte == b'\r' {
            // CRLF: ignore the CR; the LF is handled on its own.
            if point.index < bytes.len() - 1 && bytes[point.index + 1] == b'\n' {
                ByteAction::Ignore
            }
            // CR: treat a lone CR as a line feed.
            else {
                ByteAction::Normal(b'\n')
            }
        } else if byte == b'\t' {
            // `vs` is how many virtual spaces are needed after this column
            // to reach the next multiple of `TAB_SIZE` (columns are
            // 1-indexed).
            let remainder = point.column % TAB_SIZE;
            let vs = if remainder == 0 {
                0
            } else {
                TAB_SIZE - remainder
            };
            // On the tab itself, first send it.
            if point.vs == 0 {
                if vs == 0 {
                    ByteAction::Normal(byte)
                } else {
                    ByteAction::Insert(byte)
                }
            } else if vs == 0 {
                // Last virtual space of the expansion.
                ByteAction::Normal(b' ')
            } else {
                ByteAction::Insert(b' ')
            }
        } else {
            ByteAction::Normal(byte)
        }
    } else {
        unreachable!("out of bounds")
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/unist.rs | Rust | //! abstract syntax trees: [unist][].
//!
//! [unist]: https://github.com/syntax-tree/unist
use alloc::fmt;
/// One place in a source file.
///
/// Used as the `start` and `end` of a [`Position`][].
#[derive(Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Point {
    /// 1-indexed integer representing a line in a source file.
    pub line: usize,
    /// 1-indexed integer representing a column in a source file.
    pub column: usize,
    /// 0-indexed integer representing a character in a source file.
    pub offset: usize,
}
impl Point {
#[must_use]
pub fn new(line: usize, column: usize, offset: usize) -> Point {
Point {
line,
column,
offset,
}
}
}
impl fmt::Debug for Point {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}:{} ({})", self.line, self.column, self.offset)
}
}
/// Location of a node in a source file.
///
/// The `end` point is exclusive: it is the place right after the region.
#[derive(Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Position {
    /// Represents the place of the first character of the parsed source region.
    pub start: Point,
    /// Represents the place of the first character after the parsed source
    /// region, whether it exists or not.
    pub end: Point,
}
impl Position {
#[must_use]
pub fn new(
start_line: usize,
start_column: usize,
start_offset: usize,
end_line: usize,
end_column: usize,
end_offset: usize,
) -> Position {
Position {
start: Point::new(start_line, start_column, start_offset),
end: Point::new(end_line, end_column, end_offset),
}
}
}
impl fmt::Debug for Position {
    /// Format as `line:column-line:column (offset-offset)`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (start, end) = (&self.start, &self.end);
        write!(
            f,
            "{}:{}-{}:{} ({}-{})",
            start.line, start.column, end.line, end.column, start.offset, end.offset
        )
    }
}
// Unit tests for the `Debug` representations of `Point` and `Position`.
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::format;
    #[test]
    fn point() {
        let point = Point::new(1, 1, 0);
        assert_eq!(
            format!("{:?}", point),
            "1:1 (0)",
            "should support `Debug` on unist points"
        );
    }
    #[test]
    fn position() {
        let position = Position::new(1, 1, 0, 1, 3, 2);
        assert_eq!(
            format!("{:?}", position),
            "1:1-1:3 (0-2)",
            "should support `Debug` on unist positions"
        );
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/char.rs | Rust | //! Deal with bytes, chars, and kinds.
use crate::util::unicode::PUNCTUATION;
use alloc::{format, string::String};
use core::str;
/// Character kinds.
///
/// Produced by [`classify`] / [`classify_opt`]; eof is treated as
/// [`Kind::Whitespace`][].
#[derive(Debug, PartialEq, Eq)]
pub enum Kind {
    /// Whitespace.
    ///
    /// ## Example
    ///
    /// ```markdown
    /// > | **a_b_ c**.
    ///                ^      ^    ^
    /// ```
    Whitespace,
    /// Punctuation.
    ///
    /// ## Example
    ///
    /// ```markdown
    /// > | **a_b_ c**.
    ///     ^^  ^    ^   ^
    /// ```
    Punctuation,
    /// Everything else.
    ///
    /// ## Example
    ///
    /// ```markdown
    /// > | **a_b_ c**.
    ///       ^ ^    ^
    /// ```
    Other,
}
/// Get a [`char`][] right before `index` in bytes (`&[u8]`).
///
/// In most cases, markdown operates on ASCII bytes.
/// In a few cases, it is unicode aware, so we need to find an actual char.
pub fn before_index(bytes: &[u8], index: usize) -> Option<char> {
    // A `char` takes at most 4 bytes, so a 4 byte window ending at `index`
    // is enough to decode the character right before it.
    let start = index.saturating_sub(4);
    String::from_utf8_lossy(&bytes[start..index]).chars().last()
}
/// Get a [`char`][] right at `index` in bytes (`&[u8]`).
///
/// In most cases, markdown operates on ASCII bytes.
/// In a few cases, it is unicode aware, so we need to find an actual char.
pub fn after_index(bytes: &[u8], index: usize) -> Option<char> {
    // A `char` takes at most 4 bytes, so a 4 byte window starting at `index`
    // is enough to decode the character at it.
    let end = bytes.len().min(index + 4);
    String::from_utf8_lossy(&bytes[index..end]).chars().next()
}
/// Classify the character starting at `index` in bytes (`&[u8]`).
///
/// The end of the input (`index == bytes.len()`) counts as whitespace.
pub fn kind_after_index(bytes: &[u8], index: usize) -> Kind {
    if index == bytes.len() {
        Kind::Whitespace
    } else {
        match bytes[index] {
            byte if byte.is_ascii_whitespace() => Kind::Whitespace,
            byte if byte.is_ascii_punctuation() => Kind::Punctuation,
            byte if byte.is_ascii_alphanumeric() => Kind::Other,
            // Not classifiable as ASCII: either an ASCII control or the lead
            // byte of a non-ASCII `char`, so decode the actual `char` and
            // classify that.
            _ => classify_opt(after_index(bytes, index)),
        }
    }
}
/// Classify whether a `char` represents whitespace, punctuation, or something
/// else.
///
/// Used for attention (emphasis, strong), whose sequences can open or close
/// based on the class of surrounding characters.
///
/// ## References
///
/// * [`micromark-util-classify-character` in `micromark`](https://github.com/micromark/micromark/blob/main/packages/micromark-util-classify-character/dev/index.js)
pub fn classify(char: char) -> Kind {
    match char {
        // Unicode whitespace.
        char if char.is_whitespace() => Kind::Whitespace,
        // ASCII or unicode punctuation.
        char if char.is_ascii_punctuation() || PUNCTUATION.contains(&char) => Kind::Punctuation,
        // Everything else.
        _ => Kind::Other,
    }
}
/// Like [`classify`], but supports eof as whitespace.
pub fn classify_opt(char_opt: Option<char>) -> Kind {
char_opt.map_or(Kind::Whitespace, classify)
}
/// Format an optional `char` for diagnostics (`None` means eof).
pub fn format_opt(char: Option<char>) -> String {
    match char {
        None => "end of file".into(),
        Some(char) => format!("character {}", format(char)),
    }
}
/// Format an optional `byte` for log messages (`None` means eof).
#[cfg(feature = "log")]
pub fn format_byte_opt(byte: Option<u8>) -> String {
    match byte {
        None => "end of file".into(),
        Some(byte) => format!("byte {}", format_byte(byte)),
    }
}
/// Format a `char`.
///
/// Yields the unicode code point (`U+xxxx`), preceded by a markdown-quoted
/// form of the character when it is printable ASCII.
pub fn format(char: char) -> String {
    let representation = format!("U+{:>04X}", char as u32);
    match char {
        // A grave accent needs double backtick fencing with spaces.
        '`' => format!("`` ` `` ({})", representation),
        // Printable ASCII.
        '!'..='~' => format!("`{}` ({})", char, representation),
        // Unprintable: only the code point.
        _ => representation,
    }
}
/// Format a byte (`u8`).
///
/// Yields the code point (`U+xxxx`), preceded by a markdown-quoted form of
/// the byte when it is printable ASCII.
pub fn format_byte(byte: u8) -> String {
    let representation = format!("U+{:>04X}", byte);
    match byte {
        // A grave accent needs double backtick fencing with spaces.
        b'`' => format!("`` ` `` ({})", representation),
        // Printable ASCII.
        b'!'..=b'~' => format!(
            "`{}` ({})",
            str::from_utf8(&[byte]).unwrap(),
            representation
        ),
        // Unprintable: only the code point.
        _ => representation,
    }
}
// Unit tests for the classification and formatting helpers above.
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::string::ToString;
    #[test]
    fn test_classify() {
        assert_eq!(
            classify(' '),
            Kind::Whitespace,
            "should classify whitespace"
        );
        assert_eq!(
            classify('.'),
            Kind::Punctuation,
            "should classify punctuation"
        );
        assert_eq!(classify('a'), Kind::Other, "should classify other");
    }
    #[test]
    fn test_format_opt() {
        // `None` represents eof.
        assert_eq!(
            format_opt(None),
            "end of file".to_string(),
            "should format an optional char: none -> eof"
        );
        assert_eq!(
            format_opt(Some('!')),
            "character `!` (U+0021)".to_string(),
            "should format an optional char: char -> pretty"
        );
    }
    #[test]
    #[cfg(feature = "log")]
    fn test_format_byte_opt() {
        // `None` represents eof.
        assert_eq!(
            format_byte_opt(None),
            "end of file".to_string(),
            "should format an optional byte: none -> eof"
        );
        assert_eq!(
            format_byte_opt(Some(b'!')),
            "byte `!` (U+0021)".to_string(),
            "should format an optional byte: char -> pretty"
        );
    }
    #[test]
    fn test_format() {
        assert_eq!(
            format('`'),
            "`` ` `` (U+0060)".to_string(),
            "should format a char: grave accent"
        );
        assert_eq!(
            format('!'),
            "`!` (U+0021)".to_string(),
            "should format a char: regular"
        );
        assert_eq!(
            format(' '),
            "U+0020".to_string(),
            "should format a char: unprintable"
        );
    }
    #[test]
    fn test_format_byte() {
        assert_eq!(
            format_byte(b'`'),
            "`` ` `` (U+0060)".to_string(),
            "should format a byte: grave accent"
        );
        assert_eq!(
            format_byte(b'!'),
            "`!` (U+0021)".to_string(),
            "should format a byte: regular"
        );
        assert_eq!(
            format_byte(b' '),
            "U+0020".to_string(),
            "should format a byte: unprintable"
        );
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/character_reference.rs | Rust | //! Helpers for character references.
use crate::util::constant::{
CHARACTER_REFERENCES, CHARACTER_REFERENCES_HTML_4, CHARACTER_REFERENCE_DECIMAL_SIZE_MAX,
CHARACTER_REFERENCE_HEXADECIMAL_SIZE_MAX, CHARACTER_REFERENCE_NAMED_SIZE_MAX,
};
use alloc::string::String;
use core::str;
/// Decode named character references.
///
/// Turn the name coming from a named character reference (without the `&` or
/// `;`) into a string, by looking it up in the list of known names.
///
/// The `html5` boolean specifies whether the 2125 names from HTML 5
/// ([`CHARACTER_REFERENCES`][]) or the 252 names from HTML 4
/// ([`CHARACTER_REFERENCES_HTML_4`][]) are supported.
///
/// The result is `String` instead of `char` because named character references
/// can expand into multiple characters.
///
/// ## Examples
///
/// ```rust ignore
/// use markdown::util::decode_character_reference::decode_named;
///
/// assert_eq!(decode_named("amp", true).unwrap(), "&");
/// assert_eq!(decode_named("AElig", true).unwrap(), "Æ");
/// assert_eq!(decode_named("aelig", true).unwrap(), "æ");
/// ```
///
/// ## References
///
/// * [`wooorm/decode-named-character-reference`](https://github.com/wooorm/decode-named-character-reference)
/// * [*§ 2.5 Entity and numeric character references* in `CommonMark`](https://spec.commonmark.org/0.31/#entity-and-numeric-character-references)
pub fn decode_named(value: &str, html5: bool) -> Option<String> {
    let list: &[(&str, &str)] = if html5 {
        &CHARACTER_REFERENCES
    } else {
        &CHARACTER_REFERENCES_HTML_4
    };
    list.iter()
        .find(|(name, _)| *name == value)
        .map(|(_, replacement)| (*replacement).into())
}
/// Decode numeric character references.
///
/// Turn the number (in string form as either hexadecimal or decimal) coming
/// from a numeric character reference into a string.
/// The base of the string form must be passed as the `radix` parameter, as
/// `10` (decimal) or `16` (hexadecimal).
///
/// This returns a `String` form of the associated character, or the
/// replacement character for disallowed values: C0 control characters (except
/// for ASCII whitespace), the DEL character, C1 control characters, lone
/// surrogates, and out of range values.
///
/// ## Examples
///
/// ```rust ignore
/// use markdown::util::decode_character_reference::decode_numeric;
///
/// assert_eq!(decode_numeric("123", 10), "{");
/// assert_eq!(decode_numeric("9", 16), "\t");
/// assert_eq!(decode_numeric("0", 10), "�"); // Not allowed.
/// ```
///
/// ## Panics
///
/// This function panics if an invalid string or an out of bounds valid string
/// is given.
/// It is expected that figuring out whether a number is allowed is handled in
/// the parser.
/// When `markdown-rs` is used, this function never panics.
///
/// ## References
///
/// * [`micromark-util-decode-numeric-character-reference` in `micromark`](https://github.com/micromark/micromark/tree/main/packages/micromark-util-decode-numeric-character-reference)
/// * [*§ 2.5 Entity and numeric character references* in `CommonMark`](https://spec.commonmark.org/0.31/#entity-and-numeric-character-references)
pub fn decode_numeric(value: &str, radix: u32) -> String {
    let code = u32::from_str_radix(value, radix).unwrap();
    let disallowed = |char: char| {
        matches!(char,
            // C0 except for HT, LF, FF, CR, space.
            '\0'..='\u{08}' | '\u{0B}' | '\u{0E}'..='\u{1F}' |
            // Control character (DEL) of C0, and C1 controls.
            '\u{7F}'..='\u{9F}'
        )
    };
    match char::from_u32(code) {
        // Lone surrogates and out of range values are already rejected by
        // `char::from_u32`.
        Some(char) if !disallowed(char) => char.into(),
        _ => char::REPLACEMENT_CHARACTER.into(),
    }
}
/// Decode a character reference.
///
/// This turns the number (in string form as either hexadecimal or decimal) or
/// name from a character reference into a string.
///
/// The marker specifies the format: `#` for decimal, `x` for hexadecimal, and
/// `&` for named.
///
/// The `html5` boolean is used for named character references, and specifies
/// whether the 2125 names from HTML 5 or the 252 names from HTML 4 are
/// supported.
///
/// ## Panics
///
/// Panics if `marker` is not `b'&'`, `b'x'`, or `b'#'`.
pub fn decode(value: &str, marker: u8, html5: bool) -> Option<String> {
    match marker {
        // Decimal.
        b'#' => Some(decode_numeric(value, 10)),
        // Hexadecimal.
        b'x' => Some(decode_numeric(value, 16)),
        // Named.
        b'&' => decode_named(value, html5),
        _ => unreachable!("Unexpected marker `{}`", marker),
    }
}
/// Get the maximum size of a value for different kinds of references.
///
/// The value is the stuff after the markers, before the `;`.
///
/// ## Panics
///
/// Panics if `marker` is not `b'&'`, `b'x'`, or `b'#'`.
pub fn value_max(marker: u8) -> usize {
    match marker {
        // Decimal (`&#123;`).
        b'#' => CHARACTER_REFERENCE_DECIMAL_SIZE_MAX,
        // Hexadecimal (`&#x1AB;`).
        b'x' => CHARACTER_REFERENCE_HEXADECIMAL_SIZE_MAX,
        // Named (`&amp;`).
        b'&' => CHARACTER_REFERENCE_NAMED_SIZE_MAX,
        _ => unreachable!("Unexpected marker `{}`", marker),
    }
}
/// Get a test to check if a byte is allowed as a value for different kinds of
/// references.
///
/// The value is the stuff after the markers, before the `;`.
///
/// ## Panics
///
/// Panics if `marker` is not `b'&'`, `b'x'`, or `b'#'`.
pub fn value_test(marker: u8) -> fn(&u8) -> bool {
    match marker {
        // Decimal (`&#123;`).
        b'#' => u8::is_ascii_digit,
        // Hexadecimal (`&#x1AB;`).
        b'x' => u8::is_ascii_hexdigit,
        // Named (`&amp;`).
        b'&' => u8::is_ascii_alphanumeric,
        _ => unreachable!("Unexpected marker `{}`", marker),
    }
}
/// Decode character references in a string.
///
/// Each well-formed reference (`&name;`, `&#123;`, `&#xAB;`) is replaced by
/// its decoded value; everything else is copied through as-is.
///
/// > 👉 **Note**: this currently only supports the 252 named character
/// > references from HTML 4, as it’s only used for JSX.
/// >
/// > If it’s ever needed to support HTML 5 (which is what normal markdown
/// > uses), a boolean parameter can be added here.
pub fn parse(value: &str) -> String {
    let bytes = value.as_bytes();
    let mut index = 0;
    let len = bytes.len();
    // Grows a bit smaller with each character reference.
    let mut result = String::with_capacity(value.len());
    // Start of the chunk of `value` not yet copied into `result`.
    let mut start = 0;
    while index < len {
        if bytes[index] == b'&' {
            // Figure out the kind of reference and where its value starts:
            // named (`&`), decimal (`#`), or hexadecimal (`x`).
            let (marker, value_start) = if index + 1 < len && bytes[index + 1] == b'#' {
                if index + 2 < len && matches!(bytes[index + 2], b'x' | b'X') {
                    (b'x', index + 3)
                } else {
                    (b'#', index + 2)
                }
            } else {
                (b'&', index + 1)
            };
            let max = value_max(marker);
            let test = value_test(marker);
            let mut value_index = 0;
            // Scan allowed value bytes, capped at the max size for this kind.
            while value_index < max && (value_start + value_index) < len {
                if !test(&bytes[value_start + value_index]) {
                    break;
                }
                value_index += 1;
            }
            let value_end = value_start + value_index;
            // Non empty, not past the end of input, and terminated with `;`.
            // The `value_end < len` check prevents an out of bounds panic for
            // input ending in an unterminated reference (such as `"&amp"`).
            if value_index > 0 && value_end < len && bytes[value_end] == b';' {
                if let Some(decoded) = decode(
                    str::from_utf8(&bytes[value_start..value_end]).unwrap(),
                    marker,
                    false,
                ) {
                    // Copy the pending literal chunk, then the decoded value,
                    // and continue right after the `;`.
                    result.push_str(&value[start..index]);
                    result.push_str(&decoded);
                    start = value_end + 1;
                    index = start;
                    continue;
                }
            }
        }
        index += 1;
    }
    result.push_str(&value[start..]);
    result
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/constant.rs | Rust | //! Constants needed to parse markdown.
//!
//! Most of these constants are magic numbers, such as the number of markers
//! needed to parse [code (fenced)][raw_flow]
//! ([`CODE_FENCED_SEQUENCE_SIZE_MIN`][]) or the max number of allowed markers
//! in a [heading (atx)][heading_atx]
//! ([`HEADING_ATX_OPENING_FENCE_SIZE_MAX`][]).
//!
//! Some constants are instead lists of things, such as the list of tag names
//! considered in the **raw** production of [HTML (flow)][html_flow]
//! ([`HTML_RAW_NAMES`][]), or the list of named character references
//! ([`CHARACTER_REFERENCES`][]).
//!
//! [raw_flow]: crate::construct::raw_flow
//! [heading_atx]: crate::construct::heading_atx
//! [html_flow]: crate::construct::html_flow
/// The number of characters allowed in a protocol of an [autolink][].
///
/// The protocol part is the `xxx` in `<xxx://example.com>`.
/// 32 characters is fine, 33 is too many.
///
/// [autolink]: crate::construct::autolink
pub const AUTOLINK_SCHEME_SIZE_MAX: usize = 32;
/// The number of characters allowed in a domain of an email [autolink][].
///
/// There can be multiple “domains”.
/// A domain part is each `xxx` in `<example@xxx.xxx.xxx>`.
/// 63 characters is fine, 64 is too many.
///
/// [autolink]: crate::construct::autolink
pub const AUTOLINK_DOMAIN_SIZE_MAX: usize = 63;
/// The max number of characters in a decimal numeric
/// [character reference][character_reference].
///
/// To illustrate, this allows `&#1114111;` and disallows `&#11141110;`.
/// This limit is imposed because all bigger numbers are invalid: the maximum
/// code point (`U+10FFFF`) is `1114111` in decimal, 7 digits.
///
/// [character_reference]: crate::construct::character_reference
pub const CHARACTER_REFERENCE_DECIMAL_SIZE_MAX: usize = 7;
/// The max number of characters in a hexadecimal numeric
/// [character reference][character_reference].
///
/// To illustrate, this allows `&#x10FFFF;` and disallows `&#x010FFFF;`.
/// This limit is imposed because all bigger numbers are invalid: the maximum
/// code point (`U+10FFFF`) is 6 hexadecimal digits.
///
/// [character_reference]: crate::construct::character_reference
pub const CHARACTER_REFERENCE_HEXADECIMAL_SIZE_MAX: usize = 6;
/// The max number of characters in a named
/// [character reference][character_reference].
///
/// This is the number of the longest name in [`CHARACTER_REFERENCES`][]
/// (`CounterClockwiseContourIntegral`, 31 characters).
/// It allows `&CounterClockwiseContourIntegral;` and prevents the parser from
/// continuing for eons.
///
/// [character_reference]: crate::construct::character_reference
pub const CHARACTER_REFERENCE_NAMED_SIZE_MAX: usize = 31;
/// The number of markers needed for [code (fenced)][raw_flow] to form.
///
/// Like many things in markdown, the number is `3`.
///
/// [raw_flow]: crate::construct::raw_flow
pub const CODE_FENCED_SEQUENCE_SIZE_MIN: usize = 3;
/// The number of markers needed for [frontmatter][] to form.
///
/// Like many things in markdown, the number is `3`.
///
/// [frontmatter]: crate::construct::frontmatter
pub const FRONTMATTER_SEQUENCE_SIZE: usize = 3;
/// The number of the longest tag name in [`GFM_HTML_TAGFILTER_NAMES`][].
///
/// This is currently the size of `plaintext` (9 characters).
pub const GFM_HTML_TAGFILTER_SIZE_MAX: usize = 9;
/// List of HTML tag names that are escaped by GFM’s tag filter.
///
/// Tag name matching must be performed insensitive to case, and thus this list
/// includes lowercase tag names.
///
/// ## References
///
/// * [*§ 6.1 Disallowed Raw HTML (extension)* in GFM](https://github.github.com/gfm/#disallowed-raw-html-extension-)
pub const GFM_HTML_TAGFILTER_NAMES: [&str; 9] = [
    "iframe",
    "noembed",
    "noframes",
    "plaintext",
    "script",
    "style",
    "textarea",
    "title",
    "xmp",
];
/// The number of preceding spaces needed for a [hard break
/// (trailing)][whitespace] to form.
///
/// [whitespace]: crate::construct::partial_whitespace
pub const HARD_BREAK_PREFIX_SIZE_MIN: usize = 2;
/// The max number of markers allowed to form a [heading (atx)][heading_atx].
///
/// This limitation is imposed by HTML, which imposes a max heading rank of
/// `6`.
///
/// [heading_atx]: crate::construct::heading_atx
pub const HEADING_ATX_OPENING_FENCE_SIZE_MAX: usize = 6;
/// List of HTML tag names that form the **basic** production of
/// [HTML (flow)][html_flow].
///
/// The **basic** production allows interleaving HTML and markdown with blank
/// lines and allows flow (block) elements to interrupt definitions, paragraphs,
/// and heading (setext).
/// Tag name matching must be performed insensitive to case, and thus this list
/// includes lowercase tag names.
///
/// Tag names not on this list result in the **complete** production.
///
/// > 👉 **Note**: `source` was removed on `main` of the `CommonMark` spec and
/// > is slated to be released in `CommonMark@0.31`.
///
/// ## References
///
/// * [*§ 4.6 HTML blocks* in `CommonMark`](https://spec.commonmark.org/0.31/#html-blocks)
/// * [*Remove source element as HTML block start condition* as `commonmark/commonmark-spec#710`](https://github.com/commonmark/commonmark-spec/pull/710)
///
/// [html_flow]: crate::construct::html_flow
// NOTE(review): the list appears to be sorted alphabetically — keep it that
// way when editing, and keep the element count in the type in sync.
pub const HTML_BLOCK_NAMES: [&str; 62] = [
    "address",
    "article",
    "aside",
    "base",
    "basefont",
    "blockquote",
    "body",
    "caption",
    "center",
    "col",
    "colgroup",
    "dd",
    "details",
    "dialog",
    "dir",
    "div",
    "dl",
    "dt",
    "fieldset",
    "figcaption",
    "figure",
    "footer",
    "form",
    "frame",
    "frameset",
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6",
    "head",
    "header",
    "hr",
    "html",
    "iframe",
    "legend",
    "li",
    "link",
    "main",
    "menu",
    "menuitem",
    "nav",
    "noframes",
    "ol",
    "optgroup",
    "option",
    "p",
    "param",
    "search",
    "section",
    "summary",
    "table",
    "tbody",
    "td",
    "tfoot",
    "th",
    "thead",
    "title",
    "tr",
    "track",
    "ul",
];
/// Magic string of CDATA (after `<![`).
///
/// Used in the **cdata** production of [HTML (flow)][html_flow] and
/// [HTML (text)][html_text].
///
/// [html_flow]: crate::construct::html_flow
/// [html_text]: crate::construct::html_text
pub const HTML_CDATA_PREFIX: [u8; 6] = [b'C', b'D', b'A', b'T', b'A', b'['];
/// List of HTML tag names that form the **raw** production of
/// [HTML (flow)][html_flow].
///
/// The **raw** production allows blank lines and thus no interleaving with
/// markdown.
/// Tag name matching must be performed insensitive to case, and thus this list
/// includes lowercase tag names.
///
/// The number of the longest tag name is also stored as a constant in
/// [`HTML_RAW_SIZE_MAX`][].
///
/// > 👉 **Note**: `textarea` was added in `CommonMark@0.30`.
///
/// ## References
///
/// * [*§ 4.6 HTML blocks* in `CommonMark`](https://spec.commonmark.org/0.31/#html-blocks)
///
/// [html_flow]: crate::construct::html_flow
pub const HTML_RAW_NAMES: [&str; 4] = ["pre", "script", "style", "textarea"];
/// The number of the longest tag name in [`HTML_RAW_NAMES`][].
///
/// This is currently the size of `textarea` (8 characters).
pub const HTML_RAW_SIZE_MAX: usize = 8;
/// To safeguard performance, labels are capped at a large number: `999`.
pub const LINK_REFERENCE_SIZE_MAX: usize = 999;
/// The max number of decimals allowed to form an (ordered)
/// [list item][list-item].
///
/// `CommonMark` caps this at 10 digits (9 is fine, 10 not).
/// This limit is imposed because bigger numbers result in integer overflows
/// in some browsers.
///
/// ## References
///
/// * [*§ 5.2 List items* in `CommonMark`](https://spec.commonmark.org/0.31/#ordered-list-marker)
///
/// [list-item]: crate::construct::list_item
pub const LIST_ITEM_VALUE_SIZE_MAX: usize = 10;
/// The number of markers needed for [math (flow)][raw_flow] to form.
///
/// Unlike code (fenced), this number is `2`.
///
/// [raw_flow]: crate::construct::raw_flow
pub const MATH_FLOW_SEQUENCE_SIZE_MIN: usize = 2;
/// Maximum allowed unbalanced parens in destination.
///
/// There can be many balanced parens, but if there are 33 opens that were not
/// yet closed, the destination does not parse.
/// `CommonMark` requires that at least 3 opening parens are allowed.
/// See: <https://spec.commonmark.org/0.31/#link-destination>.
/// In practice, this is quite low, and several places instead cap it at 32.
/// See: <https://github.com/remarkjs/react-markdown/issues/658#issuecomment-984345577>.
pub const RESOURCE_DESTINATION_BALANCE_MAX: usize = 32;
/// List of protocols allowed, when operating safely, as `href` on `a`.
///
/// This list is based on what is allowed by GitHub.
pub const SAFE_PROTOCOL_HREF: [&str; 6] = ["http", "https", "irc", "ircs", "mailto", "xmpp"];
/// List of protocols allowed, when operating safely, as `src` on `img`.
///
/// This list is based on what is allowed by GitHub.
pub const SAFE_PROTOCOL_SRC: [&str; 2] = ["http", "https"];
/// The number of characters that form a tab stop.
///
/// This relates to the number of whitespace characters needed to form certain
/// constructs in markdown, most notably the whitespace required to form
/// [code (indented)][code_indented].
///
/// [code_indented]: crate::construct::code_indented
pub const TAB_SIZE: usize = 4;
/// The number of markers needed for a [thematic break][thematic_break] to form.
///
/// Like many things in markdown, the number is `3`.
///
/// [thematic_break]: crate::construct::thematic_break
pub const THEMATIC_BREAK_MARKER_COUNT_MIN: usize = 3;
// Important: please touch the below lists as few times as possible to keep Git small.
/// List of names and values that form named [character reference][character_reference]s.
///
/// This list is sensitive to casing.
///
/// The number of the longest name (`CounterClockwiseContourIntegral`) is also
/// stored as a constant in [`CHARACTER_REFERENCE_NAMED_SIZE_MAX`][].
///
/// ## References
///
/// * [*§ 2.5 Entity and numeric character references* in `CommonMark`](https://spec.commonmark.org/0.31/#entity-and-numeric-character-references)
///
/// [character_reference]: crate::construct::character_reference
pub static CHARACTER_REFERENCES: [(&str, &str); 2125] = [
("AElig", "Æ"),
("AMP", "&"),
("Aacute", "Á"),
("Abreve", "Ă"),
("Acirc", "Â"),
("Acy", "А"),
("Afr", "𝔄"),
("Agrave", "À"),
("Alpha", "Α"),
("Amacr", "Ā"),
("And", "⩓"),
("Aogon", "Ą"),
("Aopf", "𝔸"),
("ApplyFunction", ""),
("Aring", "Å"),
("Ascr", "𝒜"),
("Assign", "≔"),
("Atilde", "Ã"),
("Auml", "Ä"),
("Backslash", "∖"),
("Barv", "⫧"),
("Barwed", "⌆"),
("Bcy", "Б"),
("Because", "∵"),
("Bernoullis", "ℬ"),
("Beta", "Β"),
("Bfr", "𝔅"),
("Bopf", "𝔹"),
("Breve", "˘"),
("Bscr", "ℬ"),
("Bumpeq", "≎"),
("CHcy", "Ч"),
("COPY", "©"),
("Cacute", "Ć"),
("Cap", "⋒"),
("CapitalDifferentialD", "ⅅ"),
("Cayleys", "ℭ"),
("Ccaron", "Č"),
("Ccedil", "Ç"),
("Ccirc", "Ĉ"),
("Cconint", "∰"),
("Cdot", "Ċ"),
("Cedilla", "¸"),
("CenterDot", "·"),
("Cfr", "ℭ"),
("Chi", "Χ"),
("CircleDot", "⊙"),
("CircleMinus", "⊖"),
("CirclePlus", "⊕"),
("CircleTimes", "⊗"),
("ClockwiseContourIntegral", "∲"),
("CloseCurlyDoubleQuote", "”"),
("CloseCurlyQuote", "’"),
("Colon", "∷"),
("Colone", "⩴"),
("Congruent", "≡"),
("Conint", "∯"),
("ContourIntegral", "∮"),
("Copf", "ℂ"),
("Coproduct", "∐"),
("CounterClockwiseContourIntegral", "∳"),
("Cross", "⨯"),
("Cscr", "𝒞"),
("Cup", "⋓"),
("CupCap", "≍"),
("DD", "ⅅ"),
("DDotrahd", "⤑"),
("DJcy", "Ђ"),
("DScy", "Ѕ"),
("DZcy", "Џ"),
("Dagger", "‡"),
("Darr", "↡"),
("Dashv", "⫤"),
("Dcaron", "Ď"),
("Dcy", "Д"),
("Del", "∇"),
("Delta", "Δ"),
("Dfr", "𝔇"),
("DiacriticalAcute", "´"),
("DiacriticalDot", "˙"),
("DiacriticalDoubleAcute", "˝"),
("DiacriticalGrave", "`"),
("DiacriticalTilde", "˜"),
("Diamond", "⋄"),
("DifferentialD", "ⅆ"),
("Dopf", "𝔻"),
("Dot", "¨"),
("DotDot", "⃜"),
("DotEqual", "≐"),
("DoubleContourIntegral", "∯"),
("DoubleDot", "¨"),
("DoubleDownArrow", "⇓"),
("DoubleLeftArrow", "⇐"),
("DoubleLeftRightArrow", "⇔"),
("DoubleLeftTee", "⫤"),
("DoubleLongLeftArrow", "⟸"),
("DoubleLongLeftRightArrow", "⟺"),
("DoubleLongRightArrow", "⟹"),
("DoubleRightArrow", "⇒"),
("DoubleRightTee", "⊨"),
("DoubleUpArrow", "⇑"),
("DoubleUpDownArrow", "⇕"),
("DoubleVerticalBar", "∥"),
("DownArrow", "↓"),
("DownArrowBar", "⤓"),
("DownArrowUpArrow", "⇵"),
("DownBreve", "̑"),
("DownLeftRightVector", "⥐"),
("DownLeftTeeVector", "⥞"),
("DownLeftVector", "↽"),
("DownLeftVectorBar", "⥖"),
("DownRightTeeVector", "⥟"),
("DownRightVector", "⇁"),
("DownRightVectorBar", "⥗"),
("DownTee", "⊤"),
("DownTeeArrow", "↧"),
("Downarrow", "⇓"),
("Dscr", "𝒟"),
("Dstrok", "Đ"),
("ENG", "Ŋ"),
("ETH", "Ð"),
("Eacute", "É"),
("Ecaron", "Ě"),
("Ecirc", "Ê"),
("Ecy", "Э"),
("Edot", "Ė"),
("Efr", "𝔈"),
("Egrave", "È"),
("Element", "∈"),
("Emacr", "Ē"),
("EmptySmallSquare", "◻"),
("EmptyVerySmallSquare", "▫"),
("Eogon", "Ę"),
("Eopf", "𝔼"),
("Epsilon", "Ε"),
("Equal", "⩵"),
("EqualTilde", "≂"),
("Equilibrium", "⇌"),
("Escr", "ℰ"),
("Esim", "⩳"),
("Eta", "Η"),
("Euml", "Ë"),
("Exists", "∃"),
("ExponentialE", "ⅇ"),
("Fcy", "Ф"),
("Ffr", "𝔉"),
("FilledSmallSquare", "◼"),
("FilledVerySmallSquare", "▪"),
("Fopf", "𝔽"),
("ForAll", "∀"),
("Fouriertrf", "ℱ"),
("Fscr", "ℱ"),
("GJcy", "Ѓ"),
("GT", ">"),
("Gamma", "Γ"),
("Gammad", "Ϝ"),
("Gbreve", "Ğ"),
("Gcedil", "Ģ"),
("Gcirc", "Ĝ"),
("Gcy", "Г"),
("Gdot", "Ġ"),
("Gfr", "𝔊"),
("Gg", "⋙"),
("Gopf", "𝔾"),
("GreaterEqual", "≥"),
("GreaterEqualLess", "⋛"),
("GreaterFullEqual", "≧"),
("GreaterGreater", "⪢"),
("GreaterLess", "≷"),
("GreaterSlantEqual", "⩾"),
("GreaterTilde", "≳"),
("Gscr", "𝒢"),
("Gt", "≫"),
("HARDcy", "Ъ"),
("Hacek", "ˇ"),
("Hat", "^"),
("Hcirc", "Ĥ"),
("Hfr", "ℌ"),
("HilbertSpace", "ℋ"),
("Hopf", "ℍ"),
("HorizontalLine", "─"),
("Hscr", "ℋ"),
("Hstrok", "Ħ"),
("HumpDownHump", "≎"),
("HumpEqual", "≏"),
("IEcy", "Е"),
("IJlig", "IJ"),
("IOcy", "Ё"),
("Iacute", "Í"),
("Icirc", "Î"),
("Icy", "И"),
("Idot", "İ"),
("Ifr", "ℑ"),
("Igrave", "Ì"),
("Im", "ℑ"),
("Imacr", "Ī"),
("ImaginaryI", "ⅈ"),
("Implies", "⇒"),
("Int", "∬"),
("Integral", "∫"),
("Intersection", "⋂"),
("InvisibleComma", ""),
("InvisibleTimes", ""),
("Iogon", "Į"),
("Iopf", "𝕀"),
("Iota", "Ι"),
("Iscr", "ℐ"),
("Itilde", "Ĩ"),
("Iukcy", "І"),
("Iuml", "Ï"),
("Jcirc", "Ĵ"),
("Jcy", "Й"),
("Jfr", "𝔍"),
("Jopf", "𝕁"),
("Jscr", "𝒥"),
("Jsercy", "Ј"),
("Jukcy", "Є"),
("KHcy", "Х"),
("KJcy", "Ќ"),
("Kappa", "Κ"),
("Kcedil", "Ķ"),
("Kcy", "К"),
("Kfr", "𝔎"),
("Kopf", "𝕂"),
("Kscr", "𝒦"),
("LJcy", "Љ"),
("LT", "<"),
("Lacute", "Ĺ"),
("Lambda", "Λ"),
("Lang", "⟪"),
("Laplacetrf", "ℒ"),
("Larr", "↞"),
("Lcaron", "Ľ"),
("Lcedil", "Ļ"),
("Lcy", "Л"),
("LeftAngleBracket", "⟨"),
("LeftArrow", "←"),
("LeftArrowBar", "⇤"),
("LeftArrowRightArrow", "⇆"),
("LeftCeiling", "⌈"),
("LeftDoubleBracket", "⟦"),
("LeftDownTeeVector", "⥡"),
("LeftDownVector", "⇃"),
("LeftDownVectorBar", "⥙"),
("LeftFloor", "⌊"),
("LeftRightArrow", "↔"),
("LeftRightVector", "⥎"),
("LeftTee", "⊣"),
("LeftTeeArrow", "↤"),
("LeftTeeVector", "⥚"),
("LeftTriangle", "⊲"),
("LeftTriangleBar", "⧏"),
("LeftTriangleEqual", "⊴"),
("LeftUpDownVector", "⥑"),
("LeftUpTeeVector", "⥠"),
("LeftUpVector", "↿"),
("LeftUpVectorBar", "⥘"),
("LeftVector", "↼"),
("LeftVectorBar", "⥒"),
("Leftarrow", "⇐"),
("Leftrightarrow", "⇔"),
("LessEqualGreater", "⋚"),
("LessFullEqual", "≦"),
("LessGreater", "≶"),
("LessLess", "⪡"),
("LessSlantEqual", "⩽"),
("LessTilde", "≲"),
("Lfr", "𝔏"),
("Ll", "⋘"),
("Lleftarrow", "⇚"),
("Lmidot", "Ŀ"),
("LongLeftArrow", "⟵"),
("LongLeftRightArrow", "⟷"),
("LongRightArrow", "⟶"),
("Longleftarrow", "⟸"),
("Longleftrightarrow", "⟺"),
("Longrightarrow", "⟹"),
("Lopf", "𝕃"),
("LowerLeftArrow", "↙"),
("LowerRightArrow", "↘"),
("Lscr", "ℒ"),
("Lsh", "↰"),
("Lstrok", "Ł"),
("Lt", "≪"),
("Map", "⤅"),
("Mcy", "М"),
("MediumSpace", " "),
("Mellintrf", "ℳ"),
("Mfr", "𝔐"),
("MinusPlus", "∓"),
("Mopf", "𝕄"),
("Mscr", "ℳ"),
("Mu", "Μ"),
("NJcy", "Њ"),
("Nacute", "Ń"),
("Ncaron", "Ň"),
("Ncedil", "Ņ"),
("Ncy", "Н"),
("NegativeMediumSpace", "\u{200B}"),
("NegativeThickSpace", "\u{200B}"),
("NegativeThinSpace", "\u{200B}"),
("NegativeVeryThinSpace", "\u{200B}"),
("NestedGreaterGreater", "≫"),
("NestedLessLess", "≪"),
("NewLine", "\n"),
("Nfr", "𝔑"),
("NoBreak", "\u{2060}"),
("NonBreakingSpace", " "),
("Nopf", "ℕ"),
("Not", "⫬"),
("NotCongruent", "≢"),
("NotCupCap", "≭"),
("NotDoubleVerticalBar", "∦"),
("NotElement", "∉"),
("NotEqual", "≠"),
("NotEqualTilde", "≂̸"),
("NotExists", "∄"),
("NotGreater", "≯"),
("NotGreaterEqual", "≱"),
("NotGreaterFullEqual", "≧̸"),
("NotGreaterGreater", "≫̸"),
("NotGreaterLess", "≹"),
("NotGreaterSlantEqual", "⩾̸"),
("NotGreaterTilde", "≵"),
("NotHumpDownHump", "≎̸"),
("NotHumpEqual", "≏̸"),
("NotLeftTriangle", "⋪"),
("NotLeftTriangleBar", "⧏̸"),
("NotLeftTriangleEqual", "⋬"),
("NotLess", "≮"),
("NotLessEqual", "≰"),
("NotLessGreater", "≸"),
("NotLessLess", "≪̸"),
("NotLessSlantEqual", "⩽̸"),
("NotLessTilde", "≴"),
("NotNestedGreaterGreater", "⪢̸"),
("NotNestedLessLess", "⪡̸"),
("NotPrecedes", "⊀"),
("NotPrecedesEqual", "⪯̸"),
("NotPrecedesSlantEqual", "⋠"),
("NotReverseElement", "∌"),
("NotRightTriangle", "⋫"),
("NotRightTriangleBar", "⧐̸"),
("NotRightTriangleEqual", "⋭"),
("NotSquareSubset", "⊏̸"),
("NotSquareSubsetEqual", "⋢"),
("NotSquareSuperset", "⊐̸"),
("NotSquareSupersetEqual", "⋣"),
("NotSubset", "⊂⃒"),
("NotSubsetEqual", "⊈"),
("NotSucceeds", "⊁"),
("NotSucceedsEqual", "⪰̸"),
("NotSucceedsSlantEqual", "⋡"),
("NotSucceedsTilde", "≿̸"),
("NotSuperset", "⊃⃒"),
("NotSupersetEqual", "⊉"),
("NotTilde", "≁"),
("NotTildeEqual", "≄"),
("NotTildeFullEqual", "≇"),
("NotTildeTilde", "≉"),
("NotVerticalBar", "∤"),
("Nscr", "𝒩"),
("Ntilde", "Ñ"),
("Nu", "Ν"),
("OElig", "Œ"),
("Oacute", "Ó"),
("Ocirc", "Ô"),
("Ocy", "О"),
("Odblac", "Ő"),
("Ofr", "𝔒"),
("Ograve", "Ò"),
("Omacr", "Ō"),
("Omega", "Ω"),
("Omicron", "Ο"),
("Oopf", "𝕆"),
("OpenCurlyDoubleQuote", "“"),
("OpenCurlyQuote", "‘"),
("Or", "⩔"),
("Oscr", "𝒪"),
("Oslash", "Ø"),
("Otilde", "Õ"),
("Otimes", "⨷"),
("Ouml", "Ö"),
("OverBar", "‾"),
("OverBrace", "⏞"),
("OverBracket", "⎴"),
("OverParenthesis", "⏜"),
("PartialD", "∂"),
("Pcy", "П"),
("Pfr", "𝔓"),
("Phi", "Φ"),
("Pi", "Π"),
("PlusMinus", "±"),
("Poincareplane", "ℌ"),
("Popf", "ℙ"),
("Pr", "⪻"),
("Precedes", "≺"),
("PrecedesEqual", "⪯"),
("PrecedesSlantEqual", "≼"),
("PrecedesTilde", "≾"),
("Prime", "″"),
("Product", "∏"),
("Proportion", "∷"),
("Proportional", "∝"),
("Pscr", "𝒫"),
("Psi", "Ψ"),
("QUOT", "\""),
("Qfr", "𝔔"),
("Qopf", "ℚ"),
("Qscr", "𝒬"),
("RBarr", "⤐"),
("REG", "®"),
("Racute", "Ŕ"),
("Rang", "⟫"),
("Rarr", "↠"),
("Rarrtl", "⤖"),
("Rcaron", "Ř"),
("Rcedil", "Ŗ"),
("Rcy", "Р"),
("Re", "ℜ"),
("ReverseElement", "∋"),
("ReverseEquilibrium", "⇋"),
("ReverseUpEquilibrium", "⥯"),
("Rfr", "ℜ"),
("Rho", "Ρ"),
("RightAngleBracket", "⟩"),
("RightArrow", "→"),
("RightArrowBar", "⇥"),
("RightArrowLeftArrow", "⇄"),
("RightCeiling", "⌉"),
("RightDoubleBracket", "⟧"),
("RightDownTeeVector", "⥝"),
("RightDownVector", "⇂"),
("RightDownVectorBar", "⥕"),
("RightFloor", "⌋"),
("RightTee", "⊢"),
("RightTeeArrow", "↦"),
("RightTeeVector", "⥛"),
("RightTriangle", "⊳"),
("RightTriangleBar", "⧐"),
("RightTriangleEqual", "⊵"),
("RightUpDownVector", "⥏"),
("RightUpTeeVector", "⥜"),
("RightUpVector", "↾"),
("RightUpVectorBar", "⥔"),
("RightVector", "⇀"),
("RightVectorBar", "⥓"),
("Rightarrow", "⇒"),
("Ropf", "ℝ"),
("RoundImplies", "⥰"),
("Rrightarrow", "⇛"),
("Rscr", "ℛ"),
("Rsh", "↱"),
("RuleDelayed", "⧴"),
("SHCHcy", "Щ"),
("SHcy", "Ш"),
("SOFTcy", "Ь"),
("Sacute", "Ś"),
("Sc", "⪼"),
("Scaron", "Š"),
("Scedil", "Ş"),
("Scirc", "Ŝ"),
("Scy", "С"),
("Sfr", "𝔖"),
("ShortDownArrow", "↓"),
("ShortLeftArrow", "←"),
("ShortRightArrow", "→"),
("ShortUpArrow", "↑"),
("Sigma", "Σ"),
("SmallCircle", "∘"),
("Sopf", "𝕊"),
("Sqrt", "√"),
("Square", "□"),
("SquareIntersection", "⊓"),
("SquareSubset", "⊏"),
("SquareSubsetEqual", "⊑"),
("SquareSuperset", "⊐"),
("SquareSupersetEqual", "⊒"),
("SquareUnion", "⊔"),
("Sscr", "𝒮"),
("Star", "⋆"),
("Sub", "⋐"),
("Subset", "⋐"),
("SubsetEqual", "⊆"),
("Succeeds", "≻"),
("SucceedsEqual", "⪰"),
("SucceedsSlantEqual", "≽"),
("SucceedsTilde", "≿"),
("SuchThat", "∋"),
("Sum", "∑"),
("Sup", "⋑"),
("Superset", "⊃"),
("SupersetEqual", "⊇"),
("Supset", "⋑"),
("THORN", "Þ"),
("TRADE", "™"),
("TSHcy", "Ћ"),
("TScy", "Ц"),
("Tab", "\t"),
("Tau", "Τ"),
("Tcaron", "Ť"),
("Tcedil", "Ţ"),
("Tcy", "Т"),
("Tfr", "𝔗"),
("Therefore", "∴"),
("Theta", "Θ"),
("ThickSpace", " "),
("ThinSpace", " "),
("Tilde", "∼"),
("TildeEqual", "≃"),
("TildeFullEqual", "≅"),
("TildeTilde", "≈"),
("Topf", "𝕋"),
("TripleDot", "⃛"),
("Tscr", "𝒯"),
("Tstrok", "Ŧ"),
("Uacute", "Ú"),
("Uarr", "↟"),
("Uarrocir", "⥉"),
("Ubrcy", "Ў"),
("Ubreve", "Ŭ"),
("Ucirc", "Û"),
("Ucy", "У"),
("Udblac", "Ű"),
("Ufr", "𝔘"),
("Ugrave", "Ù"),
("Umacr", "Ū"),
("UnderBar", "_"),
("UnderBrace", "⏟"),
("UnderBracket", "⎵"),
("UnderParenthesis", "⏝"),
("Union", "⋃"),
("UnionPlus", "⊎"),
("Uogon", "Ų"),
("Uopf", "𝕌"),
("UpArrow", "↑"),
("UpArrowBar", "⤒"),
("UpArrowDownArrow", "⇅"),
("UpDownArrow", "↕"),
("UpEquilibrium", "⥮"),
("UpTee", "⊥"),
("UpTeeArrow", "↥"),
("Uparrow", "⇑"),
("Updownarrow", "⇕"),
("UpperLeftArrow", "↖"),
("UpperRightArrow", "↗"),
("Upsi", "ϒ"),
("Upsilon", "Υ"),
("Uring", "Ů"),
("Uscr", "𝒰"),
("Utilde", "Ũ"),
("Uuml", "Ü"),
("VDash", "⊫"),
("Vbar", "⫫"),
("Vcy", "В"),
("Vdash", "⊩"),
("Vdashl", "⫦"),
("Vee", "⋁"),
("Verbar", "‖"),
("Vert", "‖"),
("VerticalBar", "∣"),
("VerticalLine", "|"),
("VerticalSeparator", "❘"),
("VerticalTilde", "≀"),
("VeryThinSpace", " "),
("Vfr", "𝔙"),
("Vopf", "𝕍"),
("Vscr", "𝒱"),
("Vvdash", "⊪"),
("Wcirc", "Ŵ"),
("Wedge", "⋀"),
("Wfr", "𝔚"),
("Wopf", "𝕎"),
("Wscr", "𝒲"),
("Xfr", "𝔛"),
("Xi", "Ξ"),
("Xopf", "𝕏"),
("Xscr", "𝒳"),
("YAcy", "Я"),
("YIcy", "Ї"),
("YUcy", "Ю"),
("Yacute", "Ý"),
("Ycirc", "Ŷ"),
("Ycy", "Ы"),
("Yfr", "𝔜"),
("Yopf", "𝕐"),
("Yscr", "𝒴"),
("Yuml", "Ÿ"),
("ZHcy", "Ж"),
("Zacute", "Ź"),
("Zcaron", "Ž"),
("Zcy", "З"),
("Zdot", "Ż"),
("ZeroWidthSpace", "\u{200B}"),
("Zeta", "Ζ"),
("Zfr", "ℨ"),
("Zopf", "ℤ"),
("Zscr", "𝒵"),
("aacute", "á"),
("abreve", "ă"),
("ac", "∾"),
("acE", "∾̳"),
("acd", "∿"),
("acirc", "â"),
("acute", "´"),
("acy", "а"),
("aelig", "æ"),
("af", ""),
("afr", "𝔞"),
("agrave", "à"),
("alefsym", "ℵ"),
("aleph", "ℵ"),
("alpha", "α"),
("amacr", "ā"),
("amalg", "⨿"),
("amp", "&"),
("and", "∧"),
("andand", "⩕"),
("andd", "⩜"),
("andslope", "⩘"),
("andv", "⩚"),
("ang", "∠"),
("ange", "⦤"),
("angle", "∠"),
("angmsd", "∡"),
("angmsdaa", "⦨"),
("angmsdab", "⦩"),
("angmsdac", "⦪"),
("angmsdad", "⦫"),
("angmsdae", "⦬"),
("angmsdaf", "⦭"),
("angmsdag", "⦮"),
("angmsdah", "⦯"),
("angrt", "∟"),
("angrtvb", "⊾"),
("angrtvbd", "⦝"),
("angsph", "∢"),
("angst", "Å"),
("angzarr", "⍼"),
("aogon", "ą"),
("aopf", "𝕒"),
("ap", "≈"),
("apE", "⩰"),
("apacir", "⩯"),
("ape", "≊"),
("apid", "≋"),
("apos", "'"),
("approx", "≈"),
("approxeq", "≊"),
("aring", "å"),
("ascr", "𝒶"),
("ast", "*"),
("asymp", "≈"),
("asympeq", "≍"),
("atilde", "ã"),
("auml", "ä"),
("awconint", "∳"),
("awint", "⨑"),
("bNot", "⫭"),
("backcong", "≌"),
("backepsilon", "϶"),
("backprime", "‵"),
("backsim", "∽"),
("backsimeq", "⋍"),
("barvee", "⊽"),
("barwed", "⌅"),
("barwedge", "⌅"),
("bbrk", "⎵"),
("bbrktbrk", "⎶"),
("bcong", "≌"),
("bcy", "б"),
("bdquo", "„"),
("becaus", "∵"),
("because", "∵"),
("bemptyv", "⦰"),
("bepsi", "϶"),
("bernou", "ℬ"),
("beta", "β"),
("beth", "ℶ"),
("between", "≬"),
("bfr", "𝔟"),
("bigcap", "⋂"),
("bigcirc", "◯"),
("bigcup", "⋃"),
("bigodot", "⨀"),
("bigoplus", "⨁"),
("bigotimes", "⨂"),
("bigsqcup", "⨆"),
("bigstar", "★"),
("bigtriangledown", "▽"),
("bigtriangleup", "△"),
("biguplus", "⨄"),
("bigvee", "⋁"),
("bigwedge", "⋀"),
("bkarow", "⤍"),
("blacklozenge", "⧫"),
("blacksquare", "▪"),
("blacktriangle", "▴"),
("blacktriangledown", "▾"),
("blacktriangleleft", "◂"),
("blacktriangleright", "▸"),
("blank", "␣"),
("blk12", "▒"),
("blk14", "░"),
("blk34", "▓"),
("block", "█"),
("bne", "=⃥"),
("bnequiv", "≡⃥"),
("bnot", "⌐"),
("bopf", "𝕓"),
("bot", "⊥"),
("bottom", "⊥"),
("bowtie", "⋈"),
("boxDL", "╗"),
("boxDR", "╔"),
("boxDl", "╖"),
("boxDr", "╓"),
("boxH", "═"),
("boxHD", "╦"),
("boxHU", "╩"),
("boxHd", "╤"),
("boxHu", "╧"),
("boxUL", "╝"),
("boxUR", "╚"),
("boxUl", "╜"),
("boxUr", "╙"),
("boxV", "║"),
("boxVH", "╬"),
("boxVL", "╣"),
("boxVR", "╠"),
("boxVh", "╫"),
("boxVl", "╢"),
("boxVr", "╟"),
("boxbox", "⧉"),
("boxdL", "╕"),
("boxdR", "╒"),
("boxdl", "┐"),
("boxdr", "┌"),
("boxh", "─"),
("boxhD", "╥"),
("boxhU", "╨"),
("boxhd", "┬"),
("boxhu", "┴"),
("boxminus", "⊟"),
("boxplus", "⊞"),
("boxtimes", "⊠"),
("boxuL", "╛"),
("boxuR", "╘"),
("boxul", "┘"),
("boxur", "└"),
("boxv", "│"),
("boxvH", "╪"),
("boxvL", "╡"),
("boxvR", "╞"),
("boxvh", "┼"),
("boxvl", "┤"),
("boxvr", "├"),
("bprime", "‵"),
("breve", "˘"),
("brvbar", "¦"),
("bscr", "𝒷"),
("bsemi", "⁏"),
("bsim", "∽"),
("bsime", "⋍"),
("bsol", "\\"),
("bsolb", "⧅"),
("bsolhsub", "⟈"),
("bull", "•"),
("bullet", "•"),
("bump", "≎"),
("bumpE", "⪮"),
("bumpe", "≏"),
("bumpeq", "≏"),
("cacute", "ć"),
("cap", "∩"),
("capand", "⩄"),
("capbrcup", "⩉"),
("capcap", "⩋"),
("capcup", "⩇"),
("capdot", "⩀"),
("caps", "∩︀"),
("caret", "⁁"),
("caron", "ˇ"),
("ccaps", "⩍"),
("ccaron", "č"),
("ccedil", "ç"),
("ccirc", "ĉ"),
("ccups", "⩌"),
("ccupssm", "⩐"),
("cdot", "ċ"),
("cedil", "¸"),
("cemptyv", "⦲"),
("cent", "¢"),
("centerdot", "·"),
("cfr", "𝔠"),
("chcy", "ч"),
("check", "✓"),
("checkmark", "✓"),
("chi", "χ"),
("cir", "○"),
("cirE", "⧃"),
("circ", "ˆ"),
("circeq", "≗"),
("circlearrowleft", "↺"),
("circlearrowright", "↻"),
("circledR", "®"),
("circledS", "Ⓢ"),
("circledast", "⊛"),
("circledcirc", "⊚"),
("circleddash", "⊝"),
("cire", "≗"),
("cirfnint", "⨐"),
("cirmid", "⫯"),
("cirscir", "⧂"),
("clubs", "♣"),
("clubsuit", "♣"),
("colon", ":"),
("colone", "≔"),
("coloneq", "≔"),
("comma", ","),
("commat", "@"),
("comp", "∁"),
("compfn", "∘"),
("complement", "∁"),
("complexes", "ℂ"),
("cong", "≅"),
("congdot", "⩭"),
("conint", "∮"),
("copf", "𝕔"),
("coprod", "∐"),
("copy", "©"),
("copysr", "℗"),
("crarr", "↵"),
("cross", "✗"),
("cscr", "𝒸"),
("csub", "⫏"),
("csube", "⫑"),
("csup", "⫐"),
("csupe", "⫒"),
("ctdot", "⋯"),
("cudarrl", "⤸"),
("cudarrr", "⤵"),
("cuepr", "⋞"),
("cuesc", "⋟"),
("cularr", "↶"),
("cularrp", "⤽"),
("cup", "∪"),
("cupbrcap", "⩈"),
("cupcap", "⩆"),
("cupcup", "⩊"),
("cupdot", "⊍"),
("cupor", "⩅"),
("cups", "∪︀"),
("curarr", "↷"),
("curarrm", "⤼"),
("curlyeqprec", "⋞"),
("curlyeqsucc", "⋟"),
("curlyvee", "⋎"),
("curlywedge", "⋏"),
("curren", "¤"),
("curvearrowleft", "↶"),
("curvearrowright", "↷"),
("cuvee", "⋎"),
("cuwed", "⋏"),
("cwconint", "∲"),
("cwint", "∱"),
("cylcty", "⌭"),
("dArr", "⇓"),
("dHar", "⥥"),
("dagger", "†"),
("daleth", "ℸ"),
("darr", "↓"),
("dash", "‐"),
("dashv", "⊣"),
("dbkarow", "⤏"),
("dblac", "˝"),
("dcaron", "ď"),
("dcy", "д"),
("dd", "ⅆ"),
("ddagger", "‡"),
("ddarr", "⇊"),
("ddotseq", "⩷"),
("deg", "°"),
("delta", "δ"),
("demptyv", "⦱"),
("dfisht", "⥿"),
("dfr", "𝔡"),
("dharl", "⇃"),
("dharr", "⇂"),
("diam", "⋄"),
("diamond", "⋄"),
("diamondsuit", "♦"),
("diams", "♦"),
("die", "¨"),
("digamma", "ϝ"),
("disin", "⋲"),
("div", "÷"),
("divide", "÷"),
("divideontimes", "⋇"),
("divonx", "⋇"),
("djcy", "ђ"),
("dlcorn", "⌞"),
("dlcrop", "⌍"),
("dollar", "$"),
("dopf", "𝕕"),
("dot", "˙"),
("doteq", "≐"),
("doteqdot", "≑"),
("dotminus", "∸"),
("dotplus", "∔"),
("dotsquare", "⊡"),
("doublebarwedge", "⌆"),
("downarrow", "↓"),
("downdownarrows", "⇊"),
("downharpoonleft", "⇃"),
("downharpoonright", "⇂"),
("drbkarow", "⤐"),
("drcorn", "⌟"),
("drcrop", "⌌"),
("dscr", "𝒹"),
("dscy", "ѕ"),
("dsol", "⧶"),
("dstrok", "đ"),
("dtdot", "⋱"),
("dtri", "▿"),
("dtrif", "▾"),
("duarr", "⇵"),
("duhar", "⥯"),
("dwangle", "⦦"),
("dzcy", "џ"),
("dzigrarr", "⟿"),
("eDDot", "⩷"),
("eDot", "≑"),
("eacute", "é"),
("easter", "⩮"),
("ecaron", "ě"),
("ecir", "≖"),
("ecirc", "ê"),
("ecolon", "≕"),
("ecy", "э"),
("edot", "ė"),
("ee", "ⅇ"),
("efDot", "≒"),
("efr", "𝔢"),
("eg", "⪚"),
("egrave", "è"),
("egs", "⪖"),
("egsdot", "⪘"),
("el", "⪙"),
("elinters", "⏧"),
("ell", "ℓ"),
("els", "⪕"),
("elsdot", "⪗"),
("emacr", "ē"),
("empty", "∅"),
("emptyset", "∅"),
("emptyv", "∅"),
("emsp13", " "),
("emsp14", " "),
("emsp", " "),
("eng", "ŋ"),
("ensp", " "),
("eogon", "ę"),
("eopf", "𝕖"),
("epar", "⋕"),
("eparsl", "⧣"),
("eplus", "⩱"),
("epsi", "ε"),
("epsilon", "ε"),
("epsiv", "ϵ"),
("eqcirc", "≖"),
("eqcolon", "≕"),
("eqsim", "≂"),
("eqslantgtr", "⪖"),
("eqslantless", "⪕"),
("equals", "="),
("equest", "≟"),
("equiv", "≡"),
("equivDD", "⩸"),
("eqvparsl", "⧥"),
("erDot", "≓"),
("erarr", "⥱"),
("escr", "ℯ"),
("esdot", "≐"),
("esim", "≂"),
("eta", "η"),
("eth", "ð"),
("euml", "ë"),
("euro", "€"),
("excl", "!"),
("exist", "∃"),
("expectation", "ℰ"),
("exponentiale", "ⅇ"),
("fallingdotseq", "≒"),
("fcy", "ф"),
("female", "♀"),
("ffilig", "ffi"),
("fflig", "ff"),
("ffllig", "ffl"),
("ffr", "𝔣"),
("filig", "fi"),
("fjlig", "fj"),
("flat", "♭"),
("fllig", "fl"),
("fltns", "▱"),
("fnof", "ƒ"),
("fopf", "𝕗"),
("forall", "∀"),
("fork", "⋔"),
("forkv", "⫙"),
("fpartint", "⨍"),
("frac12", "½"),
("frac13", "⅓"),
("frac14", "¼"),
("frac15", "⅕"),
("frac16", "⅙"),
("frac18", "⅛"),
("frac23", "⅔"),
("frac25", "⅖"),
("frac34", "¾"),
("frac35", "⅗"),
("frac38", "⅜"),
("frac45", "⅘"),
("frac56", "⅚"),
("frac58", "⅝"),
("frac78", "⅞"),
("frasl", "⁄"),
("frown", "⌢"),
("fscr", "𝒻"),
("gE", "≧"),
("gEl", "⪌"),
("gacute", "ǵ"),
("gamma", "γ"),
("gammad", "ϝ"),
("gap", "⪆"),
("gbreve", "ğ"),
("gcirc", "ĝ"),
("gcy", "г"),
("gdot", "ġ"),
("ge", "≥"),
("gel", "⋛"),
("geq", "≥"),
("geqq", "≧"),
("geqslant", "⩾"),
("ges", "⩾"),
("gescc", "⪩"),
("gesdot", "⪀"),
("gesdoto", "⪂"),
("gesdotol", "⪄"),
("gesl", "⋛︀"),
("gesles", "⪔"),
("gfr", "𝔤"),
("gg", "≫"),
("ggg", "⋙"),
("gimel", "ℷ"),
("gjcy", "ѓ"),
("gl", "≷"),
("glE", "⪒"),
("gla", "⪥"),
("glj", "⪤"),
("gnE", "≩"),
("gnap", "⪊"),
("gnapprox", "⪊"),
("gne", "⪈"),
("gneq", "⪈"),
("gneqq", "≩"),
("gnsim", "⋧"),
("gopf", "𝕘"),
("grave", "`"),
("gscr", "ℊ"),
("gsim", "≳"),
("gsime", "⪎"),
("gsiml", "⪐"),
("gt", ">"),
("gtcc", "⪧"),
("gtcir", "⩺"),
("gtdot", "⋗"),
("gtlPar", "⦕"),
("gtquest", "⩼"),
("gtrapprox", "⪆"),
("gtrarr", "⥸"),
("gtrdot", "⋗"),
("gtreqless", "⋛"),
("gtreqqless", "⪌"),
("gtrless", "≷"),
("gtrsim", "≳"),
("gvertneqq", "≩︀"),
("gvnE", "≩︀"),
("hArr", "⇔"),
("hairsp", " "),
("half", "½"),
("hamilt", "ℋ"),
("hardcy", "ъ"),
("harr", "↔"),
("harrcir", "⥈"),
("harrw", "↭"),
("hbar", "ℏ"),
("hcirc", "ĥ"),
("hearts", "♥"),
("heartsuit", "♥"),
("hellip", "…"),
("hercon", "⊹"),
("hfr", "𝔥"),
("hksearow", "⤥"),
("hkswarow", "⤦"),
("hoarr", "⇿"),
("homtht", "∻"),
("hookleftarrow", "↩"),
("hookrightarrow", "↪"),
("hopf", "𝕙"),
("horbar", "―"),
("hscr", "𝒽"),
("hslash", "ℏ"),
("hstrok", "ħ"),
("hybull", "⁃"),
("hyphen", "‐"),
("iacute", "í"),
("ic", ""),
("icirc", "î"),
("icy", "и"),
("iecy", "е"),
("iexcl", "¡"),
("iff", "⇔"),
("ifr", "𝔦"),
("igrave", "ì"),
("ii", "ⅈ"),
("iiiint", "⨌"),
("iiint", "∭"),
("iinfin", "⧜"),
("iiota", "℩"),
("ijlig", "ij"),
("imacr", "ī"),
("image", "ℑ"),
("imagline", "ℐ"),
("imagpart", "ℑ"),
("imath", "ı"),
("imof", "⊷"),
("imped", "Ƶ"),
("in", "∈"),
("incare", "℅"),
("infin", "∞"),
("infintie", "⧝"),
("inodot", "ı"),
("int", "∫"),
("intcal", "⊺"),
("integers", "ℤ"),
("intercal", "⊺"),
("intlarhk", "⨗"),
("intprod", "⨼"),
("iocy", "ё"),
("iogon", "į"),
("iopf", "𝕚"),
("iota", "ι"),
("iprod", "⨼"),
("iquest", "¿"),
("iscr", "𝒾"),
("isin", "∈"),
("isinE", "⋹"),
("isindot", "⋵"),
("isins", "⋴"),
("isinsv", "⋳"),
("isinv", "∈"),
("it", ""),
("itilde", "ĩ"),
("iukcy", "і"),
("iuml", "ï"),
("jcirc", "ĵ"),
("jcy", "й"),
("jfr", "𝔧"),
("jmath", "ȷ"),
("jopf", "𝕛"),
("jscr", "𝒿"),
("jsercy", "ј"),
("jukcy", "є"),
("kappa", "κ"),
("kappav", "ϰ"),
("kcedil", "ķ"),
("kcy", "к"),
("kfr", "𝔨"),
("kgreen", "ĸ"),
("khcy", "х"),
("kjcy", "ќ"),
("kopf", "𝕜"),
("kscr", "𝓀"),
("lAarr", "⇚"),
("lArr", "⇐"),
("lAtail", "⤛"),
("lBarr", "⤎"),
("lE", "≦"),
("lEg", "⪋"),
("lHar", "⥢"),
("lacute", "ĺ"),
("laemptyv", "⦴"),
("lagran", "ℒ"),
("lambda", "λ"),
("lang", "⟨"),
("langd", "⦑"),
("langle", "⟨"),
("lap", "⪅"),
("laquo", "«"),
("larr", "←"),
("larrb", "⇤"),
("larrbfs", "⤟"),
("larrfs", "⤝"),
("larrhk", "↩"),
("larrlp", "↫"),
("larrpl", "⤹"),
("larrsim", "⥳"),
("larrtl", "↢"),
("lat", "⪫"),
("latail", "⤙"),
("late", "⪭"),
("lates", "⪭︀"),
("lbarr", "⤌"),
("lbbrk", "❲"),
("lbrace", "{"),
("lbrack", "["),
("lbrke", "⦋"),
("lbrksld", "⦏"),
("lbrkslu", "⦍"),
("lcaron", "ľ"),
("lcedil", "ļ"),
("lceil", "⌈"),
("lcub", "{"),
("lcy", "л"),
("ldca", "⤶"),
("ldquo", "“"),
("ldquor", "„"),
("ldrdhar", "⥧"),
("ldrushar", "⥋"),
("ldsh", "↲"),
("le", "≤"),
("leftarrow", "←"),
("leftarrowtail", "↢"),
("leftharpoondown", "↽"),
("leftharpoonup", "↼"),
("leftleftarrows", "⇇"),
("leftrightarrow", "↔"),
("leftrightarrows", "⇆"),
("leftrightharpoons", "⇋"),
("leftrightsquigarrow", "↭"),
("leftthreetimes", "⋋"),
("leg", "⋚"),
("leq", "≤"),
("leqq", "≦"),
("leqslant", "⩽"),
("les", "⩽"),
("lescc", "⪨"),
("lesdot", "⩿"),
("lesdoto", "⪁"),
("lesdotor", "⪃"),
("lesg", "⋚︀"),
("lesges", "⪓"),
("lessapprox", "⪅"),
("lessdot", "⋖"),
("lesseqgtr", "⋚"),
("lesseqqgtr", "⪋"),
("lessgtr", "≶"),
("lesssim", "≲"),
("lfisht", "⥼"),
("lfloor", "⌊"),
("lfr", "𝔩"),
("lg", "≶"),
("lgE", "⪑"),
("lhard", "↽"),
("lharu", "↼"),
("lharul", "⥪"),
("lhblk", "▄"),
("ljcy", "љ"),
("ll", "≪"),
("llarr", "⇇"),
("llcorner", "⌞"),
("llhard", "⥫"),
("lltri", "◺"),
("lmidot", "ŀ"),
("lmoust", "⎰"),
("lmoustache", "⎰"),
("lnE", "≨"),
("lnap", "⪉"),
("lnapprox", "⪉"),
("lne", "⪇"),
("lneq", "⪇"),
("lneqq", "≨"),
("lnsim", "⋦"),
("loang", "⟬"),
("loarr", "⇽"),
("lobrk", "⟦"),
("longleftarrow", "⟵"),
("longleftrightarrow", "⟷"),
("longmapsto", "⟼"),
("longrightarrow", "⟶"),
("looparrowleft", "↫"),
("looparrowright", "↬"),
("lopar", "⦅"),
("lopf", "𝕝"),
("loplus", "⨭"),
("lotimes", "⨴"),
("lowast", "∗"),
("lowbar", "_"),
("loz", "◊"),
("lozenge", "◊"),
("lozf", "⧫"),
("lpar", "("),
("lparlt", "⦓"),
("lrarr", "⇆"),
("lrcorner", "⌟"),
("lrhar", "⇋"),
("lrhard", "⥭"),
("lrm", ""),
("lrtri", "⊿"),
("lsaquo", "‹"),
("lscr", "𝓁"),
("lsh", "↰"),
("lsim", "≲"),
("lsime", "⪍"),
("lsimg", "⪏"),
("lsqb", "["),
("lsquo", "‘"),
("lsquor", "‚"),
("lstrok", "ł"),
("lt", "<"),
("ltcc", "⪦"),
("ltcir", "⩹"),
("ltdot", "⋖"),
("lthree", "⋋"),
("ltimes", "⋉"),
("ltlarr", "⥶"),
("ltquest", "⩻"),
("ltrPar", "⦖"),
("ltri", "◃"),
("ltrie", "⊴"),
("ltrif", "◂"),
("lurdshar", "⥊"),
("luruhar", "⥦"),
("lvertneqq", "≨︀"),
("lvnE", "≨︀"),
("mDDot", "∺"),
("macr", "¯"),
("male", "♂"),
("malt", "✠"),
("maltese", "✠"),
("map", "↦"),
("mapsto", "↦"),
("mapstodown", "↧"),
("mapstoleft", "↤"),
("mapstoup", "↥"),
("marker", "▮"),
("mcomma", "⨩"),
("mcy", "м"),
("mdash", "—"),
("measuredangle", "∡"),
("mfr", "𝔪"),
("mho", "℧"),
("micro", "µ"),
("mid", "∣"),
("midast", "*"),
("midcir", "⫰"),
("middot", "·"),
("minus", "−"),
("minusb", "⊟"),
("minusd", "∸"),
("minusdu", "⨪"),
("mlcp", "⫛"),
("mldr", "…"),
("mnplus", "∓"),
("models", "⊧"),
("mopf", "𝕞"),
("mp", "∓"),
("mscr", "𝓂"),
("mstpos", "∾"),
("mu", "μ"),
("multimap", "⊸"),
("mumap", "⊸"),
("nGg", "⋙̸"),
("nGt", "≫⃒"),
("nGtv", "≫̸"),
("nLeftarrow", "⇍"),
("nLeftrightarrow", "⇎"),
("nLl", "⋘̸"),
("nLt", "≪⃒"),
("nLtv", "≪̸"),
("nRightarrow", "⇏"),
("nVDash", "⊯"),
("nVdash", "⊮"),
("nabla", "∇"),
("nacute", "ń"),
("nang", "∠⃒"),
("nap", "≉"),
("napE", "⩰̸"),
("napid", "≋̸"),
("napos", "ʼn"),
("napprox", "≉"),
("natur", "♮"),
("natural", "♮"),
("naturals", "ℕ"),
("nbsp", " "),
("nbump", "≎̸"),
("nbumpe", "≏̸"),
("ncap", "⩃"),
("ncaron", "ň"),
("ncedil", "ņ"),
("ncong", "≇"),
("ncongdot", "⩭̸"),
("ncup", "⩂"),
("ncy", "н"),
("ndash", "–"),
("ne", "≠"),
("neArr", "⇗"),
("nearhk", "⤤"),
("nearr", "↗"),
("nearrow", "↗"),
("nedot", "≐̸"),
("nequiv", "≢"),
("nesear", "⤨"),
("nesim", "≂̸"),
("nexist", "∄"),
("nexists", "∄"),
("nfr", "𝔫"),
("ngE", "≧̸"),
("nge", "≱"),
("ngeq", "≱"),
("ngeqq", "≧̸"),
("ngeqslant", "⩾̸"),
("nges", "⩾̸"),
("ngsim", "≵"),
("ngt", "≯"),
("ngtr", "≯"),
("nhArr", "⇎"),
("nharr", "↮"),
("nhpar", "⫲"),
("ni", "∋"),
("nis", "⋼"),
("nisd", "⋺"),
("niv", "∋"),
("njcy", "њ"),
("nlArr", "⇍"),
("nlE", "≦̸"),
("nlarr", "↚"),
("nldr", "‥"),
("nle", "≰"),
("nleftarrow", "↚"),
("nleftrightarrow", "↮"),
("nleq", "≰"),
("nleqq", "≦̸"),
("nleqslant", "⩽̸"),
("nles", "⩽̸"),
("nless", "≮"),
("nlsim", "≴"),
("nlt", "≮"),
("nltri", "⋪"),
("nltrie", "⋬"),
("nmid", "∤"),
("nopf", "𝕟"),
("not", "¬"),
("notin", "∉"),
("notinE", "⋹̸"),
("notindot", "⋵̸"),
("notinva", "∉"),
("notinvb", "⋷"),
("notinvc", "⋶"),
("notni", "∌"),
("notniva", "∌"),
("notnivb", "⋾"),
("notnivc", "⋽"),
("npar", "∦"),
("nparallel", "∦"),
("nparsl", "⫽⃥"),
("npart", "∂̸"),
("npolint", "⨔"),
("npr", "⊀"),
("nprcue", "⋠"),
("npre", "⪯̸"),
("nprec", "⊀"),
("npreceq", "⪯̸"),
("nrArr", "⇏"),
("nrarr", "↛"),
("nrarrc", "⤳̸"),
("nrarrw", "↝̸"),
("nrightarrow", "↛"),
("nrtri", "⋫"),
("nrtrie", "⋭"),
("nsc", "⊁"),
("nsccue", "⋡"),
("nsce", "⪰̸"),
("nscr", "𝓃"),
("nshortmid", "∤"),
("nshortparallel", "∦"),
("nsim", "≁"),
("nsime", "≄"),
("nsimeq", "≄"),
("nsmid", "∤"),
("nspar", "∦"),
("nsqsube", "⋢"),
("nsqsupe", "⋣"),
("nsub", "⊄"),
("nsubE", "⫅̸"),
("nsube", "⊈"),
("nsubset", "⊂⃒"),
("nsubseteq", "⊈"),
("nsubseteqq", "⫅̸"),
("nsucc", "⊁"),
("nsucceq", "⪰̸"),
("nsup", "⊅"),
("nsupE", "⫆̸"),
("nsupe", "⊉"),
("nsupset", "⊃⃒"),
("nsupseteq", "⊉"),
("nsupseteqq", "⫆̸"),
("ntgl", "≹"),
("ntilde", "ñ"),
("ntlg", "≸"),
("ntriangleleft", "⋪"),
("ntrianglelefteq", "⋬"),
("ntriangleright", "⋫"),
("ntrianglerighteq", "⋭"),
("nu", "ν"),
("num", "#"),
("numero", "№"),
("numsp", " "),
("nvDash", "⊭"),
("nvHarr", "⤄"),
("nvap", "≍⃒"),
("nvdash", "⊬"),
("nvge", "≥⃒"),
("nvgt", ">⃒"),
("nvinfin", "⧞"),
("nvlArr", "⤂"),
("nvle", "≤⃒"),
("nvlt", "<⃒"),
("nvltrie", "⊴⃒"),
("nvrArr", "⤃"),
("nvrtrie", "⊵⃒"),
("nvsim", "∼⃒"),
("nwArr", "⇖"),
("nwarhk", "⤣"),
("nwarr", "↖"),
("nwarrow", "↖"),
("nwnear", "⤧"),
("oS", "Ⓢ"),
("oacute", "ó"),
("oast", "⊛"),
("ocir", "⊚"),
("ocirc", "ô"),
("ocy", "о"),
("odash", "⊝"),
("odblac", "ő"),
("odiv", "⨸"),
("odot", "⊙"),
("odsold", "⦼"),
("oelig", "œ"),
("ofcir", "⦿"),
("ofr", "𝔬"),
("ogon", "˛"),
("ograve", "ò"),
("ogt", "⧁"),
("ohbar", "⦵"),
("ohm", "Ω"),
("oint", "∮"),
("olarr", "↺"),
("olcir", "⦾"),
("olcross", "⦻"),
("oline", "‾"),
("olt", "⧀"),
("omacr", "ō"),
("omega", "ω"),
("omicron", "ο"),
("omid", "⦶"),
("ominus", "⊖"),
("oopf", "𝕠"),
("opar", "⦷"),
("operp", "⦹"),
("oplus", "⊕"),
("or", "∨"),
("orarr", "↻"),
("ord", "⩝"),
("order", "ℴ"),
("orderof", "ℴ"),
("ordf", "ª"),
("ordm", "º"),
("origof", "⊶"),
("oror", "⩖"),
("orslope", "⩗"),
("orv", "⩛"),
("oscr", "ℴ"),
("oslash", "ø"),
("osol", "⊘"),
("otilde", "õ"),
("otimes", "⊗"),
("otimesas", "⨶"),
("ouml", "ö"),
("ovbar", "⌽"),
("par", "∥"),
("para", "¶"),
("parallel", "∥"),
("parsim", "⫳"),
("parsl", "⫽"),
("part", "∂"),
("pcy", "п"),
("percnt", "%"),
("period", "."),
("permil", "‰"),
("perp", "⊥"),
("pertenk", "‱"),
("pfr", "𝔭"),
("phi", "φ"),
("phiv", "ϕ"),
("phmmat", "ℳ"),
("phone", "☎"),
("pi", "π"),
("pitchfork", "⋔"),
("piv", "ϖ"),
("planck", "ℏ"),
("planckh", "ℎ"),
("plankv", "ℏ"),
("plus", "+"),
("plusacir", "⨣"),
("plusb", "⊞"),
("pluscir", "⨢"),
("plusdo", "∔"),
("plusdu", "⨥"),
("pluse", "⩲"),
("plusmn", "±"),
("plussim", "⨦"),
("plustwo", "⨧"),
("pm", "±"),
("pointint", "⨕"),
("popf", "𝕡"),
("pound", "£"),
("pr", "≺"),
("prE", "⪳"),
("prap", "⪷"),
("prcue", "≼"),
("pre", "⪯"),
("prec", "≺"),
("precapprox", "⪷"),
("preccurlyeq", "≼"),
("preceq", "⪯"),
("precnapprox", "⪹"),
("precneqq", "⪵"),
("precnsim", "⋨"),
("precsim", "≾"),
("prime", "′"),
("primes", "ℙ"),
("prnE", "⪵"),
("prnap", "⪹"),
("prnsim", "⋨"),
("prod", "∏"),
("profalar", "⌮"),
("profline", "⌒"),
("profsurf", "⌓"),
("prop", "∝"),
("propto", "∝"),
("prsim", "≾"),
("prurel", "⊰"),
("pscr", "𝓅"),
("psi", "ψ"),
("puncsp", " "),
("qfr", "𝔮"),
("qint", "⨌"),
("qopf", "𝕢"),
("qprime", "⁗"),
("qscr", "𝓆"),
("quaternions", "ℍ"),
("quatint", "⨖"),
("quest", "?"),
("questeq", "≟"),
("quot", "\""),
("rAarr", "⇛"),
("rArr", "⇒"),
("rAtail", "⤜"),
("rBarr", "⤏"),
("rHar", "⥤"),
("race", "∽̱"),
("racute", "ŕ"),
("radic", "√"),
("raemptyv", "⦳"),
("rang", "⟩"),
("rangd", "⦒"),
("range", "⦥"),
("rangle", "⟩"),
("raquo", "»"),
("rarr", "→"),
("rarrap", "⥵"),
("rarrb", "⇥"),
("rarrbfs", "⤠"),
("rarrc", "⤳"),
("rarrfs", "⤞"),
("rarrhk", "↪"),
("rarrlp", "↬"),
("rarrpl", "⥅"),
("rarrsim", "⥴"),
("rarrtl", "↣"),
("rarrw", "↝"),
("ratail", "⤚"),
("ratio", "∶"),
("rationals", "ℚ"),
("rbarr", "⤍"),
("rbbrk", "❳"),
("rbrace", "}"),
("rbrack", "]"),
("rbrke", "⦌"),
("rbrksld", "⦎"),
("rbrkslu", "⦐"),
("rcaron", "ř"),
("rcedil", "ŗ"),
("rceil", "⌉"),
("rcub", "}"),
("rcy", "р"),
("rdca", "⤷"),
("rdldhar", "⥩"),
("rdquo", "”"),
("rdquor", "”"),
("rdsh", "↳"),
("real", "ℜ"),
("realine", "ℛ"),
("realpart", "ℜ"),
("reals", "ℝ"),
("rect", "▭"),
("reg", "®"),
("rfisht", "⥽"),
("rfloor", "⌋"),
("rfr", "𝔯"),
("rhard", "⇁"),
("rharu", "⇀"),
("rharul", "⥬"),
("rho", "ρ"),
("rhov", "ϱ"),
("rightarrow", "→"),
("rightarrowtail", "↣"),
("rightharpoondown", "⇁"),
("rightharpoonup", "⇀"),
("rightleftarrows", "⇄"),
("rightleftharpoons", "⇌"),
("rightrightarrows", "⇉"),
("rightsquigarrow", "↝"),
("rightthreetimes", "⋌"),
("ring", "˚"),
("risingdotseq", "≓"),
("rlarr", "⇄"),
("rlhar", "⇌"),
("rlm", ""),
("rmoust", "⎱"),
("rmoustache", "⎱"),
("rnmid", "⫮"),
("roang", "⟭"),
("roarr", "⇾"),
("robrk", "⟧"),
("ropar", "⦆"),
("ropf", "𝕣"),
("roplus", "⨮"),
("rotimes", "⨵"),
("rpar", ")"),
("rpargt", "⦔"),
("rppolint", "⨒"),
("rrarr", "⇉"),
("rsaquo", "›"),
("rscr", "𝓇"),
("rsh", "↱"),
("rsqb", "]"),
("rsquo", "’"),
("rsquor", "’"),
("rthree", "⋌"),
("rtimes", "⋊"),
("rtri", "▹"),
("rtrie", "⊵"),
("rtrif", "▸"),
("rtriltri", "⧎"),
("ruluhar", "⥨"),
("rx", "℞"),
("sacute", "ś"),
("sbquo", "‚"),
("sc", "≻"),
("scE", "⪴"),
("scap", "⪸"),
("scaron", "š"),
("sccue", "≽"),
("sce", "⪰"),
("scedil", "ş"),
("scirc", "ŝ"),
("scnE", "⪶"),
("scnap", "⪺"),
("scnsim", "⋩"),
("scpolint", "⨓"),
("scsim", "≿"),
("scy", "с"),
("sdot", "⋅"),
("sdotb", "⊡"),
("sdote", "⩦"),
("seArr", "⇘"),
("searhk", "⤥"),
("searr", "↘"),
("searrow", "↘"),
("sect", "§"),
("semi", ";"),
("seswar", "⤩"),
("setminus", "∖"),
("setmn", "∖"),
("sext", "✶"),
("sfr", "𝔰"),
("sfrown", "⌢"),
("sharp", "♯"),
("shchcy", "щ"),
("shcy", "ш"),
("shortmid", "∣"),
("shortparallel", "∥"),
("shy", "\u{AD}"),
("sigma", "σ"),
("sigmaf", "ς"),
("sigmav", "ς"),
("sim", "∼"),
("simdot", "⩪"),
("sime", "≃"),
("simeq", "≃"),
("simg", "⪞"),
("simgE", "⪠"),
("siml", "⪝"),
("simlE", "⪟"),
("simne", "≆"),
("simplus", "⨤"),
("simrarr", "⥲"),
("slarr", "←"),
("smallsetminus", "∖"),
("smashp", "⨳"),
("smeparsl", "⧤"),
("smid", "∣"),
("smile", "⌣"),
("smt", "⪪"),
("smte", "⪬"),
("smtes", "⪬︀"),
("softcy", "ь"),
("sol", "/"),
("solb", "⧄"),
("solbar", "⌿"),
("sopf", "𝕤"),
("spades", "♠"),
("spadesuit", "♠"),
("spar", "∥"),
("sqcap", "⊓"),
("sqcaps", "⊓︀"),
("sqcup", "⊔"),
("sqcups", "⊔︀"),
("sqsub", "⊏"),
("sqsube", "⊑"),
("sqsubset", "⊏"),
("sqsubseteq", "⊑"),
("sqsup", "⊐"),
("sqsupe", "⊒"),
("sqsupset", "⊐"),
("sqsupseteq", "⊒"),
("squ", "□"),
("square", "□"),
("squarf", "▪"),
("squf", "▪"),
("srarr", "→"),
("sscr", "𝓈"),
("ssetmn", "∖"),
("ssmile", "⌣"),
("sstarf", "⋆"),
("star", "☆"),
("starf", "★"),
("straightepsilon", "ϵ"),
("straightphi", "ϕ"),
("strns", "¯"),
("sub", "⊂"),
("subE", "⫅"),
("subdot", "⪽"),
("sube", "⊆"),
("subedot", "⫃"),
("submult", "⫁"),
("subnE", "⫋"),
("subne", "⊊"),
("subplus", "⪿"),
("subrarr", "⥹"),
("subset", "⊂"),
("subseteq", "⊆"),
("subseteqq", "⫅"),
("subsetneq", "⊊"),
("subsetneqq", "⫋"),
("subsim", "⫇"),
("subsub", "⫕"),
("subsup", "⫓"),
("succ", "≻"),
("succapprox", "⪸"),
("succcurlyeq", "≽"),
("succeq", "⪰"),
("succnapprox", "⪺"),
("succneqq", "⪶"),
("succnsim", "⋩"),
("succsim", "≿"),
("sum", "∑"),
("sung", "♪"),
("sup1", "¹"),
("sup2", "²"),
("sup3", "³"),
("sup", "⊃"),
("supE", "⫆"),
("supdot", "⪾"),
("supdsub", "⫘"),
("supe", "⊇"),
("supedot", "⫄"),
("suphsol", "⟉"),
("suphsub", "⫗"),
("suplarr", "⥻"),
("supmult", "⫂"),
("supnE", "⫌"),
("supne", "⊋"),
("supplus", "⫀"),
("supset", "⊃"),
("supseteq", "⊇"),
("supseteqq", "⫆"),
("supsetneq", "⊋"),
("supsetneqq", "⫌"),
("supsim", "⫈"),
("supsub", "⫔"),
("supsup", "⫖"),
("swArr", "⇙"),
("swarhk", "⤦"),
("swarr", "↙"),
("swarrow", "↙"),
("swnwar", "⤪"),
("szlig", "ß"),
("target", "⌖"),
("tau", "τ"),
("tbrk", "⎴"),
("tcaron", "ť"),
("tcedil", "ţ"),
("tcy", "т"),
("tdot", "⃛"),
("telrec", "⌕"),
("tfr", "𝔱"),
("there4", "∴"),
("therefore", "∴"),
("theta", "θ"),
("thetasym", "ϑ"),
("thetav", "ϑ"),
("thickapprox", "≈"),
("thicksim", "∼"),
("thinsp", " "),
("thkap", "≈"),
("thksim", "∼"),
("thorn", "þ"),
("tilde", "˜"),
("times", "×"),
("timesb", "⊠"),
("timesbar", "⨱"),
("timesd", "⨰"),
("tint", "∭"),
("toea", "⤨"),
("top", "⊤"),
("topbot", "⌶"),
("topcir", "⫱"),
("topf", "𝕥"),
("topfork", "⫚"),
("tosa", "⤩"),
("tprime", "‴"),
("trade", "™"),
("triangle", "▵"),
("triangledown", "▿"),
("triangleleft", "◃"),
("trianglelefteq", "⊴"),
("triangleq", "≜"),
("triangleright", "▹"),
("trianglerighteq", "⊵"),
("tridot", "◬"),
("trie", "≜"),
("triminus", "⨺"),
("triplus", "⨹"),
("trisb", "⧍"),
("tritime", "⨻"),
("trpezium", "⏢"),
("tscr", "𝓉"),
("tscy", "ц"),
("tshcy", "ћ"),
("tstrok", "ŧ"),
("twixt", "≬"),
("twoheadleftarrow", "↞"),
("twoheadrightarrow", "↠"),
("uArr", "⇑"),
("uHar", "⥣"),
("uacute", "ú"),
("uarr", "↑"),
("ubrcy", "ў"),
("ubreve", "ŭ"),
("ucirc", "û"),
("ucy", "у"),
("udarr", "⇅"),
("udblac", "ű"),
("udhar", "⥮"),
("ufisht", "⥾"),
("ufr", "𝔲"),
("ugrave", "ù"),
("uharl", "↿"),
("uharr", "↾"),
("uhblk", "▀"),
("ulcorn", "⌜"),
("ulcorner", "⌜"),
("ulcrop", "⌏"),
("ultri", "◸"),
("umacr", "ū"),
("uml", "¨"),
("uogon", "ų"),
("uopf", "𝕦"),
("uparrow", "↑"),
("updownarrow", "↕"),
("upharpoonleft", "↿"),
("upharpoonright", "↾"),
("uplus", "⊎"),
("upsi", "υ"),
("upsih", "ϒ"),
("upsilon", "υ"),
("upuparrows", "⇈"),
("urcorn", "⌝"),
("urcorner", "⌝"),
("urcrop", "⌎"),
("uring", "ů"),
("urtri", "◹"),
("uscr", "𝓊"),
("utdot", "⋰"),
("utilde", "ũ"),
("utri", "▵"),
("utrif", "▴"),
("uuarr", "⇈"),
("uuml", "ü"),
("uwangle", "⦧"),
("vArr", "⇕"),
("vBar", "⫨"),
("vBarv", "⫩"),
("vDash", "⊨"),
("vangrt", "⦜"),
("varepsilon", "ϵ"),
("varkappa", "ϰ"),
("varnothing", "∅"),
("varphi", "ϕ"),
("varpi", "ϖ"),
("varpropto", "∝"),
("varr", "↕"),
("varrho", "ϱ"),
("varsigma", "ς"),
("varsubsetneq", "⊊︀"),
("varsubsetneqq", "⫋︀"),
("varsupsetneq", "⊋︀"),
("varsupsetneqq", "⫌︀"),
("vartheta", "ϑ"),
("vartriangleleft", "⊲"),
("vartriangleright", "⊳"),
("vcy", "в"),
("vdash", "⊢"),
("vee", "∨"),
("veebar", "⊻"),
("veeeq", "≚"),
("vellip", "⋮"),
("verbar", "|"),
("vert", "|"),
("vfr", "𝔳"),
("vltri", "⊲"),
("vnsub", "⊂⃒"),
("vnsup", "⊃⃒"),
("vopf", "𝕧"),
("vprop", "∝"),
("vrtri", "⊳"),
("vscr", "𝓋"),
("vsubnE", "⫋︀"),
("vsubne", "⊊︀"),
("vsupnE", "⫌︀"),
("vsupne", "⊋︀"),
("vzigzag", "⦚"),
("wcirc", "ŵ"),
("wedbar", "⩟"),
("wedge", "∧"),
("wedgeq", "≙"),
("weierp", "℘"),
("wfr", "𝔴"),
("wopf", "𝕨"),
("wp", "℘"),
("wr", "≀"),
("wreath", "≀"),
("wscr", "𝓌"),
("xcap", "⋂"),
("xcirc", "◯"),
("xcup", "⋃"),
("xdtri", "▽"),
("xfr", "𝔵"),
("xhArr", "⟺"),
("xharr", "⟷"),
("xi", "ξ"),
("xlArr", "⟸"),
("xlarr", "⟵"),
("xmap", "⟼"),
("xnis", "⋻"),
("xodot", "⨀"),
("xopf", "𝕩"),
("xoplus", "⨁"),
("xotime", "⨂"),
("xrArr", "⟹"),
("xrarr", "⟶"),
("xscr", "𝓍"),
("xsqcup", "⨆"),
("xuplus", "⨄"),
("xutri", "△"),
("xvee", "⋁"),
("xwedge", "⋀"),
("yacute", "ý"),
("yacy", "я"),
("ycirc", "ŷ"),
("ycy", "ы"),
("yen", "¥"),
("yfr", "𝔶"),
("yicy", "ї"),
("yopf", "𝕪"),
("yscr", "𝓎"),
("yucy", "ю"),
("yuml", "ÿ"),
("zacute", "ź"),
("zcaron", "ž"),
("zcy", "з"),
("zdot", "ż"),
("zeetrf", "ℨ"),
("zeta", "ζ"),
("zfr", "𝔷"),
("zhcy", "ж"),
("zigrarr", "⇝"),
("zopf", "𝕫"),
("zscr", "𝓏"),
("zwj", ""),
("zwnj", ""),
];
// Important: please modify the lists below as rarely as possible, to keep the Git history small.
/// List of names and values that form named character references in HTML 4.
///
/// This list is normally not used in markdown, but it is used in MDX, because
/// in JSX attribute values, only the old HTML 4 character references are
/// supported.
///
/// This list is sensitive to casing.
///
/// ## References
///
/// * [*§ 1.5.2 HTML Character References* in `JSX`](https://facebook.github.io/jsx/#sec-HTMLCharacterReference)
pub const CHARACTER_REFERENCES_HTML_4: [(&str, &str); 252] = [
("AElig", "Æ"),
("Aacute", "Á"),
("Acirc", "Â"),
("Agrave", "À"),
("Alpha", "Α"),
("Aring", "Å"),
("Atilde", "Ã"),
("Auml", "Ä"),
("Beta", "Β"),
("Ccedil", "Ç"),
("Chi", "Χ"),
("Dagger", "‡"),
("Delta", "Δ"),
("ETH", "Ð"),
("Eacute", "É"),
("Ecirc", "Ê"),
("Egrave", "È"),
("Epsilon", "Ε"),
("Eta", "Η"),
("Euml", "Ë"),
("Gamma", "Γ"),
("Iacute", "Í"),
("Icirc", "Î"),
("Igrave", "Ì"),
("Iota", "Ι"),
("Iuml", "Ï"),
("Kappa", "Κ"),
("Lambda", "Λ"),
("Mu", "Μ"),
("Ntilde", "Ñ"),
("Nu", "Ν"),
("OElig", "Œ"),
("Oacute", "Ó"),
("Ocirc", "Ô"),
("Ograve", "Ò"),
("Omega", "Ω"),
("Omicron", "Ο"),
("Oslash", "Ø"),
("Otilde", "Õ"),
("Ouml", "Ö"),
("Phi", "Φ"),
("Pi", "Π"),
("Prime", "″"),
("Psi", "Ψ"),
("Rho", "Ρ"),
("Scaron", "Š"),
("Sigma", "Σ"),
("THORN", "Þ"),
("Tau", "Τ"),
("Theta", "Θ"),
("Uacute", "Ú"),
("Ucirc", "Û"),
("Ugrave", "Ù"),
("Upsilon", "Υ"),
("Uuml", "Ü"),
("Xi", "Ξ"),
("Yacute", "Ý"),
("Yuml", "Ÿ"),
("Zeta", "Ζ"),
("aacute", "á"),
("acirc", "â"),
("acute", "´"),
("aelig", "æ"),
("agrave", "à"),
("alefsym", "ℵ"),
("alpha", "α"),
("amp", "&"),
("and", "∧"),
("ang", "∠"),
("aring", "å"),
("asymp", "≈"),
("atilde", "ã"),
("auml", "ä"),
("bdquo", "„"),
("beta", "β"),
("brvbar", "¦"),
("bull", "•"),
("cap", "∩"),
("ccedil", "ç"),
("cedil", "¸"),
("cent", "¢"),
("chi", "χ"),
("circ", "ˆ"),
("clubs", "♣"),
("cong", "≅"),
("copy", "©"),
("crarr", "↵"),
("cup", "∪"),
("curren", "¤"),
("dArr", "⇓"),
("dagger", "†"),
("darr", "↓"),
("deg", "°"),
("delta", "δ"),
("diams", "♦"),
("divide", "÷"),
("eacute", "é"),
("ecirc", "ê"),
("egrave", "è"),
("empty", "∅"),
("emsp", " "),
("ensp", " "),
("epsilon", "ε"),
("equiv", "≡"),
("eta", "η"),
("eth", "ð"),
("euml", "ë"),
("euro", "€"),
("exist", "∃"),
("fnof", "ƒ"),
("forall", "∀"),
("frac12", "½"),
("frac14", "¼"),
("frac34", "¾"),
("frasl", "⁄"),
("gamma", "γ"),
("ge", "≥"),
("gt", ">"),
("hArr", "⇔"),
("harr", "↔"),
("hearts", "♥"),
("hellip", "…"),
("iacute", "í"),
("icirc", "î"),
("iexcl", "¡"),
("igrave", "ì"),
("image", "ℑ"),
("infin", "∞"),
("int", "∫"),
("iota", "ι"),
("iquest", "¿"),
("isin", "∈"),
("iuml", "ï"),
("kappa", "κ"),
("lArr", "⇐"),
("lambda", "λ"),
("lang", "〈"),
("laquo", "«"),
("larr", "←"),
("lceil", "⌈"),
("ldquo", "“"),
("le", "≤"),
("lfloor", "⌊"),
("lowast", "∗"),
("loz", "◊"),
("lrm", ""),
("lsaquo", "‹"),
("lsquo", "‘"),
("lt", "<"),
("macr", "¯"),
("mdash", "—"),
("micro", "µ"),
("middot", "·"),
("minus", "−"),
("mu", "μ"),
("nabla", "∇"),
("nbsp", " "),
("ndash", "–"),
("ne", "≠"),
("ni", "∋"),
("not", "¬"),
("notin", "∉"),
("nsub", "⊄"),
("ntilde", "ñ"),
("nu", "ν"),
("oacute", "ó"),
("ocirc", "ô"),
("oelig", "œ"),
("ograve", "ò"),
("oline", "‾"),
("omega", "ω"),
("omicron", "ο"),
("oplus", "⊕"),
("or", "∨"),
("ordf", "ª"),
("ordm", "º"),
("oslash", "ø"),
("otilde", "õ"),
("otimes", "⊗"),
("ouml", "ö"),
("para", "¶"),
("part", "∂"),
("permil", "‰"),
("perp", "⊥"),
("phi", "φ"),
("pi", "π"),
("piv", "ϖ"),
("plusmn", "±"),
("pound", "£"),
("prime", "′"),
("prod", "∏"),
("prop", "∝"),
("psi", "ψ"),
("quot", "\""),
("rArr", "⇒"),
("radic", "√"),
("rang", "〉"),
("raquo", "»"),
("rarr", "→"),
("rceil", "⌉"),
("rdquo", "”"),
("real", "ℜ"),
("reg", "®"),
("rfloor", "⌋"),
("rho", "ρ"),
("rlm", ""),
("rsaquo", "›"),
("rsquo", "’"),
("sbquo", "‚"),
("scaron", "š"),
("sdot", "⋅"),
("sect", "§"),
("shy", "\u{AD}"),
("sigma", "σ"),
("sigmaf", "ς"),
("sim", "∼"),
("spades", "♠"),
("sub", "⊂"),
("sube", "⊆"),
("sum", "∑"),
("sup", "⊃"),
("sup1", "¹"),
("sup2", "²"),
("sup3", "³"),
("supe", "⊇"),
("szlig", "ß"),
("tau", "τ"),
("there4", "∴"),
("theta", "θ"),
("thetasym", "ϑ"),
("thinsp", " "),
("thorn", "þ"),
("tilde", "˜"),
("times", "×"),
("trade", "™"),
("uArr", "⇑"),
("uacute", "ú"),
("uarr", "↑"),
("ucirc", "û"),
("ugrave", "ù"),
("uml", "¨"),
("upsih", "ϒ"),
("upsilon", "υ"),
("uuml", "ü"),
("weierp", "℘"),
("xi", "ξ"),
("yacute", "ý"),
("yen", "¥"),
("yuml", "ÿ"),
("zeta", "ζ"),
("zwj", ""),
("zwnj", ""),
];
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::format;
    /// Assert that the hand-maintained `*_SIZE_MAX` constants stay in sync
    /// with the values they summarize.
    #[test]
    fn constants() {
        // Longest decimal character reference: the max code point (U+10FFFF)
        // written in base 10.
        assert_eq!(
            CHARACTER_REFERENCE_DECIMAL_SIZE_MAX,
            format!("{}", 0x0010_ffff).len(),
            "`CHARACTER_REFERENCE_DECIMAL_SIZE_MAX`"
        );
        // Longest hexadecimal character reference: U+10FFFF in base 16.
        assert_eq!(
            CHARACTER_REFERENCE_HEXADECIMAL_SIZE_MAX,
            format!("{:x}", 0x0010_ffff).len(),
            "`CHARACTER_REFERENCE_HEXADECIMAL_SIZE_MAX`"
        );
        // Longest name in the named character reference list.
        assert_eq!(
            CHARACTER_REFERENCE_NAMED_SIZE_MAX,
            longest(&CHARACTER_REFERENCES.map(|d| d.0)).unwrap().len(),
            "`CHARACTER_REFERENCE_NAMED_SIZE_MAX`"
        );
        // Longest tag name handled by the GFM tagfilter.
        assert_eq!(
            GFM_HTML_TAGFILTER_SIZE_MAX,
            longest(&GFM_HTML_TAGFILTER_NAMES).unwrap().len(),
            "`GFM_HTML_TAGFILTER_SIZE_MAX`"
        );
        // Longest name in `HTML_RAW_NAMES`.
        assert_eq!(
            HTML_RAW_SIZE_MAX,
            longest(&HTML_RAW_NAMES).unwrap().len(),
            "`HTML_RAW_SIZE_MAX`"
        );
    }
    /// Find the longest string in `list` (first one wins on ties), or `None`
    /// when `list` is empty.
    fn longest<'a>(list: &[&'a str]) -> Option<&'a str> {
        let mut max = 0;
        let mut result = None;
        for name in list {
            let len = name.len();
            if len > max {
                max = len;
                result = Some(*name);
            }
        }
        result
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/edit_map.rs | Rust | //! Deal with several changes in events, batching them together.
//!
//! Preferably, changes should be kept to a minimum.
//! Sometimes, it’s needed to change the list of events, because parsing can be
//! messy, and it helps to expose a cleaner interface of events to the compiler
//! and other users.
//! It can also help to merge many adjacent similar events.
//! And, in other cases, it’s needed to parse subcontent: pass some events
//! through another tokenizer and inject the result.
use crate::event::Event;
use alloc::{vec, vec::Vec};
/// Shift `previous` and `next` links according to `jumps`.
///
/// This fixes links in case there are events removed or added between them.
///
/// Each jump is `(index, remove_acc, add_acc)`: from `index` onwards,
/// `remove_acc` events have been removed and `add_acc` added in total (the
/// accumulators are built in increasing `index` order by `EditMap::consume`).
fn shift_links(events: &mut [Event], jumps: &[(usize, usize, usize)]) {
    let mut jump_index = 0;
    let mut index = 0;
    // Cumulative additions/removals applying at the current `index`.
    let mut add = 0;
    let mut rm = 0;
    while index < events.len() {
        let rm_curr = rm;
        // Advance past all jumps that apply at or before `index`.
        while jump_index < jumps.len() && jumps[jump_index].0 <= index {
            add = jumps[jump_index].2;
            rm = jumps[jump_index].1;
            jump_index += 1;
        }
        // Ignore items that will be removed.
        if rm > rm_curr {
            index += rm - rm_curr;
        } else {
            if let Some(link) = &events[index].link {
                if let Some(next) = link.next {
                    // Repoint the linked-to event back at this one’s
                    // post-edit position.
                    events[next].link.as_mut().unwrap().previous = Some(index + add - rm);
                    // Catch the accumulators up to `next` before shifting it.
                    while jump_index < jumps.len() && jumps[jump_index].0 <= next {
                        add = jumps[jump_index].2;
                        rm = jumps[jump_index].1;
                        jump_index += 1;
                    }
                    events[index].link.as_mut().unwrap().next = Some(next + add - rm);
                    // Follow the chain directly instead of scanning.
                    index = next;
                    continue;
                }
            }
            index += 1;
        }
    }
}
/// Tracks a bunch of edits.
#[derive(Debug)]
pub struct EditMap {
    /// Record of changes.
    ///
    /// Each entry is `(index, remove, add)`: at `index` in the event list,
    /// remove `remove` events and insert the events in `add`.
    map: Vec<(usize, usize, Vec<Event>)>,
}
impl EditMap {
    /// Create a new edit map.
    pub fn new() -> EditMap {
        EditMap { map: vec![] }
    }
    /// Create an edit: a remove and/or add at a certain place.
    pub fn add(&mut self, index: usize, remove: usize, add: Vec<Event>) {
        add_impl(self, index, remove, add, false);
    }
    /// Create an edit: but insert `add` before existing additions.
    pub fn add_before(&mut self, index: usize, remove: usize, add: Vec<Event>) {
        add_impl(self, index, remove, add, true);
    }
    /// Done, change the events.
    ///
    /// Applies all recorded edits to `events` in one pass and clears the map.
    pub fn consume(&mut self, events: &mut Vec<Event>) {
        // Apply edits in position order.
        self.map
            .sort_unstable_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        if self.map.is_empty() {
            return;
        }
        // Calculate jumps: where items in the current list move to.
        let mut jumps = Vec::with_capacity(self.map.len());
        let mut index = 0;
        let mut add_acc = 0;
        let mut remove_acc = 0;
        while index < self.map.len() {
            let (at, remove, add) = &self.map[index];
            remove_acc += remove;
            add_acc += add.len();
            // `(position, cumulative removals, cumulative additions)`.
            jumps.push((*at, remove_acc, add_acc));
            index += 1;
        }
        shift_links(events, &jumps);
        let len_before = events.len();
        let mut index = self.map.len();
        let mut vecs = Vec::with_capacity(index * 2 + 1);
        // Splice: walking from the last edit to the first, split off the tail
        // after each edit’s removed span and stash the replacement events,
        // then glue everything back together in order below.
        while index > 0 {
            index -= 1;
            vecs.push(events.split_off(self.map[index].0 + self.map[index].1));
            vecs.push(self.map[index].2.split_off(0));
            events.truncate(self.map[index].0);
        }
        vecs.push(events.split_off(0));
        // Reserve the final size up front to avoid repeated growth.
        events.reserve(len_before + add_acc - remove_acc);
        while let Some(mut slice) = vecs.pop() {
            events.append(&mut slice);
        }
        self.map.truncate(0);
    }
}
/// Create an edit.
///
/// When an edit already exists for `at`, its removal count is increased and
/// the additions are appended after (or, with `before: true`, inserted ahead
/// of) the existing additions instead of recording a second entry.
fn add_impl(edit_map: &mut EditMap, at: usize, remove: usize, mut add: Vec<Event>, before: bool) {
    // Nothing to do.
    if remove == 0 && add.is_empty() {
        return;
    }
    // Merge into an existing edit at the same point, if there is one.
    if let Some(entry) = edit_map.map.iter_mut().find(|entry| entry.0 == at) {
        entry.1 += remove;
        if before {
            add.append(&mut entry.2);
            entry.2 = add;
        } else {
            entry.2.append(&mut add);
        }
    } else {
        edit_map.map.push((at, remove, add));
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/encode.rs | Rust | //! Encode HTML.
use alloc::string::String;
/// Encode dangerous html characters.
///
/// This ensures that certain characters which have special meaning in HTML are
/// dealt with.
/// Technically, we can skip `>` and `"` in many cases, but `CommonMark`
/// includes them.
///
/// This behavior is not explained in prose in `CommonMark` but can be inferred
/// from the input/output test cases.
///
/// ## Examples
///
/// ```rust ignore
/// use markdown::util::encode;
///
/// assert_eq!(encode("I <3 🦀", true), "I &lt;3 🦀");
/// ```
///
/// ## References
///
/// * [`micromark-util-encode` in `micromark`](https://github.com/micromark/micromark/tree/main/packages/micromark-util-encode)
pub fn encode(value: &str, encode_html: bool) -> String {
    // It’ll grow a bit bigger for each dangerous character.
    let mut result = String::with_capacity(value.len());
    let bytes = value.as_bytes();
    let mut index = 0;
    let mut start = 0;
    while index < bytes.len() {
        let byte = bytes[index];
        // NUL is always replaced; the HTML specials only when `encode_html`.
        if matches!(byte, b'\0') || (encode_html && matches!(byte, b'&' | b'"' | b'<' | b'>')) {
            // Flush the safe run before this byte, then push its replacement.
            result.push_str(&value[start..index]);
            result.push_str(match byte {
                // U+FFFD REPLACEMENT CHARACTER.
                b'\0' => "�",
                b'&' => "&amp;",
                b'"' => "&quot;",
                b'<' => "&lt;",
                // `b'>'`
                _ => "&gt;",
            });
            start = index + 1;
        }
        index += 1;
    }
    result.push_str(&value[start..]);
    result
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/gfm_tagfilter.rs | Rust | //! Make dangerous HTML a tiny bit safer.
use crate::util::constant::{GFM_HTML_TAGFILTER_NAMES, GFM_HTML_TAGFILTER_SIZE_MAX};
use alloc::string::String;
use core::str;
extern crate std;
/// Make dangerous HTML a tiny bit safer.
///
/// The tagfilter is kinda weird and kinda useless.
/// The tag filter is a naïve attempt at XSS protection.
/// You should use a proper HTML sanitizing algorithm.
///
/// ## Examples
///
/// ```rust ignore
/// use markdown::util::gfm_tagfilter::gfm_tagfilter;
///
/// assert_eq!(gfm_tagfilter("<iframe>"), "&lt;iframe>");
/// ```
///
/// ## References
///
/// * [*§ 6.1 Disallowed Raw HTML (extension)* in GFM](https://github.github.com/gfm/#disallowed-raw-html-extension-)
/// * [`cmark-gfm#extensions/tagfilter.c`](https://github.com/github/cmark-gfm/blob/master/extensions/tagfilter.c)
pub fn gfm_tagfilter(value: &str) -> String {
    let bytes = value.as_bytes();
    // It’ll grow a bit bigger for each encoded `<`.
    let mut result = String::with_capacity(bytes.len());
    let mut index = 0;
    let mut start = 0;
    let len = bytes.len();
    while index < len {
        if bytes[index] == b'<' {
            let mut name_start = index + 1;
            // Optional `/`.
            if name_start < len && bytes[name_start] == b'/' {
                name_start += 1;
            }
            // Tag name.
            let mut name_end = name_start;
            while name_end < len
                && name_end - name_start < GFM_HTML_TAGFILTER_SIZE_MAX
                && bytes[name_end].is_ascii_alphabetic()
            {
                name_end += 1;
            }
            // Non-empty.
            if (name_end == len || (name_end != name_start &&
                // HTML whitespace, closing slash, or closing angle bracket.
                matches!(bytes[name_end], b'\t' | b'\n' | 12 /* `\f` */ | b'\r' | b' ' | b'/' | b'>'))) &&
                // Known name.
                GFM_HTML_TAGFILTER_NAMES.contains(&str::from_utf8(&bytes[name_start..name_end])
                .unwrap()
                .to_ascii_lowercase().as_str())
            {
                // Neutralize the tag by escaping its opening `<`.
                result.push_str(&value[start..index]);
                result.push_str("&lt;");
                start = index + 1;
            }
            // There was no `<` before `name_end`, so move to that next.
            index = name_end;
            continue;
        }
        index += 1;
    }
    result.push_str(&value[start..]);
    result
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/identifier.rs | Rust | //! Info on JavaScript identifiers.
use unicode_id::UnicodeID;
/// Check if a character can start a JS identifier.
///
/// Unicode `ID_Start` plus `$` and `_`, matching ECMAScript’s
/// *IdentifierStart*.
#[must_use]
pub fn id_start(char: char) -> bool {
    UnicodeID::is_id_start(char) || matches!(char, '$' | '_')
}
/// Check if a character can continue a JS (or JSX) identifier.
///
/// Unicode `ID_Continue` plus ZWNJ (U+200C) and ZWJ (U+200D); with
/// `jsx: true`, dashes (`-`) are also allowed.
#[must_use]
pub fn id_cont(char: char, jsx: bool) -> bool {
    UnicodeID::is_id_continue(char)
        || matches!(char, '\u{200c}' | '\u{200d}')
        || (jsx && char == '-')
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/infer.rs | Rust | //! Infer things from events.
//!
//! Used to share between `to_html` and `to_mdast`.
use crate::event::{Event, Kind, Name};
use crate::mdast::AlignKind;
use alloc::{vec, vec::Vec};
/// Figure out if a list is spread or not.
///
/// When `include_items: true` is passed, infers whether the list as a whole
/// is “loose”.
///
/// `index` points at the enter of a `ListOrdered`/`ListUnordered` event
/// (checked by the debug assertion below).
pub fn list_loose(events: &[Event], mut index: usize, include_items: bool) -> bool {
    // Depth relative to the list itself: 1 = inside the list,
    // 2 = inside an item.
    let mut balance = 0;
    let name = &events[index].name;
    debug_assert!(
        matches!(name, Name::ListOrdered | Name::ListUnordered),
        "expected list"
    );
    while index < events.len() {
        let event = &events[index];
        if event.kind == Kind::Enter {
            balance += 1;
            // A loose item makes the whole list loose.
            if include_items
                && balance == 2
                && event.name == Name::ListItem
                && list_item_loose(events, index)
            {
                return true;
            }
        } else {
            balance -= 1;
            // A blank line between items (depth 1) makes the list loose,
            // unless it is only part of a prefix (cases below).
            if balance == 1 && event.name == Name::BlankLineEnding {
                // Blank line directly after item, which is just a prefix.
                //
                // ```markdown
                // > | -␊
                //        ^
                //   | - a
                // ```
                let mut at_empty_list_item = false;
                // Blank line at block quote prefix:
                //
                // ```markdown
                // > | * >␊
                //         ^
                //   | * a
                // ```
                let mut at_empty_block_quote = false;
                // List.
                let mut before = index - 2;
                if events[before].name == Name::ListItem {
                    before -= 1;
                    if events[before].name == Name::SpaceOrTab {
                        before -= 2;
                    }
                    if events[before].name == Name::BlockQuote
                        && events[before - 1].name == Name::BlockQuotePrefix
                    {
                        at_empty_block_quote = true;
                    } else if events[before].name == Name::ListItemPrefix {
                        at_empty_list_item = true;
                    }
                }
                if !at_empty_list_item && !at_empty_block_quote {
                    return true;
                }
            }
            // Done.
            if balance == 0 && event.name == *name {
                break;
            }
        }
        index += 1;
    }
    false
}
/// Figure out if an item is spread or not.
///
/// `index` points at the enter of a `ListItem` event (checked by the debug
/// assertion below; see the caller in `list_loose`).
pub fn list_item_loose(events: &[Event], mut index: usize) -> bool {
    debug_assert!(
        matches!(events[index].name, Name::ListItem),
        "expected list item"
    );
    // Depth relative to the item itself: 1 = directly inside the item.
    let mut balance = 0;
    while index < events.len() {
        let event = &events[index];
        if event.kind == Kind::Enter {
            balance += 1;
        } else {
            balance -= 1;
            // A blank line directly inside the item makes it loose, unless
            // it is only part of the item’s prefix (case below).
            if balance == 1 && event.name == Name::BlankLineEnding {
                // Blank line directly after a prefix:
                //
                // ```markdown
                // > | -␊
                //        ^
                //   | a
                // ```
                let mut at_prefix = false;
                // List item.
                let mut before = index - 2;
                if events[before].name == Name::SpaceOrTab {
                    before -= 2;
                }
                if events[before].name == Name::ListItemPrefix {
                    at_prefix = true;
                }
                if !at_prefix {
                    return true;
                }
            }
            // Done.
            if balance == 0 && event.name == Name::ListItem {
                break;
            }
        }
        index += 1;
    }
    false
}
/// Figure out the alignment of a GFM table.
///
/// `index` points at a `GfmTable` event (checked by the debug assertion
/// below).
/// Returns one `AlignKind` per cell in the table’s delimiter row.
pub fn gfm_table_align(events: &[Event], mut index: usize) -> Vec<AlignKind> {
    debug_assert!(
        matches!(events[index].name, Name::GfmTable),
        "expected table"
    );
    let mut in_delimiter_row = false;
    let mut align = vec![];
    while index < events.len() {
        let event = &events[index];
        if in_delimiter_row {
            if event.kind == Kind::Enter {
                // Start of alignment value: set a new column.
                // A leading `:` marker means at least left-aligned.
                if event.name == Name::GfmTableDelimiterCellValue {
                    align.push(if events[index + 1].name == Name::GfmTableDelimiterMarker {
                        AlignKind::Left
                    } else {
                        AlignKind::None
                    });
                }
            } else {
                // End of alignment value: change the column.
                // A trailing `:` upgrades left → center, otherwise → right.
                if event.name == Name::GfmTableDelimiterCellValue {
                    if events[index - 1].name == Name::GfmTableDelimiterMarker {
                        let align_index = align.len() - 1;
                        align[align_index] = if align[align_index] == AlignKind::Left {
                            AlignKind::Center
                        } else {
                            AlignKind::Right
                        }
                    }
                }
                // Done!
                else if event.name == Name::GfmTableDelimiterRow {
                    break;
                }
            }
        } else if event.kind == Kind::Enter && event.name == Name::GfmTableDelimiterRow {
            in_delimiter_row = true;
        }
        index += 1;
    }
    align
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus | |
src/util/line_ending.rs | Rust | use alloc::{str::FromStr, string::String};
/// Type of line endings in markdown.
///
/// Particularly when working with Windows, you might want to use
/// `LineEnding::CarriageReturnLineFeed`.
///
/// ## Examples
///
/// ```
/// use markdown::LineEnding;
/// # fn main() {
///
/// // Use a CR + LF combination:
/// let crlf = LineEnding::CarriageReturnLineFeed;
/// # }
/// ```
#[derive(Clone, Debug, Default, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum LineEnding {
    /// Both a carriage return (`\r`) and a line feed (`\n`).
    ///
    /// ## Example
    ///
    /// ```markdown
    /// a␍␊
    /// b
    /// ```
    #[cfg_attr(feature = "serde", serde(rename = "\r\n"))]
    CarriageReturnLineFeed,
    /// Sole carriage return (`\r`).
    ///
    /// ## Example
    ///
    /// ```markdown
    /// a␍
    /// b
    /// ```
    #[cfg_attr(feature = "serde", serde(rename = "\r"))]
    CarriageReturn,
    /// Sole line feed (`\n`).
    ///
    /// ## Example
    ///
    /// ```markdown
    /// a␊
    /// b
    /// ```
    #[default]
    #[cfg_attr(feature = "serde", serde(rename = "\n"))]
    LineFeed,
}
impl LineEnding {
    /// Turn the line ending into a [str].
    #[must_use]
    pub fn as_str(&self) -> &str {
        match self {
            LineEnding::CarriageReturnLineFeed => "\r\n",
            LineEnding::CarriageReturn => "\r",
            LineEnding::LineFeed => "\n",
        }
    }
}
impl FromStr for LineEnding {
    type Err = String;
    /// Turn a string into a line ending.
    ///
    /// ## Errors
    ///
    /// Returns an error if `s` is not `\r\n`, `\r`, or `\n`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "\r\n" => Ok(LineEnding::CarriageReturnLineFeed),
            "\r" => Ok(LineEnding::CarriageReturn),
            "\n" => Ok(LineEnding::LineFeed),
            _ => Err("Expected CR, LF, or CRLF".into()),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Round-trip every supported line ending through `FromStr`/`as_str`,
    /// and check that other input is rejected.
    #[test]
    fn test_line_ending() {
        assert_eq!(
            "\r".parse(),
            Ok(LineEnding::CarriageReturn),
            "should support turning a string into a carriage return"
        );
        assert_eq!(
            LineEnding::CarriageReturn.as_str(),
            "\r",
            "should support turning a carriage return into a string"
        );
        assert_eq!(
            "\n".parse(),
            Ok(LineEnding::LineFeed),
            "should support turning a string into a line feed"
        );
        assert_eq!(
            LineEnding::LineFeed.as_str(),
            "\n",
            "should support turning a line feed into a string"
        );
        assert_eq!(
            "\r\n".parse(),
            Ok(LineEnding::CarriageReturnLineFeed),
            "should support turning a string into a carriage return + line feed"
        );
        assert_eq!(
            LineEnding::CarriageReturnLineFeed.as_str(),
            "\r\n",
            "should support turning a carriage return + line feed into a string"
        );
        assert_eq!(
            "aaa".parse::<LineEnding>(),
            Err("Expected CR, LF, or CRLF".into()),
            "should error when parsing a non-eol"
        );
    }
}
| wooorm/markdown-rs | 1,459 | CommonMark compliant markdown parser in Rust with ASTs and extensions | Rust | wooorm | Titus |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.